problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_21404
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-6578
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fatal 'Failed to upgrade database' error on startup
As of Synapse 1.7.0, when I start synapse with an old database version, I get this rather cryptic error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `synapse/storage/engines/sqlite.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2015, 2016 OpenMarket Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import struct
17 import threading
18
19 from synapse.storage.prepare_database import prepare_database
20
21
22 class Sqlite3Engine(object):
23 single_threaded = True
24
25 def __init__(self, database_module, database_config):
26 self.module = database_module
27
28 # The current max state_group, or None if we haven't looked
29 # in the DB yet.
30 self._current_state_group_id = None
31 self._current_state_group_id_lock = threading.Lock()
32
33 @property
34 def can_native_upsert(self):
35 """
36 Do we support native UPSERTs? This requires SQLite3 3.24+, plus some
37 more work we haven't done yet to tell what was inserted vs updated.
38 """
39 return self.module.sqlite_version_info >= (3, 24, 0)
40
41 @property
42 def supports_tuple_comparison(self):
43 """
44 Do we support comparing tuples, i.e. `(a, b) > (c, d)`? This requires
45 SQLite 3.15+.
46 """
47 return self.module.sqlite_version_info >= (3, 15, 0)
48
49 @property
50 def supports_using_any_list(self):
51 """Do we support using `a = ANY(?)` and passing a list
52 """
53 return False
54
55 def check_database(self, txn):
56 pass
57
58 def convert_param_style(self, sql):
59 return sql
60
61 def on_new_connection(self, db_conn):
62 prepare_database(db_conn, self, config=None)
63 db_conn.create_function("rank", 1, _rank)
64
65 def is_deadlock(self, error):
66 return False
67
68 def is_connection_closed(self, conn):
69 return False
70
71 def lock_table(self, txn, table):
72 return
73
74 def get_next_state_group_id(self, txn):
75 """Returns an int that can be used as a new state_group ID
76 """
77 # We do application locking here since if we're using sqlite then
78 # we are a single process synapse.
79 with self._current_state_group_id_lock:
80 if self._current_state_group_id is None:
81 txn.execute("SELECT COALESCE(max(id), 0) FROM state_groups")
82 self._current_state_group_id = txn.fetchone()[0]
83
84 self._current_state_group_id += 1
85 return self._current_state_group_id
86
87 @property
88 def server_version(self):
89 """Gets a string giving the server version. For example: '3.22.0'
90
91 Returns:
92 string
93 """
94 return "%i.%i.%i" % self.module.sqlite_version_info
95
96
97 # Following functions taken from: https://github.com/coleifer/peewee
98
99
100 def _parse_match_info(buf):
101 bufsize = len(buf)
102 return [struct.unpack("@I", buf[i : i + 4])[0] for i in range(0, bufsize, 4)]
103
104
105 def _rank(raw_match_info):
106 """Handle match_info called w/default args 'pcx' - based on the example rank
107 function http://sqlite.org/fts3.html#appendix_a
108 """
109 match_info = _parse_match_info(raw_match_info)
110 score = 0.0
111 p, c = match_info[:2]
112 for phrase_num in range(p):
113 phrase_info_idx = 2 + (phrase_num * c * 3)
114 for col_num in range(c):
115 col_idx = phrase_info_idx + (col_num * 3)
116 x1, x2 = match_info[col_idx : col_idx + 2]
117 if x1 > 0:
118 score += float(x1) / x2
119 return score
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py
--- a/synapse/storage/engines/sqlite.py
+++ b/synapse/storage/engines/sqlite.py
@@ -25,6 +25,9 @@
def __init__(self, database_module, database_config):
self.module = database_module
+ database = database_config.get("args", {}).get("database")
+ self._is_in_memory = database in (None, ":memory:",)
+
# The current max state_group, or None if we haven't looked
# in the DB yet.
self._current_state_group_id = None
@@ -59,7 +62,12 @@
return sql
def on_new_connection(self, db_conn):
- prepare_database(db_conn, self, config=None)
+ if self._is_in_memory:
+ # In memory databases need to be rebuilt each time. Ideally we'd
+ # reuse the same connection as we do when starting up, but that
+ # would involve using adbapi before we have started the reactor.
+ prepare_database(db_conn, self, config=None)
+
db_conn.create_function("rank", 1, _rank)
def is_deadlock(self, error):
|
{"golden_diff": "diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py\n--- a/synapse/storage/engines/sqlite.py\n+++ b/synapse/storage/engines/sqlite.py\n@@ -25,6 +25,9 @@\n def __init__(self, database_module, database_config):\n self.module = database_module\n \n+ database = database_config.get(\"args\", {}).get(\"database\")\n+ self._is_in_memory = database in (None, \":memory:\",)\n+\n # The current max state_group, or None if we haven't looked\n # in the DB yet.\n self._current_state_group_id = None\n@@ -59,7 +62,12 @@\n return sql\n \n def on_new_connection(self, db_conn):\n- prepare_database(db_conn, self, config=None)\n+ if self._is_in_memory:\n+ # In memory databases need to be rebuilt each time. Ideally we'd\n+ # reuse the same connection as we do when starting up, but that\n+ # would involve using adbapi before we have started the reactor.\n+ prepare_database(db_conn, self, config=None)\n+\n db_conn.create_function(\"rank\", 1, _rank)\n \n def is_deadlock(self, error):\n", "issue": "Fatal 'Failed to upgrade database' error on startup\nAs of Synapse 1.7.0, when I start synapse with an old database version, I get this rather cryptic error.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015, 2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport struct\nimport threading\n\nfrom synapse.storage.prepare_database import prepare_database\n\n\nclass Sqlite3Engine(object):\n single_threaded = True\n\n def __init__(self, database_module, database_config):\n self.module = database_module\n\n # The current max state_group, or None if we haven't looked\n # in the DB yet.\n self._current_state_group_id = None\n self._current_state_group_id_lock = threading.Lock()\n\n @property\n def can_native_upsert(self):\n \"\"\"\n Do we support native UPSERTs? This requires SQLite3 3.24+, plus some\n more work we haven't done yet to tell what was inserted vs updated.\n \"\"\"\n return self.module.sqlite_version_info >= (3, 24, 0)\n\n @property\n def supports_tuple_comparison(self):\n \"\"\"\n Do we support comparing tuples, i.e. `(a, b) > (c, d)`? 
This requires\n SQLite 3.15+.\n \"\"\"\n return self.module.sqlite_version_info >= (3, 15, 0)\n\n @property\n def supports_using_any_list(self):\n \"\"\"Do we support using `a = ANY(?)` and passing a list\n \"\"\"\n return False\n\n def check_database(self, txn):\n pass\n\n def convert_param_style(self, sql):\n return sql\n\n def on_new_connection(self, db_conn):\n prepare_database(db_conn, self, config=None)\n db_conn.create_function(\"rank\", 1, _rank)\n\n def is_deadlock(self, error):\n return False\n\n def is_connection_closed(self, conn):\n return False\n\n def lock_table(self, txn, table):\n return\n\n def get_next_state_group_id(self, txn):\n \"\"\"Returns an int that can be used as a new state_group ID\n \"\"\"\n # We do application locking here since if we're using sqlite then\n # we are a single process synapse.\n with self._current_state_group_id_lock:\n if self._current_state_group_id is None:\n txn.execute(\"SELECT COALESCE(max(id), 0) FROM state_groups\")\n self._current_state_group_id = txn.fetchone()[0]\n\n self._current_state_group_id += 1\n return self._current_state_group_id\n\n @property\n def server_version(self):\n \"\"\"Gets a string giving the server version. For example: '3.22.0'\n\n Returns:\n string\n \"\"\"\n return \"%i.%i.%i\" % self.module.sqlite_version_info\n\n\n# Following functions taken from: https://github.com/coleifer/peewee\n\n\ndef _parse_match_info(buf):\n bufsize = len(buf)\n return [struct.unpack(\"@I\", buf[i : i + 4])[0] for i in range(0, bufsize, 4)]\n\n\ndef _rank(raw_match_info):\n \"\"\"Handle match_info called w/default args 'pcx' - based on the example rank\n function http://sqlite.org/fts3.html#appendix_a\n \"\"\"\n match_info = _parse_match_info(raw_match_info)\n score = 0.0\n p, c = match_info[:2]\n for phrase_num in range(p):\n phrase_info_idx = 2 + (phrase_num * c * 3)\n for col_num in range(c):\n col_idx = phrase_info_idx + (col_num * 3)\n x1, x2 = match_info[col_idx : col_idx + 2]\n if x1 > 0:\n score += float(x1) / x2\n return score\n", "path": "synapse/storage/engines/sqlite.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015, 2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport struct\nimport threading\n\nfrom synapse.storage.prepare_database import prepare_database\n\n\nclass Sqlite3Engine(object):\n single_threaded = True\n\n def __init__(self, database_module, database_config):\n self.module = database_module\n\n database = database_config.get(\"args\", {}).get(\"database\")\n self._is_in_memory = database in (None, \":memory:\",)\n\n # The current max state_group, or None if we haven't looked\n # in the DB yet.\n self._current_state_group_id = None\n self._current_state_group_id_lock = threading.Lock()\n\n @property\n def can_native_upsert(self):\n \"\"\"\n Do we support native UPSERTs? 
This requires SQLite3 3.24+, plus some\n more work we haven't done yet to tell what was inserted vs updated.\n \"\"\"\n return self.module.sqlite_version_info >= (3, 24, 0)\n\n @property\n def supports_tuple_comparison(self):\n \"\"\"\n Do we support comparing tuples, i.e. `(a, b) > (c, d)`? This requires\n SQLite 3.15+.\n \"\"\"\n return self.module.sqlite_version_info >= (3, 15, 0)\n\n @property\n def supports_using_any_list(self):\n \"\"\"Do we support using `a = ANY(?)` and passing a list\n \"\"\"\n return False\n\n def check_database(self, txn):\n pass\n\n def convert_param_style(self, sql):\n return sql\n\n def on_new_connection(self, db_conn):\n if self._is_in_memory:\n # In memory databases need to be rebuilt each time. Ideally we'd\n # reuse the same connection as we do when starting up, but that\n # would involve using adbapi before we have started the reactor.\n prepare_database(db_conn, self, config=None)\n\n db_conn.create_function(\"rank\", 1, _rank)\n\n def is_deadlock(self, error):\n return False\n\n def is_connection_closed(self, conn):\n return False\n\n def lock_table(self, txn, table):\n return\n\n def get_next_state_group_id(self, txn):\n \"\"\"Returns an int that can be used as a new state_group ID\n \"\"\"\n # We do application locking here since if we're using sqlite then\n # we are a single process synapse.\n with self._current_state_group_id_lock:\n if self._current_state_group_id is None:\n txn.execute(\"SELECT COALESCE(max(id), 0) FROM state_groups\")\n self._current_state_group_id = txn.fetchone()[0]\n\n self._current_state_group_id += 1\n return self._current_state_group_id\n\n @property\n def server_version(self):\n \"\"\"Gets a string giving the server version. For example: '3.22.0'\n\n Returns:\n string\n \"\"\"\n return \"%i.%i.%i\" % self.module.sqlite_version_info\n\n\n# Following functions taken from: https://github.com/coleifer/peewee\n\n\ndef _parse_match_info(buf):\n bufsize = len(buf)\n return [struct.unpack(\"@I\", buf[i : i + 4])[0] for i in range(0, bufsize, 4)]\n\n\ndef _rank(raw_match_info):\n \"\"\"Handle match_info called w/default args 'pcx' - based on the example rank\n function http://sqlite.org/fts3.html#appendix_a\n \"\"\"\n match_info = _parse_match_info(raw_match_info)\n score = 0.0\n p, c = match_info[:2]\n for phrase_num in range(p):\n phrase_info_idx = 2 + (phrase_num * c * 3)\n for col_num in range(c):\n col_idx = phrase_info_idx + (col_num * 3)\n x1, x2 = match_info[col_idx : col_idx + 2]\n if x1 > 0:\n score += float(x1) / x2\n return score\n", "path": "synapse/storage/engines/sqlite.py"}]}
| 1,499 | 284 |
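Each row in this dump follows the same layout: `prompt` carries the issue text plus the candidate source files, `golden_diff` carries the reference patch, and `verification_info` is a JSON string bundling that diff with `before_files`/`after_files` entries (each a dict with `content` and `path`). A minimal sketch of pulling a row and decoding that cell is below; the Hub dataset ID is inferred from the `source` column and the `train` split name is an assumption.

```python
# Sketch only: the dataset ID is inferred from the "source" column above and the
# split name "train" is an assumption, not something this page confirms.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")
row = ds[0]

info = json.loads(row["verification_info"])  # JSON string, as shown in the cell above
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
print(sorted(info))                          # before_files, after_files, golden_diff, issue
for f in info["before_files"]:
    print(f["path"], len(f["content"]))      # file path and size of the pre-patch source
```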
gh_patches_debug_39466
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-1877
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RedTube DL don't work any more
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
_No response_
### Description
only downloaded a small file (893 btyes) which is obviously not the real video file...
sample url:
https://www.redtube.com/39016781
### Verbose log
```shell
Download Log for url: https://www.redtube.com/39016781
Waiting
[RedTube] 39016781: Downloading webpage
[RedTube] 39016781: Downloading m3u8 information
[info] 39016781: Downloading 1 format(s): 1
[download] Destination: C:\Users\Sony\xxx\Youtube-Dl\Cute Asian is fucked & creampied!-39016781.mp4
[download] 893.00B at 436.07KiB/s (00:02)
[download] 100% of 893.00B in 00:02
Download Worker Finished.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/redtube.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..utils import (
7 determine_ext,
8 ExtractorError,
9 int_or_none,
10 merge_dicts,
11 str_to_int,
12 unified_strdate,
13 url_or_none,
14 )
15
16
17 class RedTubeIE(InfoExtractor):
18 _VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
19 _TESTS = [{
20 'url': 'http://www.redtube.com/66418',
21 'md5': 'fc08071233725f26b8f014dba9590005',
22 'info_dict': {
23 'id': '66418',
24 'ext': 'mp4',
25 'title': 'Sucked on a toilet',
26 'upload_date': '20110811',
27 'duration': 596,
28 'view_count': int,
29 'age_limit': 18,
30 }
31 }, {
32 'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
33 'only_matching': True,
34 }, {
35 'url': 'http://it.redtube.com/66418',
36 'only_matching': True,
37 }]
38
39 @staticmethod
40 def _extract_urls(webpage):
41 return re.findall(
42 r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//embed\.redtube\.com/\?.*?\bid=\d+)',
43 webpage)
44
45 def _real_extract(self, url):
46 video_id = self._match_id(url)
47 webpage = self._download_webpage(
48 'http://www.redtube.com/%s' % video_id, video_id)
49
50 ERRORS = (
51 (('video-deleted-info', '>This video has been removed'), 'has been removed'),
52 (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),
53 )
54
55 for patterns, message in ERRORS:
56 if any(p in webpage for p in patterns):
57 raise ExtractorError(
58 'Video %s %s' % (video_id, message), expected=True)
59
60 info = self._search_json_ld(webpage, video_id, default={})
61
62 if not info.get('title'):
63 info['title'] = self._html_search_regex(
64 (r'<h(\d)[^>]+class="(?:video_title_text|videoTitle|video_title)[^"]*">(?P<title>(?:(?!\1).)+)</h\1>',
65 r'(?:videoTitle|title)\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',),
66 webpage, 'title', group='title',
67 default=None) or self._og_search_title(webpage)
68
69 formats = []
70 sources = self._parse_json(
71 self._search_regex(
72 r'sources\s*:\s*({.+?})', webpage, 'source', default='{}'),
73 video_id, fatal=False)
74 if sources and isinstance(sources, dict):
75 for format_id, format_url in sources.items():
76 if format_url:
77 formats.append({
78 'url': format_url,
79 'format_id': format_id,
80 'height': int_or_none(format_id),
81 })
82 medias = self._parse_json(
83 self._search_regex(
84 r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage,
85 'media definitions', default='{}'),
86 video_id, fatal=False)
87 if medias and isinstance(medias, list):
88 for media in medias:
89 format_url = url_or_none(media.get('videoUrl'))
90 if not format_url:
91 continue
92 if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':
93 formats.extend(self._extract_m3u8_formats(
94 format_url, video_id, 'mp4',
95 entry_protocol='m3u8_native', m3u8_id='hls',
96 fatal=False))
97 continue
98 format_id = media.get('quality')
99 formats.append({
100 'url': format_url,
101 'ext': 'mp4',
102 'format_id': format_id,
103 'height': int_or_none(format_id),
104 })
105 if not formats:
106 video_url = self._html_search_regex(
107 r'<source src="(.+?)" type="video/mp4">', webpage, 'video URL')
108 formats.append({'url': video_url, 'ext': 'mp4'})
109 self._sort_formats(formats)
110
111 thumbnail = self._og_search_thumbnail(webpage)
112 upload_date = unified_strdate(self._search_regex(
113 r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',
114 webpage, 'upload date', default=None))
115 duration = int_or_none(self._og_search_property(
116 'video:duration', webpage, default=None) or self._search_regex(
117 r'videoDuration\s*:\s*(\d+)', webpage, 'duration', default=None))
118 view_count = str_to_int(self._search_regex(
119 (r'<div[^>]*>Views</div>\s*<div[^>]*>\s*([\d,.]+)',
120 r'<span[^>]*>VIEWS</span>\s*</td>\s*<td>\s*([\d,.]+)',
121 r'<span[^>]+\bclass=["\']video_view_count[^>]*>\s*([\d,.]+)'),
122 webpage, 'view count', default=None))
123
124 # No self-labeling, but they describe themselves as
125 # "Home of Videos Porno"
126 age_limit = 18
127
128 return merge_dicts(info, {
129 'id': video_id,
130 'ext': 'mp4',
131 'thumbnail': thumbnail,
132 'upload_date': upload_date,
133 'duration': duration,
134 'view_count': view_count,
135 'age_limit': age_limit,
136 'formats': formats,
137 })
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yt_dlp/extractor/redtube.py b/yt_dlp/extractor/redtube.py
--- a/yt_dlp/extractor/redtube.py
+++ b/yt_dlp/extractor/redtube.py
@@ -17,17 +17,20 @@
class RedTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:\w+\.)?redtube\.com/|embed\.redtube\.com/\?.*?\bid=)(?P<id>[0-9]+)'
_TESTS = [{
- 'url': 'http://www.redtube.com/66418',
- 'md5': 'fc08071233725f26b8f014dba9590005',
+ 'url': 'https://www.redtube.com/38864951',
+ 'md5': '4fba70cbca3aefd25767ab4b523c9878',
'info_dict': {
- 'id': '66418',
+ 'id': '38864951',
'ext': 'mp4',
- 'title': 'Sucked on a toilet',
- 'upload_date': '20110811',
- 'duration': 596,
+ 'title': 'Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu',
+ 'description': 'Watch video Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu on Redtube, home of free Blowjob porn videos and Blonde sex movies online. Video length: (10:46) - Uploaded by leolulu - Verified User - Starring Pornstar: Leolulu',
+ 'upload_date': '20210111',
+ 'timestamp': 1610343109,
+ 'duration': 646,
'view_count': int,
'age_limit': 18,
- }
+ 'thumbnail': r're:https://\wi-ph\.rdtcdn\.com/videos/.+/.+\.jpg',
+ },
}, {
'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',
'only_matching': True,
@@ -84,15 +87,25 @@
r'mediaDefinition["\']?\s*:\s*(\[.+?}\s*\])', webpage,
'media definitions', default='{}'),
video_id, fatal=False)
- if medias and isinstance(medias, list):
- for media in medias:
+ for media in medias if isinstance(medias, list) else []:
+ format_url = url_or_none(media.get('videoUrl'))
+ if not format_url:
+ continue
+ format_id = media.get('format')
+ quality = media.get('quality')
+ if format_id == 'hls' or (format_id == 'mp4' and not quality):
+ more_media = self._download_json(format_url, video_id, fatal=False)
+ else:
+ more_media = [media]
+ for media in more_media if isinstance(more_media, list) else []:
format_url = url_or_none(media.get('videoUrl'))
if not format_url:
continue
- if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':
+ format_id = media.get('format')
+ if format_id == 'hls' or determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4',
- entry_protocol='m3u8_native', m3u8_id='hls',
+ entry_protocol='m3u8_native', m3u8_id=format_id or 'hls',
fatal=False))
continue
format_id = media.get('quality')
|
{"golden_diff": "diff --git a/yt_dlp/extractor/redtube.py b/yt_dlp/extractor/redtube.py\n--- a/yt_dlp/extractor/redtube.py\n+++ b/yt_dlp/extractor/redtube.py\n@@ -17,17 +17,20 @@\n class RedTubeIE(InfoExtractor):\n _VALID_URL = r'https?://(?:(?:\\w+\\.)?redtube\\.com/|embed\\.redtube\\.com/\\?.*?\\bid=)(?P<id>[0-9]+)'\n _TESTS = [{\n- 'url': 'http://www.redtube.com/66418',\n- 'md5': 'fc08071233725f26b8f014dba9590005',\n+ 'url': 'https://www.redtube.com/38864951',\n+ 'md5': '4fba70cbca3aefd25767ab4b523c9878',\n 'info_dict': {\n- 'id': '66418',\n+ 'id': '38864951',\n 'ext': 'mp4',\n- 'title': 'Sucked on a toilet',\n- 'upload_date': '20110811',\n- 'duration': 596,\n+ 'title': 'Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu',\n+ 'description': 'Watch video Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu on Redtube, home of free Blowjob porn videos and Blonde sex movies online. Video length: (10:46) - Uploaded by leolulu - Verified User - Starring Pornstar: Leolulu',\n+ 'upload_date': '20210111',\n+ 'timestamp': 1610343109,\n+ 'duration': 646,\n 'view_count': int,\n 'age_limit': 18,\n- }\n+ 'thumbnail': r're:https://\\wi-ph\\.rdtcdn\\.com/videos/.+/.+\\.jpg',\n+ },\n }, {\n 'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',\n 'only_matching': True,\n@@ -84,15 +87,25 @@\n r'mediaDefinition[\"\\']?\\s*:\\s*(\\[.+?}\\s*\\])', webpage,\n 'media definitions', default='{}'),\n video_id, fatal=False)\n- if medias and isinstance(medias, list):\n- for media in medias:\n+ for media in medias if isinstance(medias, list) else []:\n+ format_url = url_or_none(media.get('videoUrl'))\n+ if not format_url:\n+ continue\n+ format_id = media.get('format')\n+ quality = media.get('quality')\n+ if format_id == 'hls' or (format_id == 'mp4' and not quality):\n+ more_media = self._download_json(format_url, video_id, fatal=False)\n+ else:\n+ more_media = [media]\n+ for media in more_media if isinstance(more_media, list) else []:\n format_url = url_or_none(media.get('videoUrl'))\n if not format_url:\n continue\n- if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':\n+ format_id = media.get('format')\n+ if format_id == 'hls' or determine_ext(format_url) == 'm3u8':\n formats.extend(self._extract_m3u8_formats(\n format_url, video_id, 'mp4',\n- entry_protocol='m3u8_native', m3u8_id='hls',\n+ entry_protocol='m3u8_native', m3u8_id=format_id or 'hls',\n fatal=False))\n continue\n format_id = media.get('quality')\n", "issue": "RedTube DL don't work any more\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2021.11.10.1**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. 
DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\n_No response_\n\n### Description\n\nonly downloaded a small file (893 btyes) which is obviously not the real video file...\r\nsample url:\r\nhttps://www.redtube.com/39016781\n\n### Verbose log\n\n```shell\nDownload Log for url: https://www.redtube.com/39016781\r\n\r\nWaiting\r\n[RedTube] 39016781: Downloading webpage\r\n[RedTube] 39016781: Downloading m3u8 information\r\n[info] 39016781: Downloading 1 format(s): 1\r\n[download] Destination: C:\\Users\\Sony\\xxx\\Youtube-Dl\\Cute Asian is fucked & creampied!-39016781.mp4\r\n[download] 893.00B at 436.07KiB/s (00:02)\r\n[download] 100% of 893.00B in 00:02\r\nDownload Worker Finished.\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n int_or_none,\n merge_dicts,\n str_to_int,\n unified_strdate,\n url_or_none,\n)\n\n\nclass RedTubeIE(InfoExtractor):\n _VALID_URL = r'https?://(?:(?:\\w+\\.)?redtube\\.com/|embed\\.redtube\\.com/\\?.*?\\bid=)(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'http://www.redtube.com/66418',\n 'md5': 'fc08071233725f26b8f014dba9590005',\n 'info_dict': {\n 'id': '66418',\n 'ext': 'mp4',\n 'title': 'Sucked on a toilet',\n 'upload_date': '20110811',\n 'duration': 596,\n 'view_count': int,\n 'age_limit': 18,\n }\n }, {\n 'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',\n 'only_matching': True,\n }, {\n 'url': 'http://it.redtube.com/66418',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _extract_urls(webpage):\n return re.findall(\n r'<iframe[^>]+?src=[\"\\'](?P<url>(?:https?:)?//embed\\.redtube\\.com/\\?.*?\\bid=\\d+)',\n webpage)\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(\n 'http://www.redtube.com/%s' % video_id, video_id)\n\n ERRORS = (\n (('video-deleted-info', '>This video has been removed'), 'has been removed'),\n (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),\n )\n\n for patterns, message in ERRORS:\n if any(p in webpage for p in patterns):\n raise ExtractorError(\n 'Video %s %s' % (video_id, message), expected=True)\n\n info = self._search_json_ld(webpage, video_id, default={})\n\n if not info.get('title'):\n info['title'] = self._html_search_regex(\n (r'<h(\\d)[^>]+class=\"(?:video_title_text|videoTitle|video_title)[^\"]*\">(?P<title>(?:(?!\\1).)+)</h\\1>',\n r'(?:videoTitle|title)\\s*:\\s*([\"\\'])(?P<title>(?:(?!\\1).)+)\\1',),\n webpage, 'title', group='title',\n default=None) or self._og_search_title(webpage)\n\n formats = []\n sources = self._parse_json(\n self._search_regex(\n r'sources\\s*:\\s*({.+?})', webpage, 'source', default='{}'),\n video_id, fatal=False)\n if sources and isinstance(sources, dict):\n for format_id, format_url in sources.items():\n if format_url:\n formats.append({\n 'url': format_url,\n 'format_id': format_id,\n 'height': int_or_none(format_id),\n })\n medias = self._parse_json(\n self._search_regex(\n r'mediaDefinition[\"\\']?\\s*:\\s*(\\[.+?}\\s*\\])', webpage,\n 'media definitions', default='{}'),\n video_id, fatal=False)\n 
if medias and isinstance(medias, list):\n for media in medias:\n format_url = url_or_none(media.get('videoUrl'))\n if not format_url:\n continue\n if media.get('format') == 'hls' or determine_ext(format_url) == 'm3u8':\n formats.extend(self._extract_m3u8_formats(\n format_url, video_id, 'mp4',\n entry_protocol='m3u8_native', m3u8_id='hls',\n fatal=False))\n continue\n format_id = media.get('quality')\n formats.append({\n 'url': format_url,\n 'ext': 'mp4',\n 'format_id': format_id,\n 'height': int_or_none(format_id),\n })\n if not formats:\n video_url = self._html_search_regex(\n r'<source src=\"(.+?)\" type=\"video/mp4\">', webpage, 'video URL')\n formats.append({'url': video_url, 'ext': 'mp4'})\n self._sort_formats(formats)\n\n thumbnail = self._og_search_thumbnail(webpage)\n upload_date = unified_strdate(self._search_regex(\n r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',\n webpage, 'upload date', default=None))\n duration = int_or_none(self._og_search_property(\n 'video:duration', webpage, default=None) or self._search_regex(\n r'videoDuration\\s*:\\s*(\\d+)', webpage, 'duration', default=None))\n view_count = str_to_int(self._search_regex(\n (r'<div[^>]*>Views</div>\\s*<div[^>]*>\\s*([\\d,.]+)',\n r'<span[^>]*>VIEWS</span>\\s*</td>\\s*<td>\\s*([\\d,.]+)',\n r'<span[^>]+\\bclass=[\"\\']video_view_count[^>]*>\\s*([\\d,.]+)'),\n webpage, 'view count', default=None))\n\n # No self-labeling, but they describe themselves as\n # \"Home of Videos Porno\"\n age_limit = 18\n\n return merge_dicts(info, {\n 'id': video_id,\n 'ext': 'mp4',\n 'thumbnail': thumbnail,\n 'upload_date': upload_date,\n 'duration': duration,\n 'view_count': view_count,\n 'age_limit': age_limit,\n 'formats': formats,\n })\n", "path": "yt_dlp/extractor/redtube.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n int_or_none,\n merge_dicts,\n str_to_int,\n unified_strdate,\n url_or_none,\n)\n\n\nclass RedTubeIE(InfoExtractor):\n _VALID_URL = r'https?://(?:(?:\\w+\\.)?redtube\\.com/|embed\\.redtube\\.com/\\?.*?\\bid=)(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'https://www.redtube.com/38864951',\n 'md5': '4fba70cbca3aefd25767ab4b523c9878',\n 'info_dict': {\n 'id': '38864951',\n 'ext': 'mp4',\n 'title': 'Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu',\n 'description': 'Watch video Public Sex on the Balcony in Freezing Paris! Amateur Couple LeoLulu on Redtube, home of free Blowjob porn videos and Blonde sex movies online. 
Video length: (10:46) - Uploaded by leolulu - Verified User - Starring Pornstar: Leolulu',\n 'upload_date': '20210111',\n 'timestamp': 1610343109,\n 'duration': 646,\n 'view_count': int,\n 'age_limit': 18,\n 'thumbnail': r're:https://\\wi-ph\\.rdtcdn\\.com/videos/.+/.+\\.jpg',\n },\n }, {\n 'url': 'http://embed.redtube.com/?bgcolor=000000&id=1443286',\n 'only_matching': True,\n }, {\n 'url': 'http://it.redtube.com/66418',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _extract_urls(webpage):\n return re.findall(\n r'<iframe[^>]+?src=[\"\\'](?P<url>(?:https?:)?//embed\\.redtube\\.com/\\?.*?\\bid=\\d+)',\n webpage)\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(\n 'http://www.redtube.com/%s' % video_id, video_id)\n\n ERRORS = (\n (('video-deleted-info', '>This video has been removed'), 'has been removed'),\n (('private_video_text', '>This video is private', '>Send a friend request to its owner to be able to view it'), 'is private'),\n )\n\n for patterns, message in ERRORS:\n if any(p in webpage for p in patterns):\n raise ExtractorError(\n 'Video %s %s' % (video_id, message), expected=True)\n\n info = self._search_json_ld(webpage, video_id, default={})\n\n if not info.get('title'):\n info['title'] = self._html_search_regex(\n (r'<h(\\d)[^>]+class=\"(?:video_title_text|videoTitle|video_title)[^\"]*\">(?P<title>(?:(?!\\1).)+)</h\\1>',\n r'(?:videoTitle|title)\\s*:\\s*([\"\\'])(?P<title>(?:(?!\\1).)+)\\1',),\n webpage, 'title', group='title',\n default=None) or self._og_search_title(webpage)\n\n formats = []\n sources = self._parse_json(\n self._search_regex(\n r'sources\\s*:\\s*({.+?})', webpage, 'source', default='{}'),\n video_id, fatal=False)\n if sources and isinstance(sources, dict):\n for format_id, format_url in sources.items():\n if format_url:\n formats.append({\n 'url': format_url,\n 'format_id': format_id,\n 'height': int_or_none(format_id),\n })\n medias = self._parse_json(\n self._search_regex(\n r'mediaDefinition[\"\\']?\\s*:\\s*(\\[.+?}\\s*\\])', webpage,\n 'media definitions', default='{}'),\n video_id, fatal=False)\n for media in medias if isinstance(medias, list) else []:\n format_url = url_or_none(media.get('videoUrl'))\n if not format_url:\n continue\n format_id = media.get('format')\n quality = media.get('quality')\n if format_id == 'hls' or (format_id == 'mp4' and not quality):\n more_media = self._download_json(format_url, video_id, fatal=False)\n else:\n more_media = [media]\n for media in more_media if isinstance(more_media, list) else []:\n format_url = url_or_none(media.get('videoUrl'))\n if not format_url:\n continue\n format_id = media.get('format')\n if format_id == 'hls' or determine_ext(format_url) == 'm3u8':\n formats.extend(self._extract_m3u8_formats(\n format_url, video_id, 'mp4',\n entry_protocol='m3u8_native', m3u8_id=format_id or 'hls',\n fatal=False))\n continue\n format_id = media.get('quality')\n formats.append({\n 'url': format_url,\n 'ext': 'mp4',\n 'format_id': format_id,\n 'height': int_or_none(format_id),\n })\n if not formats:\n video_url = self._html_search_regex(\n r'<source src=\"(.+?)\" type=\"video/mp4\">', webpage, 'video URL')\n formats.append({'url': video_url, 'ext': 'mp4'})\n self._sort_formats(formats)\n\n thumbnail = self._og_search_thumbnail(webpage)\n upload_date = unified_strdate(self._search_regex(\n r'<span[^>]+>(?:ADDED|Published on) ([^<]+)<',\n webpage, 'upload date', default=None))\n duration = int_or_none(self._og_search_property(\n 'video:duration', webpage, 
default=None) or self._search_regex(\n r'videoDuration\\s*:\\s*(\\d+)', webpage, 'duration', default=None))\n view_count = str_to_int(self._search_regex(\n (r'<div[^>]*>Views</div>\\s*<div[^>]*>\\s*([\\d,.]+)',\n r'<span[^>]*>VIEWS</span>\\s*</td>\\s*<td>\\s*([\\d,.]+)',\n r'<span[^>]+\\bclass=[\"\\']video_view_count[^>]*>\\s*([\\d,.]+)'),\n webpage, 'view count', default=None))\n\n # No self-labeling, but they describe themselves as\n # \"Home of Videos Porno\"\n age_limit = 18\n\n return merge_dicts(info, {\n 'id': video_id,\n 'ext': 'mp4',\n 'thumbnail': thumbnail,\n 'upload_date': upload_date,\n 'duration': duration,\n 'view_count': view_count,\n 'age_limit': age_limit,\n 'formats': formats,\n })\n", "path": "yt_dlp/extractor/redtube.py"}]}
| 2,477 | 910 |
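The RedTube patch in this row boils down to one idea: entries in the page's `mediaDefinition` list whose `format` is `hls` (or `mp4` with no `quality`) are not direct stream URLs but pointers to a second JSON document listing the real media, so they have to be expanded before formats are built. A simplified, extractor-independent sketch of that expansion follows; `fetch_json` is a hypothetical stand-in for the HTTP download and JSON parse step.

```python
# Simplified sketch of the media-definition expansion performed by the patch above.
# `fetch_json` is a hypothetical callable standing in for downloading a JSON URL.
from typing import Callable, Dict, List


def expand_media_definitions(
    medias: List[Dict], fetch_json: Callable[[str], object]
) -> List[Dict]:
    expanded = []
    for media in medias if isinstance(medias, list) else []:
        url = media.get("videoUrl")
        if not url:
            continue
        fmt, quality = media.get("format"), media.get("quality")
        if fmt == "hls" or (fmt == "mp4" and not quality):
            # Indirect entry: its URL returns another list of media dicts.
            more = fetch_json(url)
        else:
            more = [media]
        for m in more if isinstance(more, list) else []:
            if m.get("videoUrl"):
                expanded.append(m)
    return expanded
```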
gh_patches_debug_19514
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-5684
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Growl not registering
2018-11-10 08:21:42 INFO CHECKVERSION :: [0c0a735] Checking for updates using GIT
**What you did: Input ip:port to register gowl
**What happened: Nothing!
**What you expected: Successful registration.
**Logs:**
2018-11-10 08:22:04 WARNING Thread_1 :: [0c0a735] GROWL: Unable to send growl to 192.168.1.4:23053 - u"error b'encode() takes exactly 1 argument (2 given)'"
2018-11-10 08:22:04 WARNING Thread_1 :: [0c0a735] GROWL: Unable to send growl to 192.168.1.4:23053 - u"error b'encode() takes exactly 1 argument (2 given)'"
Same IP:port work perfectly in rage/chill.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/notifiers/growl.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import print_function
4 from __future__ import unicode_literals
5
6 import logging
7 import socket
8 from builtins import object
9
10 import gntp.core
11
12 from medusa import app, common
13 from medusa.helper.exceptions import ex
14 from medusa.logger.adapters.style import BraceAdapter
15
16 log = BraceAdapter(logging.getLogger(__name__))
17 log.logger.addHandler(logging.NullHandler())
18
19
20 class Notifier(object):
21 def test_notify(self, host, password):
22 self._sendRegistration(host, password)
23 return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,
24 force=True)
25
26 def notify_snatch(self, title, message):
27 if app.GROWL_NOTIFY_ONSNATCH:
28 self._sendGrowl(title, message)
29
30 def notify_download(self, ep_obj):
31 if app.GROWL_NOTIFY_ONDOWNLOAD:
32 self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_obj.pretty_name_with_quality())
33
34 def notify_subtitle_download(self, ep_obj, lang):
35 if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:
36 self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_obj.pretty_name() + ': ' + lang)
37
38 def notify_git_update(self, new_version='??'):
39 update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
40 title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
41 self._sendGrowl(title, update_text + new_version)
42
43 def notify_login(self, ipaddress=''):
44 update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
45 title = common.notifyStrings[common.NOTIFY_LOGIN]
46 self._sendGrowl(title, update_text.format(ipaddress))
47
48 def _send_growl(self, options, message=None):
49
50 # Initialize Notification
51 notice = gntp.core.GNTPNotice(
52 app=options['app'],
53 name=options['name'],
54 title=options['title'],
55 password=options['password'],
56 )
57
58 # Optional
59 if options['sticky']:
60 notice.add_header('Notification-Sticky', options['sticky'])
61 if options['priority']:
62 notice.add_header('Notification-Priority', options['priority'])
63 if options['icon']:
64 notice.add_header('Notification-Icon', app.LOGO_URL)
65
66 if message:
67 notice.add_header('Notification-Text', message)
68
69 response = self._send(options['host'], options['port'], notice.encode('utf-8'), options['debug'])
70 return True if isinstance(response, gntp.core.GNTPOK) else False
71
72 @staticmethod
73 def _send(host, port, data, debug=False):
74 if debug:
75 print('<Sending>\n', data, '\n</Sending>')
76
77 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
78 s.connect((host, port))
79 s.send(data)
80 response = gntp.core.parse_gntp(s.recv(1024))
81 s.close()
82
83 if debug:
84 print('<Received>\n', response, '\n</Received>')
85
86 return response
87
88 def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,
89 force=False):
90 if not app.USE_GROWL and not force:
91 return False
92
93 if name is None:
94 name = title
95
96 if host is None:
97 hostParts = app.GROWL_HOST.split(':')
98 else:
99 hostParts = host.split(':')
100
101 if len(hostParts) != 2 or hostParts[1] == '':
102 port = 23053
103 else:
104 port = int(hostParts[1])
105
106 growlHosts = [(hostParts[0], port)]
107
108 opts = {
109 'name': name,
110 'title': title,
111 'app': 'Medusa',
112 'sticky': None,
113 'priority': None,
114 'debug': False
115 }
116
117 if password is None:
118 opts['password'] = app.GROWL_PASSWORD
119 else:
120 opts['password'] = password
121
122 opts['icon'] = True
123
124 for pc in growlHosts:
125 opts['host'] = pc[0]
126 opts['port'] = pc[1]
127 log.debug(
128 u'GROWL: Sending growl to {host}:{port} - {msg!r}',
129 {'msg': message, 'host': opts['host'], 'port': opts['port']}
130 )
131 try:
132 if self._send_growl(opts, message):
133 return True
134 else:
135 if self._sendRegistration(host, password):
136 return self._send_growl(opts, message)
137 else:
138 return False
139 except Exception as error:
140 log.warning(
141 u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',
142 {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}
143 )
144 return False
145
146 def _sendRegistration(self, host=None, password=None):
147 opts = {}
148
149 if host is None:
150 hostParts = app.GROWL_HOST.split(':')
151 else:
152 hostParts = host.split(':')
153
154 if len(hostParts) != 2 or hostParts[1] == '':
155 port = 23053
156 else:
157 port = int(hostParts[1])
158
159 opts['host'] = hostParts[0]
160 opts['port'] = port
161
162 if password is None:
163 opts['password'] = app.GROWL_PASSWORD
164 else:
165 opts['password'] = password
166
167 opts['app'] = 'Medusa'
168 opts['debug'] = False
169
170 # Send Registration
171 register = gntp.core.GNTPRegister()
172 register.add_header('Application-Name', opts['app'])
173 register.add_header('Application-Icon', app.LOGO_URL)
174
175 register.add_notification('Test', True)
176 register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)
177 register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)
178 register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)
179
180 if opts['password']:
181 register.set_password(opts['password'])
182
183 try:
184 return self._send(opts['host'], opts['port'], register.encode('utf-8'), opts['debug'])
185 except Exception as error:
186 log.warning(
187 u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',
188 {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}
189 )
190 return False
191
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py
--- a/medusa/notifiers/growl.py
+++ b/medusa/notifiers/growl.py
@@ -66,7 +66,7 @@
if message:
notice.add_header('Notification-Text', message)
- response = self._send(options['host'], options['port'], notice.encode('utf-8'), options['debug'])
+ response = self._send(options['host'], options['port'], notice.encode(), options['debug'])
return True if isinstance(response, gntp.core.GNTPOK) else False
@staticmethod
@@ -181,7 +181,7 @@
register.set_password(opts['password'])
try:
- return self._send(opts['host'], opts['port'], register.encode('utf-8'), opts['debug'])
+ return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])
except Exception as error:
log.warning(
u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',
|
{"golden_diff": "diff --git a/medusa/notifiers/growl.py b/medusa/notifiers/growl.py\n--- a/medusa/notifiers/growl.py\n+++ b/medusa/notifiers/growl.py\n@@ -66,7 +66,7 @@\n if message:\n notice.add_header('Notification-Text', message)\n \n- response = self._send(options['host'], options['port'], notice.encode('utf-8'), options['debug'])\n+ response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.core.GNTPOK) else False\n \n @staticmethod\n@@ -181,7 +181,7 @@\n register.set_password(opts['password'])\n \n try:\n- return self._send(opts['host'], opts['port'], register.encode('utf-8'), opts['debug'])\n+ return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n", "issue": "Growl not registering\n2018-11-10 08:21:42 INFO CHECKVERSION :: [0c0a735] Checking for updates using GIT\r\n\r\n**What you did: Input ip:port to register gowl\r\n**What happened: Nothing!\r\n**What you expected: Successful registration.\r\n\r\n**Logs:**\r\n2018-11-10 08:22:04 WARNING Thread_1 :: [0c0a735] GROWL: Unable to send growl to 192.168.1.4:23053 - u\"error b'encode() takes exactly 1 argument (2 given)'\"\r\n2018-11-10 08:22:04 WARNING Thread_1 :: [0c0a735] GROWL: Unable to send growl to 192.168.1.4:23053 - u\"error b'encode() takes exactly 1 argument (2 given)'\"\r\n\r\nSame IP:port work perfectly in rage/chill.\r\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp.core\n\nfrom medusa import app, common\nfrom medusa.helper.exceptions import ex\nfrom medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, title, message):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(title, message)\n\n def notify_download(self, ep_obj):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_obj.pretty_name_with_quality())\n\n def notify_subtitle_download(self, ep_obj, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_obj.pretty_name() + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Initialize Notification\n notice = gntp.core.GNTPNotice(\n app=options['app'],\n name=options['name'],\n title=options['title'],\n password=options['password'],\n )\n\n # Optional\n if options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n 
notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode('utf-8'), options['debug'])\n return True if isinstance(response, gntp.core.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - {msg!r}',\n {'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode('utf-8'), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport socket\nfrom builtins import object\n\nimport gntp.core\n\nfrom medusa import app, common\nfrom medusa.helper.exceptions import ex\nfrom 
medusa.logger.adapters.style import BraceAdapter\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass Notifier(object):\n def test_notify(self, host, password):\n self._sendRegistration(host, password)\n return self._sendGrowl('Test Growl', 'Testing Growl settings from Medusa', 'Test', host, password,\n force=True)\n\n def notify_snatch(self, title, message):\n if app.GROWL_NOTIFY_ONSNATCH:\n self._sendGrowl(title, message)\n\n def notify_download(self, ep_obj):\n if app.GROWL_NOTIFY_ONDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_DOWNLOAD], ep_obj.pretty_name_with_quality())\n\n def notify_subtitle_download(self, ep_obj, lang):\n if app.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD:\n self._sendGrowl(common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD], ep_obj.pretty_name() + ': ' + lang)\n\n def notify_git_update(self, new_version='??'):\n update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]\n title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]\n self._sendGrowl(title, update_text + new_version)\n\n def notify_login(self, ipaddress=''):\n update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]\n title = common.notifyStrings[common.NOTIFY_LOGIN]\n self._sendGrowl(title, update_text.format(ipaddress))\n\n def _send_growl(self, options, message=None):\n\n # Initialize Notification\n notice = gntp.core.GNTPNotice(\n app=options['app'],\n name=options['name'],\n title=options['title'],\n password=options['password'],\n )\n\n # Optional\n if options['sticky']:\n notice.add_header('Notification-Sticky', options['sticky'])\n if options['priority']:\n notice.add_header('Notification-Priority', options['priority'])\n if options['icon']:\n notice.add_header('Notification-Icon', app.LOGO_URL)\n\n if message:\n notice.add_header('Notification-Text', message)\n\n response = self._send(options['host'], options['port'], notice.encode(), options['debug'])\n return True if isinstance(response, gntp.core.GNTPOK) else False\n\n @staticmethod\n def _send(host, port, data, debug=False):\n if debug:\n print('<Sending>\\n', data, '\\n</Sending>')\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((host, port))\n s.send(data)\n response = gntp.core.parse_gntp(s.recv(1024))\n s.close()\n\n if debug:\n print('<Received>\\n', response, '\\n</Received>')\n\n return response\n\n def _sendGrowl(self, title='Medusa Notification', message=None, name=None, host=None, password=None,\n force=False):\n if not app.USE_GROWL and not force:\n return False\n\n if name is None:\n name = title\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n growlHosts = [(hostParts[0], port)]\n\n opts = {\n 'name': name,\n 'title': title,\n 'app': 'Medusa',\n 'sticky': None,\n 'priority': None,\n 'debug': False\n }\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['icon'] = True\n\n for pc in growlHosts:\n opts['host'] = pc[0]\n opts['port'] = pc[1]\n log.debug(\n u'GROWL: Sending growl to {host}:{port} - {msg!r}',\n {'msg': message, 'host': opts['host'], 'port': opts['port']}\n )\n try:\n if self._send_growl(opts, message):\n return True\n else:\n if self._sendRegistration(host, password):\n return self._send_growl(opts, message)\n else:\n return False\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to 
{host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n\n def _sendRegistration(self, host=None, password=None):\n opts = {}\n\n if host is None:\n hostParts = app.GROWL_HOST.split(':')\n else:\n hostParts = host.split(':')\n\n if len(hostParts) != 2 or hostParts[1] == '':\n port = 23053\n else:\n port = int(hostParts[1])\n\n opts['host'] = hostParts[0]\n opts['port'] = port\n\n if password is None:\n opts['password'] = app.GROWL_PASSWORD\n else:\n opts['password'] = password\n\n opts['app'] = 'Medusa'\n opts['debug'] = False\n\n # Send Registration\n register = gntp.core.GNTPRegister()\n register.add_header('Application-Name', opts['app'])\n register.add_header('Application-Icon', app.LOGO_URL)\n\n register.add_notification('Test', True)\n register.add_notification(common.notifyStrings[common.NOTIFY_SNATCH], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_DOWNLOAD], True)\n register.add_notification(common.notifyStrings[common.NOTIFY_GIT_UPDATE], True)\n\n if opts['password']:\n register.set_password(opts['password'])\n\n try:\n return self._send(opts['host'], opts['port'], register.encode(), opts['debug'])\n except Exception as error:\n log.warning(\n u'GROWL: Unable to send growl to {host}:{port} - {msg!r}',\n {'msg': ex(error), 'host': opts['host'], 'port': opts['port']}\n )\n return False\n", "path": "medusa/notifiers/growl.py"}]}
| 2,428 | 252 |
gh_patches_debug_29624 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-826 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nginx-opentracing + libdd_opentracing_plugin: SpanContextCorruptedException: failed to extract span context
I'm trying to set up an integration of nginx + nginx-opentracing module + DataDog tracer plugin + sample python app in order to get working multi-span traces in a manner when an app uses propagated context.
I'm getting the following error on every call:
```
ERROR:root:trace extract failed: failed to extract span context
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/opentracing_instrumentation/http_server.py", line 75, in before_request
format=Format.HTTP_HEADERS, carrier=carrier
File "/usr/local/lib/python2.7/dist-packages/ddtrace-0.20.4-py2.7.egg/ddtrace/opentracer/tracer.py", line 294, in extract
ot_span_ctx = propagator.extract(carrier)
File "/usr/local/lib/python2.7/dist-packages/ddtrace-0.20.4-py2.7.egg/ddtrace/opentracer/propagation/http.py", line 73, in extract
raise SpanContextCorruptedException('failed to extract span context')
SpanContextCorruptedException: failed to extract span context
```
Components used:
- nginx/1.15.7
- nginx-opentracing:
https://github.com/opentracing-contrib/nginx-opentracing/releases/tag/v0.8.0
- DataDog tracer plugin: https://github.com/DataDog/dd-opentracing-cpp/releases/download/v0.4.2/linux-amd64-libdd_opentracing_plugin.so.gz
nginx configuration:
```
# configuration file /etc/nginx/nginx.conf:
load_module modules/ngx_http_opentracing_module.so;
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log debug;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for" $request_id';
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
opentracing_load_tracer /etc/nginx/linux-amd64-libdd_opentracing_plugin.so /etc/nginx/dd-config.json;
opentracing on;
opentracing_trace_locations off;
opentracing_tag http_user_agent $http_user_agent;
opentracing_tag http_uri $request_uri;
opentracing_tag http_request_id $request_id;
include /etc/nginx/conf.d/*.conf;
}
# configuration file /etc/nginx/conf.d/default.conf:
upstream u {
server 62.210.92.35:80;
keepalive 20;
zone u 128k;
}
upstream upload-app {
server 127.0.0.1:8080;
}
server {
listen 80 default_server;
server_name localhost;
opentracing_operation_name $uri;
location / {
opentracing_propagate_context;
proxy_set_header Host nginx.org;
proxy_set_header Connection "";
proxy_http_version 1.1;
proxy_pass http://u;
}
location /upload/ {
opentracing_propagate_context;
proxy_pass http://upload-app;
client_max_body_size 256m;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
```
DataDog tracer configuration (/etc/nginx/dd-config.json):
```
{
"service": "nginx",
"operation_name_override": "nginx.handle",
"agent_host": "localhost",
"agent_port": 8126
}
```
DataDog agent version, OS used:
```
# dpkg -s datadog-agent
Package: datadog-agent
Status: install ok installed
Priority: extra
Section: utils
Installed-Size: 390206
Maintainer: Datadog Packages <[email protected]>
Architecture: amd64
Version: 1:6.9.0-1
Description: Datadog Monitoring Agent
The Datadog Monitoring Agent is a lightweight process that monitors system
processes and services, and sends information back to your Datadog account.
.
This package installs and runs the advanced Agent daemon, which queues and
forwards metrics from your applications as well as system services.
.
See http://www.datadoghq.com/ for more information
License: Apache License Version 2.0
Vendor: Datadog <[email protected]>
Homepage: http://www.datadoghq.com
# lsb_release -a
No LSB modules are available.
Distributor ID: Ubuntu
Description: Ubuntu 18.04.1 LTS
Release: 18.04
Codename: bionic
```
App itself:
```
#!/usr/bin/env python
import logging
import hashlib
from flask import Flask
from flask import request
from werkzeug.debug import get_current_traceback
from opentracing_instrumentation import http_server
from opentracing_instrumentation import config
import opentracing
from ddtrace.opentracer import Tracer
from random import randint
application = Flask(__name__)
tracer = None
def init_dd_tracer(service_name='upload-app'):
print "INIT DATADOG TRACER"
return Tracer(service_name=service_name, config={})
@application.before_request
def before_request():
global tracer
request.stderr = request.environ['wsgi.errors'] if 'wsgi.errors' in request.environ else stderr
headers_summary = "HEADERS:\n\n" + "\n".join(["{0}: {1}".format(k, request.headers[k]) for k in sorted(request.headers.keys())]) + "\n"
request.stderr.write(headers_summary)
request.full_url = request.url
request.remote_ip = request.remote_addr
request.remote_port = request.environ['REMOTE_PORT']
request.caller_name = "n/a"
request.operation = request.method
if not tracer:
tracer = init_dd_tracer()
request.span = http_server.before_request(request=request, tracer=tracer)
@application.route("/", methods=['GET', 'POST'])
def default():
try:
environ_summary = "ENVIRON:\n\n" + "\n".join(["{0}: {1}".format(k, request.environ[k]) for k in sorted(request.environ.keys())]) + "\n"
args = "REQUEST ARGS: %s" % request.args
body = "REQUETS BODY: %s" % request.data
return "%s\n%s\n%s\n" % (args, body, environ_summary)
except Exception, e:
track = get_current_traceback(skip=1, show_hidden_frames=True, ignore_system_exceptions=False)
track.log()
abort(500)
@application.route("/upload/", methods=['GET', 'POST'])
@application.route("/upload-http/", methods=['GET', 'POST'])
def upload():
global tracer
with tracer.start_span('ProcessUpload', child_of=request.span) as span:
span.log_kv({'ProcessUpload': 'started'})
span.set_tag('payload-size', int(request.headers.get('Content-Length')) if 'Content-Length' in request.headers else 0)
if 'Content-Length' in request.headers and int(request.headers.get('Content-Length')):
body = request.stream.read()
for x in range(1, randint(2, 10)):
with tracer.start_span('SubPart%02d' % x, child_of=span) as subpart_span:
subpart_span.log_kv({'subpart_iteration': x, 'action': 'begin'})
m = hashlib.md5()
m.update(body)
response_body = "%d:%s\n" % (len(body), m.hexdigest())
subpart_span.log_kv({'subpart_iteration': x, 'action': 'end'})
request.stderr.write('ProcessUpload finished with %d iterations\n' % x)
else:
response_body = 'no data was uploaded'
try:
span.set_tag('iterations', x)
except NameError:
pass
span.log_kv({'ProcessUpload': 'finished'})
return response_body
@application.errorhandler(500)
def internal_error(error):
return "500 error"
if __name__ == "__main__":
application.debug = True
application.config['PROPAGATE_EXCEPTIONS'] = True
application.run(host='127.0.0.1', port=8080)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/propagation/http.py`
Content:
```
1 import logging
2
3 from ..context import Context
4
5 from .utils import get_wsgi_header
6
7 log = logging.getLogger(__name__)
8
9 # HTTP headers one should set for distributed tracing.
10 # These are cross-language (eg: Python, Go and other implementations should honor these)
11 HTTP_HEADER_TRACE_ID = "x-datadog-trace-id"
12 HTTP_HEADER_PARENT_ID = "x-datadog-parent-id"
13 HTTP_HEADER_SAMPLING_PRIORITY = "x-datadog-sampling-priority"
14
15
16 # Note that due to WSGI spec we have to also check for uppercased and prefixed
17 # versions of these headers
18 POSSIBLE_HTTP_HEADER_TRACE_IDS = frozenset(
19 [HTTP_HEADER_TRACE_ID, get_wsgi_header(HTTP_HEADER_TRACE_ID)]
20 )
21 POSSIBLE_HTTP_HEADER_PARENT_IDS = frozenset(
22 [HTTP_HEADER_PARENT_ID, get_wsgi_header(HTTP_HEADER_PARENT_ID)]
23 )
24 POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES = frozenset(
25 [HTTP_HEADER_SAMPLING_PRIORITY, get_wsgi_header(HTTP_HEADER_SAMPLING_PRIORITY)]
26 )
27
28
29 class HTTPPropagator(object):
30 """A HTTP Propagator using HTTP headers as carrier."""
31
32 def inject(self, span_context, headers):
33 """Inject Context attributes that have to be propagated as HTTP headers.
34
35 Here is an example using `requests`::
36
37 import requests
38 from ddtrace.propagation.http import HTTPPropagator
39
40 def parent_call():
41 with tracer.trace("parent_span") as span:
42 headers = {}
43 propagator = HTTPPropagator()
44 propagator.inject(span.context, headers)
45 url = "<some RPC endpoint>"
46 r = requests.get(url, headers=headers)
47
48 :param Context span_context: Span context to propagate.
49 :param dict headers: HTTP headers to extend with tracing attributes.
50 """
51 headers[HTTP_HEADER_TRACE_ID] = str(span_context.trace_id)
52 headers[HTTP_HEADER_PARENT_ID] = str(span_context.span_id)
53 sampling_priority = span_context.sampling_priority
54 # Propagate priority only if defined
55 if sampling_priority is not None:
56 headers[HTTP_HEADER_SAMPLING_PRIORITY] = str(span_context.sampling_priority)
57
58 @staticmethod
59 def extract_trace_id(headers):
60 trace_id = 0
61
62 for key in POSSIBLE_HTTP_HEADER_TRACE_IDS:
63 if key in headers:
64 trace_id = headers.get(key)
65
66 return int(trace_id)
67
68 @staticmethod
69 def extract_parent_span_id(headers):
70 parent_span_id = 0
71
72 for key in POSSIBLE_HTTP_HEADER_PARENT_IDS:
73 if key in headers:
74 parent_span_id = headers.get(key)
75
76 return int(parent_span_id)
77
78 @staticmethod
79 def extract_sampling_priority(headers):
80 sampling_priority = None
81
82 for key in POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES:
83 if key in headers:
84 sampling_priority = headers.get(key)
85
86 return sampling_priority
87
88 def extract(self, headers):
89 """Extract a Context from HTTP headers into a new Context.
90
91 Here is an example from a web endpoint::
92
93 from ddtrace.propagation.http import HTTPPropagator
94
95 def my_controller(url, headers):
96 propagator = HTTPPropagator()
97 context = propagator.extract(headers)
98 tracer.context_provider.activate(context)
99
100 with tracer.trace("my_controller") as span:
101 span.set_meta('http.url', url)
102
103 :param dict headers: HTTP headers to extract tracing attributes.
104 :return: New `Context` with propagated attributes.
105 """
106 if not headers:
107 return Context()
108
109 try:
110 trace_id = HTTPPropagator.extract_trace_id(headers)
111 parent_span_id = HTTPPropagator.extract_parent_span_id(headers)
112 sampling_priority = HTTPPropagator.extract_sampling_priority(headers)
113
114 if sampling_priority is not None:
115 sampling_priority = int(sampling_priority)
116
117 return Context(
118 trace_id=trace_id,
119 span_id=parent_span_id,
120 sampling_priority=sampling_priority,
121 )
122 # If headers are invalid and cannot be parsed, return a new context and log the issue.
123 except Exception as error:
124 try:
125 log.debug(
126 "invalid x-datadog-* headers, trace-id: %s, parent-id: %s, priority: %s, error: %s",
127 headers.get(HTTP_HEADER_TRACE_ID, 0),
128 headers.get(HTTP_HEADER_PARENT_ID, 0),
129 headers.get(HTTP_HEADER_SAMPLING_PRIORITY),
130 error,
131 )
132 # We might fail on string formatting errors ; in that case only format the first error
133 except Exception:
134 log.debug(error)
135 return Context()
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/propagation/http.py b/ddtrace/propagation/http.py
--- a/ddtrace/propagation/http.py
+++ b/ddtrace/propagation/http.py
@@ -56,34 +56,35 @@
headers[HTTP_HEADER_SAMPLING_PRIORITY] = str(span_context.sampling_priority)
@staticmethod
- def extract_trace_id(headers):
- trace_id = 0
+ def extract_header_value(possible_header_names, headers, default=None):
+ for header, value in headers.items():
+ for header_name in possible_header_names:
+ if header.lower() == header_name.lower():
+ return value
- for key in POSSIBLE_HTTP_HEADER_TRACE_IDS:
- if key in headers:
- trace_id = headers.get(key)
+ return default
- return int(trace_id)
+ @staticmethod
+ def extract_trace_id(headers):
+ return int(
+ HTTPPropagator.extract_header_value(
+ POSSIBLE_HTTP_HEADER_TRACE_IDS, headers, default=0,
+ )
+ )
@staticmethod
def extract_parent_span_id(headers):
- parent_span_id = 0
-
- for key in POSSIBLE_HTTP_HEADER_PARENT_IDS:
- if key in headers:
- parent_span_id = headers.get(key)
-
- return int(parent_span_id)
+ return int(
+ HTTPPropagator.extract_header_value(
+ POSSIBLE_HTTP_HEADER_PARENT_IDS, headers, default=0,
+ )
+ )
@staticmethod
def extract_sampling_priority(headers):
- sampling_priority = None
-
- for key in POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES:
- if key in headers:
- sampling_priority = headers.get(key)
-
- return sampling_priority
+ return HTTPPropagator.extract_header_value(
+ POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES, headers,
+ )
def extract(self, headers):
"""Extract a Context from HTTP headers into a new Context.
|
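The fix above drops the exact-key frozenset lookups in favour of a case-insensitive scan over the incoming header names. A minimal, self-contained sketch of why that matters (hypothetical helper names, not the ddtrace API): requests proxied through nginx typically arrive with title-cased names such as `X-Datadog-Trace-Id`, which the original `if key in headers` test never matches.

```python
# Hypothetical helper names for illustration only, not the ddtrace API.
HTTP_HEADER_TRACE_ID = "x-datadog-trace-id"

def extract_trace_id_exact(headers):
    # Old behaviour (simplified to one name): exact, case-sensitive lookup.
    return int(headers.get(HTTP_HEADER_TRACE_ID, 0))

def extract_trace_id_case_insensitive(headers):
    # New behaviour: compare header names case-insensitively, as in the diff.
    for name, value in headers.items():
        if name.lower() == HTTP_HEADER_TRACE_ID:
            return int(value)
    return 0

incoming = {"X-Datadog-Trace-Id": "12345", "X-Datadog-Parent-Id": "67890"}
print(extract_trace_id_exact(incoming))             # 0 -> context is lost
print(extract_trace_id_case_insensitive(incoming))  # 12345 -> context propagates
```

A lost trace id is consistent with the `SpanContextCorruptedException` traceback quoted in the issue, since the OpenTracing propagator shown there treats a failed extraction as a corrupted span context.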
{"golden_diff": "diff --git a/ddtrace/propagation/http.py b/ddtrace/propagation/http.py\n--- a/ddtrace/propagation/http.py\n+++ b/ddtrace/propagation/http.py\n@@ -56,34 +56,35 @@\n headers[HTTP_HEADER_SAMPLING_PRIORITY] = str(span_context.sampling_priority)\n \n @staticmethod\n- def extract_trace_id(headers):\n- trace_id = 0\n+ def extract_header_value(possible_header_names, headers, default=None):\n+ for header, value in headers.items():\n+ for header_name in possible_header_names:\n+ if header.lower() == header_name.lower():\n+ return value\n \n- for key in POSSIBLE_HTTP_HEADER_TRACE_IDS:\n- if key in headers:\n- trace_id = headers.get(key)\n+ return default\n \n- return int(trace_id)\n+ @staticmethod\n+ def extract_trace_id(headers):\n+ return int(\n+ HTTPPropagator.extract_header_value(\n+ POSSIBLE_HTTP_HEADER_TRACE_IDS, headers, default=0,\n+ )\n+ )\n \n @staticmethod\n def extract_parent_span_id(headers):\n- parent_span_id = 0\n-\n- for key in POSSIBLE_HTTP_HEADER_PARENT_IDS:\n- if key in headers:\n- parent_span_id = headers.get(key)\n-\n- return int(parent_span_id)\n+ return int(\n+ HTTPPropagator.extract_header_value(\n+ POSSIBLE_HTTP_HEADER_PARENT_IDS, headers, default=0,\n+ )\n+ )\n \n @staticmethod\n def extract_sampling_priority(headers):\n- sampling_priority = None\n-\n- for key in POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES:\n- if key in headers:\n- sampling_priority = headers.get(key)\n-\n- return sampling_priority\n+ return HTTPPropagator.extract_header_value(\n+ POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES, headers,\n+ )\n \n def extract(self, headers):\n \"\"\"Extract a Context from HTTP headers into a new Context.\n", "issue": "nginx-opentracing + libdd_opentracing_plugin: SpanContextCorruptedException: failed to extract span context\nI'm trying to set up an integration of nginx + nginx-opentracing module + DataDog tracer plugin + sample python app in order to get working multi-span traces in a manner when an app uses propagated context.\r\n\r\nI'm getting the following error on every call:\r\n\r\n```\r\nERROR:root:trace extract failed: failed to extract span context\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python2.7/dist-packages/opentracing_instrumentation/http_server.py\", line 75, in before_request\r\n format=Format.HTTP_HEADERS, carrier=carrier\r\n File \"/usr/local/lib/python2.7/dist-packages/ddtrace-0.20.4-py2.7.egg/ddtrace/opentracer/tracer.py\", line 294, in extract\r\n ot_span_ctx = propagator.extract(carrier)\r\n File \"/usr/local/lib/python2.7/dist-packages/ddtrace-0.20.4-py2.7.egg/ddtrace/opentracer/propagation/http.py\", line 73, in extract\r\n raise SpanContextCorruptedException('failed to extract span context')\r\nSpanContextCorruptedException: failed to extract span context\r\n```\r\n\r\nComponents used:\r\n - nginx/1.15.7\r\n - nginx-opentracing: \r\nhttps://github.com/opentracing-contrib/nginx-opentracing/releases/tag/v0.8.0\r\n - DataDog tracer plugin: https://github.com/DataDog/dd-opentracing-cpp/releases/download/v0.4.2/linux-amd64-libdd_opentracing_plugin.so.gz\r\n\r\nnginx configuration:\r\n\r\n```\r\n# configuration file /etc/nginx/nginx.conf:\r\nload_module modules/ngx_http_opentracing_module.so;\r\n\r\nuser nginx;\r\nworker_processes auto;\r\n\r\nerror_log /var/log/nginx/error.log debug;\r\npid /var/run/nginx.pid;\r\n\r\nevents {\r\n worker_connections 1024;\r\n}\r\n\r\nhttp {\r\n log_format main '$remote_addr - $remote_user [$time_local] \"$request\" '\r\n '$status $body_bytes_sent \"$http_referer\" '\r\n '\"$http_user_agent\" 
\"$http_x_forwarded_for\" $request_id';\r\n\r\n access_log /var/log/nginx/access.log main;\r\n\r\n sendfile on;\r\n\r\n keepalive_timeout 65;\r\n\r\n opentracing_load_tracer /etc/nginx/linux-amd64-libdd_opentracing_plugin.so /etc/nginx/dd-config.json;\r\n opentracing on;\r\n opentracing_trace_locations off;\r\n opentracing_tag http_user_agent $http_user_agent;\r\n opentracing_tag http_uri $request_uri;\r\n opentracing_tag http_request_id $request_id;\r\n\r\n include /etc/nginx/conf.d/*.conf;\r\n}\r\n\r\n# configuration file /etc/nginx/conf.d/default.conf:\r\nupstream u {\r\n server 62.210.92.35:80;\r\n keepalive 20;\r\n zone u 128k;\r\n}\r\n\r\nupstream upload-app {\r\n server 127.0.0.1:8080;\r\n}\r\n\r\nserver {\r\n listen 80 default_server;\r\n server_name localhost;\r\n\r\n opentracing_operation_name $uri;\r\n\r\n location / {\r\n opentracing_propagate_context;\r\n proxy_set_header Host nginx.org;\r\n proxy_set_header Connection \"\";\r\n proxy_http_version 1.1;\r\n proxy_pass http://u;\r\n }\r\n\r\n location /upload/ {\r\n opentracing_propagate_context;\r\n proxy_pass http://upload-app;\r\n client_max_body_size 256m;\r\n }\r\n\r\n error_page 500 502 503 504 /50x.html;\r\n location = /50x.html {\r\n root /usr/share/nginx/html;\r\n }\r\n}\r\n```\r\n\r\nDataDog tracer configuration (/etc/nginx/dd-config.json):\r\n\r\n```\r\n{\r\n \"service\": \"nginx\",\r\n \"operation_name_override\": \"nginx.handle\",\r\n \"agent_host\": \"localhost\",\r\n \"agent_port\": 8126\r\n}\r\n```\r\n\r\nDataDog agent version, OS used:\r\n\r\n```\r\n# dpkg -s datadog-agent\r\nPackage: datadog-agent\r\nStatus: install ok installed\r\nPriority: extra\r\nSection: utils\r\nInstalled-Size: 390206\r\nMaintainer: Datadog Packages <[email protected]>\r\nArchitecture: amd64\r\nVersion: 1:6.9.0-1\r\nDescription: Datadog Monitoring Agent\r\n The Datadog Monitoring Agent is a lightweight process that monitors system\r\n processes and services, and sends information back to your Datadog account.\r\n .\r\n This package installs and runs the advanced Agent daemon, which queues and\r\n forwards metrics from your applications as well as system services.\r\n .\r\n See http://www.datadoghq.com/ for more information\r\nLicense: Apache License Version 2.0\r\nVendor: Datadog <[email protected]>\r\nHomepage: http://www.datadoghq.com\r\n\r\n# lsb_release -a\r\nNo LSB modules are available.\r\nDistributor ID:\tUbuntu\r\nDescription:\tUbuntu 18.04.1 LTS\r\nRelease:\t18.04\r\nCodename:\tbionic\r\n```\r\n\r\nApp itself:\r\n\r\n```\r\n#!/usr/bin/env python\r\n\r\nimport logging\r\nimport hashlib\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom werkzeug.debug import get_current_traceback\r\nfrom opentracing_instrumentation import http_server\r\nfrom opentracing_instrumentation import config\r\nimport opentracing\r\nfrom ddtrace.opentracer import Tracer\r\nfrom random import randint\r\n\r\napplication = Flask(__name__)\r\ntracer = None \r\n\r\ndef init_dd_tracer(service_name='upload-app'):\r\n print \"INIT DATADOG TRACER\"\r\n return Tracer(service_name=service_name, config={})\r\n\r\[email protected]_request\r\ndef before_request():\r\n global tracer\r\n request.stderr = request.environ['wsgi.errors'] if 'wsgi.errors' in request.environ else stderr\r\n headers_summary = \"HEADERS:\\n\\n\" + \"\\n\".join([\"{0}: {1}\".format(k, request.headers[k]) for k in sorted(request.headers.keys())]) + \"\\n\"\r\n request.stderr.write(headers_summary)\r\n\r\n request.full_url = request.url\r\n request.remote_ip = request.remote_addr\r\n 
request.remote_port = request.environ['REMOTE_PORT']\r\n request.caller_name = \"n/a\"\r\n request.operation = request.method\r\n\r\n if not tracer:\r\n tracer = init_dd_tracer()\r\n\r\n request.span = http_server.before_request(request=request, tracer=tracer)\r\n\r\[email protected](\"/\", methods=['GET', 'POST'])\r\ndef default():\r\n try:\r\n environ_summary = \"ENVIRON:\\n\\n\" + \"\\n\".join([\"{0}: {1}\".format(k, request.environ[k]) for k in sorted(request.environ.keys())]) + \"\\n\"\r\n args = \"REQUEST ARGS: %s\" % request.args\r\n body = \"REQUETS BODY: %s\" % request.data\r\n return \"%s\\n%s\\n%s\\n\" % (args, body, environ_summary)\r\n except Exception, e:\r\n track = get_current_traceback(skip=1, show_hidden_frames=True, ignore_system_exceptions=False)\r\n track.log()\r\n abort(500)\r\n\r\[email protected](\"/upload/\", methods=['GET', 'POST'])\r\[email protected](\"/upload-http/\", methods=['GET', 'POST'])\r\ndef upload():\r\n global tracer\r\n with tracer.start_span('ProcessUpload', child_of=request.span) as span:\r\n span.log_kv({'ProcessUpload': 'started'})\r\n span.set_tag('payload-size', int(request.headers.get('Content-Length')) if 'Content-Length' in request.headers else 0)\r\n\r\n if 'Content-Length' in request.headers and int(request.headers.get('Content-Length')):\r\n body = request.stream.read()\r\n for x in range(1, randint(2, 10)):\r\n with tracer.start_span('SubPart%02d' % x, child_of=span) as subpart_span:\r\n subpart_span.log_kv({'subpart_iteration': x, 'action': 'begin'})\r\n m = hashlib.md5()\r\n m.update(body)\r\n response_body = \"%d:%s\\n\" % (len(body), m.hexdigest())\r\n subpart_span.log_kv({'subpart_iteration': x, 'action': 'end'})\r\n request.stderr.write('ProcessUpload finished with %d iterations\\n' % x)\r\n\r\n else:\r\n response_body = 'no data was uploaded'\r\n\r\n try:\r\n span.set_tag('iterations', x)\r\n except NameError:\r\n pass\r\n\r\n span.log_kv({'ProcessUpload': 'finished'})\r\n\r\n return response_body\r\n\r\[email protected](500)\r\ndef internal_error(error):\r\n return \"500 error\"\r\n\r\nif __name__ == \"__main__\":\r\n application.debug = True\r\n application.config['PROPAGATE_EXCEPTIONS'] = True\r\n application.run(host='127.0.0.1', port=8080)\r\n```\r\n\n", "before_files": [{"content": "import logging\n\nfrom ..context import Context\n\nfrom .utils import get_wsgi_header\n\nlog = logging.getLogger(__name__)\n\n# HTTP headers one should set for distributed tracing.\n# These are cross-language (eg: Python, Go and other implementations should honor these)\nHTTP_HEADER_TRACE_ID = \"x-datadog-trace-id\"\nHTTP_HEADER_PARENT_ID = \"x-datadog-parent-id\"\nHTTP_HEADER_SAMPLING_PRIORITY = \"x-datadog-sampling-priority\"\n\n\n# Note that due to WSGI spec we have to also check for uppercased and prefixed\n# versions of these headers\nPOSSIBLE_HTTP_HEADER_TRACE_IDS = frozenset(\n [HTTP_HEADER_TRACE_ID, get_wsgi_header(HTTP_HEADER_TRACE_ID)]\n)\nPOSSIBLE_HTTP_HEADER_PARENT_IDS = frozenset(\n [HTTP_HEADER_PARENT_ID, get_wsgi_header(HTTP_HEADER_PARENT_ID)]\n)\nPOSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES = frozenset(\n [HTTP_HEADER_SAMPLING_PRIORITY, get_wsgi_header(HTTP_HEADER_SAMPLING_PRIORITY)]\n)\n\n\nclass HTTPPropagator(object):\n \"\"\"A HTTP Propagator using HTTP headers as carrier.\"\"\"\n\n def inject(self, span_context, headers):\n \"\"\"Inject Context attributes that have to be propagated as HTTP headers.\n\n Here is an example using `requests`::\n\n import requests\n from ddtrace.propagation.http import HTTPPropagator\n\n def 
parent_call():\n with tracer.trace(\"parent_span\") as span:\n headers = {}\n propagator = HTTPPropagator()\n propagator.inject(span.context, headers)\n url = \"<some RPC endpoint>\"\n r = requests.get(url, headers=headers)\n\n :param Context span_context: Span context to propagate.\n :param dict headers: HTTP headers to extend with tracing attributes.\n \"\"\"\n headers[HTTP_HEADER_TRACE_ID] = str(span_context.trace_id)\n headers[HTTP_HEADER_PARENT_ID] = str(span_context.span_id)\n sampling_priority = span_context.sampling_priority\n # Propagate priority only if defined\n if sampling_priority is not None:\n headers[HTTP_HEADER_SAMPLING_PRIORITY] = str(span_context.sampling_priority)\n\n @staticmethod\n def extract_trace_id(headers):\n trace_id = 0\n\n for key in POSSIBLE_HTTP_HEADER_TRACE_IDS:\n if key in headers:\n trace_id = headers.get(key)\n\n return int(trace_id)\n\n @staticmethod\n def extract_parent_span_id(headers):\n parent_span_id = 0\n\n for key in POSSIBLE_HTTP_HEADER_PARENT_IDS:\n if key in headers:\n parent_span_id = headers.get(key)\n\n return int(parent_span_id)\n\n @staticmethod\n def extract_sampling_priority(headers):\n sampling_priority = None\n\n for key in POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES:\n if key in headers:\n sampling_priority = headers.get(key)\n\n return sampling_priority\n\n def extract(self, headers):\n \"\"\"Extract a Context from HTTP headers into a new Context.\n\n Here is an example from a web endpoint::\n\n from ddtrace.propagation.http import HTTPPropagator\n\n def my_controller(url, headers):\n propagator = HTTPPropagator()\n context = propagator.extract(headers)\n tracer.context_provider.activate(context)\n\n with tracer.trace(\"my_controller\") as span:\n span.set_meta('http.url', url)\n\n :param dict headers: HTTP headers to extract tracing attributes.\n :return: New `Context` with propagated attributes.\n \"\"\"\n if not headers:\n return Context()\n\n try:\n trace_id = HTTPPropagator.extract_trace_id(headers)\n parent_span_id = HTTPPropagator.extract_parent_span_id(headers)\n sampling_priority = HTTPPropagator.extract_sampling_priority(headers)\n\n if sampling_priority is not None:\n sampling_priority = int(sampling_priority)\n\n return Context(\n trace_id=trace_id,\n span_id=parent_span_id,\n sampling_priority=sampling_priority,\n )\n # If headers are invalid and cannot be parsed, return a new context and log the issue.\n except Exception as error:\n try:\n log.debug(\n \"invalid x-datadog-* headers, trace-id: %s, parent-id: %s, priority: %s, error: %s\",\n headers.get(HTTP_HEADER_TRACE_ID, 0),\n headers.get(HTTP_HEADER_PARENT_ID, 0),\n headers.get(HTTP_HEADER_SAMPLING_PRIORITY),\n error,\n )\n # We might fail on string formatting errors ; in that case only format the first error\n except Exception:\n log.debug(error)\n return Context()\n", "path": "ddtrace/propagation/http.py"}], "after_files": [{"content": "import logging\n\nfrom ..context import Context\n\nfrom .utils import get_wsgi_header\n\nlog = logging.getLogger(__name__)\n\n# HTTP headers one should set for distributed tracing.\n# These are cross-language (eg: Python, Go and other implementations should honor these)\nHTTP_HEADER_TRACE_ID = \"x-datadog-trace-id\"\nHTTP_HEADER_PARENT_ID = \"x-datadog-parent-id\"\nHTTP_HEADER_SAMPLING_PRIORITY = \"x-datadog-sampling-priority\"\n\n\n# Note that due to WSGI spec we have to also check for uppercased and prefixed\n# versions of these headers\nPOSSIBLE_HTTP_HEADER_TRACE_IDS = frozenset(\n [HTTP_HEADER_TRACE_ID, 
get_wsgi_header(HTTP_HEADER_TRACE_ID)]\n)\nPOSSIBLE_HTTP_HEADER_PARENT_IDS = frozenset(\n [HTTP_HEADER_PARENT_ID, get_wsgi_header(HTTP_HEADER_PARENT_ID)]\n)\nPOSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES = frozenset(\n [HTTP_HEADER_SAMPLING_PRIORITY, get_wsgi_header(HTTP_HEADER_SAMPLING_PRIORITY)]\n)\n\n\nclass HTTPPropagator(object):\n \"\"\"A HTTP Propagator using HTTP headers as carrier.\"\"\"\n\n def inject(self, span_context, headers):\n \"\"\"Inject Context attributes that have to be propagated as HTTP headers.\n\n Here is an example using `requests`::\n\n import requests\n from ddtrace.propagation.http import HTTPPropagator\n\n def parent_call():\n with tracer.trace(\"parent_span\") as span:\n headers = {}\n propagator = HTTPPropagator()\n propagator.inject(span.context, headers)\n url = \"<some RPC endpoint>\"\n r = requests.get(url, headers=headers)\n\n :param Context span_context: Span context to propagate.\n :param dict headers: HTTP headers to extend with tracing attributes.\n \"\"\"\n headers[HTTP_HEADER_TRACE_ID] = str(span_context.trace_id)\n headers[HTTP_HEADER_PARENT_ID] = str(span_context.span_id)\n sampling_priority = span_context.sampling_priority\n # Propagate priority only if defined\n if sampling_priority is not None:\n headers[HTTP_HEADER_SAMPLING_PRIORITY] = str(span_context.sampling_priority)\n\n @staticmethod\n def extract_header_value(possible_header_names, headers, default=None):\n for header, value in headers.items():\n for header_name in possible_header_names:\n if header.lower() == header_name.lower():\n return value\n\n return default\n\n @staticmethod\n def extract_trace_id(headers):\n return int(\n HTTPPropagator.extract_header_value(\n POSSIBLE_HTTP_HEADER_TRACE_IDS, headers, default=0,\n )\n )\n\n @staticmethod\n def extract_parent_span_id(headers):\n return int(\n HTTPPropagator.extract_header_value(\n POSSIBLE_HTTP_HEADER_PARENT_IDS, headers, default=0,\n )\n )\n\n @staticmethod\n def extract_sampling_priority(headers):\n return HTTPPropagator.extract_header_value(\n POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES, headers,\n )\n\n def extract(self, headers):\n \"\"\"Extract a Context from HTTP headers into a new Context.\n\n Here is an example from a web endpoint::\n\n from ddtrace.propagation.http import HTTPPropagator\n\n def my_controller(url, headers):\n propagator = HTTPPropagator()\n context = propagator.extract(headers)\n tracer.context_provider.activate(context)\n\n with tracer.trace(\"my_controller\") as span:\n span.set_meta('http.url', url)\n\n :param dict headers: HTTP headers to extract tracing attributes.\n :return: New `Context` with propagated attributes.\n \"\"\"\n if not headers:\n return Context()\n\n try:\n trace_id = HTTPPropagator.extract_trace_id(headers)\n parent_span_id = HTTPPropagator.extract_parent_span_id(headers)\n sampling_priority = HTTPPropagator.extract_sampling_priority(headers)\n\n if sampling_priority is not None:\n sampling_priority = int(sampling_priority)\n\n return Context(\n trace_id=trace_id,\n span_id=parent_span_id,\n sampling_priority=sampling_priority,\n )\n # If headers are invalid and cannot be parsed, return a new context and log the issue.\n except Exception as error:\n try:\n log.debug(\n \"invalid x-datadog-* headers, trace-id: %s, parent-id: %s, priority: %s, error: %s\",\n headers.get(HTTP_HEADER_TRACE_ID, 0),\n headers.get(HTTP_HEADER_PARENT_ID, 0),\n headers.get(HTTP_HEADER_SAMPLING_PRIORITY),\n error,\n )\n # We might fail on string formatting errors ; in that case only format the first error\n except 
Exception:\n log.debug(error)\n return Context()\n", "path": "ddtrace/propagation/http.py"}]}
| 3,480 | 437 |
gh_patches_debug_17006 | rasdani/github-patches | git_diff | sublimelsp__LSP-1950 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Side by side option for symbol action links in hover popup doesn't work if location is in same file
**Describe the bug**
The side by side icon link for "Definition" / "Type Definition" / "Declaration" from the hover popup doesn't work if the location of the definition/declaration is in the same file.
**To Reproduce**
Steps to reproduce the behavior:
1. Have `"show_symbol_action_links": true` in the settings (this is the default value)
2. Hover over symbol (e.g. function call) which has a definition in the same file
3. Click on ◨ next to "Definition", or use <kbd>Ctrl</kbd> + click on the text link
4. See that the view scrolls to the location, instead of opening the location in a new tab to the right
**Expected behavior**
LSP should open the definition in a new tab to the right, similar to how the built-in definitions popup from ST does
**Environment (please complete the following information):**
- OS: Win 10
- LSP version: main
**Additional context**
Seems like the `flags` argument which includes the "side_by_side" information is lost/ignored here:
https://github.com/sublimelsp/LSP/blob/1bcd518102c1516c9d808c974b7d2a5eba7d0b80/plugin/core/open.py#L30-L31
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/open.py`
Content:
```
1 from .logging import exception_log
2 from .promise import Promise
3 from .promise import ResolveFunc
4 from .protocol import DocumentUri
5 from .protocol import Range
6 from .protocol import RangeLsp
7 from .typing import Dict, Tuple, Optional
8 from .url import parse_uri
9 from .views import range_to_region
10 import os
11 import sublime
12 import subprocess
13
14
15 opening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]
16
17
18 def open_file(
19 window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1
20 ) -> Promise[Optional[sublime.View]]:
21 """
22 Open a file asynchronously.
23 It is only safe to call this function from the UI thread.
24 The provided uri MUST be a file URI
25 """
26 file = parse_uri(uri)[1]
27 # window.open_file brings the file to focus if it's already opened, which we don't want.
28 # So we first check if there's already a view for that file.
29 view = window.find_open_file(file)
30 if view:
31 return Promise.resolve(view)
32
33 view = window.open_file(file, flags, group)
34 if not view.is_loading():
35 # It's already loaded. Possibly already open in a tab.
36 return Promise.resolve(view)
37
38 # Is the view opening right now? Then return the associated unresolved promise
39 for fn, value in opening_files.items():
40 if fn == file or os.path.samefile(fn, file):
41 # Return the unresolved promise. A future on_load event will resolve the promise.
42 return value[0]
43
44 # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)
45 def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:
46 global opening_files
47 # Save the promise in the first element of the tuple -- except we cannot yet do that here
48 opening_files[file] = (None, resolve) # type: ignore
49
50 promise = Promise(fullfill)
51 tup = opening_files[file]
52 # Save the promise in the first element of the tuple so that the for-loop above can return it
53 opening_files[file] = (promise, tup[1])
54 return promise
55
56
57 def center_selection(v: sublime.View, r: RangeLsp) -> sublime.View:
58 selection = range_to_region(Range.from_lsp(r), v)
59 v.run_command("lsp_selection_set", {"regions": [(selection.a, selection.a)]})
60 window = v.window()
61 if window:
62 window.focus_view(v)
63 if int(sublime.version()) >= 4124:
64 v.show_at_center(selection, animate=False)
65 else:
66 # TODO: remove later when a stable build lands
67 v.show_at_center(selection) # type: ignore
68 return v
69
70
71 def open_externally(uri: str, take_focus: bool) -> bool:
72 """
73 A blocking function that invokes the OS's "open with default extension"
74 """
75 try:
76 # TODO: handle take_focus
77 if sublime.platform() == "windows":
78 os.startfile(uri) # type: ignore
79 elif sublime.platform() == "osx":
80 subprocess.check_call(("/usr/bin/open", uri))
81 else: # linux
82 subprocess.check_call(("xdg-open", uri))
83 return True
84 except Exception as ex:
85 exception_log("Failed to open {}".format(uri), ex)
86 return False
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/core/open.py b/plugin/core/open.py
--- a/plugin/core/open.py
+++ b/plugin/core/open.py
@@ -24,11 +24,15 @@
The provided uri MUST be a file URI
"""
file = parse_uri(uri)[1]
- # window.open_file brings the file to focus if it's already opened, which we don't want.
- # So we first check if there's already a view for that file.
+ # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed
+ # to open as a separate view).
view = window.find_open_file(file)
if view:
- return Promise.resolve(view)
+ opens_in_current_group = group == -1 or window.active_group() == group
+ opens_as_new_selection = (flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)) != 0
+ return_existing_view = opens_in_current_group and not opens_as_new_selection
+ if return_existing_view:
+ return Promise.resolve(view)
view = window.open_file(file, flags, group)
if not view.is_loading():
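The added condition reuses an already-open view only when doing so would not discard the caller's request for a different group or a new selection. A rough sketch of that decision, using made-up numeric stand-ins for `sublime.ADD_TO_SELECTION` and `sublime.REPLACE_MRU` because the real constants are only available inside Sublime Text:

```python
# Made-up numeric stand-ins; the real values live in the `sublime` module.
ADD_TO_SELECTION = 0x1000
REPLACE_MRU = 0x2000

def should_reuse_existing_view(flags, group, active_group):
    # Mirrors the condition introduced by the diff above.
    opens_in_current_group = group == -1 or active_group == group
    opens_as_new_selection = (flags & (ADD_TO_SELECTION | REPLACE_MRU)) != 0
    return opens_in_current_group and not opens_as_new_selection

# Plain jump to a definition in the same file: reuse the already-open tab.
print(should_reuse_existing_view(flags=0, group=-1, active_group=0))  # True

# Side-by-side request: flag bits are set, so fall through to open_file()
# and let Sublime honour them.
print(should_reuse_existing_view(flags=ADD_TO_SELECTION, group=-1, active_group=0))  # False
```

For the side-by-side case the early `Promise.resolve(view)` is skipped, so `window.open_file` receives the flags and can open the location in a separate view, which is what the issue expects.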
|
{"golden_diff": "diff --git a/plugin/core/open.py b/plugin/core/open.py\n--- a/plugin/core/open.py\n+++ b/plugin/core/open.py\n@@ -24,11 +24,15 @@\n The provided uri MUST be a file URI\n \"\"\"\n file = parse_uri(uri)[1]\n- # window.open_file brings the file to focus if it's already opened, which we don't want.\n- # So we first check if there's already a view for that file.\n+ # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed\n+ # to open as a separate view).\n view = window.find_open_file(file)\n if view:\n- return Promise.resolve(view)\n+ opens_in_current_group = group == -1 or window.active_group() == group\n+ opens_as_new_selection = (flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)) != 0\n+ return_existing_view = opens_in_current_group and not opens_as_new_selection\n+ if return_existing_view:\n+ return Promise.resolve(view)\n \n view = window.open_file(file, flags, group)\n if not view.is_loading():\n", "issue": "Side by side option for symbol action links in hover popup doesn't work if location is in same file\n**Describe the bug**\r\nThe side by side icon link for \"Definition\" / \"Type Definition\" / \"Declaration\" from the hover popup doesn't work if the location of the definition/declaration is in the same file.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Have `\"show_symbol_action_links\": true` in the settings (this is the default value)\r\n2. Hover over symbol (e.g. function call) which has a definition in the same file\r\n3. Click on \u25e8 next to \"Definition\", or use <kbd>Ctrl</kbd> + click on the text link\r\n4. See that the view scrolls to the location, instead of opening the location in a new tab to the right\r\n\r\n**Expected behavior**\r\nLSP should open the definition in a new new to the right, similar to how the built-in definitions popup from ST does\r\n\r\n**Environment (please complete the following information):**\r\n- OS: Win 10\r\n- LSP version: main\r\n\r\n**Additional context**\r\n\r\nSeems like the `flags` argument which includes the \"side_by_side\" information is lost/ignored here:\r\nhttps://github.com/sublimelsp/LSP/blob/1bcd518102c1516c9d808c974b7d2a5eba7d0b80/plugin/core/open.py#L30-L31\n", "before_files": [{"content": "from .logging import exception_log\nfrom .promise import Promise\nfrom .promise import ResolveFunc\nfrom .protocol import DocumentUri\nfrom .protocol import Range\nfrom .protocol import RangeLsp\nfrom .typing import Dict, Tuple, Optional\nfrom .url import parse_uri\nfrom .views import range_to_region\nimport os\nimport sublime\nimport subprocess\n\n\nopening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]\n\n\ndef open_file(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n \"\"\"\n Open a file asynchronously.\n It is only safe to call this function from the UI thread.\n The provided uri MUST be a file URI\n \"\"\"\n file = parse_uri(uri)[1]\n # window.open_file brings the file to focus if it's already opened, which we don't want.\n # So we first check if there's already a view for that file.\n view = window.find_open_file(file)\n if view:\n return Promise.resolve(view)\n\n view = window.open_file(file, flags, group)\n if not view.is_loading():\n # It's already loaded. Possibly already open in a tab.\n return Promise.resolve(view)\n\n # Is the view opening right now? 
Then return the associated unresolved promise\n for fn, value in opening_files.items():\n if fn == file or os.path.samefile(fn, file):\n # Return the unresolved promise. A future on_load event will resolve the promise.\n return value[0]\n\n # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)\n def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:\n global opening_files\n # Save the promise in the first element of the tuple -- except we cannot yet do that here\n opening_files[file] = (None, resolve) # type: ignore\n\n promise = Promise(fullfill)\n tup = opening_files[file]\n # Save the promise in the first element of the tuple so that the for-loop above can return it\n opening_files[file] = (promise, tup[1])\n return promise\n\n\ndef center_selection(v: sublime.View, r: RangeLsp) -> sublime.View:\n selection = range_to_region(Range.from_lsp(r), v)\n v.run_command(\"lsp_selection_set\", {\"regions\": [(selection.a, selection.a)]})\n window = v.window()\n if window:\n window.focus_view(v)\n if int(sublime.version()) >= 4124:\n v.show_at_center(selection, animate=False)\n else:\n # TODO: remove later when a stable build lands\n v.show_at_center(selection) # type: ignore\n return v\n\n\ndef open_externally(uri: str, take_focus: bool) -> bool:\n \"\"\"\n A blocking function that invokes the OS's \"open with default extension\"\n \"\"\"\n try:\n # TODO: handle take_focus\n if sublime.platform() == \"windows\":\n os.startfile(uri) # type: ignore\n elif sublime.platform() == \"osx\":\n subprocess.check_call((\"/usr/bin/open\", uri))\n else: # linux\n subprocess.check_call((\"xdg-open\", uri))\n return True\n except Exception as ex:\n exception_log(\"Failed to open {}\".format(uri), ex)\n return False\n", "path": "plugin/core/open.py"}], "after_files": [{"content": "from .logging import exception_log\nfrom .promise import Promise\nfrom .promise import ResolveFunc\nfrom .protocol import DocumentUri\nfrom .protocol import Range\nfrom .protocol import RangeLsp\nfrom .typing import Dict, Tuple, Optional\nfrom .url import parse_uri\nfrom .views import range_to_region\nimport os\nimport sublime\nimport subprocess\n\n\nopening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]\n\n\ndef open_file(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n \"\"\"\n Open a file asynchronously.\n It is only safe to call this function from the UI thread.\n The provided uri MUST be a file URI\n \"\"\"\n file = parse_uri(uri)[1]\n # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed\n # to open as a separate view).\n view = window.find_open_file(file)\n if view:\n opens_in_current_group = group == -1 or window.active_group() == group\n opens_as_new_selection = (flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)) != 0\n return_existing_view = opens_in_current_group and not opens_as_new_selection\n if return_existing_view:\n return Promise.resolve(view)\n\n view = window.open_file(file, flags, group)\n if not view.is_loading():\n # It's already loaded. Possibly already open in a tab.\n return Promise.resolve(view)\n\n # Is the view opening right now? Then return the associated unresolved promise\n for fn, value in opening_files.items():\n if fn == file or os.path.samefile(fn, file):\n # Return the unresolved promise. 
A future on_load event will resolve the promise.\n return value[0]\n\n # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)\n def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:\n global opening_files\n # Save the promise in the first element of the tuple -- except we cannot yet do that here\n opening_files[file] = (None, resolve) # type: ignore\n\n promise = Promise(fullfill)\n tup = opening_files[file]\n # Save the promise in the first element of the tuple so that the for-loop above can return it\n opening_files[file] = (promise, tup[1])\n return promise\n\n\ndef center_selection(v: sublime.View, r: RangeLsp) -> sublime.View:\n selection = range_to_region(Range.from_lsp(r), v)\n v.run_command(\"lsp_selection_set\", {\"regions\": [(selection.a, selection.a)]})\n window = v.window()\n if window:\n window.focus_view(v)\n if int(sublime.version()) >= 4124:\n v.show_at_center(selection, animate=False)\n else:\n # TODO: remove later when a stable build lands\n v.show_at_center(selection) # type: ignore\n return v\n\n\ndef open_externally(uri: str, take_focus: bool) -> bool:\n \"\"\"\n A blocking function that invokes the OS's \"open with default extension\"\n \"\"\"\n try:\n # TODO: handle take_focus\n if sublime.platform() == \"windows\":\n os.startfile(uri) # type: ignore\n elif sublime.platform() == \"osx\":\n subprocess.check_call((\"/usr/bin/open\", uri))\n else: # linux\n subprocess.check_call((\"xdg-open\", uri))\n return True\n except Exception as ex:\n exception_log(\"Failed to open {}\".format(uri), ex)\n return False\n", "path": "plugin/core/open.py"}]}
| 1,490 | 258 |
gh_patches_debug_1713 | rasdani/github-patches | git_diff | pallets__werkzeug-1032 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTML text can vanish in the Werkzeug interactive debugger
(I feel like I might have raised this bug before, but I can’t find it if I have.)
If you’re using the interactive debugger and you type a command includes something that looks like an HTML tag, it gets treated as literal HTML text. This causes it to disappear once you’ve finished the command, which makes for an inconsistent history.
Here’s a simple repro:

The HTML tag should continue to be visible after executing your command.
Python/Werkzeug versions, although I know I’ve seen this on older versions too:
```console
$ python --version
Python 3.5.0
$ pip freeze | grep Werkzeug
Werkzeug==0.11.10
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `werkzeug/debug/console.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 werkzeug.debug.console
4 ~~~~~~~~~~~~~~~~~~~~~~
5
6 Interactive console support.
7
8 :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
9 :license: BSD.
10 """
11 import sys
12 import code
13 from types import CodeType
14
15 from werkzeug.utils import escape
16 from werkzeug.local import Local
17 from werkzeug.debug.repr import debug_repr, dump, helper
18
19
20 _local = Local()
21
22
23 class HTMLStringO(object):
24
25 """A StringO version that HTML escapes on write."""
26
27 def __init__(self):
28 self._buffer = []
29
30 def isatty(self):
31 return False
32
33 def close(self):
34 pass
35
36 def flush(self):
37 pass
38
39 def seek(self, n, mode=0):
40 pass
41
42 def readline(self):
43 if len(self._buffer) == 0:
44 return ''
45 ret = self._buffer[0]
46 del self._buffer[0]
47 return ret
48
49 def reset(self):
50 val = ''.join(self._buffer)
51 del self._buffer[:]
52 return val
53
54 def _write(self, x):
55 if isinstance(x, bytes):
56 x = x.decode('utf-8', 'replace')
57 self._buffer.append(x)
58
59 def write(self, x):
60 self._write(escape(x))
61
62 def writelines(self, x):
63 self._write(escape(''.join(x)))
64
65
66 class ThreadedStream(object):
67
68 """Thread-local wrapper for sys.stdout for the interactive console."""
69
70 def push():
71 if not isinstance(sys.stdout, ThreadedStream):
72 sys.stdout = ThreadedStream()
73 _local.stream = HTMLStringO()
74 push = staticmethod(push)
75
76 def fetch():
77 try:
78 stream = _local.stream
79 except AttributeError:
80 return ''
81 return stream.reset()
82 fetch = staticmethod(fetch)
83
84 def displayhook(obj):
85 try:
86 stream = _local.stream
87 except AttributeError:
88 return _displayhook(obj)
89 # stream._write bypasses escaping as debug_repr is
90 # already generating HTML for us.
91 if obj is not None:
92 _local._current_ipy.locals['_'] = obj
93 stream._write(debug_repr(obj))
94 displayhook = staticmethod(displayhook)
95
96 def __setattr__(self, name, value):
97 raise AttributeError('read only attribute %s' % name)
98
99 def __dir__(self):
100 return dir(sys.__stdout__)
101
102 def __getattribute__(self, name):
103 if name == '__members__':
104 return dir(sys.__stdout__)
105 try:
106 stream = _local.stream
107 except AttributeError:
108 stream = sys.__stdout__
109 return getattr(stream, name)
110
111 def __repr__(self):
112 return repr(sys.__stdout__)
113
114
115 # add the threaded stream as display hook
116 _displayhook = sys.displayhook
117 sys.displayhook = ThreadedStream.displayhook
118
119
120 class _ConsoleLoader(object):
121
122 def __init__(self):
123 self._storage = {}
124
125 def register(self, code, source):
126 self._storage[id(code)] = source
127 # register code objects of wrapped functions too.
128 for var in code.co_consts:
129 if isinstance(var, CodeType):
130 self._storage[id(var)] = source
131
132 def get_source_by_code(self, code):
133 try:
134 return self._storage[id(code)]
135 except KeyError:
136 pass
137
138
139 def _wrap_compiler(console):
140 compile = console.compile
141
142 def func(source, filename, symbol):
143 code = compile(source, filename, symbol)
144 console.loader.register(code, source)
145 return code
146 console.compile = func
147
148
149 class _InteractiveConsole(code.InteractiveInterpreter):
150
151 def __init__(self, globals, locals):
152 code.InteractiveInterpreter.__init__(self, locals)
153 self.globals = dict(globals)
154 self.globals['dump'] = dump
155 self.globals['help'] = helper
156 self.globals['__loader__'] = self.loader = _ConsoleLoader()
157 self.more = False
158 self.buffer = []
159 _wrap_compiler(self)
160
161 def runsource(self, source):
162 source = source.rstrip() + '\n'
163 ThreadedStream.push()
164 prompt = self.more and '... ' or '>>> '
165 try:
166 source_to_eval = ''.join(self.buffer + [source])
167 if code.InteractiveInterpreter.runsource(self,
168 source_to_eval, '<debugger>', 'single'):
169 self.more = True
170 self.buffer.append(source)
171 else:
172 self.more = False
173 del self.buffer[:]
174 finally:
175 output = ThreadedStream.fetch()
176 return prompt + source + output
177
178 def runcode(self, code):
179 try:
180 eval(code, self.globals, self.locals)
181 except Exception:
182 self.showtraceback()
183
184 def showtraceback(self):
185 from werkzeug.debug.tbtools import get_current_traceback
186 tb = get_current_traceback(skip=1)
187 sys.stdout._write(tb.render_summary())
188
189 def showsyntaxerror(self, filename=None):
190 from werkzeug.debug.tbtools import get_current_traceback
191 tb = get_current_traceback(skip=4)
192 sys.stdout._write(tb.render_summary())
193
194 def write(self, data):
195 sys.stdout.write(data)
196
197
198 class Console(object):
199
200 """An interactive console."""
201
202 def __init__(self, globals=None, locals=None):
203 if locals is None:
204 locals = {}
205 if globals is None:
206 globals = {}
207 self._ipy = _InteractiveConsole(globals, locals)
208
209 def eval(self, code):
210 _local._current_ipy = self._ipy
211 old_sys_stdout = sys.stdout
212 try:
213 return self._ipy.runsource(code)
214 finally:
215 sys.stdout = old_sys_stdout
216
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/werkzeug/debug/console.py b/werkzeug/debug/console.py
--- a/werkzeug/debug/console.py
+++ b/werkzeug/debug/console.py
@@ -173,7 +173,7 @@
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
- return prompt + source + output
+ return prompt + escape(source) + output
def runcode(self, code):
try:
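The one-line change escapes the echoed source with the same `escape` helper that `HTMLStringO` already applies to program output, so input that looks like markup stays visible in the rendered history. A minimal sketch of the difference, using `html.escape` from the standard library as a stand-in for `werkzeug.utils.escape` (their handling of angle brackets is the same):

```python
from html import escape  # stand-in for werkzeug.utils.escape in this sketch

prompt = ">>> "
source = "<p>hello world</p>"

unescaped = prompt + source        # the browser parses <p>...</p> as markup
escaped = prompt + escape(source)  # the browser shows the literal characters

print(unescaped)  # >>> <p>hello world</p>
print(escaped)    # >>> &lt;p&gt;hello world&lt;/p&gt;
```

Only the escaped form survives as visible text once the console history is rendered, which is the behaviour the issue asks for.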
|
{"golden_diff": "diff --git a/werkzeug/debug/console.py b/werkzeug/debug/console.py\n--- a/werkzeug/debug/console.py\n+++ b/werkzeug/debug/console.py\n@@ -173,7 +173,7 @@\n del self.buffer[:]\n finally:\n output = ThreadedStream.fetch()\n- return prompt + source + output\n+ return prompt + escape(source) + output\n \n def runcode(self, code):\n try:\n", "issue": "HTML text can vanish in the Werkzeug interactive debugger\n(I feel like I might have raised this bug before, but I can\u2019t find it if I have.)\r\n\r\nIf you\u2019re using the interactive debugger and you type a command includes something that looks like an HTML tag, it gets treated as literal HTML text. This causes it to disappear once you\u2019ve finished the command, which makes for an inconsistent history.\r\n\r\nHere\u2019s a simple repro:\r\n\r\n\r\n\r\nThe HTML tag should continue to be visible after executing your command.\r\n\r\nPython/Werkzeug versions, although I know I\u2019ve seen this on older versions too:\r\n\r\n```console\r\n$ python --version\r\nPython 3.5.0\r\n\r\n$ pip freeze | grep Werkzeug\r\nWerkzeug==0.11.10\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.debug.console\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Interactive console support.\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD.\n\"\"\"\nimport sys\nimport code\nfrom types import CodeType\n\nfrom werkzeug.utils import escape\nfrom werkzeug.local import Local\nfrom werkzeug.debug.repr import debug_repr, dump, helper\n\n\n_local = Local()\n\n\nclass HTMLStringO(object):\n\n \"\"\"A StringO version that HTML escapes on write.\"\"\"\n\n def __init__(self):\n self._buffer = []\n\n def isatty(self):\n return False\n\n def close(self):\n pass\n\n def flush(self):\n pass\n\n def seek(self, n, mode=0):\n pass\n\n def readline(self):\n if len(self._buffer) == 0:\n return ''\n ret = self._buffer[0]\n del self._buffer[0]\n return ret\n\n def reset(self):\n val = ''.join(self._buffer)\n del self._buffer[:]\n return val\n\n def _write(self, x):\n if isinstance(x, bytes):\n x = x.decode('utf-8', 'replace')\n self._buffer.append(x)\n\n def write(self, x):\n self._write(escape(x))\n\n def writelines(self, x):\n self._write(escape(''.join(x)))\n\n\nclass ThreadedStream(object):\n\n \"\"\"Thread-local wrapper for sys.stdout for the interactive console.\"\"\"\n\n def push():\n if not isinstance(sys.stdout, ThreadedStream):\n sys.stdout = ThreadedStream()\n _local.stream = HTMLStringO()\n push = staticmethod(push)\n\n def fetch():\n try:\n stream = _local.stream\n except AttributeError:\n return ''\n return stream.reset()\n fetch = staticmethod(fetch)\n\n def displayhook(obj):\n try:\n stream = _local.stream\n except AttributeError:\n return _displayhook(obj)\n # stream._write bypasses escaping as debug_repr is\n # already generating HTML for us.\n if obj is not None:\n _local._current_ipy.locals['_'] = obj\n stream._write(debug_repr(obj))\n displayhook = staticmethod(displayhook)\n\n def __setattr__(self, name, value):\n raise AttributeError('read only attribute %s' % name)\n\n def __dir__(self):\n return dir(sys.__stdout__)\n\n def __getattribute__(self, name):\n if name == '__members__':\n return dir(sys.__stdout__)\n try:\n stream = _local.stream\n except AttributeError:\n stream = sys.__stdout__\n return getattr(stream, name)\n\n def __repr__(self):\n return repr(sys.__stdout__)\n\n\n# add the threaded stream as display hook\n_displayhook = sys.displayhook\nsys.displayhook = 
ThreadedStream.displayhook\n\n\nclass _ConsoleLoader(object):\n\n def __init__(self):\n self._storage = {}\n\n def register(self, code, source):\n self._storage[id(code)] = source\n # register code objects of wrapped functions too.\n for var in code.co_consts:\n if isinstance(var, CodeType):\n self._storage[id(var)] = source\n\n def get_source_by_code(self, code):\n try:\n return self._storage[id(code)]\n except KeyError:\n pass\n\n\ndef _wrap_compiler(console):\n compile = console.compile\n\n def func(source, filename, symbol):\n code = compile(source, filename, symbol)\n console.loader.register(code, source)\n return code\n console.compile = func\n\n\nclass _InteractiveConsole(code.InteractiveInterpreter):\n\n def __init__(self, globals, locals):\n code.InteractiveInterpreter.__init__(self, locals)\n self.globals = dict(globals)\n self.globals['dump'] = dump\n self.globals['help'] = helper\n self.globals['__loader__'] = self.loader = _ConsoleLoader()\n self.more = False\n self.buffer = []\n _wrap_compiler(self)\n\n def runsource(self, source):\n source = source.rstrip() + '\\n'\n ThreadedStream.push()\n prompt = self.more and '... ' or '>>> '\n try:\n source_to_eval = ''.join(self.buffer + [source])\n if code.InteractiveInterpreter.runsource(self,\n source_to_eval, '<debugger>', 'single'):\n self.more = True\n self.buffer.append(source)\n else:\n self.more = False\n del self.buffer[:]\n finally:\n output = ThreadedStream.fetch()\n return prompt + source + output\n\n def runcode(self, code):\n try:\n eval(code, self.globals, self.locals)\n except Exception:\n self.showtraceback()\n\n def showtraceback(self):\n from werkzeug.debug.tbtools import get_current_traceback\n tb = get_current_traceback(skip=1)\n sys.stdout._write(tb.render_summary())\n\n def showsyntaxerror(self, filename=None):\n from werkzeug.debug.tbtools import get_current_traceback\n tb = get_current_traceback(skip=4)\n sys.stdout._write(tb.render_summary())\n\n def write(self, data):\n sys.stdout.write(data)\n\n\nclass Console(object):\n\n \"\"\"An interactive console.\"\"\"\n\n def __init__(self, globals=None, locals=None):\n if locals is None:\n locals = {}\n if globals is None:\n globals = {}\n self._ipy = _InteractiveConsole(globals, locals)\n\n def eval(self, code):\n _local._current_ipy = self._ipy\n old_sys_stdout = sys.stdout\n try:\n return self._ipy.runsource(code)\n finally:\n sys.stdout = old_sys_stdout\n", "path": "werkzeug/debug/console.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.debug.console\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Interactive console support.\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD.\n\"\"\"\nimport sys\nimport code\nfrom types import CodeType\n\nfrom werkzeug.utils import escape\nfrom werkzeug.local import Local\nfrom werkzeug.debug.repr import debug_repr, dump, helper\n\n\n_local = Local()\n\n\nclass HTMLStringO(object):\n\n \"\"\"A StringO version that HTML escapes on write.\"\"\"\n\n def __init__(self):\n self._buffer = []\n\n def isatty(self):\n return False\n\n def close(self):\n pass\n\n def flush(self):\n pass\n\n def seek(self, n, mode=0):\n pass\n\n def readline(self):\n if len(self._buffer) == 0:\n return ''\n ret = self._buffer[0]\n del self._buffer[0]\n return ret\n\n def reset(self):\n val = ''.join(self._buffer)\n del self._buffer[:]\n return val\n\n def _write(self, x):\n if isinstance(x, bytes):\n x = x.decode('utf-8', 'replace')\n self._buffer.append(x)\n\n def write(self, x):\n 
self._write(escape(x))\n\n def writelines(self, x):\n self._write(escape(''.join(x)))\n\n\nclass ThreadedStream(object):\n\n \"\"\"Thread-local wrapper for sys.stdout for the interactive console.\"\"\"\n\n def push():\n if not isinstance(sys.stdout, ThreadedStream):\n sys.stdout = ThreadedStream()\n _local.stream = HTMLStringO()\n push = staticmethod(push)\n\n def fetch():\n try:\n stream = _local.stream\n except AttributeError:\n return ''\n return stream.reset()\n fetch = staticmethod(fetch)\n\n def displayhook(obj):\n try:\n stream = _local.stream\n except AttributeError:\n return _displayhook(obj)\n # stream._write bypasses escaping as debug_repr is\n # already generating HTML for us.\n if obj is not None:\n _local._current_ipy.locals['_'] = obj\n stream._write(debug_repr(obj))\n displayhook = staticmethod(displayhook)\n\n def __setattr__(self, name, value):\n raise AttributeError('read only attribute %s' % name)\n\n def __dir__(self):\n return dir(sys.__stdout__)\n\n def __getattribute__(self, name):\n if name == '__members__':\n return dir(sys.__stdout__)\n try:\n stream = _local.stream\n except AttributeError:\n stream = sys.__stdout__\n return getattr(stream, name)\n\n def __repr__(self):\n return repr(sys.__stdout__)\n\n\n# add the threaded stream as display hook\n_displayhook = sys.displayhook\nsys.displayhook = ThreadedStream.displayhook\n\n\nclass _ConsoleLoader(object):\n\n def __init__(self):\n self._storage = {}\n\n def register(self, code, source):\n self._storage[id(code)] = source\n # register code objects of wrapped functions too.\n for var in code.co_consts:\n if isinstance(var, CodeType):\n self._storage[id(var)] = source\n\n def get_source_by_code(self, code):\n try:\n return self._storage[id(code)]\n except KeyError:\n pass\n\n\ndef _wrap_compiler(console):\n compile = console.compile\n\n def func(source, filename, symbol):\n code = compile(source, filename, symbol)\n console.loader.register(code, source)\n return code\n console.compile = func\n\n\nclass _InteractiveConsole(code.InteractiveInterpreter):\n\n def __init__(self, globals, locals):\n code.InteractiveInterpreter.__init__(self, locals)\n self.globals = dict(globals)\n self.globals['dump'] = dump\n self.globals['help'] = helper\n self.globals['__loader__'] = self.loader = _ConsoleLoader()\n self.more = False\n self.buffer = []\n _wrap_compiler(self)\n\n def runsource(self, source):\n source = source.rstrip() + '\\n'\n ThreadedStream.push()\n prompt = self.more and '... 
' or '>>> '\n try:\n source_to_eval = ''.join(self.buffer + [source])\n if code.InteractiveInterpreter.runsource(self,\n source_to_eval, '<debugger>', 'single'):\n self.more = True\n self.buffer.append(source)\n else:\n self.more = False\n del self.buffer[:]\n finally:\n output = ThreadedStream.fetch()\n return prompt + escape(source) + output\n\n def runcode(self, code):\n try:\n eval(code, self.globals, self.locals)\n except Exception:\n self.showtraceback()\n\n def showtraceback(self):\n from werkzeug.debug.tbtools import get_current_traceback\n tb = get_current_traceback(skip=1)\n sys.stdout._write(tb.render_summary())\n\n def showsyntaxerror(self, filename=None):\n from werkzeug.debug.tbtools import get_current_traceback\n tb = get_current_traceback(skip=4)\n sys.stdout._write(tb.render_summary())\n\n def write(self, data):\n sys.stdout.write(data)\n\n\nclass Console(object):\n\n \"\"\"An interactive console.\"\"\"\n\n def __init__(self, globals=None, locals=None):\n if locals is None:\n locals = {}\n if globals is None:\n globals = {}\n self._ipy = _InteractiveConsole(globals, locals)\n\n def eval(self, code):\n _local._current_ipy = self._ipy\n old_sys_stdout = sys.stdout\n try:\n return self._ipy.runsource(code)\n finally:\n sys.stdout = old_sys_stdout\n", "path": "werkzeug/debug/console.py"}]}
| 2,300 | 98 |
gh_patches_debug_19667
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-1383
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[UI idea] Group customization pages within for-teachers url
**Idea incl level**
Currently the class customization page has a dedicated URL at `/customize-class`. Due to the commit https://github.com/Felienne/hedy/commit/42ab2641d2c26a101a870371e7c16f0de8729439 we automatically extract the current page from the URL and display this in the menu bar as active. As the `/customize-class` page has no menu option of its own, no active page is shown. Because the page belongs conceptually to the `/for-teachers` section, it would be nice to show this page as active. We can solve this issue by making it a sub-URL of `/for-teachers`, such as `/for-teachers/customize-class`, a structure that can also be used for possible future pages belonging to the teacher section, such as `/customize-student`.
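
To make the idea concrete, here is a minimal sketch of what the grouped routes could look like. This is illustrative Flask code only, not the actual Hedy implementation; the `customize_student` route and the handler names are assumptions.

```python
from flask import Flask

app = Flask(__name__)

# Grouping the customization pages under the /for-teachers prefix means the
# menu logic that derives the active page from the first URL segment will
# automatically highlight "for-teachers" for these pages as well.
@app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])
def customize_class(class_id):
    return f'customize class {class_id}'

# A possible future page following the same structure (hypothetical).
@app.route('/for-teachers/customize-student/<student_id>', methods=['GET'])
def customize_student(student_id):
    return f'customize student {student_id}'
```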
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/teacher.py`
Content:
```
1 from website.auth import requires_login, is_teacher, current_user
2 import utils
3 import uuid
4 from flask import g, request, jsonify, redirect
5 from flask_helpers import render_template
6 import os
7 import hedyweb
8 import hedy_content
9 TRANSLATIONS = hedyweb.Translations ()
10 from config import config
11 cookie_name = config ['session'] ['cookie_name']
12
13 def routes (app, database):
14 global DATABASE
15 DATABASE = database
16
17 @app.route('/classes', methods=['GET'])
18 @requires_login
19 def get_classes (user):
20 if not is_teacher(user):
21 return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))
22 return jsonify (DATABASE.get_teacher_classes (user ['username'], True))
23
24 @app.route('/class/<class_id>', methods=['GET'])
25 @requires_login
26 def get_class (user, class_id):
27 app.logger.info('This is info output')
28 if not is_teacher(user):
29 return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))
30 Class = DATABASE.get_class (class_id)
31 if not Class or Class ['teacher'] != user ['username']:
32 return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))
33 students = []
34 for student_username in Class.get ('students', []):
35 student = DATABASE.user_by_username (student_username)
36 programs = DATABASE.programs_for_user(student_username)
37 highest_level = max(program['level'] for program in programs) if len(programs) else 0
38 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
39 if sorted_public_programs:
40 latest_shared = sorted_public_programs[-1]
41 latest_shared['link'] = os.getenv ('BASE_URL') + f"/hedy/{latest_shared['id']}/view"
42 else:
43 latest_shared = None
44 students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
45
46 if utils.is_testing_request (request):
47 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
48 return render_template ('class-overview.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), current_page='for-teachers', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
49
50 @app.route('/class', methods=['POST'])
51 @requires_login
52 def create_class (user):
53 if not is_teacher(user):
54 return 'Only teachers can create classes', 403
55
56 body = request.json
57 # Validations
58 if not isinstance(body, dict):
59 return 'body must be an object', 400
60 if not isinstance(body.get('name'), str):
61 return 'name must be a string', 400
62
63 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate
64 Classes = DATABASE.get_teacher_classes(user['username'], True)
65 for Class in Classes:
66 if Class['name'] == body['name']:
67 return "duplicate", 200
68
69 Class = {
70 'id': uuid.uuid4().hex,
71 'date': utils.timems (),
72 'teacher': user ['username'],
73 'link': utils.random_id_generator (7),
74 'name': body ['name']
75 }
76
77 DATABASE.store_class (Class)
78
79 return {'id': Class['id']}, 200
80
81 @app.route('/class/<class_id>', methods=['PUT'])
82 @requires_login
83 def update_class (user, class_id):
84 if not is_teacher(user):
85 return 'Only teachers can update classes', 403
86
87 body = request.json
88 # Validations
89 if not isinstance(body, dict):
90 return 'body must be an object', 400
91 if not isinstance(body.get('name'), str):
92 return 'name must be a string', 400
93
94 Class = DATABASE.get_class (class_id)
95 if not Class or Class ['teacher'] != user ['username']:
96 return 'No such class', 404
97
98 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate
99 Classes = DATABASE.get_teacher_classes(user ['username'], True)
100 for Class in Classes:
101 if Class['name'] == body['name']:
102 return "duplicate", 200
103
104 Class = DATABASE.update_class (class_id, body ['name'])
105
106 return {}, 200
107
108 @app.route('/class/<class_id>', methods=['DELETE'])
109 @requires_login
110 def delete_class (user, class_id):
111 Class = DATABASE.get_class (class_id)
112 if not Class or Class ['teacher'] != user ['username']:
113 return 'No such class', 404
114
115 DATABASE.delete_class (Class)
116
117 return {}, 200
118
119 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])
120 def prejoin_class (class_id, link):
121 Class = DATABASE.get_class (class_id)
122 if not Class or Class ['link'] != link:
123 return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))
124 user = {}
125 if request.cookies.get (cookie_name):
126 token = DATABASE.get_token(request.cookies.get (cookie_name))
127 if token:
128 if token ['username'] in Class.get ('students', []):
129 return render_template ('class-already-joined.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), current_page='my-profile', class_info={'name': Class ['name']})
130 user = DATABASE.user_by_username(token ['username'])
131
132 return render_template ('class-prejoin.html',
133 auth=TRANSLATIONS.get_translations (g.lang, 'Auth'),
134 current_page='my-profile',
135 class_info={
136 'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + g.lang,
137 'name': Class ['name'],
138 })
139
140 @app.route('/class/<class_id>/join/<link>', methods=['GET'])
141 @requires_login
142 def join_class (user, class_id, link):
143 Class = DATABASE.get_class (class_id)
144 if not Class or Class ['link'] != link:
145 return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))
146
147 DATABASE.add_student_to_class (Class ['id'], user ['username'])
148
149 return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)
150
151 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])
152 @requires_login
153 def leave_class (user, class_id, student_id):
154
155 Class = DATABASE.get_class (class_id)
156 if not Class or Class ['teacher'] != user ['username']:
157 return 'No such class', 404
158
159 DATABASE.remove_student_from_class (Class ['id'], student_id)
160
161 return {}, 200
162
163 @app.route('/customize-class/<class_id>', methods=['GET'])
164 @requires_login
165 def get_class_info(user, class_id):
166 if not is_teacher(user):
167 return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))
168 Class = DATABASE.get_class(class_id)
169 if not Class or Class['teacher'] != user['username']:
170 return utils.page_404(TRANSLATIONS, current_user()['username'], g.lang,
171 TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))
172
173 if hedy_content.Adventures(g.lang).has_adventures():
174 adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()
175 else:
176 adventures = hedy_content.Adventures("en").get_adventure_keyname_name_levels()
177 levels = hedy_content.LevelDefaults(g.lang).levels
178 preferences = DATABASE.get_customizations_class(class_id)
179
180 return render_template('customize-class.html', auth=TRANSLATIONS.get_translations(g.lang, 'Auth'),
181 ui=TRANSLATIONS.get_translations(g.lang, 'ui'),
182 class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,
183 adventures=adventures, preferences=preferences, current_page='for-teachers')
184
185 @app.route('/customize-class/<class_id>', methods=['PUT'])
186 @requires_login
187 def update_level_preferences(user, class_id):
188 if not is_teacher(user):
189 return 'Only teachers can update class preferences', 403
190
191 body = request.json
192 print(body)
193 # Validations
194 if not isinstance(body, dict):
195 return 'body must be an object', 400
196 if not isinstance(body.get('example_programs'), bool):
197 return 'amount of example programs must be an integer', 400
198 if not isinstance(body.get('hide_level'), bool):
199 return 'level switch must be a boolean', 400
200 if not isinstance(body.get('hide_prev_level'), bool):
201 return 'level switch must be a boolean', 400
202 if not isinstance(body.get('hide_next_level'), bool):
203 return 'level switch must be a boolean', 400
204 if not isinstance(int(body.get('level')), int):
205 return 'level must ben an integer', 400
206
207 Class = DATABASE.get_class(class_id)
208 if not Class or Class['teacher'] != user['username']:
209 return 'No such class', 404
210
211 customizations = {}
212 customizations['id'] = class_id
213 customizations['level'] = int(body.get('level'))
214 customizations['adventures'] = body.get('adventures')
215 customizations['example_programs'] = body.get('example_programs')
216 customizations['hide'] = body.get('hide_level')
217 customizations['hide_prev_level'] = body.get('hide_prev_level')
218 customizations['hide_next_level'] = body.get('hide_next_level')
219
220
221 Class = DATABASE.update_customizations_class(customizations)
222
223 return {}, 200
224
225 @app.route('/hedy/l/<link_id>', methods=['GET'])
226 def resolve_class_link (link_id):
227 Class = DATABASE.resolve_class_link (link_id)
228 if not Class:
229 return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))
230 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/teacher.py b/website/teacher.py
--- a/website/teacher.py
+++ b/website/teacher.py
@@ -21,7 +21,7 @@
return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))
return jsonify (DATABASE.get_teacher_classes (user ['username'], True))
- @app.route('/class/<class_id>', methods=['GET'])
+ @app.route('/for-teachers/class/<class_id>', methods=['GET'])
@requires_login
def get_class (user, class_id):
app.logger.info('This is info output')
@@ -160,7 +160,7 @@
return {}, 200
- @app.route('/customize-class/<class_id>', methods=['GET'])
+ @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])
@requires_login
def get_class_info(user, class_id):
if not is_teacher(user):
|
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -21,7 +21,7 @@\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n \n- @app.route('/class/<class_id>', methods=['GET'])\n+ @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n@@ -160,7 +160,7 @@\n \n return {}, 200\n \n- @app.route('/customize-class/<class_id>', methods=['GET'])\n+ @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n", "issue": "[UI idea] Group customization pages within for-teachers url\n**Idea incl level**\r\nCurrently the class customization page has a dedicated url at `/customize-class`. Due to the commit https://github.com/Felienne/hedy/commit/42ab2641d2c26a101a870371e7c16f0de8729439 we automatically extract the current page from the url and display this in the menu bar as active. As the `/customize-class` page has no own menu option no active page is shown. Because the page belongs conceptually to the `/for-teachers` section it would be nice to show this page as active. We can solve this issue by making it a sub url of `/for-teachers` such as `/for-teachers/customize-class`. A structure that can also be used for possible future page belonging to the teacher section such as `/customize-student`.\r\n\r\n\n", "before_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database):\n global DATABASE\n DATABASE = database\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = os.getenv ('BASE_URL') + f\"/hedy/{latest_shared['id']}/view\"\n 
else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), current_page='for-teachers', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html',\n auth=TRANSLATIONS.get_translations (g.lang, 'Auth'),\n current_page='my-profile',\n 
class_info={\n 'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + g.lang,\n 'name': Class ['name'],\n })\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class (user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 200\n\n @app.route('/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.page_404(TRANSLATIONS, current_user()['username'], g.lang,\n TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', auth=TRANSLATIONS.get_translations(g.lang, 'Auth'),\n ui=TRANSLATIONS.get_translations(g.lang, 'ui'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='for-teachers')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] = class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n customizations['hide_prev_level'] = body.get('hide_prev_level')\n 
customizations['hide_next_level'] = body.get('hide_next_level')\n\n\n Class = DATABASE.update_customizations_class(customizations)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}], "after_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database):\n global DATABASE\n DATABASE = database\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = os.getenv ('BASE_URL') + f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), current_page='for-teachers', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a 
string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', auth=TRANSLATIONS.get_translations (g.lang, 'Auth'), current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html',\n auth=TRANSLATIONS.get_translations (g.lang, 'Auth'),\n current_page='my-profile',\n class_info={\n 'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + g.lang,\n 'name': Class ['name'],\n })\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class (user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 
200\n\n @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.page_403 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations (g.lang, 'ui').get ('retrieve_class'))\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.page_404(TRANSLATIONS, current_user()['username'], g.lang,\n TRANSLATIONS.get_translations(g.lang, 'ui').get('no_such_class'))\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', auth=TRANSLATIONS.get_translations(g.lang, 'Auth'),\n ui=TRANSLATIONS.get_translations(g.lang, 'ui'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='for-teachers')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] = class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n customizations['hide_prev_level'] = body.get('hide_prev_level')\n customizations['hide_next_level'] = body.get('hide_next_level')\n\n\n Class = DATABASE.update_customizations_class(customizations)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.page_404 (TRANSLATIONS, current_user()['username'], g.lang, TRANSLATIONS.get_translations(g.lang, 'ui').get('invalid_class_link'))\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
| 3,586 | 238 |
gh_patches_debug_3800
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-7364
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[sample] Error running xgboost_training_cm.py
### Environment
* How did you deploy Kubeflow Pipelines (KFP)? GCP marketplace
<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->
* KFP version: 1.7.1
<!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface.
To find the version number, See version number shows on bottom of KFP UI left sidenav. -->
* KFP SDK version:
<!-- Specify the output of the following shell command: $pip3 list | grep kfp -->
```
kfp 1.8.11
kfp-pipeline-spec 0.1.13
kfp-server-api 1.8.1
```
### Steps to reproduce
<!--
Specify how to reproduce the problem.
This may include information such as: a description of the process, code snippets, log output, or screenshots.
-->
Follow the steps given in the following link to deploy `xgboost_training_cm.py`
https://github.com/kubeflow/pipelines/tree/master/samples/core/xgboost_training_cm
### Expected result
<!-- What should the correct behavior be? -->
An error appears in the `dataproc-create-cluster` step (the second block):
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/ml/kfp_component/launcher/__main__.py", line 45, in <module>
main()
File "/ml/kfp_component/launcher/__main__.py", line 42, in main
launch(args.file_or_module, args.args)
File "/ml/kfp_component/launcher/launcher.py", line 45, in launch
return fire.Fire(module, command=args, name=module.__name__)
File "/usr/local/lib/python3.7/site-packages/fire/core.py", line 127, in Fire
component_trace = _Fire(component, args, context, name)
File "/usr/local/lib/python3.7/site-packages/fire/core.py", line 366, in _Fire
component, remaining_args)
File "/usr/local/lib/python3.7/site-packages/fire/core.py", line 542, in _CallCallable
result = fn(*varargs, **kwargs)
File "/ml/kfp_component/google/dataproc/_create_cluster.py", line 76, in create_cluster
client = DataprocClient()
File "/ml/kfp_component/google/common/_utils.py", line 170, in __init__
self._build_client()
TypeError: _build_client() takes 0 positional arguments but 1 was given
```
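
For context, this TypeError arises when a bound method is called on an instance but its definition is missing the `self` parameter. A minimal, self-contained reproduction is sketched below; the class name is hypothetical and the code is not taken from the `kfp_component` package.

```python
class Client:
    def __init__(self):
        # A bound-method call implicitly passes the instance as the first argument.
        self._build_client()

    def _build_client():  # declared without `self`, so it accepts zero arguments
        pass

try:
    Client()
except TypeError as err:
    # Prints: _build_client() takes 0 positional arguments but 1 was given
    print(err)
```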
### Materials and Reference
<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->
N/A
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍. We prioritise the issues with the most 👍.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py`
Content:
```
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import abc
16 import logging
17 import re
18 import os
19 import time
20 from functools import wraps
21 from typing import Any, Callable, Optional, Tuple
22
23 def normalize_name(name,
24 valid_first_char_pattern='a-zA-Z',
25 valid_char_pattern='0-9a-zA-Z_',
26 invalid_char_placeholder='_',
27 prefix_placeholder='x_'):
28 """Normalize a name to a valid resource name.
29
30 Uses ``valid_first_char_pattern`` and ``valid_char_pattern`` regex pattern
31 to find invalid characters from ``name`` and replaces them with
32 ``invalid_char_placeholder`` or prefix the name with ``prefix_placeholder``.
33
34 Args:
35 name: The name to be normalized.
36 valid_first_char_pattern: The regex pattern for the first character.
37 valid_char_pattern: The regex pattern for all the characters in the name.
38 invalid_char_placeholder: The placeholder to replace invalid characters.
39 prefix_placeholder: The placeholder to prefix the name if the first char
40 is invalid.
41
42 Returns:
43 The normalized name. Unchanged if all characters are valid.
44 """
45 if not name:
46 return name
47 normalized_name = re.sub('[^{}]+'.format(valid_char_pattern),
48 invalid_char_placeholder, name)
49 if not re.match('[{}]'.format(valid_first_char_pattern),
50 normalized_name[0]):
51 normalized_name = prefix_placeholder + normalized_name
52 if name != normalized_name:
53 logging.info('Normalize name from "{}" to "{}".'.format(
54 name, normalized_name))
55 return normalized_name
56
57 def dump_file(path, content):
58 """Dumps string into local file.
59
60 Args:
61 path: the local path to the file.
62 content: the string content to dump.
63 """
64 directory = os.path.dirname(path)
65 if not os.path.exists(directory):
66 os.makedirs(directory)
67 elif os.path.exists(path):
68 logging.warning('The file {} will be overwritten.'.format(path))
69 with open(path, 'w') as f:
70 f.write(content)
71
72 def check_resource_changed(requested_resource,
73 existing_resource, property_names):
74 """Check if a resource has been changed.
75
76 The function checks requested resource with existing resource
77 by comparing specified property names. Check fails if any property
78 name in the list is in ``requested_resource`` but its value is
79 different with the value in ``existing_resource``.
80
81 Args:
82 requested_resource: the user requested resource paylod.
83 existing_resource: the existing resource payload from data storage.
84 property_names: a list of property names.
85
86 Return:
87 True if ``requested_resource`` has been changed.
88 """
89 for property_name in property_names:
90 if not property_name in requested_resource:
91 continue
92 existing_value = existing_resource.get(property_name, None)
93 if requested_resource[property_name] != existing_value:
94 return True
95 return False
96
97 def wait_operation_done(get_operation, wait_interval):
98 """Waits for an operation to be done.
99
100 Args:
101 get_operation: the name of the operation.
102 wait_interval: the wait interview between pulling job
103 status.
104
105 Returns:
106 The completed operation.
107 """
108 while True:
109 operation = get_operation()
110 operation_name = operation.get('name')
111 done = operation.get('done', False)
112 if not done:
113 logging.info('Operation {} is not done. Wait for {}s.'.format(
114 operation_name, wait_interval))
115 time.sleep(wait_interval)
116 continue
117 error = operation.get('error', None)
118 if error:
119 raise RuntimeError('Failed to complete operation {}: {} {}'.format(
120 operation_name,
121 error.get('code', 'Unknown code'),
122 error.get('message', 'Unknown message'),
123 ))
124 return operation
125
126
127 def with_retries(
128 func: Callable,
129 on_error: Optional[Callable[[], Any]] = None,
130 errors: Tuple[Exception, ...] = Exception,
131 number_of_retries: int = 5,
132 delay: float = 1,
133 ):
134 """Retry decorator.
135
136 The decorator catches `errors`, calls `on_error` and retries after waiting `delay` seconds.
137
138 Args:
139 number_of_retries (int): Total number of retries if error is raised.
140 delay (float): Number of seconds to wait between consecutive retries.
141 """
142
143 @wraps(func)
144 def wrapper(self, *args, **kwargs):
145 remaining_retries = number_of_retries
146 while remaining_retries:
147 try:
148 return func(self, *args, **kwargs)
149 except errors as e:
150 remaining_retries -= 1
151 if not remaining_retries:
152 raise
153
154 logging.warning(
155 'Caught {}. Retrying in {} seconds...'.format(
156 e.__class__.__name__, delay
157 )
158 )
159
160 time.sleep(delay)
161 if on_error:
162 on_error()
163
164 return wrapper
165
166
167 class ClientWithRetries:
168
169 def __init__(self):
170 self._build_client()
171 for name, member in self.__dict__.items():
172 if callable(member) and not name.startswith("_"):
173 self.__dict__[name] = with_retries(func=member, errors=(BrokenPipeError, IOError), on_error=self._build_client)
174
175 @abc.abstractmethod
176 def _build_client():
177 raise NotImplementedError()
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py b/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py
--- a/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py
+++ b/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py
@@ -173,5 +173,5 @@
self.__dict__[name] = with_retries(func=member, errors=(BrokenPipeError, IOError), on_error=self._build_client)
@abc.abstractmethod
- def _build_client():
+ def _build_client(self):
raise NotImplementedError()
|
{"golden_diff": "diff --git a/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py b/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py\n--- a/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py\n+++ b/components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py\n@@ -173,5 +173,5 @@\n self.__dict__[name] = with_retries(func=member, errors=(BrokenPipeError, IOError), on_error=self._build_client)\n \n @abc.abstractmethod\n- def _build_client():\n+ def _build_client(self):\n raise NotImplementedError()\n", "issue": "[sample] Error running xgboost_training_cm.py\n### Environment\r\n\r\n* How did you deploy Kubeflow Pipelines (KFP)? GCP marketplace\r\n<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->\r\n* KFP version: 1.7.1\r\n<!-- Specify the version of Kubeflow Pipelines that you are using. The version number appears in the left side navigation of user interface.\r\nTo find the version number, See version number shows on bottom of KFP UI left sidenav. -->\r\n* KFP SDK version: \r\n<!-- Specify the output of the following shell command: $pip3 list | grep kfp -->\r\n```\r\nkfp 1.8.11\r\nkfp-pipeline-spec 0.1.13\r\nkfp-server-api 1.8.1\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n<!--\r\nSpecify how to reproduce the problem. \r\nThis may include information such as: a description of the process, code snippets, log output, or screenshots.\r\n-->\r\nFollow the steps given in the following link to deploy `xgboost_training_cm.py` \r\nhttps://github.com/kubeflow/pipelines/tree/master/samples/core/xgboost_training_cm\r\n\r\n### Expected result\r\n\r\n<!-- What should the correct behavior be? -->\r\nThere shows an error with `dataproc-create-cluster` (the second block)\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/lib/python3.7/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/ml/kfp_component/launcher/__main__.py\", line 45, in <module>\r\n main()\r\n File \"/ml/kfp_component/launcher/__main__.py\", line 42, in main\r\n launch(args.file_or_module, args.args)\r\n File \"/ml/kfp_component/launcher/launcher.py\", line 45, in launch\r\n return fire.Fire(module, command=args, name=module.__name__)\r\n File \"/usr/local/lib/python3.7/site-packages/fire/core.py\", line 127, in Fire\r\n component_trace = _Fire(component, args, context, name)\r\n File \"/usr/local/lib/python3.7/site-packages/fire/core.py\", line 366, in _Fire\r\n component, remaining_args)\r\n File \"/usr/local/lib/python3.7/site-packages/fire/core.py\", line 542, in _CallCallable\r\n result = fn(*varargs, **kwargs)\r\n File \"/ml/kfp_component/google/dataproc/_create_cluster.py\", line 76, in create_cluster\r\n client = DataprocClient()\r\n File \"/ml/kfp_component/google/common/_utils.py\", line 170, in __init__\r\n self._build_client()\r\nTypeError: _build_client() takes 0 positional arguments but 1 was given\r\n```\r\n\r\n### Materials and Reference\r\n\r\n<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->\r\nN/A\r\n\r\n---\r\n\r\n<!-- Don't delete message below to encourage users to support your issue! -->\r\nImpacted by this bug? Give it a \ud83d\udc4d. 
We prioritise the issues with the most \ud83d\udc4d.\r\n\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nimport logging\nimport re\nimport os\nimport time\nfrom functools import wraps\nfrom typing import Any, Callable, Optional, Tuple\n\ndef normalize_name(name,\n valid_first_char_pattern='a-zA-Z',\n valid_char_pattern='0-9a-zA-Z_',\n invalid_char_placeholder='_',\n prefix_placeholder='x_'):\n \"\"\"Normalize a name to a valid resource name.\n\n Uses ``valid_first_char_pattern`` and ``valid_char_pattern`` regex pattern\n to find invalid characters from ``name`` and replaces them with \n ``invalid_char_placeholder`` or prefix the name with ``prefix_placeholder``.\n\n Args:\n name: The name to be normalized.\n valid_first_char_pattern: The regex pattern for the first character.\n valid_char_pattern: The regex pattern for all the characters in the name.\n invalid_char_placeholder: The placeholder to replace invalid characters.\n prefix_placeholder: The placeholder to prefix the name if the first char \n is invalid.\n \n Returns:\n The normalized name. Unchanged if all characters are valid.\n \"\"\"\n if not name:\n return name\n normalized_name = re.sub('[^{}]+'.format(valid_char_pattern), \n invalid_char_placeholder, name)\n if not re.match('[{}]'.format(valid_first_char_pattern), \n normalized_name[0]):\n normalized_name = prefix_placeholder + normalized_name\n if name != normalized_name:\n logging.info('Normalize name from \"{}\" to \"{}\".'.format(\n name, normalized_name))\n return normalized_name\n\ndef dump_file(path, content):\n \"\"\"Dumps string into local file.\n\n Args:\n path: the local path to the file.\n content: the string content to dump.\n \"\"\"\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n elif os.path.exists(path):\n logging.warning('The file {} will be overwritten.'.format(path))\n with open(path, 'w') as f:\n f.write(content)\n\ndef check_resource_changed(requested_resource, \n existing_resource, property_names):\n \"\"\"Check if a resource has been changed.\n\n The function checks requested resource with existing resource\n by comparing specified property names. 
Check fails if any property\n name in the list is in ``requested_resource`` but its value is\n different with the value in ``existing_resource``.\n\n Args:\n requested_resource: the user requested resource paylod.\n existing_resource: the existing resource payload from data storage.\n property_names: a list of property names.\n\n Return:\n True if ``requested_resource`` has been changed.\n \"\"\"\n for property_name in property_names:\n if not property_name in requested_resource:\n continue\n existing_value = existing_resource.get(property_name, None)\n if requested_resource[property_name] != existing_value:\n return True\n return False\n\ndef wait_operation_done(get_operation, wait_interval):\n \"\"\"Waits for an operation to be done.\n\n Args:\n get_operation: the name of the operation.\n wait_interval: the wait interview between pulling job\n status.\n\n Returns:\n The completed operation.\n \"\"\"\n while True:\n operation = get_operation()\n operation_name = operation.get('name')\n done = operation.get('done', False)\n if not done:\n logging.info('Operation {} is not done. Wait for {}s.'.format(\n operation_name, wait_interval))\n time.sleep(wait_interval)\n continue\n error = operation.get('error', None)\n if error:\n raise RuntimeError('Failed to complete operation {}: {} {}'.format(\n operation_name,\n error.get('code', 'Unknown code'),\n error.get('message', 'Unknown message'),\n ))\n return operation\n\n\ndef with_retries(\n func: Callable,\n on_error: Optional[Callable[[], Any]] = None,\n errors: Tuple[Exception, ...] = Exception,\n number_of_retries: int = 5,\n delay: float = 1,\n):\n \"\"\"Retry decorator.\n\n The decorator catches `errors`, calls `on_error` and retries after waiting `delay` seconds.\n\n Args:\n number_of_retries (int): Total number of retries if error is raised.\n delay (float): Number of seconds to wait between consecutive retries.\n \"\"\"\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n remaining_retries = number_of_retries\n while remaining_retries:\n try:\n return func(self, *args, **kwargs)\n except errors as e:\n remaining_retries -= 1\n if not remaining_retries:\n raise\n\n logging.warning(\n 'Caught {}. 
Retrying in {} seconds...'.format(\n e.__class__.__name__, delay\n )\n )\n\n time.sleep(delay)\n if on_error:\n on_error()\n\n return wrapper\n\n\nclass ClientWithRetries:\n\n def __init__(self):\n self._build_client()\n for name, member in self.__dict__.items():\n if callable(member) and not name.startswith(\"_\"):\n self.__dict__[name] = with_retries(func=member, errors=(BrokenPipeError, IOError), on_error=self._build_client)\n\n @abc.abstractmethod\n def _build_client():\n raise NotImplementedError()\n", "path": "components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py"}], "after_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nimport logging\nimport re\nimport os\nimport time\nfrom functools import wraps\nfrom typing import Any, Callable, Optional, Tuple\n\ndef normalize_name(name,\n valid_first_char_pattern='a-zA-Z',\n valid_char_pattern='0-9a-zA-Z_',\n invalid_char_placeholder='_',\n prefix_placeholder='x_'):\n \"\"\"Normalize a name to a valid resource name.\n\n Uses ``valid_first_char_pattern`` and ``valid_char_pattern`` regex pattern\n to find invalid characters from ``name`` and replaces them with \n ``invalid_char_placeholder`` or prefix the name with ``prefix_placeholder``.\n\n Args:\n name: The name to be normalized.\n valid_first_char_pattern: The regex pattern for the first character.\n valid_char_pattern: The regex pattern for all the characters in the name.\n invalid_char_placeholder: The placeholder to replace invalid characters.\n prefix_placeholder: The placeholder to prefix the name if the first char \n is invalid.\n \n Returns:\n The normalized name. Unchanged if all characters are valid.\n \"\"\"\n if not name:\n return name\n normalized_name = re.sub('[^{}]+'.format(valid_char_pattern), \n invalid_char_placeholder, name)\n if not re.match('[{}]'.format(valid_first_char_pattern), \n normalized_name[0]):\n normalized_name = prefix_placeholder + normalized_name\n if name != normalized_name:\n logging.info('Normalize name from \"{}\" to \"{}\".'.format(\n name, normalized_name))\n return normalized_name\n\ndef dump_file(path, content):\n \"\"\"Dumps string into local file.\n\n Args:\n path: the local path to the file.\n content: the string content to dump.\n \"\"\"\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n elif os.path.exists(path):\n logging.warning('The file {} will be overwritten.'.format(path))\n with open(path, 'w') as f:\n f.write(content)\n\ndef check_resource_changed(requested_resource, \n existing_resource, property_names):\n \"\"\"Check if a resource has been changed.\n\n The function checks requested resource with existing resource\n by comparing specified property names. 
Check fails if any property\n name in the list is in ``requested_resource`` but its value is\n different with the value in ``existing_resource``.\n\n Args:\n requested_resource: the user requested resource paylod.\n existing_resource: the existing resource payload from data storage.\n property_names: a list of property names.\n\n Return:\n True if ``requested_resource`` has been changed.\n \"\"\"\n for property_name in property_names:\n if not property_name in requested_resource:\n continue\n existing_value = existing_resource.get(property_name, None)\n if requested_resource[property_name] != existing_value:\n return True\n return False\n\ndef wait_operation_done(get_operation, wait_interval):\n \"\"\"Waits for an operation to be done.\n\n Args:\n get_operation: the name of the operation.\n wait_interval: the wait interview between pulling job\n status.\n\n Returns:\n The completed operation.\n \"\"\"\n while True:\n operation = get_operation()\n operation_name = operation.get('name')\n done = operation.get('done', False)\n if not done:\n logging.info('Operation {} is not done. Wait for {}s.'.format(\n operation_name, wait_interval))\n time.sleep(wait_interval)\n continue\n error = operation.get('error', None)\n if error:\n raise RuntimeError('Failed to complete operation {}: {} {}'.format(\n operation_name,\n error.get('code', 'Unknown code'),\n error.get('message', 'Unknown message'),\n ))\n return operation\n\n\ndef with_retries(\n func: Callable,\n on_error: Optional[Callable[[], Any]] = None,\n errors: Tuple[Exception, ...] = Exception,\n number_of_retries: int = 5,\n delay: float = 1,\n):\n \"\"\"Retry decorator.\n\n The decorator catches `errors`, calls `on_error` and retries after waiting `delay` seconds.\n\n Args:\n number_of_retries (int): Total number of retries if error is raised.\n delay (float): Number of seconds to wait between consecutive retries.\n \"\"\"\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n remaining_retries = number_of_retries\n while remaining_retries:\n try:\n return func(self, *args, **kwargs)\n except errors as e:\n remaining_retries -= 1\n if not remaining_retries:\n raise\n\n logging.warning(\n 'Caught {}. Retrying in {} seconds...'.format(\n e.__class__.__name__, delay\n )\n )\n\n time.sleep(delay)\n if on_error:\n on_error()\n\n return wrapper\n\n\nclass ClientWithRetries:\n\n def __init__(self):\n self._build_client()\n for name, member in self.__dict__.items():\n if callable(member) and not name.startswith(\"_\"):\n self.__dict__[name] = with_retries(func=member, errors=(BrokenPipeError, IOError), on_error=self._build_client)\n\n @abc.abstractmethod\n def _build_client(self):\n raise NotImplementedError()\n", "path": "components/gcp/container/component_sdk/python/kfp_component/google/common/_utils.py"}]}
| 2,695 | 142 |
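The record above comes down to a missing `self` on an abstract method that the constructor invokes as `self._build_client()`. A small, self-contained sketch of the corrected pattern is below; the names `FakeClient` and `do_request` are illustrative stand-ins, not taken from the repository.

```python
import abc
import logging
import time
from functools import wraps


def with_retries(func, on_error=None, errors=(IOError,), number_of_retries=3, delay=0.1):
    """Retry `func` on the given errors, calling `on_error` between attempts."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        remaining = number_of_retries
        while True:
            try:
                return func(*args, **kwargs)
            except errors:
                remaining -= 1
                if not remaining:
                    raise
                logging.warning("Caught an error, retrying in %s seconds...", delay)
                time.sleep(delay)
                if on_error:
                    on_error()
    return wrapper


class ClientWithRetries(abc.ABC):
    def __init__(self):
        # This call is what raises the issue's TypeError when _build_client lacks `self`.
        self._build_client()
        for name, member in list(self.__dict__.items()):
            if callable(member) and not name.startswith("_"):
                self.__dict__[name] = with_retries(member, on_error=self._build_client)

    @abc.abstractmethod
    def _build_client(self):
        raise NotImplementedError()


class FakeClient(ClientWithRetries):
    """Illustrative stand-in for DataprocClient; not taken from the repository."""

    def _build_client(self):
        # The real component builds a Google API client here; a lambda keeps the sketch runnable.
        self.do_request = lambda: "ok"


print(FakeClient().do_request())  # -> ok
```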
gh_patches_debug_32215
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1633
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RSA] Add functions to recover (p, q) given (n, e, d)
Right now we require (p, q, d, dmp1, dmq1, iqmp, e, n). We provide functions to generate the CRT coefficients, but they assume the user has p & q. To support other valid key material sources we need functions that recover p & q given (n, e, d).
The preferred algorithm to perform this task can be found in Appendix C of [SP-800-56B](http://csrc.nist.gov/publications/nistpubs/800-56B/sp800-56B.pdf).
--- END ISSUE ---
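For orientation, the end state the issue asks for can be sketched as follows. The helper name `rsa_recover_prime_factors` is the one the patch further below introduces, so treat it as an assumption at this point; the CRT helpers and the number classes already exist in the file shown next.

```python
from cryptography.hazmat.primitives.asymmetric.rsa import (
    RSAPrivateNumbers,
    RSAPublicNumbers,
    rsa_crt_dmp1,
    rsa_crt_dmq1,
    rsa_crt_iqmp,
    rsa_recover_prime_factors,  # assumed: added by the proposed change
)


def private_numbers_from_n_e_d(n, e, d):
    # Recover the primes from (n, e, d), then derive the CRT pieces from them.
    p, q = rsa_recover_prime_factors(n, e, d)
    return RSAPrivateNumbers(
        p=p,
        q=q,
        d=d,
        dmp1=rsa_crt_dmp1(d, p),
        dmq1=rsa_crt_dmq1(d, q),
        iqmp=rsa_crt_iqmp(p, q),
        public_numbers=RSAPublicNumbers(e, n),
    )
```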
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/asymmetric/rsa.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import six
8
9 from cryptography import utils
10 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
11 from cryptography.hazmat.backends.interfaces import RSABackend
12
13
14 def generate_private_key(public_exponent, key_size, backend):
15 if not isinstance(backend, RSABackend):
16 raise UnsupportedAlgorithm(
17 "Backend object does not implement RSABackend.",
18 _Reasons.BACKEND_MISSING_INTERFACE
19 )
20
21 _verify_rsa_parameters(public_exponent, key_size)
22 return backend.generate_rsa_private_key(public_exponent, key_size)
23
24
25 def _verify_rsa_parameters(public_exponent, key_size):
26 if public_exponent < 3:
27 raise ValueError("public_exponent must be >= 3.")
28
29 if public_exponent & 1 == 0:
30 raise ValueError("public_exponent must be odd.")
31
32 if key_size < 512:
33 raise ValueError("key_size must be at least 512-bits.")
34
35
36 def _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp,
37 public_exponent, modulus):
38 if modulus < 3:
39 raise ValueError("modulus must be >= 3.")
40
41 if p >= modulus:
42 raise ValueError("p must be < modulus.")
43
44 if q >= modulus:
45 raise ValueError("q must be < modulus.")
46
47 if dmp1 >= modulus:
48 raise ValueError("dmp1 must be < modulus.")
49
50 if dmq1 >= modulus:
51 raise ValueError("dmq1 must be < modulus.")
52
53 if iqmp >= modulus:
54 raise ValueError("iqmp must be < modulus.")
55
56 if private_exponent >= modulus:
57 raise ValueError("private_exponent must be < modulus.")
58
59 if public_exponent < 3 or public_exponent >= modulus:
60 raise ValueError("public_exponent must be >= 3 and < modulus.")
61
62 if public_exponent & 1 == 0:
63 raise ValueError("public_exponent must be odd.")
64
65 if dmp1 & 1 == 0:
66 raise ValueError("dmp1 must be odd.")
67
68 if dmq1 & 1 == 0:
69 raise ValueError("dmq1 must be odd.")
70
71 if p * q != modulus:
72 raise ValueError("p*q must equal modulus.")
73
74
75 def _check_public_key_components(e, n):
76 if n < 3:
77 raise ValueError("n must be >= 3.")
78
79 if e < 3 or e >= n:
80 raise ValueError("e must be >= 3 and < n.")
81
82 if e & 1 == 0:
83 raise ValueError("e must be odd.")
84
85
86 def _modinv(e, m):
87 """
88 Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1
89 """
90 x1, y1, x2, y2 = 1, 0, 0, 1
91 a, b = e, m
92 while b > 0:
93 q, r = divmod(a, b)
94 xn, yn = x1 - q * x2, y1 - q * y2
95 a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn
96 return x1 % m
97
98
99 def rsa_crt_iqmp(p, q):
100 """
101 Compute the CRT (q ** -1) % p value from RSA primes p and q.
102 """
103 return _modinv(q, p)
104
105
106 def rsa_crt_dmp1(private_exponent, p):
107 """
108 Compute the CRT private_exponent % (p - 1) value from the RSA
109 private_exponent and p.
110 """
111 return private_exponent % (p - 1)
112
113
114 def rsa_crt_dmq1(private_exponent, q):
115 """
116 Compute the CRT private_exponent % (q - 1) value from the RSA
117 private_exponent and q.
118 """
119 return private_exponent % (q - 1)
120
121
122 class RSAPrivateNumbers(object):
123 def __init__(self, p, q, d, dmp1, dmq1, iqmp,
124 public_numbers):
125 if (
126 not isinstance(p, six.integer_types) or
127 not isinstance(q, six.integer_types) or
128 not isinstance(d, six.integer_types) or
129 not isinstance(dmp1, six.integer_types) or
130 not isinstance(dmq1, six.integer_types) or
131 not isinstance(iqmp, six.integer_types)
132 ):
133 raise TypeError(
134 "RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must"
135 " all be an integers."
136 )
137
138 if not isinstance(public_numbers, RSAPublicNumbers):
139 raise TypeError(
140 "RSAPrivateNumbers public_numbers must be an RSAPublicNumbers"
141 " instance."
142 )
143
144 self._p = p
145 self._q = q
146 self._d = d
147 self._dmp1 = dmp1
148 self._dmq1 = dmq1
149 self._iqmp = iqmp
150 self._public_numbers = public_numbers
151
152 p = utils.read_only_property("_p")
153 q = utils.read_only_property("_q")
154 d = utils.read_only_property("_d")
155 dmp1 = utils.read_only_property("_dmp1")
156 dmq1 = utils.read_only_property("_dmq1")
157 iqmp = utils.read_only_property("_iqmp")
158 public_numbers = utils.read_only_property("_public_numbers")
159
160 def private_key(self, backend):
161 return backend.load_rsa_private_numbers(self)
162
163 def __eq__(self, other):
164 if not isinstance(other, RSAPrivateNumbers):
165 return NotImplemented
166
167 return (
168 self.p == other.p and
169 self.q == other.q and
170 self.d == other.d and
171 self.dmp1 == other.dmp1 and
172 self.dmq1 == other.dmq1 and
173 self.iqmp == other.iqmp and
174 self.public_numbers == other.public_numbers
175 )
176
177 def __ne__(self, other):
178 return not self == other
179
180
181 class RSAPublicNumbers(object):
182 def __init__(self, e, n):
183 if (
184 not isinstance(e, six.integer_types) or
185 not isinstance(n, six.integer_types)
186 ):
187 raise TypeError("RSAPublicNumbers arguments must be integers.")
188
189 self._e = e
190 self._n = n
191
192 e = utils.read_only_property("_e")
193 n = utils.read_only_property("_n")
194
195 def public_key(self, backend):
196 return backend.load_rsa_public_numbers(self)
197
198 def __repr__(self):
199 return "<RSAPublicNumbers(e={0}, n={1})>".format(self._e, self._n)
200
201 def __eq__(self, other):
202 if not isinstance(other, RSAPublicNumbers):
203 return NotImplemented
204
205 return self.e == other.e and self.n == other.n
206
207 def __ne__(self, other):
208 return not self == other
209
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/hazmat/primitives/asymmetric/rsa.py b/src/cryptography/hazmat/primitives/asymmetric/rsa.py
--- a/src/cryptography/hazmat/primitives/asymmetric/rsa.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/rsa.py
@@ -4,6 +4,8 @@
from __future__ import absolute_import, division, print_function
+from fractions import gcd
+
import six
from cryptography import utils
@@ -119,6 +121,55 @@
return private_exponent % (q - 1)
+# Controls the number of iterations rsa_recover_prime_factors will perform
+# to obtain the prime factors. Each iteration increments by 2 so the actual
+# maximum attempts is half this number.
+_MAX_RECOVERY_ATTEMPTS = 1000
+
+
+def rsa_recover_prime_factors(n, e, d):
+ """
+ Compute factors p and q from the private exponent d. We assume that n has
+ no more than two factors. This function is adapted from code in PyCrypto.
+ """
+ # See 8.2.2(i) in Handbook of Applied Cryptography.
+ ktot = d * e - 1
+ # The quantity d*e-1 is a multiple of phi(n), even,
+ # and can be represented as t*2^s.
+ t = ktot
+ while t % 2 == 0:
+ t = t // 2
+ # Cycle through all multiplicative inverses in Zn.
+ # The algorithm is non-deterministic, but there is a 50% chance
+ # any candidate a leads to successful factoring.
+ # See "Digitalized Signatures and Public Key Functions as Intractable
+ # as Factorization", M. Rabin, 1979
+ spotted = False
+ a = 2
+ while not spotted and a < _MAX_RECOVERY_ATTEMPTS:
+ k = t
+ # Cycle through all values a^{t*2^i}=a^k
+ while k < ktot:
+ cand = pow(a, k, n)
+ # Check if a^k is a non-trivial root of unity (mod n)
+ if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
+ # We have found a number such that (cand-1)(cand+1)=0 (mod n).
+ # Either of the terms divides n.
+ p = gcd(cand + 1, n)
+ spotted = True
+ break
+ k *= 2
+ # This value was not any good... let's try another!
+ a += 2
+ if not spotted:
+ raise ValueError("Unable to compute factors p and q from exponent d.")
+ # Found !
+ q, r = divmod(n, p)
+ assert r == 0
+
+ return (p, q)
+
+
class RSAPrivateNumbers(object):
def __init__(self, p, q, d, dmp1, dmq1, iqmp,
public_numbers):
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/rsa.py b/src/cryptography/hazmat/primitives/asymmetric/rsa.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/rsa.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/rsa.py\n@@ -4,6 +4,8 @@\n \n from __future__ import absolute_import, division, print_function\n \n+from fractions import gcd\n+\n import six\n \n from cryptography import utils\n@@ -119,6 +121,55 @@\n return private_exponent % (q - 1)\n \n \n+# Controls the number of iterations rsa_recover_prime_factors will perform\n+# to obtain the prime factors. Each iteration increments by 2 so the actual\n+# maximum attempts is half this number.\n+_MAX_RECOVERY_ATTEMPTS = 1000\n+\n+\n+def rsa_recover_prime_factors(n, e, d):\n+ \"\"\"\n+ Compute factors p and q from the private exponent d. We assume that n has\n+ no more than two factors. This function is adapted from code in PyCrypto.\n+ \"\"\"\n+ # See 8.2.2(i) in Handbook of Applied Cryptography.\n+ ktot = d * e - 1\n+ # The quantity d*e-1 is a multiple of phi(n), even,\n+ # and can be represented as t*2^s.\n+ t = ktot\n+ while t % 2 == 0:\n+ t = t // 2\n+ # Cycle through all multiplicative inverses in Zn.\n+ # The algorithm is non-deterministic, but there is a 50% chance\n+ # any candidate a leads to successful factoring.\n+ # See \"Digitalized Signatures and Public Key Functions as Intractable\n+ # as Factorization\", M. Rabin, 1979\n+ spotted = False\n+ a = 2\n+ while not spotted and a < _MAX_RECOVERY_ATTEMPTS:\n+ k = t\n+ # Cycle through all values a^{t*2^i}=a^k\n+ while k < ktot:\n+ cand = pow(a, k, n)\n+ # Check if a^k is a non-trivial root of unity (mod n)\n+ if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:\n+ # We have found a number such that (cand-1)(cand+1)=0 (mod n).\n+ # Either of the terms divides n.\n+ p = gcd(cand + 1, n)\n+ spotted = True\n+ break\n+ k *= 2\n+ # This value was not any good... let's try another!\n+ a += 2\n+ if not spotted:\n+ raise ValueError(\"Unable to compute factors p and q from exponent d.\")\n+ # Found !\n+ q, r = divmod(n, p)\n+ assert r == 0\n+\n+ return (p, q)\n+\n+\n class RSAPrivateNumbers(object):\n def __init__(self, p, q, d, dmp1, dmq1, iqmp,\n public_numbers):\n", "issue": "[RSA] Add functions to recover (p, q) given (n, e, d)\nRight now we require (p, q, d, dmp1, dmq1, iqmp, e, n). We provide functions to generate the CRT coefficients, but they assume the user has p & q. To support other valid key material sources we need functions that recover p & q given (n, e, d).\n\nThe preferred algorithm to perform this task can be found in Appendix C of [SP-800-56B](http://csrc.nist.gov/publications/nistpubs/800-56B/sp800-56B.pdf).\n\n[RSA] Add functions to recover (p, q) given (n, e, d)\nRight now we require (p, q, d, dmp1, dmq1, iqmp, e, n). We provide functions to generate the CRT coefficients, but they assume the user has p & q. To support other valid key material sources we need functions that recover p & q given (n, e, d).\n\nThe preferred algorithm to perform this task can be found in Appendix C of [SP-800-56B](http://csrc.nist.gov/publications/nistpubs/800-56B/sp800-56B.pdf).\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.backends.interfaces import RSABackend\n\n\ndef generate_private_key(public_exponent, key_size, backend):\n if not isinstance(backend, RSABackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement RSABackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n _verify_rsa_parameters(public_exponent, key_size)\n return backend.generate_rsa_private_key(public_exponent, key_size)\n\n\ndef _verify_rsa_parameters(public_exponent, key_size):\n if public_exponent < 3:\n raise ValueError(\"public_exponent must be >= 3.\")\n\n if public_exponent & 1 == 0:\n raise ValueError(\"public_exponent must be odd.\")\n\n if key_size < 512:\n raise ValueError(\"key_size must be at least 512-bits.\")\n\n\ndef _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp,\n public_exponent, modulus):\n if modulus < 3:\n raise ValueError(\"modulus must be >= 3.\")\n\n if p >= modulus:\n raise ValueError(\"p must be < modulus.\")\n\n if q >= modulus:\n raise ValueError(\"q must be < modulus.\")\n\n if dmp1 >= modulus:\n raise ValueError(\"dmp1 must be < modulus.\")\n\n if dmq1 >= modulus:\n raise ValueError(\"dmq1 must be < modulus.\")\n\n if iqmp >= modulus:\n raise ValueError(\"iqmp must be < modulus.\")\n\n if private_exponent >= modulus:\n raise ValueError(\"private_exponent must be < modulus.\")\n\n if public_exponent < 3 or public_exponent >= modulus:\n raise ValueError(\"public_exponent must be >= 3 and < modulus.\")\n\n if public_exponent & 1 == 0:\n raise ValueError(\"public_exponent must be odd.\")\n\n if dmp1 & 1 == 0:\n raise ValueError(\"dmp1 must be odd.\")\n\n if dmq1 & 1 == 0:\n raise ValueError(\"dmq1 must be odd.\")\n\n if p * q != modulus:\n raise ValueError(\"p*q must equal modulus.\")\n\n\ndef _check_public_key_components(e, n):\n if n < 3:\n raise ValueError(\"n must be >= 3.\")\n\n if e < 3 or e >= n:\n raise ValueError(\"e must be >= 3 and < n.\")\n\n if e & 1 == 0:\n raise ValueError(\"e must be odd.\")\n\n\ndef _modinv(e, m):\n \"\"\"\n Modular Multiplicative Inverse. 
Returns x such that: (x*e) mod m == 1\n \"\"\"\n x1, y1, x2, y2 = 1, 0, 0, 1\n a, b = e, m\n while b > 0:\n q, r = divmod(a, b)\n xn, yn = x1 - q * x2, y1 - q * y2\n a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn\n return x1 % m\n\n\ndef rsa_crt_iqmp(p, q):\n \"\"\"\n Compute the CRT (q ** -1) % p value from RSA primes p and q.\n \"\"\"\n return _modinv(q, p)\n\n\ndef rsa_crt_dmp1(private_exponent, p):\n \"\"\"\n Compute the CRT private_exponent % (p - 1) value from the RSA\n private_exponent and p.\n \"\"\"\n return private_exponent % (p - 1)\n\n\ndef rsa_crt_dmq1(private_exponent, q):\n \"\"\"\n Compute the CRT private_exponent % (q - 1) value from the RSA\n private_exponent and q.\n \"\"\"\n return private_exponent % (q - 1)\n\n\nclass RSAPrivateNumbers(object):\n def __init__(self, p, q, d, dmp1, dmq1, iqmp,\n public_numbers):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(q, six.integer_types) or\n not isinstance(d, six.integer_types) or\n not isinstance(dmp1, six.integer_types) or\n not isinstance(dmq1, six.integer_types) or\n not isinstance(iqmp, six.integer_types)\n ):\n raise TypeError(\n \"RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must\"\n \" all be an integers.\"\n )\n\n if not isinstance(public_numbers, RSAPublicNumbers):\n raise TypeError(\n \"RSAPrivateNumbers public_numbers must be an RSAPublicNumbers\"\n \" instance.\"\n )\n\n self._p = p\n self._q = q\n self._d = d\n self._dmp1 = dmp1\n self._dmq1 = dmq1\n self._iqmp = iqmp\n self._public_numbers = public_numbers\n\n p = utils.read_only_property(\"_p\")\n q = utils.read_only_property(\"_q\")\n d = utils.read_only_property(\"_d\")\n dmp1 = utils.read_only_property(\"_dmp1\")\n dmq1 = utils.read_only_property(\"_dmq1\")\n iqmp = utils.read_only_property(\"_iqmp\")\n public_numbers = utils.read_only_property(\"_public_numbers\")\n\n def private_key(self, backend):\n return backend.load_rsa_private_numbers(self)\n\n def __eq__(self, other):\n if not isinstance(other, RSAPrivateNumbers):\n return NotImplemented\n\n return (\n self.p == other.p and\n self.q == other.q and\n self.d == other.d and\n self.dmp1 == other.dmp1 and\n self.dmq1 == other.dmq1 and\n self.iqmp == other.iqmp and\n self.public_numbers == other.public_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n\nclass RSAPublicNumbers(object):\n def __init__(self, e, n):\n if (\n not isinstance(e, six.integer_types) or\n not isinstance(n, six.integer_types)\n ):\n raise TypeError(\"RSAPublicNumbers arguments must be integers.\")\n\n self._e = e\n self._n = n\n\n e = utils.read_only_property(\"_e\")\n n = utils.read_only_property(\"_n\")\n\n def public_key(self, backend):\n return backend.load_rsa_public_numbers(self)\n\n def __repr__(self):\n return \"<RSAPublicNumbers(e={0}, n={1})>\".format(self._e, self._n)\n\n def __eq__(self, other):\n if not isinstance(other, RSAPublicNumbers):\n return NotImplemented\n\n return self.e == other.e and self.n == other.n\n\n def __ne__(self, other):\n return not self == other\n", "path": "src/cryptography/hazmat/primitives/asymmetric/rsa.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom fractions import gcd\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.backends.interfaces import RSABackend\n\n\ndef generate_private_key(public_exponent, key_size, backend):\n if not isinstance(backend, RSABackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement RSABackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n _verify_rsa_parameters(public_exponent, key_size)\n return backend.generate_rsa_private_key(public_exponent, key_size)\n\n\ndef _verify_rsa_parameters(public_exponent, key_size):\n if public_exponent < 3:\n raise ValueError(\"public_exponent must be >= 3.\")\n\n if public_exponent & 1 == 0:\n raise ValueError(\"public_exponent must be odd.\")\n\n if key_size < 512:\n raise ValueError(\"key_size must be at least 512-bits.\")\n\n\ndef _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp,\n public_exponent, modulus):\n if modulus < 3:\n raise ValueError(\"modulus must be >= 3.\")\n\n if p >= modulus:\n raise ValueError(\"p must be < modulus.\")\n\n if q >= modulus:\n raise ValueError(\"q must be < modulus.\")\n\n if dmp1 >= modulus:\n raise ValueError(\"dmp1 must be < modulus.\")\n\n if dmq1 >= modulus:\n raise ValueError(\"dmq1 must be < modulus.\")\n\n if iqmp >= modulus:\n raise ValueError(\"iqmp must be < modulus.\")\n\n if private_exponent >= modulus:\n raise ValueError(\"private_exponent must be < modulus.\")\n\n if public_exponent < 3 or public_exponent >= modulus:\n raise ValueError(\"public_exponent must be >= 3 and < modulus.\")\n\n if public_exponent & 1 == 0:\n raise ValueError(\"public_exponent must be odd.\")\n\n if dmp1 & 1 == 0:\n raise ValueError(\"dmp1 must be odd.\")\n\n if dmq1 & 1 == 0:\n raise ValueError(\"dmq1 must be odd.\")\n\n if p * q != modulus:\n raise ValueError(\"p*q must equal modulus.\")\n\n\ndef _check_public_key_components(e, n):\n if n < 3:\n raise ValueError(\"n must be >= 3.\")\n\n if e < 3 or e >= n:\n raise ValueError(\"e must be >= 3 and < n.\")\n\n if e & 1 == 0:\n raise ValueError(\"e must be odd.\")\n\n\ndef _modinv(e, m):\n \"\"\"\n Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1\n \"\"\"\n x1, y1, x2, y2 = 1, 0, 0, 1\n a, b = e, m\n while b > 0:\n q, r = divmod(a, b)\n xn, yn = x1 - q * x2, y1 - q * y2\n a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn\n return x1 % m\n\n\ndef rsa_crt_iqmp(p, q):\n \"\"\"\n Compute the CRT (q ** -1) % p value from RSA primes p and q.\n \"\"\"\n return _modinv(q, p)\n\n\ndef rsa_crt_dmp1(private_exponent, p):\n \"\"\"\n Compute the CRT private_exponent % (p - 1) value from the RSA\n private_exponent and p.\n \"\"\"\n return private_exponent % (p - 1)\n\n\ndef rsa_crt_dmq1(private_exponent, q):\n \"\"\"\n Compute the CRT private_exponent % (q - 1) value from the RSA\n private_exponent and q.\n \"\"\"\n return private_exponent % (q - 1)\n\n\n# Controls the number of iterations rsa_recover_prime_factors will perform\n# to obtain the prime factors. Each iteration increments by 2 so the actual\n# maximum attempts is half this number.\n_MAX_RECOVERY_ATTEMPTS = 1000\n\n\ndef rsa_recover_prime_factors(n, e, d):\n \"\"\"\n Compute factors p and q from the private exponent d. We assume that n has\n no more than two factors. 
This function is adapted from code in PyCrypto.\n \"\"\"\n # See 8.2.2(i) in Handbook of Applied Cryptography.\n ktot = d * e - 1\n # The quantity d*e-1 is a multiple of phi(n), even,\n # and can be represented as t*2^s.\n t = ktot\n while t % 2 == 0:\n t = t // 2\n # Cycle through all multiplicative inverses in Zn.\n # The algorithm is non-deterministic, but there is a 50% chance\n # any candidate a leads to successful factoring.\n # See \"Digitalized Signatures and Public Key Functions as Intractable\n # as Factorization\", M. Rabin, 1979\n spotted = False\n a = 2\n while not spotted and a < _MAX_RECOVERY_ATTEMPTS:\n k = t\n # Cycle through all values a^{t*2^i}=a^k\n while k < ktot:\n cand = pow(a, k, n)\n # Check if a^k is a non-trivial root of unity (mod n)\n if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:\n # We have found a number such that (cand-1)(cand+1)=0 (mod n).\n # Either of the terms divides n.\n p = gcd(cand + 1, n)\n spotted = True\n break\n k *= 2\n # This value was not any good... let's try another!\n a += 2\n if not spotted:\n raise ValueError(\"Unable to compute factors p and q from exponent d.\")\n # Found !\n q, r = divmod(n, p)\n assert r == 0\n\n return (p, q)\n\n\nclass RSAPrivateNumbers(object):\n def __init__(self, p, q, d, dmp1, dmq1, iqmp,\n public_numbers):\n if (\n not isinstance(p, six.integer_types) or\n not isinstance(q, six.integer_types) or\n not isinstance(d, six.integer_types) or\n not isinstance(dmp1, six.integer_types) or\n not isinstance(dmq1, six.integer_types) or\n not isinstance(iqmp, six.integer_types)\n ):\n raise TypeError(\n \"RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must\"\n \" all be an integers.\"\n )\n\n if not isinstance(public_numbers, RSAPublicNumbers):\n raise TypeError(\n \"RSAPrivateNumbers public_numbers must be an RSAPublicNumbers\"\n \" instance.\"\n )\n\n self._p = p\n self._q = q\n self._d = d\n self._dmp1 = dmp1\n self._dmq1 = dmq1\n self._iqmp = iqmp\n self._public_numbers = public_numbers\n\n p = utils.read_only_property(\"_p\")\n q = utils.read_only_property(\"_q\")\n d = utils.read_only_property(\"_d\")\n dmp1 = utils.read_only_property(\"_dmp1\")\n dmq1 = utils.read_only_property(\"_dmq1\")\n iqmp = utils.read_only_property(\"_iqmp\")\n public_numbers = utils.read_only_property(\"_public_numbers\")\n\n def private_key(self, backend):\n return backend.load_rsa_private_numbers(self)\n\n def __eq__(self, other):\n if not isinstance(other, RSAPrivateNumbers):\n return NotImplemented\n\n return (\n self.p == other.p and\n self.q == other.q and\n self.d == other.d and\n self.dmp1 == other.dmp1 and\n self.dmq1 == other.dmq1 and\n self.iqmp == other.iqmp and\n self.public_numbers == other.public_numbers\n )\n\n def __ne__(self, other):\n return not self == other\n\n\nclass RSAPublicNumbers(object):\n def __init__(self, e, n):\n if (\n not isinstance(e, six.integer_types) or\n not isinstance(n, six.integer_types)\n ):\n raise TypeError(\"RSAPublicNumbers arguments must be integers.\")\n\n self._e = e\n self._n = n\n\n e = utils.read_only_property(\"_e\")\n n = utils.read_only_property(\"_n\")\n\n def public_key(self, backend):\n return backend.load_rsa_public_numbers(self)\n\n def __repr__(self):\n return \"<RSAPublicNumbers(e={0}, n={1})>\".format(self._e, self._n)\n\n def __eq__(self, other):\n if not isinstance(other, RSAPublicNumbers):\n return NotImplemented\n\n return self.e == other.e and self.n == other.n\n\n def __ne__(self, other):\n return not self == other\n", "path": 
"src/cryptography/hazmat/primitives/asymmetric/rsa.py"}]}
| 2,685 | 720 |
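A quick, hypothetical sanity check for the helper added in the record above: recover the factors of a freshly generated key and compare them with the originals. The `generate_private_key` and `private_numbers` calls follow the cryptography API of that era (a backend argument was still required); only `rsa_recover_prime_factors` is new.

```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
priv = key.private_numbers()
pub = priv.public_numbers

# rsa_recover_prime_factors is the helper proposed in the record above.
p, q = rsa.rsa_recover_prime_factors(pub.n, pub.e, priv.d)

# The factors may come back in either order, so compare as a set.
assert {p, q} == {priv.p, priv.q}
assert p * q == pub.n
```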
gh_patches_debug_4149
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-14565
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable `ENFORCE_GLOBAL_UNIQUE` by default
### NetBox version
v3.6.6
### Feature type
Change to existing functionality
### Proposed functionality
Change the default value of the [`ENFORCE_GLOBAL_UNIQUE`](https://docs.netbox.dev/en/stable/configuration/miscellaneous/#enforce_global_unique) configuration parameter from false to true. This will enforce unique IP addressing within the global (non-VRF) table by default.
This change would affect only deployments without any configuration already defined.
Please use a :+1: or :-1: response below to indicate your support for/opposition to this proposed change.
### Use case
Enforcing unique IP space by default is more restrictive and thus safer than the current default. Obviously, the parameter can still be toggled as needed.
This change would also be consistent with the `enforce_unique` field on the VRF model, which defaults to True.
### Database changes
_No response_
### External dependencies
_No response_
--- END ISSUE ---
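For deployments that want to keep the old behaviour once the default flips, the parameter can still be pinned statically. The sketch below is an illustrative `configuration.py` excerpt; it assumes NetBox's usual dynamic-configuration precedence, where a value defined in `configuration.py` overrides the database-backed setting.

```python
# configuration.py (illustrative excerpt, not a full configuration file)
# Pin the pre-change behaviour explicitly; leave this unset to accept the new default of True.
ENFORCE_GLOBAL_UNIQUE = False
```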
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/config/parameters.py`
Content:
```
1 from django import forms
2 from django.contrib.postgres.forms import SimpleArrayField
3 from django.utils.translation import gettext_lazy as _
4
5
6 class ConfigParam:
7
8 def __init__(self, name, label, default, description='', field=None, field_kwargs=None):
9 self.name = name
10 self.label = label
11 self.default = default
12 self.field = field or forms.CharField
13 self.description = description
14 self.field_kwargs = field_kwargs or {}
15
16
17 PARAMS = (
18
19 # Banners
20 ConfigParam(
21 name='BANNER_LOGIN',
22 label=_('Login banner'),
23 default='',
24 description=_("Additional content to display on the login page"),
25 field_kwargs={
26 'widget': forms.Textarea(
27 attrs={'class': 'vLargeTextField'}
28 ),
29 },
30 ),
31 ConfigParam(
32 name='BANNER_MAINTENANCE',
33 label=_('Maintenance banner'),
34 default='NetBox is currently in maintenance mode. Functionality may be limited.',
35 description=_('Additional content to display when in maintenance mode'),
36 field_kwargs={
37 'widget': forms.Textarea(
38 attrs={'class': 'vLargeTextField'}
39 ),
40 },
41 ),
42 ConfigParam(
43 name='BANNER_TOP',
44 label=_('Top banner'),
45 default='',
46 description=_("Additional content to display at the top of every page"),
47 field_kwargs={
48 'widget': forms.Textarea(
49 attrs={'class': 'vLargeTextField'}
50 ),
51 },
52 ),
53 ConfigParam(
54 name='BANNER_BOTTOM',
55 label=_('Bottom banner'),
56 default='',
57 description=_("Additional content to display at the bottom of every page"),
58 field_kwargs={
59 'widget': forms.Textarea(
60 attrs={'class': 'vLargeTextField'}
61 ),
62 },
63 ),
64
65 # IPAM
66 ConfigParam(
67 name='ENFORCE_GLOBAL_UNIQUE',
68 label=_('Globally unique IP space'),
69 default=False,
70 description=_("Enforce unique IP addressing within the global table"),
71 field=forms.BooleanField
72 ),
73 ConfigParam(
74 name='PREFER_IPV4',
75 label=_('Prefer IPv4'),
76 default=False,
77 description=_("Prefer IPv4 addresses over IPv6"),
78 field=forms.BooleanField
79 ),
80
81 # Racks
82 ConfigParam(
83 name='RACK_ELEVATION_DEFAULT_UNIT_HEIGHT',
84 label=_('Rack unit height'),
85 default=22,
86 description=_("Default unit height for rendered rack elevations"),
87 field=forms.IntegerField
88 ),
89 ConfigParam(
90 name='RACK_ELEVATION_DEFAULT_UNIT_WIDTH',
91 label=_('Rack unit width'),
92 default=220,
93 description=_("Default unit width for rendered rack elevations"),
94 field=forms.IntegerField
95 ),
96
97 # Power
98 ConfigParam(
99 name='POWERFEED_DEFAULT_VOLTAGE',
100 label=_('Powerfeed voltage'),
101 default=120,
102 description=_("Default voltage for powerfeeds"),
103 field=forms.IntegerField
104 ),
105 ConfigParam(
106 name='POWERFEED_DEFAULT_AMPERAGE',
107 label=_('Powerfeed amperage'),
108 default=15,
109 description=_("Default amperage for powerfeeds"),
110 field=forms.IntegerField
111 ),
112 ConfigParam(
113 name='POWERFEED_DEFAULT_MAX_UTILIZATION',
114 label=_('Powerfeed max utilization'),
115 default=80,
116 description=_("Default max utilization for powerfeeds"),
117 field=forms.IntegerField
118 ),
119
120 # Security
121 ConfigParam(
122 name='ALLOWED_URL_SCHEMES',
123 label=_('Allowed URL schemes'),
124 default=(
125 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc',
126 'xmpp',
127 ),
128 description=_("Permitted schemes for URLs in user-provided content"),
129 field=SimpleArrayField,
130 field_kwargs={'base_field': forms.CharField()}
131 ),
132
133 # Pagination
134 ConfigParam(
135 name='PAGINATE_COUNT',
136 label=_('Default page size'),
137 default=50,
138 field=forms.IntegerField
139 ),
140 ConfigParam(
141 name='MAX_PAGE_SIZE',
142 label=_('Maximum page size'),
143 default=1000,
144 field=forms.IntegerField
145 ),
146
147 # Validation
148 ConfigParam(
149 name='CUSTOM_VALIDATORS',
150 label=_('Custom validators'),
151 default={},
152 description=_("Custom validation rules (JSON)"),
153 field=forms.JSONField,
154 field_kwargs={
155 'widget': forms.Textarea(),
156 },
157 ),
158 ConfigParam(
159 name='PROTECTION_RULES',
160 label=_('Protection rules'),
161 default={},
162 description=_("Deletion protection rules (JSON)"),
163 field=forms.JSONField,
164 field_kwargs={
165 'widget': forms.Textarea(),
166 },
167 ),
168
169 # User preferences
170 ConfigParam(
171 name='DEFAULT_USER_PREFERENCES',
172 label=_('Default preferences'),
173 default={},
174 description=_("Default preferences for new users"),
175 field=forms.JSONField
176 ),
177
178 # Miscellaneous
179 ConfigParam(
180 name='MAINTENANCE_MODE',
181 label=_('Maintenance mode'),
182 default=False,
183 description=_("Enable maintenance mode"),
184 field=forms.BooleanField
185 ),
186 ConfigParam(
187 name='GRAPHQL_ENABLED',
188 label=_('GraphQL enabled'),
189 default=True,
190 description=_("Enable the GraphQL API"),
191 field=forms.BooleanField
192 ),
193 ConfigParam(
194 name='CHANGELOG_RETENTION',
195 label=_('Changelog retention'),
196 default=90,
197 description=_("Days to retain changelog history (set to zero for unlimited)"),
198 field=forms.IntegerField
199 ),
200 ConfigParam(
201 name='JOB_RETENTION',
202 label=_('Job result retention'),
203 default=90,
204 description=_("Days to retain job result history (set to zero for unlimited)"),
205 field=forms.IntegerField
206 ),
207 ConfigParam(
208 name='MAPS_URL',
209 label=_('Maps URL'),
210 default='https://maps.google.com/?q=',
211 description=_("Base URL for mapping geographic locations")
212 ),
213
214 )
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/config/parameters.py b/netbox/netbox/config/parameters.py
--- a/netbox/netbox/config/parameters.py
+++ b/netbox/netbox/config/parameters.py
@@ -66,7 +66,7 @@
ConfigParam(
name='ENFORCE_GLOBAL_UNIQUE',
label=_('Globally unique IP space'),
- default=False,
+ default=True,
description=_("Enforce unique IP addressing within the global table"),
field=forms.BooleanField
),
|
{"golden_diff": "diff --git a/netbox/netbox/config/parameters.py b/netbox/netbox/config/parameters.py\n--- a/netbox/netbox/config/parameters.py\n+++ b/netbox/netbox/config/parameters.py\n@@ -66,7 +66,7 @@\n ConfigParam(\n name='ENFORCE_GLOBAL_UNIQUE',\n label=_('Globally unique IP space'),\n- default=False,\n+ default=True,\n description=_(\"Enforce unique IP addressing within the global table\"),\n field=forms.BooleanField\n ),\n", "issue": "Enable `ENFORCE_GLOBAL_UNIQUE` by default\n### NetBox version\r\n\r\nv3.6.6\r\n\r\n### Feature type\r\n\r\nChange to existing functionality\r\n\r\n### Proposed functionality\r\n\r\nChange the default value of the [`ENFORCE_GLOBAL_UNIQUE`](https://docs.netbox.dev/en/stable/configuration/miscellaneous/#enforce_global_unique) configuration parameter from false to true. This will enforce unique IP addressing within the global (non-VRF) table by default.\r\n\r\nThis change would affect only deployments without any configuration already defined.\r\n\r\nPlease use a :+1: or :-1: response below to indicate your support for/opposition to this proposed change.\r\n\r\n### Use case\r\n\r\nEnforcing unique IP space by default is more restrictive and thus safer than the current default. Obviously, the parameter can still be toggled as needed.\r\n\r\nThis change would also be consistent with the `enforce_unique` field on the VRF model, which defaults to True.\r\n\r\n### Database changes\r\n\r\n_No response_\r\n\r\n### External dependencies\r\n\r\n_No response_\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass ConfigParam:\n\n def __init__(self, name, label, default, description='', field=None, field_kwargs=None):\n self.name = name\n self.label = label\n self.default = default\n self.field = field or forms.CharField\n self.description = description\n self.field_kwargs = field_kwargs or {}\n\n\nPARAMS = (\n\n # Banners\n ConfigParam(\n name='BANNER_LOGIN',\n label=_('Login banner'),\n default='',\n description=_(\"Additional content to display on the login page\"),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n ConfigParam(\n name='BANNER_MAINTENANCE',\n label=_('Maintenance banner'),\n default='NetBox is currently in maintenance mode. 
Functionality may be limited.',\n description=_('Additional content to display when in maintenance mode'),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n ConfigParam(\n name='BANNER_TOP',\n label=_('Top banner'),\n default='',\n description=_(\"Additional content to display at the top of every page\"),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n ConfigParam(\n name='BANNER_BOTTOM',\n label=_('Bottom banner'),\n default='',\n description=_(\"Additional content to display at the bottom of every page\"),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n\n # IPAM\n ConfigParam(\n name='ENFORCE_GLOBAL_UNIQUE',\n label=_('Globally unique IP space'),\n default=False,\n description=_(\"Enforce unique IP addressing within the global table\"),\n field=forms.BooleanField\n ),\n ConfigParam(\n name='PREFER_IPV4',\n label=_('Prefer IPv4'),\n default=False,\n description=_(\"Prefer IPv4 addresses over IPv6\"),\n field=forms.BooleanField\n ),\n\n # Racks\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_HEIGHT',\n label=_('Rack unit height'),\n default=22,\n description=_(\"Default unit height for rendered rack elevations\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_WIDTH',\n label=_('Rack unit width'),\n default=220,\n description=_(\"Default unit width for rendered rack elevations\"),\n field=forms.IntegerField\n ),\n\n # Power\n ConfigParam(\n name='POWERFEED_DEFAULT_VOLTAGE',\n label=_('Powerfeed voltage'),\n default=120,\n description=_(\"Default voltage for powerfeeds\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='POWERFEED_DEFAULT_AMPERAGE',\n label=_('Powerfeed amperage'),\n default=15,\n description=_(\"Default amperage for powerfeeds\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='POWERFEED_DEFAULT_MAX_UTILIZATION',\n label=_('Powerfeed max utilization'),\n default=80,\n description=_(\"Default max utilization for powerfeeds\"),\n field=forms.IntegerField\n ),\n\n # Security\n ConfigParam(\n name='ALLOWED_URL_SCHEMES',\n label=_('Allowed URL schemes'),\n default=(\n 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc',\n 'xmpp',\n ),\n description=_(\"Permitted schemes for URLs in user-provided content\"),\n field=SimpleArrayField,\n field_kwargs={'base_field': forms.CharField()}\n ),\n\n # Pagination\n ConfigParam(\n name='PAGINATE_COUNT',\n label=_('Default page size'),\n default=50,\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAX_PAGE_SIZE',\n label=_('Maximum page size'),\n default=1000,\n field=forms.IntegerField\n ),\n\n # Validation\n ConfigParam(\n name='CUSTOM_VALIDATORS',\n label=_('Custom validators'),\n default={},\n description=_(\"Custom validation rules (JSON)\"),\n field=forms.JSONField,\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n ConfigParam(\n name='PROTECTION_RULES',\n label=_('Protection rules'),\n default={},\n description=_(\"Deletion protection rules (JSON)\"),\n field=forms.JSONField,\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n\n # User preferences\n ConfigParam(\n name='DEFAULT_USER_PREFERENCES',\n label=_('Default preferences'),\n default={},\n description=_(\"Default preferences for new users\"),\n field=forms.JSONField\n ),\n\n # Miscellaneous\n ConfigParam(\n name='MAINTENANCE_MODE',\n label=_('Maintenance mode'),\n default=False,\n description=_(\"Enable 
maintenance mode\"),\n field=forms.BooleanField\n ),\n ConfigParam(\n name='GRAPHQL_ENABLED',\n label=_('GraphQL enabled'),\n default=True,\n description=_(\"Enable the GraphQL API\"),\n field=forms.BooleanField\n ),\n ConfigParam(\n name='CHANGELOG_RETENTION',\n label=_('Changelog retention'),\n default=90,\n description=_(\"Days to retain changelog history (set to zero for unlimited)\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='JOB_RETENTION',\n label=_('Job result retention'),\n default=90,\n description=_(\"Days to retain job result history (set to zero for unlimited)\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAPS_URL',\n label=_('Maps URL'),\n default='https://maps.google.com/?q=',\n description=_(\"Base URL for mapping geographic locations\")\n ),\n\n)\n", "path": "netbox/netbox/config/parameters.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass ConfigParam:\n\n def __init__(self, name, label, default, description='', field=None, field_kwargs=None):\n self.name = name\n self.label = label\n self.default = default\n self.field = field or forms.CharField\n self.description = description\n self.field_kwargs = field_kwargs or {}\n\n\nPARAMS = (\n\n # Banners\n ConfigParam(\n name='BANNER_LOGIN',\n label=_('Login banner'),\n default='',\n description=_(\"Additional content to display on the login page\"),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n ConfigParam(\n name='BANNER_MAINTENANCE',\n label=_('Maintenance banner'),\n default='NetBox is currently in maintenance mode. Functionality may be limited.',\n description=_('Additional content to display when in maintenance mode'),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n ConfigParam(\n name='BANNER_TOP',\n label=_('Top banner'),\n default='',\n description=_(\"Additional content to display at the top of every page\"),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n ConfigParam(\n name='BANNER_BOTTOM',\n label=_('Bottom banner'),\n default='',\n description=_(\"Additional content to display at the bottom of every page\"),\n field_kwargs={\n 'widget': forms.Textarea(\n attrs={'class': 'vLargeTextField'}\n ),\n },\n ),\n\n # IPAM\n ConfigParam(\n name='ENFORCE_GLOBAL_UNIQUE',\n label=_('Globally unique IP space'),\n default=True,\n description=_(\"Enforce unique IP addressing within the global table\"),\n field=forms.BooleanField\n ),\n ConfigParam(\n name='PREFER_IPV4',\n label=_('Prefer IPv4'),\n default=False,\n description=_(\"Prefer IPv4 addresses over IPv6\"),\n field=forms.BooleanField\n ),\n\n # Racks\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_HEIGHT',\n label=_('Rack unit height'),\n default=22,\n description=_(\"Default unit height for rendered rack elevations\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_WIDTH',\n label=_('Rack unit width'),\n default=220,\n description=_(\"Default unit width for rendered rack elevations\"),\n field=forms.IntegerField\n ),\n\n # Power\n ConfigParam(\n name='POWERFEED_DEFAULT_VOLTAGE',\n label=_('Powerfeed voltage'),\n default=120,\n description=_(\"Default voltage for powerfeeds\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='POWERFEED_DEFAULT_AMPERAGE',\n label=_('Powerfeed amperage'),\n default=15,\n 
description=_(\"Default amperage for powerfeeds\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='POWERFEED_DEFAULT_MAX_UTILIZATION',\n label=_('Powerfeed max utilization'),\n default=80,\n description=_(\"Default max utilization for powerfeeds\"),\n field=forms.IntegerField\n ),\n\n # Security\n ConfigParam(\n name='ALLOWED_URL_SCHEMES',\n label=_('Allowed URL schemes'),\n default=(\n 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc',\n 'xmpp',\n ),\n description=_(\"Permitted schemes for URLs in user-provided content\"),\n field=SimpleArrayField,\n field_kwargs={'base_field': forms.CharField()}\n ),\n\n # Pagination\n ConfigParam(\n name='PAGINATE_COUNT',\n label=_('Default page size'),\n default=50,\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAX_PAGE_SIZE',\n label=_('Maximum page size'),\n default=1000,\n field=forms.IntegerField\n ),\n\n # Validation\n ConfigParam(\n name='CUSTOM_VALIDATORS',\n label=_('Custom validators'),\n default={},\n description=_(\"Custom validation rules (JSON)\"),\n field=forms.JSONField,\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n ConfigParam(\n name='PROTECTION_RULES',\n label=_('Protection rules'),\n default={},\n description=_(\"Deletion protection rules (JSON)\"),\n field=forms.JSONField,\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n\n # User preferences\n ConfigParam(\n name='DEFAULT_USER_PREFERENCES',\n label=_('Default preferences'),\n default={},\n description=_(\"Default preferences for new users\"),\n field=forms.JSONField\n ),\n\n # Miscellaneous\n ConfigParam(\n name='MAINTENANCE_MODE',\n label=_('Maintenance mode'),\n default=False,\n description=_(\"Enable maintenance mode\"),\n field=forms.BooleanField\n ),\n ConfigParam(\n name='GRAPHQL_ENABLED',\n label=_('GraphQL enabled'),\n default=True,\n description=_(\"Enable the GraphQL API\"),\n field=forms.BooleanField\n ),\n ConfigParam(\n name='CHANGELOG_RETENTION',\n label=_('Changelog retention'),\n default=90,\n description=_(\"Days to retain changelog history (set to zero for unlimited)\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='JOB_RETENTION',\n label=_('Job result retention'),\n default=90,\n description=_(\"Days to retain job result history (set to zero for unlimited)\"),\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAPS_URL',\n label=_('Maps URL'),\n default='https://maps.google.com/?q=',\n description=_(\"Base URL for mapping geographic locations\")\n ),\n\n)\n", "path": "netbox/netbox/config/parameters.py"}]}
| 2,272 | 108 |
gh_patches_debug_42600
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-145
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide default implementation of batch_log_pdf
Could we provide a default implementation of `batch_log_pdf` as a simple for loop?
```py
class Distribution(object):
...
def batch_log_pdf(self, x, batch_size, *args, **kwargs):
result = torch.Tensor([batch_size])
for i in range(batch_size):
result[i] = self.log_pdf(x[i], *args, **kwargs)
return torch.autograd.Variable(result) # Caller decides whether to .sum().
```
Or do we want to instead implement correct handling of `NotImplementedError`s everywhere `batch_log_pdf` is used?
Disclaimer: I don't understand what `batch_log_pdf` does, and there is no docstring.
Edited to not sum the result.
--- END ISSUE ---
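Two small notes on the snippet in the issue: `torch.Tensor([batch_size])` allocates a one-element tensor holding the value `batch_size` rather than a length-`batch_size` buffer, and writing results into a raw tensor would drop gradients from `log_pdf`. The patch shown further down takes a different route (a default `log_pdf` that sums `batch_log_pdf`), but a loop-based default in the spirit of the proposal might look like the following sketch, written against the Variable-era torch API used in this file.

```python
import torch


def batch_log_pdf(self, x, batch_size, *args, **kwargs):
    # Each log_pdf call is expected to return a 1-element torch.autograd.Variable,
    # so concatenating keeps the autograd graph intact; the caller decides whether
    # to .sum() the per-sample densities.
    scores = [self.log_pdf(x[i], *args, **kwargs) for i in range(batch_size)]
    return torch.cat(scores)
```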
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/distribution.py`
Content:
```
1 class Distribution(object):
2 """
3 Distribution abstract base class
4 """
5
6 def __init__(self, *args, **kwargs):
7 """
8 Constructor for base distribution class.
9
10 Currently takes no explicit arguments.
11 """
12 self.reparameterized = False
13
14 def __call__(self, *args, **kwargs):
15 """
16 Samples on call
17 """
18 return self.sample(*args, **kwargs)
19
20 def sample(self, *args, **kwargs):
21 """
22 Virtual sample method.
23 """
24 raise NotImplementedError()
25
26 def log_pdf(self, x):
27 raise NotImplementedError()
28
29 def batch_log_pdf(self, x, batch_size):
30 raise NotImplementedError()
31
32 def support(self):
33 raise NotImplementedError("Support not supported for {}".format(str(type(self))))
34
35 def analytic_mean(self, *args, **kwargs):
36 """
37 Analytic mean of the distribution, to be implemented by derived classes.
38 Note that this is optional, and currently only used for testing distributions.
39 :return: Analytic mean, assuming it can be computed analytically given the distribution parameters
40 :rtype: torch.autograd.Variable.
41 """
42 raise NotImplementedError("Method not implemented by the subclass {}".format(str(type(self))))
43
44 def analytic_var(self, *args, **kwargs):
45 """
46 Analytic variance of the distribution, to be implemented by derived classes.
47 Note that this is optional, and currently only used for testing distributions.
48 :return: Analytic variance, assuming it can be computed analytically given the distribution parameters
49 :rtype: torch.autograd.Variable.
50 """
51 raise NotImplementedError("Method not implemented by the subclass {}".format(str(type(self))))
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyro/distributions/distribution.py b/pyro/distributions/distribution.py
--- a/pyro/distributions/distribution.py
+++ b/pyro/distributions/distribution.py
@@ -1,6 +1,17 @@
+import torch
+
+
class Distribution(object):
"""
- Distribution abstract base class
+ Abstract base class for probability distributions.
+
+ Instances can either be constructed from a fixed parameter and called without paramters,
+ or constructed without a parameter and called with a paramter.
+ It is not allowed to specify a parameter both during construction and when calling.
+ When calling with a parameter, it is preferred to use one of the singleton instances
+ in pyro.distributions rather than constructing a new instance without a parameter.
+
+ Derived classes must implement the `sample`, and `batch_log_pdf` methods.
"""
def __init__(self, *args, **kwargs):
@@ -13,39 +24,69 @@
def __call__(self, *args, **kwargs):
"""
- Samples on call
+ Samples a random value.
+
+ :return: A random value.
+ :rtype: torch.autograd.Variable
"""
return self.sample(*args, **kwargs)
def sample(self, *args, **kwargs):
"""
- Virtual sample method.
+ Samples a random value.
+
+ :return: A random value.
+ :rtype: torch.autograd.Variable
"""
- raise NotImplementedError()
+ raise NotImplementedError
- def log_pdf(self, x):
- raise NotImplementedError()
+ def log_pdf(self, x, *args, **kwargs):
+ """
+ Evaluates total log probability density for one or a batch of samples and parameters.
- def batch_log_pdf(self, x, batch_size):
- raise NotImplementedError()
+ :param torch.autograd.Variable x: A value.
+ :return: total log probability density as a one-dimensional torch.autograd.Variable of size 1.
+ :rtype: torch.autograd.Variable
+ """
+ return torch.sum(self.batch_log_pdf(x, *args, **kwargs))
- def support(self):
- raise NotImplementedError("Support not supported for {}".format(str(type(self))))
+ def batch_log_pdf(self, x, *args, **kwargs):
+ """
+ Evaluates log probability densities for one or a batch of samples and parameters.
+
+ :param torch.autograd.Variable x: A single value or a batch of values batched along axis 0.
+ :return: log probability densities as a one-dimensional torch.autograd.Variable.
+ :rtype: torch.autograd.Variable
+ """
+ raise NotImplementedError
+
+ def support(self, *args, **kwargs):
+ """
+ Returns a representation of the distribution's support.
+
+ :return: A representation of the distribution's support.
+ :rtype: torch.Tensor
+ """
+ raise NotImplementedError("Support not implemented for {}".format(type(self)))
def analytic_mean(self, *args, **kwargs):
"""
Analytic mean of the distribution, to be implemented by derived classes.
+
Note that this is optional, and currently only used for testing distributions.
+
:return: Analytic mean, assuming it can be computed analytically given the distribution parameters
:rtype: torch.autograd.Variable.
"""
- raise NotImplementedError("Method not implemented by the subclass {}".format(str(type(self))))
+ raise NotImplementedError("Method not implemented by the subclass {}".format(type(self)))
def analytic_var(self, *args, **kwargs):
"""
Analytic variance of the distribution, to be implemented by derived classes.
+
Note that this is optional, and currently only used for testing distributions.
+
:return: Analytic variance, assuming it can be computed analytically given the distribution parameters
:rtype: torch.autograd.Variable.
"""
- raise NotImplementedError("Method not implemented by the subclass {}".format(str(type(self))))
+ raise NotImplementedError("Method not implemented by the subclass {}".format(type(self)))
|
{"golden_diff": "diff --git a/pyro/distributions/distribution.py b/pyro/distributions/distribution.py\n--- a/pyro/distributions/distribution.py\n+++ b/pyro/distributions/distribution.py\n@@ -1,6 +1,17 @@\n+import torch\n+\n+\n class Distribution(object):\n \"\"\"\n- Distribution abstract base class\n+ Abstract base class for probability distributions.\n+\n+ Instances can either be constructed from a fixed parameter and called without paramters,\n+ or constructed without a parameter and called with a paramter.\n+ It is not allowed to specify a parameter both during construction and when calling.\n+ When calling with a parameter, it is preferred to use one of the singleton instances\n+ in pyro.distributions rather than constructing a new instance without a parameter.\n+\n+ Derived classes must implement the `sample`, and `batch_log_pdf` methods.\n \"\"\"\n \n def __init__(self, *args, **kwargs):\n@@ -13,39 +24,69 @@\n \n def __call__(self, *args, **kwargs):\n \"\"\"\n- Samples on call\n+ Samples a random value.\n+\n+ :return: A random value.\n+ :rtype: torch.autograd.Variable\n \"\"\"\n return self.sample(*args, **kwargs)\n \n def sample(self, *args, **kwargs):\n \"\"\"\n- Virtual sample method.\n+ Samples a random value.\n+\n+ :return: A random value.\n+ :rtype: torch.autograd.Variable\n \"\"\"\n- raise NotImplementedError()\n+ raise NotImplementedError\n \n- def log_pdf(self, x):\n- raise NotImplementedError()\n+ def log_pdf(self, x, *args, **kwargs):\n+ \"\"\"\n+ Evaluates total log probability density for one or a batch of samples and parameters.\n \n- def batch_log_pdf(self, x, batch_size):\n- raise NotImplementedError()\n+ :param torch.autograd.Variable x: A value.\n+ :return: total log probability density as a one-dimensional torch.autograd.Variable of size 1.\n+ :rtype: torch.autograd.Variable\n+ \"\"\"\n+ return torch.sum(self.batch_log_pdf(x, *args, **kwargs))\n \n- def support(self):\n- raise NotImplementedError(\"Support not supported for {}\".format(str(type(self))))\n+ def batch_log_pdf(self, x, *args, **kwargs):\n+ \"\"\"\n+ Evaluates log probability densities for one or a batch of samples and parameters.\n+\n+ :param torch.autograd.Variable x: A single value or a batch of values batched along axis 0.\n+ :return: log probability densities as a one-dimensional torch.autograd.Variable.\n+ :rtype: torch.autograd.Variable\n+ \"\"\"\n+ raise NotImplementedError\n+\n+ def support(self, *args, **kwargs):\n+ \"\"\"\n+ Returns a representation of the distribution's support.\n+\n+ :return: A representation of the distribution's support.\n+ :rtype: torch.Tensor\n+ \"\"\"\n+ raise NotImplementedError(\"Support not implemented for {}\".format(type(self)))\n \n def analytic_mean(self, *args, **kwargs):\n \"\"\"\n Analytic mean of the distribution, to be implemented by derived classes.\n+\n Note that this is optional, and currently only used for testing distributions.\n+\n :return: Analytic mean, assuming it can be computed analytically given the distribution parameters\n :rtype: torch.autograd.Variable.\n \"\"\"\n- raise NotImplementedError(\"Method not implemented by the subclass {}\".format(str(type(self))))\n+ raise NotImplementedError(\"Method not implemented by the subclass {}\".format(type(self)))\n \n def analytic_var(self, *args, **kwargs):\n \"\"\"\n Analytic variance of the distribution, to be implemented by derived classes.\n+\n Note that this is optional, and currently only used for testing distributions.\n+\n :return: Analytic variance, assuming it can be computed analytically 
given the distribution parameters\n :rtype: torch.autograd.Variable.\n \"\"\"\n- raise NotImplementedError(\"Method not implemented by the subclass {}\".format(str(type(self))))\n+ raise NotImplementedError(\"Method not implemented by the subclass {}\".format(type(self)))\n", "issue": "Provide default implementation of batch_log_pdf\nCould we provide a default implementation of `batch_log_pdf` as a simple for loop?\r\n```py\r\nclass Distribution(object):\r\n ...\r\n def batch_log_pdf(self, x, batch_size, *args, **kwargs):\r\n result = torch.Tensor([batch_size])\r\n for i in range(batch_size):\r\n result[i] = self.log_pdf(x[i], *args, **kwargs)\r\n return torch.autograd.Variable(result) # Caller decides whether to .sum().\r\n```\r\nOr do we want to instead implement correct handling of `NotImplementedError`s everywhere `batch_log_pdf` is used?\r\n\r\nDisclaimer: I don't understand what `batch_log_pdf` does, and there is no docstring.\r\n\r\nEdited to not sum the result.\n", "before_files": [{"content": "class Distribution(object):\n \"\"\"\n Distribution abstract base class\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor for base distribution class.\n\n Currently takes no explicit arguments.\n \"\"\"\n self.reparameterized = False\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Samples on call\n \"\"\"\n return self.sample(*args, **kwargs)\n\n def sample(self, *args, **kwargs):\n \"\"\"\n Virtual sample method.\n \"\"\"\n raise NotImplementedError()\n\n def log_pdf(self, x):\n raise NotImplementedError()\n\n def batch_log_pdf(self, x, batch_size):\n raise NotImplementedError()\n\n def support(self):\n raise NotImplementedError(\"Support not supported for {}\".format(str(type(self))))\n\n def analytic_mean(self, *args, **kwargs):\n \"\"\"\n Analytic mean of the distribution, to be implemented by derived classes.\n Note that this is optional, and currently only used for testing distributions.\n :return: Analytic mean, assuming it can be computed analytically given the distribution parameters\n :rtype: torch.autograd.Variable.\n \"\"\"\n raise NotImplementedError(\"Method not implemented by the subclass {}\".format(str(type(self))))\n\n def analytic_var(self, *args, **kwargs):\n \"\"\"\n Analytic variance of the distribution, to be implemented by derived classes.\n Note that this is optional, and currently only used for testing distributions.\n :return: Analytic variance, assuming it can be computed analytically given the distribution parameters\n :rtype: torch.autograd.Variable.\n \"\"\"\n raise NotImplementedError(\"Method not implemented by the subclass {}\".format(str(type(self))))\n", "path": "pyro/distributions/distribution.py"}], "after_files": [{"content": "import torch\n\n\nclass Distribution(object):\n \"\"\"\n Abstract base class for probability distributions.\n\n Instances can either be constructed from a fixed parameter and called without paramters,\n or constructed without a parameter and called with a paramter.\n It is not allowed to specify a parameter both during construction and when calling.\n When calling with a parameter, it is preferred to use one of the singleton instances\n in pyro.distributions rather than constructing a new instance without a parameter.\n\n Derived classes must implement the `sample`, and `batch_log_pdf` methods.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor for base distribution class.\n\n Currently takes no explicit arguments.\n \"\"\"\n self.reparameterized = False\n\n def __call__(self, 
*args, **kwargs):\n \"\"\"\n Samples a random value.\n\n :return: A random value.\n :rtype: torch.autograd.Variable\n \"\"\"\n return self.sample(*args, **kwargs)\n\n def sample(self, *args, **kwargs):\n \"\"\"\n Samples a random value.\n\n :return: A random value.\n :rtype: torch.autograd.Variable\n \"\"\"\n raise NotImplementedError\n\n def log_pdf(self, x, *args, **kwargs):\n \"\"\"\n Evaluates total log probability density for one or a batch of samples and parameters.\n\n :param torch.autograd.Variable x: A value.\n :return: total log probability density as a one-dimensional torch.autograd.Variable of size 1.\n :rtype: torch.autograd.Variable\n \"\"\"\n return torch.sum(self.batch_log_pdf(x, *args, **kwargs))\n\n def batch_log_pdf(self, x, *args, **kwargs):\n \"\"\"\n Evaluates log probability densities for one or a batch of samples and parameters.\n\n :param torch.autograd.Variable x: A single value or a batch of values batched along axis 0.\n :return: log probability densities as a one-dimensional torch.autograd.Variable.\n :rtype: torch.autograd.Variable\n \"\"\"\n raise NotImplementedError\n\n def support(self, *args, **kwargs):\n \"\"\"\n Returns a representation of the distribution's support.\n\n :return: A representation of the distribution's support.\n :rtype: torch.Tensor\n \"\"\"\n raise NotImplementedError(\"Support not implemented for {}\".format(type(self)))\n\n def analytic_mean(self, *args, **kwargs):\n \"\"\"\n Analytic mean of the distribution, to be implemented by derived classes.\n\n Note that this is optional, and currently only used for testing distributions.\n\n :return: Analytic mean, assuming it can be computed analytically given the distribution parameters\n :rtype: torch.autograd.Variable.\n \"\"\"\n raise NotImplementedError(\"Method not implemented by the subclass {}\".format(type(self)))\n\n def analytic_var(self, *args, **kwargs):\n \"\"\"\n Analytic variance of the distribution, to be implemented by derived classes.\n\n Note that this is optional, and currently only used for testing distributions.\n\n :return: Analytic variance, assuming it can be computed analytically given the distribution parameters\n :rtype: torch.autograd.Variable.\n \"\"\"\n raise NotImplementedError(\"Method not implemented by the subclass {}\".format(type(self)))\n", "path": "pyro/distributions/distribution.py"}]}
| 865 | 876 |
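
A note on the record above: the issue proposes a default `batch_log_pdf` built as a loop over `log_pdf`, while the accepted patch inverts the relationship and derives `log_pdf` as the sum of `batch_log_pdf`. The sketch below illustrates that adopted relationship on a toy distribution; it is an illustration only — the class, the unit-variance normal density, and the tensor shapes are assumptions, not part of the pyro code in the record.

```python
import math

import torch


class ToyNormal:
    """Toy stand-in for a pyro Distribution with unit variance."""

    def __init__(self, mu):
        self.mu = mu

    def batch_log_pdf(self, x):
        # One log-density per sample along axis 0, as in the accepted patch.
        return -0.5 * (x - self.mu) ** 2 - 0.5 * math.log(2 * math.pi)

    def log_pdf(self, x):
        # Total log-density, derived from batch_log_pdf exactly as the patch does.
        return torch.sum(self.batch_log_pdf(x))


d = ToyNormal(mu=torch.tensor(0.0))
x = torch.tensor([0.0, 1.0, -1.0])
assert torch.isclose(d.log_pdf(x), d.batch_log_pdf(x).sum())
```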
gh_patches_debug_32842
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-9069
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Delete base85 algorithm
### Describe your change:
Re #6216
Normally, I'm not in favour of just deleting algorithms, but I would make the argument that this is not an algorithm, rather just a snippet of code that utilises another library.
Per `CONTRIBUTING.md`
> Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values
This `base85` algorithm has essentially got two lines of code that purely utilise a single library. The doctests only test an external library.
This repository should not contain examples of how to use a certain library; that would be the library's documentation, which is here:
https://docs.python.org/3/library/base64.html
* [ ] Add an algorithm?
* [ ] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
* [x] Delete an algorithm
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.
* [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): "Fixes #ISSUE-NUMBER".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ciphers/base85.py`
Content:
```
1 import base64
2
3
4 def base85_encode(string: str) -> bytes:
5 """
6 >>> base85_encode("")
7 b''
8 >>> base85_encode("12345")
9 b'0etOA2#'
10 >>> base85_encode("base 85")
11 b'@UX=h+?24'
12 """
13 # encoded the input to a bytes-like object and then a85encode that
14 return base64.a85encode(string.encode("utf-8"))
15
16
17 def base85_decode(a85encoded: bytes) -> str:
18 """
19 >>> base85_decode(b"")
20 ''
21 >>> base85_decode(b"0etOA2#")
22 '12345'
23 >>> base85_decode(b"@UX=h+?24")
24 'base 85'
25 """
26 # a85decode the input into bytes and decode that into a human readable string
27 return base64.a85decode(a85encoded).decode("utf-8")
28
29
30 if __name__ == "__main__":
31 import doctest
32
33 doctest.testmod()
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ciphers/base85.py b/ciphers/base85.py
--- a/ciphers/base85.py
+++ b/ciphers/base85.py
@@ -1,30 +1,55 @@
-import base64
+"""
+Base85 (Ascii85) encoding and decoding
+https://en.wikipedia.org/wiki/Ascii85
+"""
-def base85_encode(string: str) -> bytes:
+
+def _base10_to_85(d: int) -> str:
+ return "".join(chr(d % 85 + 33)) + _base10_to_85(d // 85) if d > 0 else ""
+
+
+def _base85_to_10(digits: list) -> int:
+ return sum(char * 85**i for i, char in enumerate(reversed(digits)))
+
+
+def ascii85_encode(data: bytes) -> bytes:
"""
- >>> base85_encode("")
+ >>> ascii85_encode(b"")
b''
- >>> base85_encode("12345")
+ >>> ascii85_encode(b"12345")
b'0etOA2#'
- >>> base85_encode("base 85")
+ >>> ascii85_encode(b"base 85")
b'@UX=h+?24'
"""
- # encoded the input to a bytes-like object and then a85encode that
- return base64.a85encode(string.encode("utf-8"))
+ binary_data = "".join(bin(ord(d))[2:].zfill(8) for d in data.decode("utf-8"))
+ null_values = (32 * ((len(binary_data) // 32) + 1) - len(binary_data)) // 8
+ binary_data = binary_data.ljust(32 * ((len(binary_data) // 32) + 1), "0")
+ b85_chunks = [int(_s, 2) for _s in map("".join, zip(*[iter(binary_data)] * 32))]
+ result = "".join(_base10_to_85(chunk)[::-1] for chunk in b85_chunks)
+ return bytes(result[:-null_values] if null_values % 4 != 0 else result, "utf-8")
-def base85_decode(a85encoded: bytes) -> str:
+def ascii85_decode(data: bytes) -> bytes:
"""
- >>> base85_decode(b"")
- ''
- >>> base85_decode(b"0etOA2#")
- '12345'
- >>> base85_decode(b"@UX=h+?24")
- 'base 85'
+ >>> ascii85_decode(b"")
+ b''
+ >>> ascii85_decode(b"0etOA2#")
+ b'12345'
+ >>> ascii85_decode(b"@UX=h+?24")
+ b'base 85'
"""
- # a85decode the input into bytes and decode that into a human readable string
- return base64.a85decode(a85encoded).decode("utf-8")
+ null_values = 5 * ((len(data) // 5) + 1) - len(data)
+ binary_data = data.decode("utf-8") + "u" * null_values
+ b85_chunks = map("".join, zip(*[iter(binary_data)] * 5))
+ b85_segments = [[ord(_s) - 33 for _s in chunk] for chunk in b85_chunks]
+ results = [bin(_base85_to_10(chunk))[2::].zfill(32) for chunk in b85_segments]
+ char_chunks = [
+ [chr(int(_s, 2)) for _s in map("".join, zip(*[iter(r)] * 8))] for r in results
+ ]
+ result = "".join("".join(char) for char in char_chunks)
+ offset = int(null_values % 5 == 0)
+ return bytes(result[: offset - null_values], "utf-8")
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/ciphers/base85.py b/ciphers/base85.py\n--- a/ciphers/base85.py\n+++ b/ciphers/base85.py\n@@ -1,30 +1,55 @@\n-import base64\n+\"\"\"\n+Base85 (Ascii85) encoding and decoding\n \n+https://en.wikipedia.org/wiki/Ascii85\n+\"\"\"\n \n-def base85_encode(string: str) -> bytes:\n+\n+def _base10_to_85(d: int) -> str:\n+ return \"\".join(chr(d % 85 + 33)) + _base10_to_85(d // 85) if d > 0 else \"\"\n+\n+\n+def _base85_to_10(digits: list) -> int:\n+ return sum(char * 85**i for i, char in enumerate(reversed(digits)))\n+\n+\n+def ascii85_encode(data: bytes) -> bytes:\n \"\"\"\n- >>> base85_encode(\"\")\n+ >>> ascii85_encode(b\"\")\n b''\n- >>> base85_encode(\"12345\")\n+ >>> ascii85_encode(b\"12345\")\n b'0etOA2#'\n- >>> base85_encode(\"base 85\")\n+ >>> ascii85_encode(b\"base 85\")\n b'@UX=h+?24'\n \"\"\"\n- # encoded the input to a bytes-like object and then a85encode that\n- return base64.a85encode(string.encode(\"utf-8\"))\n+ binary_data = \"\".join(bin(ord(d))[2:].zfill(8) for d in data.decode(\"utf-8\"))\n+ null_values = (32 * ((len(binary_data) // 32) + 1) - len(binary_data)) // 8\n+ binary_data = binary_data.ljust(32 * ((len(binary_data) // 32) + 1), \"0\")\n+ b85_chunks = [int(_s, 2) for _s in map(\"\".join, zip(*[iter(binary_data)] * 32))]\n+ result = \"\".join(_base10_to_85(chunk)[::-1] for chunk in b85_chunks)\n+ return bytes(result[:-null_values] if null_values % 4 != 0 else result, \"utf-8\")\n \n \n-def base85_decode(a85encoded: bytes) -> str:\n+def ascii85_decode(data: bytes) -> bytes:\n \"\"\"\n- >>> base85_decode(b\"\")\n- ''\n- >>> base85_decode(b\"0etOA2#\")\n- '12345'\n- >>> base85_decode(b\"@UX=h+?24\")\n- 'base 85'\n+ >>> ascii85_decode(b\"\")\n+ b''\n+ >>> ascii85_decode(b\"0etOA2#\")\n+ b'12345'\n+ >>> ascii85_decode(b\"@UX=h+?24\")\n+ b'base 85'\n \"\"\"\n- # a85decode the input into bytes and decode that into a human readable string\n- return base64.a85decode(a85encoded).decode(\"utf-8\")\n+ null_values = 5 * ((len(data) // 5) + 1) - len(data)\n+ binary_data = data.decode(\"utf-8\") + \"u\" * null_values\n+ b85_chunks = map(\"\".join, zip(*[iter(binary_data)] * 5))\n+ b85_segments = [[ord(_s) - 33 for _s in chunk] for chunk in b85_chunks]\n+ results = [bin(_base85_to_10(chunk))[2::].zfill(32) for chunk in b85_segments]\n+ char_chunks = [\n+ [chr(int(_s, 2)) for _s in map(\"\".join, zip(*[iter(r)] * 8))] for r in results\n+ ]\n+ result = \"\".join(\"\".join(char) for char in char_chunks)\n+ offset = int(null_values % 5 == 0)\n+ return bytes(result[: offset - null_values], \"utf-8\")\n \n \n if __name__ == \"__main__\":\n", "issue": "Delete base85 algorithm\n### Describe your change:\r\nRe #6216\r\n\r\nNormally, I'm not in favour of just deleting algorithms, but I would make the argument that this is not an algorithm, rather just a snippet of code that utilises another library.\r\n\r\nPer `CONTRIBTUING.md`\r\n> Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values\r\nThis `base85` algorithm has essentially got two lines of code that purely utilise a singular library. 
The doctests only test an external library\r\n\r\nThis repository should not contains examples on how to use a certain library, that would be the library documentation here\r\nhttps://docs.python.org/3/library/base64.html\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [ ] Fix a bug or typo in an existing algorithm?\r\n* [ ] Documentation change?\r\n* [x] Delete an algorithm\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.\r\n* [x] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [x] All new algorithms include at least one URL that points to Wikipedia or another similar explanation.\r\n* [x] If this pull request resolves one or more open issues then the description above includes the issue number(s) with a [closing keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue): \"Fixes #ISSUE-NUMBER\".\r\n\n", "before_files": [{"content": "import base64\n\n\ndef base85_encode(string: str) -> bytes:\n \"\"\"\n >>> base85_encode(\"\")\n b''\n >>> base85_encode(\"12345\")\n b'0etOA2#'\n >>> base85_encode(\"base 85\")\n b'@UX=h+?24'\n \"\"\"\n # encoded the input to a bytes-like object and then a85encode that\n return base64.a85encode(string.encode(\"utf-8\"))\n\n\ndef base85_decode(a85encoded: bytes) -> str:\n \"\"\"\n >>> base85_decode(b\"\")\n ''\n >>> base85_decode(b\"0etOA2#\")\n '12345'\n >>> base85_decode(b\"@UX=h+?24\")\n 'base 85'\n \"\"\"\n # a85decode the input into bytes and decode that into a human readable string\n return base64.a85decode(a85encoded).decode(\"utf-8\")\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "ciphers/base85.py"}], "after_files": [{"content": "\"\"\"\nBase85 (Ascii85) encoding and decoding\n\nhttps://en.wikipedia.org/wiki/Ascii85\n\"\"\"\n\n\ndef _base10_to_85(d: int) -> str:\n return \"\".join(chr(d % 85 + 33)) + _base10_to_85(d // 85) if d > 0 else \"\"\n\n\ndef _base85_to_10(digits: list) -> int:\n return sum(char * 85**i for i, char in enumerate(reversed(digits)))\n\n\ndef ascii85_encode(data: bytes) -> bytes:\n \"\"\"\n >>> ascii85_encode(b\"\")\n b''\n >>> ascii85_encode(b\"12345\")\n b'0etOA2#'\n >>> ascii85_encode(b\"base 85\")\n b'@UX=h+?24'\n \"\"\"\n binary_data = \"\".join(bin(ord(d))[2:].zfill(8) for d in data.decode(\"utf-8\"))\n null_values = (32 * ((len(binary_data) // 32) + 1) - len(binary_data)) // 8\n binary_data = binary_data.ljust(32 * ((len(binary_data) // 32) + 1), \"0\")\n b85_chunks = [int(_s, 2) for _s in map(\"\".join, zip(*[iter(binary_data)] * 32))]\n result = \"\".join(_base10_to_85(chunk)[::-1] for chunk in b85_chunks)\n return bytes(result[:-null_values] if null_values % 4 != 0 else result, \"utf-8\")\n\n\ndef ascii85_decode(data: bytes) -> bytes:\n \"\"\"\n >>> 
ascii85_decode(b\"\")\n b''\n >>> ascii85_decode(b\"0etOA2#\")\n b'12345'\n >>> ascii85_decode(b\"@UX=h+?24\")\n b'base 85'\n \"\"\"\n null_values = 5 * ((len(data) // 5) + 1) - len(data)\n binary_data = data.decode(\"utf-8\") + \"u\" * null_values\n b85_chunks = map(\"\".join, zip(*[iter(binary_data)] * 5))\n b85_segments = [[ord(_s) - 33 for _s in chunk] for chunk in b85_chunks]\n results = [bin(_base85_to_10(chunk))[2::].zfill(32) for chunk in b85_segments]\n char_chunks = [\n [chr(int(_s, 2)) for _s in map(\"\".join, zip(*[iter(r)] * 8))] for r in results\n ]\n result = \"\".join(\"\".join(char) for char in char_chunks)\n offset = int(null_values % 5 == 0)\n return bytes(result[: offset - null_values], \"utf-8\")\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "ciphers/base85.py"}]}
| 1,060 | 951 |
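
As a quick sanity check on the record above: the replacement `ascii85_encode`/`ascii85_decode` functions are meant to reproduce what `base64.a85encode`/`a85decode` already do, and the doctests in both versions pin the same byte strings. The snippet below shows the expected round trip using only the standard library; the literals are taken directly from the doctests in the record.

```python
import base64

# Expected values from the doctests in the record above.
assert base64.a85encode(b"12345") == b"0etOA2#"
assert base64.a85encode(b"base 85") == b"@UX=h+?24"

# Round trip back to the original bytes.
assert base64.a85decode(b"@UX=h+?24") == b"base 85"
assert base64.a85decode(base64.a85encode(b"12345")) == b"12345"
```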
gh_patches_debug_28897
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-2663
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Database migration fails if the URI contains '%' signs
If the `SQLALCHEMY_DATABASE_URI` contains query parameters like `ssl_ca=/path/to/cert`, the path separators will be URL-encoded with `%` signs.
This fails when passing the URI to the alembic configuration (https://alembic.sqlalchemy.org/en/latest/api/config.html#alembic.config.Config.set_main_option).
The `%` signs should be escaped in the URI string before passing it to alembic.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/env.py`
Content:
```
1 from __future__ import with_statement
2 from alembic import context
3 from sqlalchemy import engine_from_config, pool
4 from sqlalchemy.engine.url import make_url
5 from logging.config import fileConfig
6
7 # this is the Alembic Config object, which provides
8 # access to the values within the .ini file in use.
9
10 config = context.config
11
12 # Interpret the config file for Python logging.
13 # This line sets up loggers basically.
14 fileConfig(config.config_file_name)
15
16 # add your model's MetaData object here
17 # for 'autogenerate' support
18 # from myapp import mymodel
19 # target_metadata = mymodel.Base.metadata
20 from flask import current_app
21
22
23 def set_database_url(config):
24 url = current_app.config.get('SQLALCHEMY_DATABASE_URI')
25 try:
26 # In case of MySQL, add ``charset=utf8`` to the parameters (if no charset is set),
27 # because this is what Flask-SQLAlchemy does
28 if url.startswith("mysql"):
29 parsed_url = make_url(url)
30 parsed_url.query.setdefault("charset", "utf8")
31 url = str(parsed_url)
32 except Exception as exx:
33 print(u"Attempted to set charset=utf8 on connection, but failed: {}".format(exx))
34 config.set_main_option('sqlalchemy.url', url)
35
36
37 set_database_url(config)
38 target_metadata = current_app.extensions['migrate'].db.metadata
39
40 # other values from the config, defined by the needs of env.py,
41 # can be acquired:
42 # my_important_option = config.get_main_option("my_important_option")
43 # ... etc.
44
45
46 def run_migrations_offline():
47 """Run migrations in 'offline' mode.
48
49 This configures the context with just a URL
50 and not an Engine, though an Engine is acceptable
51 here as well. By skipping the Engine creation
52 we don't even need a DBAPI to be available.
53
54 Calls to context.execute() here emit the given string to the
55 script output.
56
57 """
58 url = config.get_main_option("sqlalchemy.url")
59 context.configure(url=url)
60
61 with context.begin_transaction():
62 context.run_migrations()
63
64
65 def run_migrations_online():
66 """Run migrations in 'online' mode.
67
68 In this scenario we need to create an Engine
69 and associate a connection with the context.
70
71 """
72 # FIX for Postgres updates
73 url = config.get_section(config.config_ini_section).get("sqlalchemy.url")
74 driver = url.split(":")[0]
75
76 if driver == "postgresql+psycopg2":
77 engine = engine_from_config(
78 config.get_section(config.config_ini_section),
79 prefix='sqlalchemy.',
80 isolation_level="AUTOCOMMIT",
81 poolclass=pool.NullPool)
82 else:
83 engine = engine_from_config(
84 config.get_section(config.config_ini_section),
85 prefix='sqlalchemy.',
86 poolclass=pool.NullPool)
87
88 connection = engine.connect()
89 context.configure(
90 connection=connection,
91 target_metadata=target_metadata,
92 compare_type=True
93 )
94
95 try:
96 with context.begin_transaction():
97 context.run_migrations()
98 finally:
99 connection.close()
100
101 if context.is_offline_mode():
102 print("Running offline")
103 run_migrations_offline()
104 else:
105 print("Running online")
106 run_migrations_online()
107
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/migrations/env.py b/migrations/env.py
--- a/migrations/env.py
+++ b/migrations/env.py
@@ -3,6 +3,7 @@
from sqlalchemy import engine_from_config, pool
from sqlalchemy.engine.url import make_url
from logging.config import fileConfig
+from six.moves.urllib.parse import quote
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
@@ -28,10 +29,13 @@
if url.startswith("mysql"):
parsed_url = make_url(url)
parsed_url.query.setdefault("charset", "utf8")
+ # We need to quote the password in case it contains special chars
+ parsed_url.password = quote(parsed_url.password)
url = str(parsed_url)
except Exception as exx:
print(u"Attempted to set charset=utf8 on connection, but failed: {}".format(exx))
- config.set_main_option('sqlalchemy.url', url)
+ # set_main_option() requires escaped "%" signs in the string
+ config.set_main_option('sqlalchemy.url', url.replace('%', '%%'))
set_database_url(config)
@@ -98,10 +102,10 @@
finally:
connection.close()
+
if context.is_offline_mode():
print("Running offline")
run_migrations_offline()
else:
print("Running online")
run_migrations_online()
-
|
{"golden_diff": "diff --git a/migrations/env.py b/migrations/env.py\n--- a/migrations/env.py\n+++ b/migrations/env.py\n@@ -3,6 +3,7 @@\n from sqlalchemy import engine_from_config, pool\n from sqlalchemy.engine.url import make_url\n from logging.config import fileConfig\n+from six.moves.urllib.parse import quote\n \n # this is the Alembic Config object, which provides\n # access to the values within the .ini file in use.\n@@ -28,10 +29,13 @@\n if url.startswith(\"mysql\"):\n parsed_url = make_url(url)\n parsed_url.query.setdefault(\"charset\", \"utf8\")\n+ # We need to quote the password in case it contains special chars\n+ parsed_url.password = quote(parsed_url.password)\n url = str(parsed_url)\n except Exception as exx:\n print(u\"Attempted to set charset=utf8 on connection, but failed: {}\".format(exx))\n- config.set_main_option('sqlalchemy.url', url)\n+ # set_main_option() requires escaped \"%\" signs in the string\n+ config.set_main_option('sqlalchemy.url', url.replace('%', '%%'))\n \n \n set_database_url(config)\n@@ -98,10 +102,10 @@\n finally:\n connection.close()\n \n+\n if context.is_offline_mode():\n print(\"Running offline\")\n run_migrations_offline()\n else:\n print(\"Running online\")\n run_migrations_online()\n-\n", "issue": "Database migration fails if the URI contains '%' signs\nIf the `SQLALCHEMY_DATABASE_URI` contains query parameters like `ssl_ca=/path/to/cert` the path separators will be url-encoded with `%` signs.\r\nThis fails when passing the URI to the alembic configuration (https://alembic.sqlalchemy.org/en/latest/api/config.html#alembic.config.Config.set_main_option).\r\nThe `%` signs should be escaped in the URI string before passing it to alembic.\n", "before_files": [{"content": "from __future__ import with_statement\nfrom alembic import context\nfrom sqlalchemy import engine_from_config, pool\nfrom sqlalchemy.engine.url import make_url\nfrom logging.config import fileConfig\n\n# this is the Alembic Config object, which provides\n# access to the values within the .ini file in use.\n\nconfig = context.config\n\n# Interpret the config file for Python logging.\n# This line sets up loggers basically.\nfileConfig(config.config_file_name)\n\n# add your model's MetaData object here\n# for 'autogenerate' support\n# from myapp import mymodel\n# target_metadata = mymodel.Base.metadata\nfrom flask import current_app\n\n\ndef set_database_url(config):\n url = current_app.config.get('SQLALCHEMY_DATABASE_URI')\n try:\n # In case of MySQL, add ``charset=utf8`` to the parameters (if no charset is set),\n # because this is what Flask-SQLAlchemy does\n if url.startswith(\"mysql\"):\n parsed_url = make_url(url)\n parsed_url.query.setdefault(\"charset\", \"utf8\")\n url = str(parsed_url)\n except Exception as exx:\n print(u\"Attempted to set charset=utf8 on connection, but failed: {}\".format(exx))\n config.set_main_option('sqlalchemy.url', url)\n\n\nset_database_url(config)\ntarget_metadata = current_app.extensions['migrate'].db.metadata\n\n# other values from the config, defined by the needs of env.py,\n# can be acquired:\n# my_important_option = config.get_main_option(\"my_important_option\")\n# ... etc.\n\n\ndef run_migrations_offline():\n \"\"\"Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. 
By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n \"\"\"\n url = config.get_main_option(\"sqlalchemy.url\")\n context.configure(url=url)\n\n with context.begin_transaction():\n context.run_migrations()\n\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n \"\"\"\n # FIX for Postgres updates\n url = config.get_section(config.config_ini_section).get(\"sqlalchemy.url\")\n driver = url.split(\":\")[0]\n\n if driver == \"postgresql+psycopg2\":\n engine = engine_from_config(\n config.get_section(config.config_ini_section),\n prefix='sqlalchemy.',\n isolation_level=\"AUTOCOMMIT\",\n poolclass=pool.NullPool)\n else:\n engine = engine_from_config(\n config.get_section(config.config_ini_section),\n prefix='sqlalchemy.',\n poolclass=pool.NullPool)\n\n connection = engine.connect()\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n compare_type=True\n )\n\n try:\n with context.begin_transaction():\n context.run_migrations()\n finally:\n connection.close()\n\nif context.is_offline_mode():\n print(\"Running offline\")\n run_migrations_offline()\nelse:\n print(\"Running online\")\n run_migrations_online()\n\n", "path": "migrations/env.py"}], "after_files": [{"content": "from __future__ import with_statement\nfrom alembic import context\nfrom sqlalchemy import engine_from_config, pool\nfrom sqlalchemy.engine.url import make_url\nfrom logging.config import fileConfig\nfrom six.moves.urllib.parse import quote\n\n# this is the Alembic Config object, which provides\n# access to the values within the .ini file in use.\n\nconfig = context.config\n\n# Interpret the config file for Python logging.\n# This line sets up loggers basically.\nfileConfig(config.config_file_name)\n\n# add your model's MetaData object here\n# for 'autogenerate' support\n# from myapp import mymodel\n# target_metadata = mymodel.Base.metadata\nfrom flask import current_app\n\n\ndef set_database_url(config):\n url = current_app.config.get('SQLALCHEMY_DATABASE_URI')\n try:\n # In case of MySQL, add ``charset=utf8`` to the parameters (if no charset is set),\n # because this is what Flask-SQLAlchemy does\n if url.startswith(\"mysql\"):\n parsed_url = make_url(url)\n parsed_url.query.setdefault(\"charset\", \"utf8\")\n # We need to quote the password in case it contains special chars\n parsed_url.password = quote(parsed_url.password)\n url = str(parsed_url)\n except Exception as exx:\n print(u\"Attempted to set charset=utf8 on connection, but failed: {}\".format(exx))\n # set_main_option() requires escaped \"%\" signs in the string\n config.set_main_option('sqlalchemy.url', url.replace('%', '%%'))\n\n\nset_database_url(config)\ntarget_metadata = current_app.extensions['migrate'].db.metadata\n\n# other values from the config, defined by the needs of env.py,\n# can be acquired:\n# my_important_option = config.get_main_option(\"my_important_option\")\n# ... etc.\n\n\ndef run_migrations_offline():\n \"\"\"Run migrations in 'offline' mode.\n\n This configures the context with just a URL\n and not an Engine, though an Engine is acceptable\n here as well. 
By skipping the Engine creation\n we don't even need a DBAPI to be available.\n\n Calls to context.execute() here emit the given string to the\n script output.\n\n \"\"\"\n url = config.get_main_option(\"sqlalchemy.url\")\n context.configure(url=url)\n\n with context.begin_transaction():\n context.run_migrations()\n\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n \"\"\"\n # FIX for Postgres updates\n url = config.get_section(config.config_ini_section).get(\"sqlalchemy.url\")\n driver = url.split(\":\")[0]\n\n if driver == \"postgresql+psycopg2\":\n engine = engine_from_config(\n config.get_section(config.config_ini_section),\n prefix='sqlalchemy.',\n isolation_level=\"AUTOCOMMIT\",\n poolclass=pool.NullPool)\n else:\n engine = engine_from_config(\n config.get_section(config.config_ini_section),\n prefix='sqlalchemy.',\n poolclass=pool.NullPool)\n\n connection = engine.connect()\n context.configure(\n connection=connection,\n target_metadata=target_metadata,\n compare_type=True\n )\n\n try:\n with context.begin_transaction():\n context.run_migrations()\n finally:\n connection.close()\n\n\nif context.is_offline_mode():\n print(\"Running offline\")\n run_migrations_offline()\nelse:\n print(\"Running online\")\n run_migrations_online()\n", "path": "migrations/env.py"}]}
| 1,277 | 313 |
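
One detail from the record above that is easy to miss: the patch comment notes that `Config.set_main_option()` runs configparser-style interpolation, so every literal `%` in the database URI has to be doubled before it is handed to alembic. A minimal sketch of that escaping step — the URI and password are made up, the stdlib `urllib.parse.quote` stands in for the `six.moves` import used in the patch, and the commented-out call assumes an alembic `Config` object named `config`:

```python
from urllib.parse import quote  # the patch uses six.moves.urllib.parse for py2/py3 support

# Hypothetical URI whose query string contains URL-encoded "%" sequences.
password = "p@ss word"
url = "mysql://pi:{}@db/pi?ssl_ca=%2Fpath%2Fto%2Fcert&charset=utf8".format(quote(password))

# set_main_option() treats "%" as an interpolation marker, so escape it first.
escaped = url.replace("%", "%%")
assert "%%2F" in escaped
# config.set_main_option("sqlalchemy.url", escaped)  # as done in migrations/env.py
```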
gh_patches_debug_11387
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-592
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Uvicorn via gunicorn worker doesn't respect `--forwarded-allow-ips`
I use uvicorn in docker as a uvicorn worker for gunicorn for my fastapi app. My application needs to know the real client IP of each request, so I use a proxy server with the `X-Forwarded-For` header.
Gunicorn has a special option to change the proxy IP to the real IP, so I run gunicorn like this:
```
gunicorn \
ppm_telegram_bot.api:app \
--forwarded-allow-ips="*"
--worker-class=uvicorn.workers.UvicornWorker \
--bind=0.0.0.0:$PORT
```
Because I'm in a container, my WSGI/ASGI server receives requests not from the localhost, but from the docker network.
But the uvicorn worker doesn't respect gunicorn's `forwarded-allow-ips`, so in `ProxyHeadersMiddleware.trusted_hosts` I receive the default `127.0.0.1` and the proxy IP instead of the real IP.
https://github.com/encode/uvicorn/blob/9d9f8820a8155e36dcb5e4d4023f470e51aa4e03/uvicorn/middleware/proxy_headers.py#L14-L17
It looks like uvicorn-worker can forward this information to config via `config_kwargs`: https://github.com/encode/uvicorn/blob/9d9f8820a8155e36dcb5e4d4023f470e51aa4e03/uvicorn/workers.py#L28-L35
I could do a PR with this change, if required 🙌
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/workers.py`
Content:
```
1 import asyncio
2 import logging
3
4 from gunicorn.workers.base import Worker
5 from uvicorn.config import Config
6 from uvicorn.main import Server
7
8
9 class UvicornWorker(Worker):
10 """
11 A worker class for Gunicorn that interfaces with an ASGI consumer callable,
12 rather than a WSGI callable.
13 """
14
15 CONFIG_KWARGS = {"loop": "uvloop", "http": "httptools"}
16
17 def __init__(self, *args, **kwargs):
18 super(UvicornWorker, self).__init__(*args, **kwargs)
19
20 logger = logging.getLogger("uvicorn.error")
21 logger.handlers = self.log.error_log.handlers
22 logger.setLevel(self.log.error_log.level)
23
24 logger = logging.getLogger("uvicorn.access")
25 logger.handlers = self.log.access_log.handlers
26 logger.setLevel(self.log.access_log.level)
27
28 config_kwargs = {
29 "app": None,
30 "log_config": None,
31 "timeout_keep_alive": self.cfg.keepalive,
32 "timeout_notify": self.timeout,
33 "callback_notify": self.callback_notify,
34 "limit_max_requests": self.max_requests,
35 }
36
37 if self.cfg.is_ssl:
38 ssl_kwargs = {
39 "ssl_keyfile": self.cfg.ssl_options.get("keyfile"),
40 "ssl_certfile": self.cfg.ssl_options.get("certfile"),
41 "ssl_version": self.cfg.ssl_options.get("ssl_version"),
42 "ssl_cert_reqs": self.cfg.ssl_options.get("cert_reqs"),
43 "ssl_ca_certs": self.cfg.ssl_options.get("ca_certs"),
44 "ssl_ciphers": self.cfg.ssl_options.get("ciphers"),
45 }
46 config_kwargs.update(ssl_kwargs)
47
48 if self.cfg.settings["backlog"].value:
49 config_kwargs["backlog"] = self.cfg.settings["backlog"].value
50
51 config_kwargs.update(self.CONFIG_KWARGS)
52
53 self.config = Config(**config_kwargs)
54
55 def init_process(self):
56 self.config.setup_event_loop()
57 super(UvicornWorker, self).init_process()
58
59 def init_signals(self):
60 pass
61
62 def run(self):
63 self.config.app = self.wsgi
64 server = Server(config=self.config)
65 loop = asyncio.get_event_loop()
66 loop.run_until_complete(server.serve(sockets=self.sockets))
67
68 async def callback_notify(self):
69 self.notify()
70
71
72 class UvicornH11Worker(UvicornWorker):
73 CONFIG_KWARGS = {"loop": "asyncio", "http": "h11"}
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/workers.py b/uvicorn/workers.py
--- a/uvicorn/workers.py
+++ b/uvicorn/workers.py
@@ -2,6 +2,7 @@
import logging
from gunicorn.workers.base import Worker
+
from uvicorn.config import Config
from uvicorn.main import Server
@@ -32,6 +33,7 @@
"timeout_notify": self.timeout,
"callback_notify": self.callback_notify,
"limit_max_requests": self.max_requests,
+ "forwarded_allow_ips": self.cfg.forwarded_allow_ips,
}
if self.cfg.is_ssl:
|
{"golden_diff": "diff --git a/uvicorn/workers.py b/uvicorn/workers.py\n--- a/uvicorn/workers.py\n+++ b/uvicorn/workers.py\n@@ -2,6 +2,7 @@\n import logging\n \n from gunicorn.workers.base import Worker\n+\n from uvicorn.config import Config\n from uvicorn.main import Server\n \n@@ -32,6 +33,7 @@\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": self.max_requests,\n+ \"forwarded_allow_ips\": self.cfg.forwarded_allow_ips,\n }\n \n if self.cfg.is_ssl:\n", "issue": "Uvicorn via gunicorn worker doesn't respect `--forwarded-allow-ips`\nI use uvicorn in docker as uvicorn-worker for gunicorn for my fastapi app. My application needs to know the real client IP of each request, so I use proxy-server with the `X-Forwarded-For` header.\r\n\r\nGunicorn has a special option to change proxy-ip to real-ip, so I running gunicorn like this:\r\n```\r\ngunicorn \\\r\n ppm_telegram_bot.api:app \\\r\n --forwarded-allow-ips=\"*\" \r\n --worker-class=uvicorn.workers.UvicornWorker \\\r\n --bind=0.0.0.0:$PORT\r\n```\r\n\r\nBecause I'm in a container, my WSGI/ASGI server receives requests not from the localhost, but from the docker network.\r\n\r\nBut uvicorn-worker doesn't respect gunicorn's `forwarded-allow-ips`, so in `ProxyHeadersMiddleware.trusted_hosts` I receive default `127.0.0.1` and proxy-ip instead of real-ip.\r\nhttps://github.com/encode/uvicorn/blob/9d9f8820a8155e36dcb5e4d4023f470e51aa4e03/uvicorn/middleware/proxy_headers.py#L14-L17\r\n\r\nIt looks like uvicorn-worker can forward this information to config via `config_kwargs`: https://github.com/encode/uvicorn/blob/9d9f8820a8155e36dcb5e4d4023f470e51aa4e03/uvicorn/workers.py#L28-L35\r\n\r\nI could do PR with this change, if required \ud83d\ude4c \n", "before_files": [{"content": "import asyncio\nimport logging\n\nfrom gunicorn.workers.base import Worker\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"uvloop\", \"http\": \"httptools\"}\n\n def __init__(self, *args, **kwargs):\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n logger = logging.getLogger(\"uvicorn.error\")\n logger.handlers = self.log.error_log.handlers\n logger.setLevel(self.log.error_log.level)\n\n logger = logging.getLogger(\"uvicorn.access\")\n logger.handlers = self.log.access_log.handlers\n logger.setLevel(self.log.access_log.level)\n\n config_kwargs = {\n \"app\": None,\n \"log_config\": None,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": self.max_requests,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n if self.cfg.settings[\"backlog\"].value:\n config_kwargs[\"backlog\"] = self.cfg.settings[\"backlog\"].value\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self):\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def 
init_signals(self):\n pass\n\n def run(self):\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n\n async def callback_notify(self):\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}], "after_files": [{"content": "import asyncio\nimport logging\n\nfrom gunicorn.workers.base import Worker\n\nfrom uvicorn.config import Config\nfrom uvicorn.main import Server\n\n\nclass UvicornWorker(Worker):\n \"\"\"\n A worker class for Gunicorn that interfaces with an ASGI consumer callable,\n rather than a WSGI callable.\n \"\"\"\n\n CONFIG_KWARGS = {\"loop\": \"uvloop\", \"http\": \"httptools\"}\n\n def __init__(self, *args, **kwargs):\n super(UvicornWorker, self).__init__(*args, **kwargs)\n\n logger = logging.getLogger(\"uvicorn.error\")\n logger.handlers = self.log.error_log.handlers\n logger.setLevel(self.log.error_log.level)\n\n logger = logging.getLogger(\"uvicorn.access\")\n logger.handlers = self.log.access_log.handlers\n logger.setLevel(self.log.access_log.level)\n\n config_kwargs = {\n \"app\": None,\n \"log_config\": None,\n \"timeout_keep_alive\": self.cfg.keepalive,\n \"timeout_notify\": self.timeout,\n \"callback_notify\": self.callback_notify,\n \"limit_max_requests\": self.max_requests,\n \"forwarded_allow_ips\": self.cfg.forwarded_allow_ips,\n }\n\n if self.cfg.is_ssl:\n ssl_kwargs = {\n \"ssl_keyfile\": self.cfg.ssl_options.get(\"keyfile\"),\n \"ssl_certfile\": self.cfg.ssl_options.get(\"certfile\"),\n \"ssl_version\": self.cfg.ssl_options.get(\"ssl_version\"),\n \"ssl_cert_reqs\": self.cfg.ssl_options.get(\"cert_reqs\"),\n \"ssl_ca_certs\": self.cfg.ssl_options.get(\"ca_certs\"),\n \"ssl_ciphers\": self.cfg.ssl_options.get(\"ciphers\"),\n }\n config_kwargs.update(ssl_kwargs)\n\n if self.cfg.settings[\"backlog\"].value:\n config_kwargs[\"backlog\"] = self.cfg.settings[\"backlog\"].value\n\n config_kwargs.update(self.CONFIG_KWARGS)\n\n self.config = Config(**config_kwargs)\n\n def init_process(self):\n self.config.setup_event_loop()\n super(UvicornWorker, self).init_process()\n\n def init_signals(self):\n pass\n\n def run(self):\n self.config.app = self.wsgi\n server = Server(config=self.config)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(server.serve(sockets=self.sockets))\n\n async def callback_notify(self):\n self.notify()\n\n\nclass UvicornH11Worker(UvicornWorker):\n CONFIG_KWARGS = {\"loop\": \"asyncio\", \"http\": \"h11\"}\n", "path": "uvicorn/workers.py"}]}
| 1,316 | 137 |
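
To see the effect of the one-line fix in the record above from the worker's side: gunicorn's `--forwarded-allow-ips` setting only helps if it reaches uvicorn's `Config`, which is what the added `"forwarded_allow_ips": self.cfg.forwarded_allow_ips` entry does. The sketch below constructs a `Config` with that keyword directly; the dummy ASGI app is an assumption for the sake of a runnable example, and the exact wiring into `ProxyHeadersMiddleware` is left to uvicorn internals.

```python
from uvicorn.config import Config


async def app(scope, receive, send):
    # Dummy ASGI app; enough to construct a Config for illustration.
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})


# With the patch, the worker forwards gunicorn's --forwarded-allow-ips="*"
# into this keyword argument instead of leaving the "127.0.0.1" default
# described in the issue.
config = Config(app=app, proxy_headers=True, forwarded_allow_ips="*")
print(config.forwarded_allow_ips)  # "*"
```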
gh_patches_debug_1288
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-555
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Version Bump in conf.py?
https://github.com/archlinux/archinstall/blob/a4033a7d3a94916f2b4972d212f9d0069fca39cd/docs/conf.py#L44
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 import os
2 import re
3 import sys
4
5 sys.path.insert(0, os.path.abspath('..'))
6
7
8 def process_docstring(app, what, name, obj, options, lines):
9 spaces_pat = re.compile(r"( {8})")
10 ll = []
11 for line in lines:
12 ll.append(spaces_pat.sub(" ", line))
13 lines[:] = ll
14
15
16 def setup(app):
17 app.connect('autodoc-process-docstring', process_docstring)
18
19
20 # Configuration file for the Sphinx documentation builder.
21 #
22 # This file only contains a selection of the most common options. For a full
23 # list see the documentation:
24 # https://www.sphinx-doc.org/en/master/usage/configuration.html
25
26 # -- Path setup --------------------------------------------------------------
27
28 # If extensions (or modules to document with autodoc) are in another directory,
29 # add these directories to sys.path here. If the directory is relative to the
30 # documentation root, use os.path.abspath to make it absolute, like shown here.
31 #
32 # import os
33 # import sys
34 # sys.path.insert(0, os.path.abspath('.'))
35
36
37 # -- Project information -----------------------------------------------------
38
39 project = 'python-archinstall'
40 copyright = '2020, Anton Hvornum'
41 author = 'Anton Hvornum'
42
43 # The full version, including alpha/beta/rc tags
44 release = 'v2.1.0'
45
46 # -- General configuration ---------------------------------------------------
47
48 master_doc = 'index'
49 # Add any Sphinx extension module names here, as strings. They can be
50 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
51 # ones.
52 extensions = [
53 'sphinx.ext.autodoc',
54 'sphinx.ext.inheritance_diagram',
55 'sphinx.ext.todo'
56 ]
57
58 # Add any paths that contain templates here, relative to this directory.
59 templates_path = ['_templates']
60
61 # List of patterns, relative to source directory, that match files and
62 # directories to ignore when looking for source files.
63 # This pattern also affects html_static_path and html_extra_path.
64 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
65
66 # -- Options for HTML output -------------------------------------------------
67
68 # The theme to use for HTML and HTML Help pages. See the documentation for
69 # a list of builtin themes.
70 #
71 # html_theme = 'alabaster'
72 html_theme = 'sphinx_rtd_theme'
73
74 html_logo = "_static/logo.png"
75
76 # Add any paths that contain custom static files (such as style sheets) here,
77 # relative to this directory. They are copied after the builtin static files,
78 # so a file named "default.css" will overwrite the builtin "default.css".
79 html_static_path = ['_static']
80
81 # If false, no module index is generated.
82 html_domain_indices = True
83
84 # If false, no index is generated.
85 html_use_index = True
86
87 # If true, the index is split into individual pages for each letter.
88 html_split_index = True
89
90 # If true, links to the reST sources are added to the pages.
91 html_show_sourcelink = False
92
93 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
94 # html_show_sphinx = True
95
96 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
97 # html_show_copyright = True
98
99 # If true, an OpenSearch description file will be output, and all pages will
100 # contain a <link> tag referring to it. The value of this option must be the
101 # base URL from which the finished HTML is served.
102 # html_use_opensearch = ''
103
104 # This is the file name suffix for HTML files (e.g. ".xhtml").
105 # html_file_suffix = None
106
107 # Output file base name for HTML help builder.
108 htmlhelp_basename = 'archinstalldoc'
109
110 # -- Options for manual page output --------------------------------------------
111
112 # One entry per manual page. List of tuples
113 # (source start file, name, description, authors, manual section).
114 man_pages = [("index", "archinstall", u"archinstall Documentation", [u"Anton Hvornum"], 1)]
115
116 # If true, show URL addresses after external links.
117 # man_show_urls = False
118
119
120 # -- Options for Texinfo output ------------------------------------------------
121
122 # Grouping the document tree into Texinfo files. List of tuples
123 # (source start file, target name, title, author,
124 # dir menu entry, description, category)
125 texinfo_documents = [
126 ("index", "archinstall", u"archinstall Documentation", u"Anton Hvornum", "archinstall", "Simple and minimal HTTP server."),
127 ]
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -41,7 +41,7 @@
author = 'Anton Hvornum'
# The full version, including alpha/beta/rc tags
-release = 'v2.1.0'
+release = 'v2.3.0.dev0'
# -- General configuration ---------------------------------------------------
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -41,7 +41,7 @@\n author = 'Anton Hvornum'\n \n # The full version, including alpha/beta/rc tags\n-release = 'v2.1.0'\n+release = 'v2.3.0.dev0'\n \n # -- General configuration ---------------------------------------------------\n", "issue": "Version Bump in conf.py?\nhttps://github.com/archlinux/archinstall/blob/a4033a7d3a94916f2b4972d212f9d0069fca39cd/docs/conf.py#L44\n", "before_files": [{"content": "import os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n\ndef process_docstring(app, what, name, obj, options, lines):\n\tspaces_pat = re.compile(r\"( {8})\")\n\tll = []\n\tfor line in lines:\n\t\tll.append(spaces_pat.sub(\" \", line))\n\tlines[:] = ll\n\n\ndef setup(app):\n\tapp.connect('autodoc-process-docstring', process_docstring)\n\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'python-archinstall'\ncopyright = '2020, Anton Hvornum'\nauthor = 'Anton Hvornum'\n\n# The full version, including alpha/beta/rc tags\nrelease = 'v2.1.0'\n\n# -- General configuration ---------------------------------------------------\n\nmaster_doc = 'index'\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n\t'sphinx.ext.autodoc',\n\t'sphinx.ext.inheritance_diagram',\n\t'sphinx.ext.todo'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_logo = \"_static/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\nhtml_split_index = True\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. 
Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'archinstalldoc'\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"archinstall\", u\"archinstall Documentation\", [u\"Anton Hvornum\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n\t(\"index\", \"archinstall\", u\"archinstall Documentation\", u\"Anton Hvornum\", \"archinstall\", \"Simple and minimal HTTP server.\"),\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "import os\nimport re\nimport sys\n\nsys.path.insert(0, os.path.abspath('..'))\n\n\ndef process_docstring(app, what, name, obj, options, lines):\n\tspaces_pat = re.compile(r\"( {8})\")\n\tll = []\n\tfor line in lines:\n\t\tll.append(spaces_pat.sub(\" \", line))\n\tlines[:] = ll\n\n\ndef setup(app):\n\tapp.connect('autodoc-process-docstring', process_docstring)\n\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'python-archinstall'\ncopyright = '2020, Anton Hvornum'\nauthor = 'Anton Hvornum'\n\n# The full version, including alpha/beta/rc tags\nrelease = 'v2.3.0.dev0'\n\n# -- General configuration ---------------------------------------------------\n\nmaster_doc = 'index'\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n\t'sphinx.ext.autodoc',\n\t'sphinx.ext.inheritance_diagram',\n\t'sphinx.ext.todo'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_logo = \"_static/logo.png\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = True\n\n# If true, the index is split into individual pages for each letter.\nhtml_split_index = True\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'archinstalldoc'\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"archinstall\", u\"archinstall Documentation\", [u\"Anton Hvornum\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n\t(\"index\", \"archinstall\", u\"archinstall Documentation\", u\"Anton Hvornum\", \"archinstall\", \"Simple and minimal HTTP server.\"),\n]\n", "path": "docs/conf.py"}]}
| 1,589 | 88 |
gh_patches_debug_27326
|
rasdani/github-patches
|
git_diff
|
huggingface__dataset-viewer-207
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
regression: fallback if streaming fails is disabled
Causes https://github.com/huggingface/datasets/issues/3185 for example: the fallback should have loaded the dataset in normal mode.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/datasets_preview_backend/config.py`
Content:
```
1 import os
2
3 from dotenv import load_dotenv
4
5 from datasets_preview_backend.constants import (
6 DEFAULT_APP_HOSTNAME,
7 DEFAULT_APP_PORT,
8 DEFAULT_ASSETS_DIRECTORY,
9 DEFAULT_DATASETS_ENABLE_PRIVATE,
10 DEFAULT_DATASETS_REVISION,
11 DEFAULT_HF_TOKEN,
12 DEFAULT_LOG_LEVEL,
13 DEFAULT_MAX_AGE_LONG_SECONDS,
14 DEFAULT_MAX_AGE_SHORT_SECONDS,
15 DEFAULT_MONGO_CACHE_DATABASE,
16 DEFAULT_MONGO_QUEUE_DATABASE,
17 DEFAULT_MONGO_URL,
18 DEFAULT_ROWS_MAX_BYTES,
19 DEFAULT_ROWS_MAX_NUMBER,
20 DEFAULT_ROWS_MIN_NUMBER,
21 DEFAULT_WEB_CONCURRENCY,
22 )
23 from datasets_preview_backend.utils import (
24 get_bool_value,
25 get_int_value,
26 get_str_or_none_value,
27 get_str_value,
28 )
29
30 # Load environment variables defined in .env, if any
31 load_dotenv()
32
33 APP_HOSTNAME = get_str_value(d=os.environ, key="APP_HOSTNAME", default=DEFAULT_APP_HOSTNAME)
34 APP_PORT = get_int_value(d=os.environ, key="APP_PORT", default=DEFAULT_APP_PORT)
35 ASSETS_DIRECTORY = get_str_or_none_value(d=os.environ, key="ASSETS_DIRECTORY", default=DEFAULT_ASSETS_DIRECTORY)
36 DATASETS_ENABLE_PRIVATE = get_bool_value(
37 d=os.environ, key="DATASETS_ENABLE_PRIVATE", default=DEFAULT_DATASETS_ENABLE_PRIVATE
38 )
39 DATASETS_REVISION = get_str_value(d=os.environ, key="DATASETS_REVISION", default=DEFAULT_DATASETS_REVISION)
40 HF_TOKEN = get_str_or_none_value(d=os.environ, key="HF_TOKEN", default=DEFAULT_HF_TOKEN)
41 LOG_LEVEL = get_str_value(d=os.environ, key="LOG_LEVEL", default=DEFAULT_LOG_LEVEL)
42 MAX_AGE_LONG_SECONDS = get_int_value(d=os.environ, key="MAX_AGE_LONG_SECONDS", default=DEFAULT_MAX_AGE_LONG_SECONDS)
43 MAX_AGE_SHORT_SECONDS = get_int_value(d=os.environ, key="MAX_AGE_SHORT_SECONDS", default=DEFAULT_MAX_AGE_SHORT_SECONDS)
44 MONGO_CACHE_DATABASE = get_str_value(d=os.environ, key="MONGO_CACHE_DATABASE", default=DEFAULT_MONGO_CACHE_DATABASE)
45 MONGO_QUEUE_DATABASE = get_str_value(d=os.environ, key="MONGO_QUEUE_DATABASE", default=DEFAULT_MONGO_QUEUE_DATABASE)
46 MONGO_URL = get_str_value(d=os.environ, key="MONGO_URL", default=DEFAULT_MONGO_URL)
47 WEB_CONCURRENCY = get_int_value(d=os.environ, key="WEB_CONCURRENCY", default=DEFAULT_WEB_CONCURRENCY)
48
49 # Ensure datasets library uses the expected revision for canonical datasets
50 os.environ["HF_SCRIPTS_VERSION"] = DATASETS_REVISION
51
52 # for tests - to be removed
53 ROWS_MAX_BYTES = get_int_value(d=os.environ, key="ROWS_MAX_BYTES", default=DEFAULT_ROWS_MAX_BYTES)
54 ROWS_MAX_NUMBER = get_int_value(d=os.environ, key="ROWS_MAX_NUMBER", default=DEFAULT_ROWS_MAX_NUMBER)
55 ROWS_MIN_NUMBER = get_int_value(d=os.environ, key="ROWS_MIN_NUMBER", default=DEFAULT_ROWS_MIN_NUMBER)
56
```
Path: `src/datasets_preview_backend/models/row.py`
Content:
```
1 import itertools
2 import logging
3 from typing import Any, Dict, List, Optional
4
5 from datasets import Dataset, DownloadMode, IterableDataset, load_dataset
6
7 from datasets_preview_backend.constants import DEFAULT_ROWS_MAX_NUMBER
8 from datasets_preview_backend.utils import retry
9
10 logger = logging.getLogger(__name__)
11
12
13 Row = Dict[str, Any]
14
15
16 @retry(logger=logger)
17 def get_rows(
18 dataset_name: str,
19 config_name: str,
20 split_name: str,
21 hf_token: Optional[str] = None,
22 streaming: bool = True,
23 rows_max_number: Optional[int] = None,
24 ) -> List[Row]:
25 if rows_max_number is None:
26 rows_max_number = DEFAULT_ROWS_MAX_NUMBER
27 dataset = load_dataset(
28 dataset_name,
29 name=config_name,
30 split=split_name,
31 streaming=True,
32 download_mode=DownloadMode.FORCE_REDOWNLOAD,
33 use_auth_token=hf_token,
34 )
35 if streaming:
36 if not isinstance(dataset, IterableDataset):
37 raise TypeError("load_dataset should return an IterableDataset")
38 elif not isinstance(dataset, Dataset):
39 raise TypeError("load_dataset should return a Dataset")
40 rows_plus_one = list(itertools.islice(dataset, rows_max_number + 1))
41 # ^^ to be able to detect if a split has exactly ROWS_MAX_NUMBER rows
42 if len(rows_plus_one) <= rows_max_number:
43 logger.debug(f"all the rows in the split have been fetched ({len(rows_plus_one)})")
44 else:
45 logger.debug(f"the rows in the split have been truncated ({rows_max_number} rows)")
46 return rows_plus_one[:rows_max_number]
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/datasets_preview_backend/config.py b/src/datasets_preview_backend/config.py
--- a/src/datasets_preview_backend/config.py
+++ b/src/datasets_preview_backend/config.py
@@ -12,6 +12,7 @@
DEFAULT_LOG_LEVEL,
DEFAULT_MAX_AGE_LONG_SECONDS,
DEFAULT_MAX_AGE_SHORT_SECONDS,
+ DEFAULT_MAX_SIZE_FALLBACK,
DEFAULT_MONGO_CACHE_DATABASE,
DEFAULT_MONGO_QUEUE_DATABASE,
DEFAULT_MONGO_URL,
@@ -50,6 +51,7 @@
os.environ["HF_SCRIPTS_VERSION"] = DATASETS_REVISION
# for tests - to be removed
+MAX_SIZE_FALLBACK = get_int_value(os.environ, "MAX_SIZE_FALLBACK", DEFAULT_MAX_SIZE_FALLBACK)
ROWS_MAX_BYTES = get_int_value(d=os.environ, key="ROWS_MAX_BYTES", default=DEFAULT_ROWS_MAX_BYTES)
ROWS_MAX_NUMBER = get_int_value(d=os.environ, key="ROWS_MAX_NUMBER", default=DEFAULT_ROWS_MAX_NUMBER)
ROWS_MIN_NUMBER = get_int_value(d=os.environ, key="ROWS_MIN_NUMBER", default=DEFAULT_ROWS_MIN_NUMBER)
diff --git a/src/datasets_preview_backend/models/row.py b/src/datasets_preview_backend/models/row.py
--- a/src/datasets_preview_backend/models/row.py
+++ b/src/datasets_preview_backend/models/row.py
@@ -28,7 +28,7 @@
dataset_name,
name=config_name,
split=split_name,
- streaming=True,
+ streaming=streaming,
download_mode=DownloadMode.FORCE_REDOWNLOAD,
use_auth_token=hf_token,
)
|
{"golden_diff": "diff --git a/src/datasets_preview_backend/config.py b/src/datasets_preview_backend/config.py\n--- a/src/datasets_preview_backend/config.py\n+++ b/src/datasets_preview_backend/config.py\n@@ -12,6 +12,7 @@\n DEFAULT_LOG_LEVEL,\n DEFAULT_MAX_AGE_LONG_SECONDS,\n DEFAULT_MAX_AGE_SHORT_SECONDS,\n+ DEFAULT_MAX_SIZE_FALLBACK,\n DEFAULT_MONGO_CACHE_DATABASE,\n DEFAULT_MONGO_QUEUE_DATABASE,\n DEFAULT_MONGO_URL,\n@@ -50,6 +51,7 @@\n os.environ[\"HF_SCRIPTS_VERSION\"] = DATASETS_REVISION\n \n # for tests - to be removed\n+MAX_SIZE_FALLBACK = get_int_value(os.environ, \"MAX_SIZE_FALLBACK\", DEFAULT_MAX_SIZE_FALLBACK)\n ROWS_MAX_BYTES = get_int_value(d=os.environ, key=\"ROWS_MAX_BYTES\", default=DEFAULT_ROWS_MAX_BYTES)\n ROWS_MAX_NUMBER = get_int_value(d=os.environ, key=\"ROWS_MAX_NUMBER\", default=DEFAULT_ROWS_MAX_NUMBER)\n ROWS_MIN_NUMBER = get_int_value(d=os.environ, key=\"ROWS_MIN_NUMBER\", default=DEFAULT_ROWS_MIN_NUMBER)\ndiff --git a/src/datasets_preview_backend/models/row.py b/src/datasets_preview_backend/models/row.py\n--- a/src/datasets_preview_backend/models/row.py\n+++ b/src/datasets_preview_backend/models/row.py\n@@ -28,7 +28,7 @@\n dataset_name,\n name=config_name,\n split=split_name,\n- streaming=True,\n+ streaming=streaming,\n download_mode=DownloadMode.FORCE_REDOWNLOAD,\n use_auth_token=hf_token,\n )\n", "issue": "regression: fallback if streaming fails is disabled\nCauses https://github.com/huggingface/datasets/issues/3185 for example: the fallback should have loaded the dataset in normal mode.\n", "before_files": [{"content": "import os\n\nfrom dotenv import load_dotenv\n\nfrom datasets_preview_backend.constants import (\n DEFAULT_APP_HOSTNAME,\n DEFAULT_APP_PORT,\n DEFAULT_ASSETS_DIRECTORY,\n DEFAULT_DATASETS_ENABLE_PRIVATE,\n DEFAULT_DATASETS_REVISION,\n DEFAULT_HF_TOKEN,\n DEFAULT_LOG_LEVEL,\n DEFAULT_MAX_AGE_LONG_SECONDS,\n DEFAULT_MAX_AGE_SHORT_SECONDS,\n DEFAULT_MONGO_CACHE_DATABASE,\n DEFAULT_MONGO_QUEUE_DATABASE,\n DEFAULT_MONGO_URL,\n DEFAULT_ROWS_MAX_BYTES,\n DEFAULT_ROWS_MAX_NUMBER,\n DEFAULT_ROWS_MIN_NUMBER,\n DEFAULT_WEB_CONCURRENCY,\n)\nfrom datasets_preview_backend.utils import (\n get_bool_value,\n get_int_value,\n get_str_or_none_value,\n get_str_value,\n)\n\n# Load environment variables defined in .env, if any\nload_dotenv()\n\nAPP_HOSTNAME = get_str_value(d=os.environ, key=\"APP_HOSTNAME\", default=DEFAULT_APP_HOSTNAME)\nAPP_PORT = get_int_value(d=os.environ, key=\"APP_PORT\", default=DEFAULT_APP_PORT)\nASSETS_DIRECTORY = get_str_or_none_value(d=os.environ, key=\"ASSETS_DIRECTORY\", default=DEFAULT_ASSETS_DIRECTORY)\nDATASETS_ENABLE_PRIVATE = get_bool_value(\n d=os.environ, key=\"DATASETS_ENABLE_PRIVATE\", default=DEFAULT_DATASETS_ENABLE_PRIVATE\n)\nDATASETS_REVISION = get_str_value(d=os.environ, key=\"DATASETS_REVISION\", default=DEFAULT_DATASETS_REVISION)\nHF_TOKEN = get_str_or_none_value(d=os.environ, key=\"HF_TOKEN\", default=DEFAULT_HF_TOKEN)\nLOG_LEVEL = get_str_value(d=os.environ, key=\"LOG_LEVEL\", default=DEFAULT_LOG_LEVEL)\nMAX_AGE_LONG_SECONDS = get_int_value(d=os.environ, key=\"MAX_AGE_LONG_SECONDS\", default=DEFAULT_MAX_AGE_LONG_SECONDS)\nMAX_AGE_SHORT_SECONDS = get_int_value(d=os.environ, key=\"MAX_AGE_SHORT_SECONDS\", default=DEFAULT_MAX_AGE_SHORT_SECONDS)\nMONGO_CACHE_DATABASE = get_str_value(d=os.environ, key=\"MONGO_CACHE_DATABASE\", default=DEFAULT_MONGO_CACHE_DATABASE)\nMONGO_QUEUE_DATABASE = get_str_value(d=os.environ, key=\"MONGO_QUEUE_DATABASE\", default=DEFAULT_MONGO_QUEUE_DATABASE)\nMONGO_URL = get_str_value(d=os.environ, 
key=\"MONGO_URL\", default=DEFAULT_MONGO_URL)\nWEB_CONCURRENCY = get_int_value(d=os.environ, key=\"WEB_CONCURRENCY\", default=DEFAULT_WEB_CONCURRENCY)\n\n# Ensure datasets library uses the expected revision for canonical datasets\nos.environ[\"HF_SCRIPTS_VERSION\"] = DATASETS_REVISION\n\n# for tests - to be removed\nROWS_MAX_BYTES = get_int_value(d=os.environ, key=\"ROWS_MAX_BYTES\", default=DEFAULT_ROWS_MAX_BYTES)\nROWS_MAX_NUMBER = get_int_value(d=os.environ, key=\"ROWS_MAX_NUMBER\", default=DEFAULT_ROWS_MAX_NUMBER)\nROWS_MIN_NUMBER = get_int_value(d=os.environ, key=\"ROWS_MIN_NUMBER\", default=DEFAULT_ROWS_MIN_NUMBER)\n", "path": "src/datasets_preview_backend/config.py"}, {"content": "import itertools\nimport logging\nfrom typing import Any, Dict, List, Optional\n\nfrom datasets import Dataset, DownloadMode, IterableDataset, load_dataset\n\nfrom datasets_preview_backend.constants import DEFAULT_ROWS_MAX_NUMBER\nfrom datasets_preview_backend.utils import retry\n\nlogger = logging.getLogger(__name__)\n\n\nRow = Dict[str, Any]\n\n\n@retry(logger=logger)\ndef get_rows(\n dataset_name: str,\n config_name: str,\n split_name: str,\n hf_token: Optional[str] = None,\n streaming: bool = True,\n rows_max_number: Optional[int] = None,\n) -> List[Row]:\n if rows_max_number is None:\n rows_max_number = DEFAULT_ROWS_MAX_NUMBER\n dataset = load_dataset(\n dataset_name,\n name=config_name,\n split=split_name,\n streaming=True,\n download_mode=DownloadMode.FORCE_REDOWNLOAD,\n use_auth_token=hf_token,\n )\n if streaming:\n if not isinstance(dataset, IterableDataset):\n raise TypeError(\"load_dataset should return an IterableDataset\")\n elif not isinstance(dataset, Dataset):\n raise TypeError(\"load_dataset should return a Dataset\")\n rows_plus_one = list(itertools.islice(dataset, rows_max_number + 1))\n # ^^ to be able to detect if a split has exactly ROWS_MAX_NUMBER rows\n if len(rows_plus_one) <= rows_max_number:\n logger.debug(f\"all the rows in the split have been fetched ({len(rows_plus_one)})\")\n else:\n logger.debug(f\"the rows in the split have been truncated ({rows_max_number} rows)\")\n return rows_plus_one[:rows_max_number]\n", "path": "src/datasets_preview_backend/models/row.py"}], "after_files": [{"content": "import os\n\nfrom dotenv import load_dotenv\n\nfrom datasets_preview_backend.constants import (\n DEFAULT_APP_HOSTNAME,\n DEFAULT_APP_PORT,\n DEFAULT_ASSETS_DIRECTORY,\n DEFAULT_DATASETS_ENABLE_PRIVATE,\n DEFAULT_DATASETS_REVISION,\n DEFAULT_HF_TOKEN,\n DEFAULT_LOG_LEVEL,\n DEFAULT_MAX_AGE_LONG_SECONDS,\n DEFAULT_MAX_AGE_SHORT_SECONDS,\n DEFAULT_MAX_SIZE_FALLBACK,\n DEFAULT_MONGO_CACHE_DATABASE,\n DEFAULT_MONGO_QUEUE_DATABASE,\n DEFAULT_MONGO_URL,\n DEFAULT_ROWS_MAX_BYTES,\n DEFAULT_ROWS_MAX_NUMBER,\n DEFAULT_ROWS_MIN_NUMBER,\n DEFAULT_WEB_CONCURRENCY,\n)\nfrom datasets_preview_backend.utils import (\n get_bool_value,\n get_int_value,\n get_str_or_none_value,\n get_str_value,\n)\n\n# Load environment variables defined in .env, if any\nload_dotenv()\n\nAPP_HOSTNAME = get_str_value(d=os.environ, key=\"APP_HOSTNAME\", default=DEFAULT_APP_HOSTNAME)\nAPP_PORT = get_int_value(d=os.environ, key=\"APP_PORT\", default=DEFAULT_APP_PORT)\nASSETS_DIRECTORY = get_str_or_none_value(d=os.environ, key=\"ASSETS_DIRECTORY\", default=DEFAULT_ASSETS_DIRECTORY)\nDATASETS_ENABLE_PRIVATE = get_bool_value(\n d=os.environ, key=\"DATASETS_ENABLE_PRIVATE\", default=DEFAULT_DATASETS_ENABLE_PRIVATE\n)\nDATASETS_REVISION = get_str_value(d=os.environ, key=\"DATASETS_REVISION\", 
default=DEFAULT_DATASETS_REVISION)\nHF_TOKEN = get_str_or_none_value(d=os.environ, key=\"HF_TOKEN\", default=DEFAULT_HF_TOKEN)\nLOG_LEVEL = get_str_value(d=os.environ, key=\"LOG_LEVEL\", default=DEFAULT_LOG_LEVEL)\nMAX_AGE_LONG_SECONDS = get_int_value(d=os.environ, key=\"MAX_AGE_LONG_SECONDS\", default=DEFAULT_MAX_AGE_LONG_SECONDS)\nMAX_AGE_SHORT_SECONDS = get_int_value(d=os.environ, key=\"MAX_AGE_SHORT_SECONDS\", default=DEFAULT_MAX_AGE_SHORT_SECONDS)\nMONGO_CACHE_DATABASE = get_str_value(d=os.environ, key=\"MONGO_CACHE_DATABASE\", default=DEFAULT_MONGO_CACHE_DATABASE)\nMONGO_QUEUE_DATABASE = get_str_value(d=os.environ, key=\"MONGO_QUEUE_DATABASE\", default=DEFAULT_MONGO_QUEUE_DATABASE)\nMONGO_URL = get_str_value(d=os.environ, key=\"MONGO_URL\", default=DEFAULT_MONGO_URL)\nWEB_CONCURRENCY = get_int_value(d=os.environ, key=\"WEB_CONCURRENCY\", default=DEFAULT_WEB_CONCURRENCY)\n\n# Ensure datasets library uses the expected revision for canonical datasets\nos.environ[\"HF_SCRIPTS_VERSION\"] = DATASETS_REVISION\n\n# for tests - to be removed\nMAX_SIZE_FALLBACK = get_int_value(os.environ, \"MAX_SIZE_FALLBACK\", DEFAULT_MAX_SIZE_FALLBACK)\nROWS_MAX_BYTES = get_int_value(d=os.environ, key=\"ROWS_MAX_BYTES\", default=DEFAULT_ROWS_MAX_BYTES)\nROWS_MAX_NUMBER = get_int_value(d=os.environ, key=\"ROWS_MAX_NUMBER\", default=DEFAULT_ROWS_MAX_NUMBER)\nROWS_MIN_NUMBER = get_int_value(d=os.environ, key=\"ROWS_MIN_NUMBER\", default=DEFAULT_ROWS_MIN_NUMBER)\n", "path": "src/datasets_preview_backend/config.py"}, {"content": "import itertools\nimport logging\nfrom typing import Any, Dict, List, Optional\n\nfrom datasets import Dataset, DownloadMode, IterableDataset, load_dataset\n\nfrom datasets_preview_backend.constants import DEFAULT_ROWS_MAX_NUMBER\nfrom datasets_preview_backend.utils import retry\n\nlogger = logging.getLogger(__name__)\n\n\nRow = Dict[str, Any]\n\n\n@retry(logger=logger)\ndef get_rows(\n dataset_name: str,\n config_name: str,\n split_name: str,\n hf_token: Optional[str] = None,\n streaming: bool = True,\n rows_max_number: Optional[int] = None,\n) -> List[Row]:\n if rows_max_number is None:\n rows_max_number = DEFAULT_ROWS_MAX_NUMBER\n dataset = load_dataset(\n dataset_name,\n name=config_name,\n split=split_name,\n streaming=streaming,\n download_mode=DownloadMode.FORCE_REDOWNLOAD,\n use_auth_token=hf_token,\n )\n if streaming:\n if not isinstance(dataset, IterableDataset):\n raise TypeError(\"load_dataset should return an IterableDataset\")\n elif not isinstance(dataset, Dataset):\n raise TypeError(\"load_dataset should return a Dataset\")\n rows_plus_one = list(itertools.islice(dataset, rows_max_number + 1))\n # ^^ to be able to detect if a split has exactly ROWS_MAX_NUMBER rows\n if len(rows_plus_one) <= rows_max_number:\n logger.debug(f\"all the rows in the split have been fetched ({len(rows_plus_one)})\")\n else:\n logger.debug(f\"the rows in the split have been truncated ({rows_max_number} rows)\")\n return rows_plus_one[:rows_max_number]\n", "path": "src/datasets_preview_backend/models/row.py"}]}
| 1,485 | 344 |
gh_patches_debug_51276
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-3848
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lint takes a long time
Fix that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from os import getpid
16 from socket import gethostname
17 from time import time
18
19 # pylint: disable=wrong-import-position
20 from google.protobuf.timestamp_pb2 import Timestamp
21 from opencensus.proto.agent.common.v1 import common_pb2
22 from opencensus.proto.trace.v1 import trace_pb2
23
24 from opentelemetry.exporter.opencensus.version import (
25 __version__ as opencensusexporter_exporter_version,
26 )
27 from opentelemetry.trace import SpanKind
28 from opentelemetry.util._importlib_metadata import version
29
30 OPENTELEMETRY_VERSION = version("opentelemetry-api")
31
32
33 def proto_timestamp_from_time_ns(time_ns):
34 """Converts datetime to protobuf timestamp.
35
36 Args:
37 time_ns: Time in nanoseconds
38
39 Returns:
40 Returns protobuf timestamp.
41 """
42 ts = Timestamp()
43 if time_ns is not None:
44 # pylint: disable=no-member
45 ts.FromNanoseconds(time_ns)
46 return ts
47
48
49 # pylint: disable=no-member
50 def get_collector_span_kind(kind: SpanKind):
51 if kind is SpanKind.SERVER:
52 return trace_pb2.Span.SpanKind.SERVER
53 if kind is SpanKind.CLIENT:
54 return trace_pb2.Span.SpanKind.CLIENT
55 return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED
56
57
58 def add_proto_attribute_value(pb_attributes, key, value):
59 """Sets string, int, boolean or float value on protobuf
60 span, link or annotation attributes.
61
62 Args:
63 pb_attributes: protobuf Span's attributes property.
64 key: attribute key to set.
65 value: attribute value
66 """
67
68 if isinstance(value, bool):
69 pb_attributes.attribute_map[key].bool_value = value
70 elif isinstance(value, int):
71 pb_attributes.attribute_map[key].int_value = value
72 elif isinstance(value, str):
73 pb_attributes.attribute_map[key].string_value.value = value
74 elif isinstance(value, float):
75 pb_attributes.attribute_map[key].double_value = value
76 else:
77 pb_attributes.attribute_map[key].string_value.value = str(value)
78
79
80 # pylint: disable=no-member
81 def get_node(service_name, host_name):
82 """Generates Node message from params and system information.
83
84 Args:
85 service_name: Name of Collector service.
86 host_name: Host name.
87 """
88 return common_pb2.Node(
89 identifier=common_pb2.ProcessIdentifier(
90 host_name=gethostname() if host_name is None else host_name,
91 pid=getpid(),
92 start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),
93 ),
94 library_info=common_pb2.LibraryInfo(
95 language=common_pb2.LibraryInfo.Language.Value("PYTHON"),
96 exporter_version=opencensusexporter_exporter_version,
97 core_library_version=OPENTELEMETRY_VERSION,
98 ),
99 service_info=common_pb2.ServiceInfo(name=service_name),
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
--- a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
+++ b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
@@ -17,7 +17,9 @@
from time import time
# pylint: disable=wrong-import-position
-from google.protobuf.timestamp_pb2 import Timestamp
+from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module
+ Timestamp,
+)
from opencensus.proto.agent.common.v1 import common_pb2
from opencensus.proto.trace.v1 import trace_pb2
|
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py\n--- a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py\n+++ b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py\n@@ -17,7 +17,9 @@\n from time import time\n \n # pylint: disable=wrong-import-position\n-from google.protobuf.timestamp_pb2 import Timestamp\n+from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module\n+ Timestamp,\n+)\n from opencensus.proto.agent.common.v1 import common_pb2\n from opencensus.proto.trace.v1 import trace_pb2\n", "issue": "lint takes a long time\nFix that.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import getpid\nfrom socket import gethostname\nfrom time import time\n\n# pylint: disable=wrong-import-position\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom opencensus.proto.agent.common.v1 import common_pb2\nfrom opencensus.proto.trace.v1 import trace_pb2\n\nfrom opentelemetry.exporter.opencensus.version import (\n __version__ as opencensusexporter_exporter_version,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.util._importlib_metadata import version\n\nOPENTELEMETRY_VERSION = version(\"opentelemetry-api\")\n\n\ndef proto_timestamp_from_time_ns(time_ns):\n \"\"\"Converts datetime to protobuf timestamp.\n\n Args:\n time_ns: Time in nanoseconds\n\n Returns:\n Returns protobuf timestamp.\n \"\"\"\n ts = Timestamp()\n if time_ns is not None:\n # pylint: disable=no-member\n ts.FromNanoseconds(time_ns)\n return ts\n\n\n# pylint: disable=no-member\ndef get_collector_span_kind(kind: SpanKind):\n if kind is SpanKind.SERVER:\n return trace_pb2.Span.SpanKind.SERVER\n if kind is SpanKind.CLIENT:\n return trace_pb2.Span.SpanKind.CLIENT\n return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED\n\n\ndef add_proto_attribute_value(pb_attributes, key, value):\n \"\"\"Sets string, int, boolean or float value on protobuf\n span, link or annotation attributes.\n\n Args:\n pb_attributes: protobuf Span's attributes property.\n key: attribute key to set.\n value: attribute value\n \"\"\"\n\n if isinstance(value, bool):\n pb_attributes.attribute_map[key].bool_value = value\n elif isinstance(value, int):\n pb_attributes.attribute_map[key].int_value = value\n elif isinstance(value, str):\n pb_attributes.attribute_map[key].string_value.value = value\n elif isinstance(value, float):\n pb_attributes.attribute_map[key].double_value = value\n else:\n pb_attributes.attribute_map[key].string_value.value = str(value)\n\n\n# pylint: disable=no-member\ndef get_node(service_name, host_name):\n \"\"\"Generates Node message from params and system information.\n\n Args:\n service_name: Name of Collector service.\n host_name: Host name.\n \"\"\"\n return common_pb2.Node(\n 
identifier=common_pb2.ProcessIdentifier(\n host_name=gethostname() if host_name is None else host_name,\n pid=getpid(),\n start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),\n ),\n library_info=common_pb2.LibraryInfo(\n language=common_pb2.LibraryInfo.Language.Value(\"PYTHON\"),\n exporter_version=opencensusexporter_exporter_version,\n core_library_version=OPENTELEMETRY_VERSION,\n ),\n service_info=common_pb2.ServiceInfo(name=service_name),\n )\n", "path": "exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import getpid\nfrom socket import gethostname\nfrom time import time\n\n# pylint: disable=wrong-import-position\nfrom google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module\n Timestamp,\n)\nfrom opencensus.proto.agent.common.v1 import common_pb2\nfrom opencensus.proto.trace.v1 import trace_pb2\n\nfrom opentelemetry.exporter.opencensus.version import (\n __version__ as opencensusexporter_exporter_version,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.util._importlib_metadata import version\n\nOPENTELEMETRY_VERSION = version(\"opentelemetry-api\")\n\n\ndef proto_timestamp_from_time_ns(time_ns):\n \"\"\"Converts datetime to protobuf timestamp.\n\n Args:\n time_ns: Time in nanoseconds\n\n Returns:\n Returns protobuf timestamp.\n \"\"\"\n ts = Timestamp()\n if time_ns is not None:\n # pylint: disable=no-member\n ts.FromNanoseconds(time_ns)\n return ts\n\n\n# pylint: disable=no-member\ndef get_collector_span_kind(kind: SpanKind):\n if kind is SpanKind.SERVER:\n return trace_pb2.Span.SpanKind.SERVER\n if kind is SpanKind.CLIENT:\n return trace_pb2.Span.SpanKind.CLIENT\n return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED\n\n\ndef add_proto_attribute_value(pb_attributes, key, value):\n \"\"\"Sets string, int, boolean or float value on protobuf\n span, link or annotation attributes.\n\n Args:\n pb_attributes: protobuf Span's attributes property.\n key: attribute key to set.\n value: attribute value\n \"\"\"\n\n if isinstance(value, bool):\n pb_attributes.attribute_map[key].bool_value = value\n elif isinstance(value, int):\n pb_attributes.attribute_map[key].int_value = value\n elif isinstance(value, str):\n pb_attributes.attribute_map[key].string_value.value = value\n elif isinstance(value, float):\n pb_attributes.attribute_map[key].double_value = value\n else:\n pb_attributes.attribute_map[key].string_value.value = str(value)\n\n\n# pylint: disable=no-member\ndef get_node(service_name, host_name):\n \"\"\"Generates Node message from params and system information.\n\n Args:\n service_name: Name of Collector service.\n host_name: Host name.\n \"\"\"\n return common_pb2.Node(\n identifier=common_pb2.ProcessIdentifier(\n host_name=gethostname() if host_name is None else host_name,\n pid=getpid(),\n start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),\n ),\n 
library_info=common_pb2.LibraryInfo(\n language=common_pb2.LibraryInfo.Language.Value(\"PYTHON\"),\n exporter_version=opencensusexporter_exporter_version,\n core_library_version=OPENTELEMETRY_VERSION,\n ),\n service_info=common_pb2.ServiceInfo(name=service_name),\n )\n", "path": "exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py"}]}
| 1,220 | 183 |
gh_patches_debug_12574
|
rasdani/github-patches
|
git_diff
|
celery__celery-4399
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build task documentation with sphinx fails (error while formatting arguments)
## Checklist
this has been tested with both version 4.0.2 and master (8c8354f)
## Steps to reproduce
```bash
$ git clone https://github.com/inveniosoftware/invenio-indexer.git
$ cd invenio-indexer/
$ pip install -e .[all]
$ sphinx-build -qnNW docs docs/_build/html
```
You can see that `invenio-indexer` correctly implements the requirements to document a celery task:
- https://github.com/inveniosoftware/invenio-indexer/blob/master/docs/conf.py#L52
- https://github.com/inveniosoftware/invenio-indexer/blob/master/docs/api.rst#celery-tasks
## Expected behavior
It should build the documentation of the tasks. This is **working** in Celery 3.1.25.
## Actual behavior
I get the following error:
```
invenio-indexer/docs/api.rst:54: WARNING: error while formatting arguments for invenio_indexer.tasks.index_record: 'NoneType' object is not callable
```
Am I missing something? Should it work differently than Celery 3?
Request on_timeout should ignore soft time limit exception
When Request.on_timeout receives a soft timeout from billiard, it does the same as if it were receiving a hard time limit exception. This is run by the controller.
But the task may catch this exception and e.g. return (this is what soft timeouts are for).
This causes:
1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task
2. the task status to be set both to failure and to success in the same manner
3. if the task is participating in a chord, the chord result counter (at least with redis) is incremented twice (instead of once), making the chord return prematurely and eventually lose tasks…
1, 2 and 3 can of course lead to strange race conditions…
## Steps to reproduce (Illustration)
with the program in test_timeout.py:
```python
import time
import celery


app = celery.Celery('test_timeout')
app.conf.update(
    result_backend="redis://localhost/0",
    broker_url="amqp://celery:celery@localhost:5672/host",
)

@app.task(soft_time_limit=1)
def test():
    try:
        time.sleep(2)
    except Exception:
        return 1

@app.task()
def add(args):
    print("### adding", args)
    return sum(args)

@app.task()
def on_error(context, exception, traceback, **kwargs):
    print("### on_error: ", exception)

if __name__ == "__main__":
    result = celery.chord([test.s().set(link_error=on_error.s()), test.s().set(link_error=on_error.s())])(add.s())
    result.get()
```
start a worker and the program:
```
$ celery -A test_timeout worker -l WARNING
$ python3 test_timeout.py
```
## Expected behavior
The add method is called with `[1, 1]` as its argument and test_timeout.py returns normally
## Actual behavior
The test_timeout.py fails, with
```
celery.backends.base.ChordError: Callback error: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",
```
On the worker side, **on_error is called, but the add method runs as well!**
```
[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]
[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]
[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError("Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)",)
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in on_chord_part_return
callback.delay([unpack(tup, decode) for tup in resl])
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 290, in <listcomp>
callback.delay([unpack(tup, decode) for tup in resl])
File "/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py", line 243, in _unpack_chord_result
raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))
celery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)
[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:
[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)
[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding
[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]
```
Of course, I chose on purpose to call test.s() twice, to show that the count in the chord keeps going. In fact:
- the chord result is incremented twice by the soft time limit error
- the chord result is incremented twice again by the correct return of the `test` task
## Conclusion
Request.on_timeout should not process soft time limit exceptions.
Here is a quick monkey patch (the corresponding fix in celery is trivial):
```python
def patch_celery_request_on_timeout():
    from celery.worker import request
    orig = request.Request.on_timeout
    def patched_on_timeout(self, soft, timeout):
        if not soft:
            orig(self, soft, timeout)
    request.Request.on_timeout = patched_on_timeout
patch_celery_request_on_timeout()
```
## version info
software -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3
billiard:3.5.0.2 py-amqp:2.1.4
platform -> system:Linux arch:64bit, ELF imp:CPython
loader -> celery.loaders.app.AppLoader
settings -> transport:amqp results:redis://10.0.3.253/0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `celery/contrib/sphinx.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Sphinx documentation plugin used to document tasks.
3
4 Introduction
5 ============
6
7 Usage
8 -----
9
10 Add the extension to your :file:`docs/conf.py` configuration module:
11
12 .. code-block:: python
13
14 extensions = (...,
15 'celery.contrib.sphinx')
16
17 If you'd like to change the prefix for tasks in reference documentation
18 then you can change the ``celery_task_prefix`` configuration value:
19
20 .. code-block:: python
21
22 celery_task_prefix = '(task)' # < default
23
24 With the extension installed `autodoc` will automatically find
25 task decorated objects and generate the correct (as well as
26 add a ``(task)`` prefix), and you can also refer to the tasks
27 using `:task:proj.tasks.add` syntax.
28
29 Use ``.. autotask::`` to manually document a task.
30 """
31 from __future__ import absolute_import, unicode_literals
32 from inspect import formatargspec
33 from sphinx.domains.python import PyModulelevel
34 from sphinx.ext.autodoc import FunctionDocumenter
35 from celery.app.task import BaseTask
36 from celery.five import getfullargspec
37
38
39 class TaskDocumenter(FunctionDocumenter):
40 """Document task definitions."""
41
42 objtype = 'task'
43 member_order = 11
44
45 @classmethod
46 def can_document_member(cls, member, membername, isattr, parent):
47 return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
48
49 def format_args(self):
50 wrapped = getattr(self.object, '__wrapped__', None)
51 if wrapped is not None:
52 argspec = getfullargspec(wrapped)
53 fmt = formatargspec(*argspec)
54 fmt = fmt.replace('\\', '\\\\')
55 return fmt
56 return ''
57
58 def document_members(self, all_members=False):
59 pass
60
61
62 class TaskDirective(PyModulelevel):
63 """Sphinx task directive."""
64
65 def get_signature_prefix(self, sig):
66 return self.env.config.celery_task_prefix
67
68
69 def setup(app):
70 """Setup Sphinx extension."""
71 app.add_autodocumenter(TaskDocumenter)
72 app.add_directive_to_domain('py', 'task', TaskDirective)
73 app.add_config_value('celery_task_prefix', '(task)', True)
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py
--- a/celery/contrib/sphinx.py
+++ b/celery/contrib/sphinx.py
@@ -29,11 +29,13 @@
Use ``.. autotask::`` to manually document a task.
"""
from __future__ import absolute_import, unicode_literals
-from inspect import formatargspec
from sphinx.domains.python import PyModulelevel
from sphinx.ext.autodoc import FunctionDocumenter
from celery.app.task import BaseTask
-from celery.five import getfullargspec
+try: # pragma: no cover
+ from inspect import formatargspec, getfullargspec
+except ImportError: # Py2
+ from inspect import formatargspec, getargspec as getfullargspec # noqa
class TaskDocumenter(FunctionDocumenter):
|
{"golden_diff": "diff --git a/celery/contrib/sphinx.py b/celery/contrib/sphinx.py\n--- a/celery/contrib/sphinx.py\n+++ b/celery/contrib/sphinx.py\n@@ -29,11 +29,13 @@\n Use ``.. autotask::`` to manually document a task.\n \"\"\"\n from __future__ import absolute_import, unicode_literals\n-from inspect import formatargspec\n from sphinx.domains.python import PyModulelevel\n from sphinx.ext.autodoc import FunctionDocumenter\n from celery.app.task import BaseTask\n-from celery.five import getfullargspec\n+try: # pragma: no cover\n+ from inspect import formatargspec, getfullargspec\n+except ImportError: # Py2\n+ from inspect import formatargspec, getargspec as getfullargspec # noqa\n \n \n class TaskDocumenter(FunctionDocumenter):\n", "issue": "Build task documentation with sphinx fails (error while formatting arguments)\n## Checklist\r\n\r\nthis has been tested with both version 4.0.2 and master (8c8354f)\r\n\r\n## Steps to reproduce\r\n\r\n```bash\r\n$ git clone https://github.com/inveniosoftware/invenio-indexer.git\r\n$ cd invenio-indexer/\r\n$ pip install -e .[all]\r\n$ sphinx-build -qnNW docs docs/_build/html\r\n```\r\n\r\nYou can see that `invenio-indexer` correctly implements the requirements to document a celery task:\r\n- https://github.com/inveniosoftware/invenio-indexer/blob/master/docs/conf.py#L52\r\n- https://github.com/inveniosoftware/invenio-indexer/blob/master/docs/api.rst#celery-tasks\r\n\r\n## Expected behavior\r\n\r\nIt should build the documentation of the tasks. This is **working** in Celery 3.1.25.\r\n\r\n## Actual behavior\r\n\r\nI get the following error:\r\n\r\n```\r\ninvenio-indexer/docs/api.rst:54: WARNING: error while formatting arguments for invenio_indexer.tasks.index_record: 'NoneType' object is not callable\r\n```\r\n\r\nAm I missing something? Should it work differently than Celery 3?\nRequest on_timeout should ignore soft time limit exception\nWhen Request.on_timeout receive a soft timeout from billiard, it does the same as if it was receiving a hard time limit exception. This is ran by the controller.\r\n\r\nBut the task may catch this exception and eg. return (this is what soft timeout are for).\r\n\r\nThis cause:\r\n1. the result to be saved once as an exception by the controller (on_timeout) and another time with the result returned by the task\r\n2. the task status to be passed to failure and to success on the same manner\r\n3. 
if the task is participating to a chord, the chord result counter (at least with redis) is incremented twice (instead of once), making the chord to return prematurely and eventually loose tasks\u2026\r\n\r\n1, 2 and 3 can leads of course to strange race conditions\u2026\r\n\r\n## Steps to reproduce (Illustration)\r\n\r\nwith the program in test_timeout.py:\r\n\r\n```python\r\nimport time\r\nimport celery\r\n\r\n\r\napp = celery.Celery('test_timeout')\r\napp.conf.update(\r\n result_backend=\"redis://localhost/0\",\r\n broker_url=\"amqp://celery:celery@localhost:5672/host\",\r\n)\r\n\r\[email protected](soft_time_limit=1)\r\ndef test():\r\n try:\r\n time.sleep(2)\r\n except Exception:\r\n return 1\r\n\r\[email protected]()\r\ndef add(args):\r\n print(\"### adding\", args)\r\n return sum(args)\r\n\r\[email protected]()\r\ndef on_error(context, exception, traceback, **kwargs):\r\n print(\"### on_error:\u00a0\", exception)\r\n\r\nif __name__ == \"__main__\":\r\n result = celery.chord([test.s().set(link_error=on_error.s()), test.s().set(link_error=on_error.s())])(add.s())\r\n result.get()\r\n```\r\n\r\nstart a worker and the program:\r\n\r\n```\r\n$ celery -A test_timeout worker -l WARNING\r\n$ python3 test_timeout.py\r\n```\r\n\r\n## Expected behavior\r\n\r\nadd method is called with `[1, 1]` as argument and test_timeout.py return normally\r\n\r\n## Actual behavior\r\n\r\nThe test_timeout.py fails, with\r\n```\r\ncelery.backends.base.ChordError: Callback error: ChordError(\"Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\",\r\n```\r\nOn the worker side, the **on_error is called but the add method as well !**\r\n\r\n```\r\n[2017-11-29 23:07:25,538: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[15109e05-da43-449f-9081-85d839ac0ef2]\r\n[2017-11-29 23:07:25,546: WARNING/MainProcess] ### on_error:\r\n[2017-11-29 23:07:25,546: WARNING/MainProcess] SoftTimeLimitExceeded(True,)\r\n[2017-11-29 23:07:25,547: WARNING/MainProcess] Soft time limit (1s) exceeded for test_timeout.test[38f3f7f2-4a89-4318-8ee9-36a987f73757]\r\n[2017-11-29 23:07:25,553: ERROR/MainProcess] Chord callback for 'ef6d7a38-d1b4-40ad-b937-ffa84e40bb23' raised: ChordError(\"Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\",)\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 290, in on_chord_part_return\r\n callback.delay([unpack(tup, decode) for tup in resl])\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 290, in <listcomp>\r\n callback.delay([unpack(tup, decode) for tup in resl])\r\n File \"/usr/local/lib/python3.4/dist-packages/celery/backends/redis.py\", line 243, in _unpack_chord_result\r\n raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval))\r\ncelery.exceptions.ChordError: Dependency 15109e05-da43-449f-9081-85d839ac0ef2 raised SoftTimeLimitExceeded('SoftTimeLimitExceeded(True,)',)\r\n[2017-11-29 23:07:25,565: WARNING/MainProcess] ### on_error:\r\n[2017-11-29 23:07:25,565: WARNING/MainProcess] SoftTimeLimitExceeded(True,)\r\n[2017-11-29 23:07:27,262: WARNING/PoolWorker-2] ### adding\r\n[2017-11-29 23:07:27,264: WARNING/PoolWorker-2] [1, 1]\r\n```\r\n\r\nOf course, on purpose did I choose to call the test.s() twice, to show that the count in the chord continues. 
In fact:\r\n- the chord result is incremented twice by the error of soft time limit\r\n- the chord result is again incremented twice by the correct returning of `test` task\r\n\r\n## Conclusion\r\n\r\nRequest.on_timeout should not process soft time limit exception. \r\n\r\nhere is a quick monkey patch (correction of celery is trivial)\r\n\r\n```python\r\ndef patch_celery_request_on_timeout():\r\n from celery.worker import request\r\n orig = request.Request.on_timeout\r\n def patched_on_timeout(self, soft, timeout):\r\n if not soft:\r\n orig(self, soft, timeout)\r\n request.Request.on_timeout = patched_on_timeout\r\npatch_celery_request_on_timeout()\r\n```\r\n\r\n\r\n\r\n## version info\r\n\r\nsoftware -> celery:4.1.0 (latentcall) kombu:4.0.2 py:3.4.3\r\n billiard:3.5.0.2 py-amqp:2.1.4\r\nplatform -> system:Linux arch:64bit, ELF imp:CPython\r\nloader -> celery.loaders.app.AppLoader\r\nsettings -> transport:amqp results:redis://10.0.3.253/0\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom inspect import formatargspec\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\nfrom celery.five import getfullargspec\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Sphinx documentation plugin used to document tasks.\n\nIntroduction\n============\n\nUsage\n-----\n\nAdd the extension to your :file:`docs/conf.py` configuration module:\n\n.. code-block:: python\n\n extensions = (...,\n 'celery.contrib.sphinx')\n\nIf you'd like to change the prefix for tasks in reference documentation\nthen you can change the ``celery_task_prefix`` configuration value:\n\n.. 
code-block:: python\n\n celery_task_prefix = '(task)' # < default\n\nWith the extension installed `autodoc` will automatically find\ntask decorated objects and generate the correct (as well as\nadd a ``(task)`` prefix), and you can also refer to the tasks\nusing `:task:proj.tasks.add` syntax.\n\nUse ``.. autotask::`` to manually document a task.\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom sphinx.domains.python import PyModulelevel\nfrom sphinx.ext.autodoc import FunctionDocumenter\nfrom celery.app.task import BaseTask\ntry: # pragma: no cover\n from inspect import formatargspec, getfullargspec\nexcept ImportError: # Py2\n from inspect import formatargspec, getargspec as getfullargspec # noqa\n\n\nclass TaskDocumenter(FunctionDocumenter):\n \"\"\"Document task definitions.\"\"\"\n\n objtype = 'task'\n member_order = 11\n\n @classmethod\n def can_document_member(cls, member, membername, isattr, parent):\n return isinstance(member, BaseTask) and getattr(member, '__wrapped__')\n\n def format_args(self):\n wrapped = getattr(self.object, '__wrapped__', None)\n if wrapped is not None:\n argspec = getfullargspec(wrapped)\n fmt = formatargspec(*argspec)\n fmt = fmt.replace('\\\\', '\\\\\\\\')\n return fmt\n return ''\n\n def document_members(self, all_members=False):\n pass\n\n\nclass TaskDirective(PyModulelevel):\n \"\"\"Sphinx task directive.\"\"\"\n\n def get_signature_prefix(self, sig):\n return self.env.config.celery_task_prefix\n\n\ndef setup(app):\n \"\"\"Setup Sphinx extension.\"\"\"\n app.add_autodocumenter(TaskDocumenter)\n app.add_directive_to_domain('py', 'task', TaskDirective)\n app.add_config_value('celery_task_prefix', '(task)', True)\n", "path": "celery/contrib/sphinx.py"}]}
| 2,648 | 195 |
gh_patches_debug_22398
|
rasdani/github-patches
|
git_diff
|
fonttools__fonttools-1605
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Float yMin value: required argument is not an integer
If a font file has a float value in `yMin`—and I assume equally in `xMin`, `xMax` or `yMax`—it will fail to save with the error `required argument is not an integer` ([`fontTools/misc/sstruct.py in pack at line 75`](https://github.com/fonttools/fonttools/blob/3.40.0/Lib/fontTools/misc/sstruct.py#L75), fonttools v3.40.0).
Trace:
```
fontTools/misc/sstruct.py in pack at line 75
fontTools/ttLib/tables/_h_e_a_d.py in compile at line 69
fontTools/ttLib/ttFont.py in getTableData at line 651
fontTools/ttLib/ttFont.py in _writeTable at line 633
fontTools/ttLib/ttFont.py in _save at line 212
fontTools/ttLib/ttFont.py in save at line 173
```
Variables at point of error:
```python
formatstring = ">llIIHHQQhhhhHHhhh"
elements = [
65536,
65601,
1208942685,
1594834165,
3,
1000,
3551183604,
3640213847,
-132,
-170.009,
788,
835,
0,
3,
2,
0,
0
]
```
As you can see, the value `-170.009` would trigger the error. If integers are expected, then rounding should probably be applied.
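For illustration, a minimal sketch of such rounding, assuming the `intRect` helper that ships in `fontTools.misc.arrayTools` (it floors the minima and ceils the maxima, so the integer box still encloses the original):

```python
from fontTools.misc.arrayTools import intRect

# Bounding box with a float coordinate, taken from the report above.
bbox = (-132, -170.009, 788, 835)

# intRect floors xMin/yMin and ceils xMax/yMax, returning an all-integer box.
print(intRect(bbox))  # (-132, -171, 788, 835)
```

A plain `round()` on each coordinate would also silence the struct error, but floor/ceil keeps the box conservative — it never shrinks below the original outline.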
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Lib/fontTools/ttLib/tables/_h_e_a_d.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2 from fontTools.misc.py23 import *
3 from fontTools.misc import sstruct
4 from fontTools.misc.textTools import safeEval, num2binary, binary2num
5 from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
6 from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
7 from . import DefaultTable
8 import logging
9
10
11 log = logging.getLogger(__name__)
12
13 headFormat = """
14 > # big endian
15 tableVersion: 16.16F
16 fontRevision: 16.16F
17 checkSumAdjustment: I
18 magicNumber: I
19 flags: H
20 unitsPerEm: H
21 created: Q
22 modified: Q
23 xMin: h
24 yMin: h
25 xMax: h
26 yMax: h
27 macStyle: H
28 lowestRecPPEM: H
29 fontDirectionHint: h
30 indexToLocFormat: h
31 glyphDataFormat: h
32 """
33
34 class table__h_e_a_d(DefaultTable.DefaultTable):
35
36 dependencies = ['maxp', 'loca', 'CFF ']
37
38 def decompile(self, data, ttFont):
39 dummy, rest = sstruct.unpack2(headFormat, data, self)
40 if rest:
41 # this is quite illegal, but there seem to be fonts out there that do this
42 log.warning("extra bytes at the end of 'head' table")
43 assert rest == "\0\0"
44
45 # For timestamp fields, ignore the top four bytes. Some fonts have
46 # bogus values there. Since till 2038 those bytes only can be zero,
47 # ignore them.
48 #
49 # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
50 for stamp in 'created', 'modified':
51 value = getattr(self, stamp)
52 if value > 0xFFFFFFFF:
53 log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
54 value &= 0xFFFFFFFF
55 setattr(self, stamp, value)
56 if value < 0x7C259DC0: # January 1, 1970 00:00:00
57 log.warning("'%s' timestamp seems very low; regarding as unix timestamp", stamp)
58 value += 0x7C259DC0
59 setattr(self, stamp, value)
60
61 def compile(self, ttFont):
62 if ttFont.recalcBBoxes:
63 # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
64 if 'CFF ' in ttFont:
65 topDict = ttFont['CFF '].cff.topDictIndex[0]
66 self.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox
67 if ttFont.recalcTimestamp:
68 self.modified = timestampNow()
69 data = sstruct.pack(headFormat, self)
70 return data
71
72 def toXML(self, writer, ttFont):
73 writer.comment("Most of this table will be recalculated by the compiler")
74 writer.newline()
75 formatstring, names, fixes = sstruct.getformat(headFormat)
76 for name in names:
77 value = getattr(self, name)
78 if name in ("created", "modified"):
79 value = timestampToString(value)
80 if name in ("magicNumber", "checkSumAdjustment"):
81 if value < 0:
82 value = value + 0x100000000
83 value = hex(value)
84 if value[-1:] == "L":
85 value = value[:-1]
86 elif name in ("macStyle", "flags"):
87 value = num2binary(value, 16)
88 writer.simpletag(name, value=value)
89 writer.newline()
90
91 def fromXML(self, name, attrs, content, ttFont):
92 value = attrs["value"]
93 if name in ("created", "modified"):
94 value = timestampFromString(value)
95 elif name in ("macStyle", "flags"):
96 value = binary2num(value)
97 else:
98 value = safeEval(value)
99 setattr(self, name, value)
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Lib/fontTools/ttLib/tables/_h_e_a_d.py b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
--- a/Lib/fontTools/ttLib/tables/_h_e_a_d.py
+++ b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
@@ -4,6 +4,7 @@
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
+from fontTools.misc.arrayTools import intRect
from . import DefaultTable
import logging
@@ -63,7 +64,7 @@
# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
if 'CFF ' in ttFont:
topDict = ttFont['CFF '].cff.topDictIndex[0]
- self.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox
+ self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
if ttFont.recalcTimestamp:
self.modified = timestampNow()
data = sstruct.pack(headFormat, self)
|
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_h_e_a_d.py b/Lib/fontTools/ttLib/tables/_h_e_a_d.py\n--- a/Lib/fontTools/ttLib/tables/_h_e_a_d.py\n+++ b/Lib/fontTools/ttLib/tables/_h_e_a_d.py\n@@ -4,6 +4,7 @@\n from fontTools.misc.textTools import safeEval, num2binary, binary2num\n from fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow\n from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat\n+from fontTools.misc.arrayTools import intRect\n from . import DefaultTable\n import logging\n \n@@ -63,7 +64,7 @@\n \t\t\t# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().\n \t\t\tif 'CFF ' in ttFont:\n \t\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n-\t\t\t\tself.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox\n+\t\t\t\tself.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)\n \t\tif ttFont.recalcTimestamp:\n \t\t\tself.modified = timestampNow()\n \t\tdata = sstruct.pack(headFormat, self)\n", "issue": "Float yMin value: required argument is not an integer\nIf a font file has a float value in `yMin`\u2014and I assume equally in `xMin`, `xMax` or `yMax`\u2014it will fail to save with the error `required argument is not an integer` ([`fontTools/misc/sstruct.py in pack at line 75`](https://github.com/fonttools/fonttools/blob/3.40.0/Lib/fontTools/misc/sstruct.py#L75), fonttools v3.40.0).\r\n\r\nTrace:\r\n```\r\nfontTools/misc/sstruct.py in pack at line 75\r\nfontTools/ttLib/tables/_h_e_a_d.py in compile at line 69\r\nfontTools/ttLib/ttFont.py in getTableData at line 651\r\nfontTools/ttLib/ttFont.py in _writeTable at line 633\r\nfontTools/ttLib/ttFont.py in _save at line 212\r\nfontTools/ttLib/ttFont.py in save at line 173\r\n```\r\n\r\nVariables at point of error:\r\n```python\r\nformatstring = \">llIIHHQQhhhhHHhhh\"\r\nelements = [\r\n 65536, \r\n 65601, \r\n 1208942685, \r\n 1594834165, \r\n 3, \r\n 1000, \r\n 3551183604, \r\n 3640213847, \r\n -132, \r\n -170.009, \r\n 788, \r\n 835, \r\n 0, \r\n 3, \r\n 2, \r\n 0, \r\n 0\r\n]\r\n```\r\n\r\nAs you can see the value `-170.009` would trigger the error. If integers are expected then rounding should probably be applied.\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval, num2binary, binary2num\nfrom fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow\nfrom fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat\nfrom . 
import DefaultTable\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\nheadFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion: 16.16F\n\t\tfontRevision: 16.16F\n\t\tcheckSumAdjustment: I\n\t\tmagicNumber: I\n\t\tflags: H\n\t\tunitsPerEm: H\n\t\tcreated: Q\n\t\tmodified: Q\n\t\txMin: h\n\t\tyMin: h\n\t\txMax: h\n\t\tyMax: h\n\t\tmacStyle: H\n\t\tlowestRecPPEM: H\n\t\tfontDirectionHint: h\n\t\tindexToLocFormat: h\n\t\tglyphDataFormat: h\n\"\"\"\n\nclass table__h_e_a_d(DefaultTable.DefaultTable):\n\n\tdependencies = ['maxp', 'loca', 'CFF ']\n\n\tdef decompile(self, data, ttFont):\n\t\tdummy, rest = sstruct.unpack2(headFormat, data, self)\n\t\tif rest:\n\t\t\t# this is quite illegal, but there seem to be fonts out there that do this\n\t\t\tlog.warning(\"extra bytes at the end of 'head' table\")\n\t\t\tassert rest == \"\\0\\0\"\n\n\t\t# For timestamp fields, ignore the top four bytes. Some fonts have\n\t\t# bogus values there. Since till 2038 those bytes only can be zero,\n\t\t# ignore them.\n\t\t#\n\t\t# https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810\n\t\tfor stamp in 'created', 'modified':\n\t\t\tvalue = getattr(self, stamp)\n\t\t\tif value > 0xFFFFFFFF:\n\t\t\t\tlog.warning(\"'%s' timestamp out of range; ignoring top bytes\", stamp)\n\t\t\t\tvalue &= 0xFFFFFFFF\n\t\t\t\tsetattr(self, stamp, value)\n\t\t\tif value < 0x7C259DC0: # January 1, 1970 00:00:00\n\t\t\t\tlog.warning(\"'%s' timestamp seems very low; regarding as unix timestamp\", stamp)\n\t\t\t\tvalue += 0x7C259DC0\n\t\t\t\tsetattr(self, stamp, value)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes:\n\t\t\t# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().\n\t\t\tif 'CFF ' in ttFont:\n\t\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\t\tself.xMin, self.yMin, self.xMax, self.yMax = topDict.FontBBox\n\t\tif ttFont.recalcTimestamp:\n\t\t\tself.modified = timestampNow()\n\t\tdata = sstruct.pack(headFormat, self)\n\t\treturn data\n\n\tdef toXML(self, writer, ttFont):\n\t\twriter.comment(\"Most of this table will be recalculated by the compiler\")\n\t\twriter.newline()\n\t\tformatstring, names, fixes = sstruct.getformat(headFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name in (\"created\", \"modified\"):\n\t\t\t\tvalue = timestampToString(value)\n\t\t\tif name in (\"magicNumber\", \"checkSumAdjustment\"):\n\t\t\t\tif value < 0:\n\t\t\t\t\tvalue = value + 0x100000000\n\t\t\t\tvalue = hex(value)\n\t\t\t\tif value[-1:] == \"L\":\n\t\t\t\t\tvalue = value[:-1]\n\t\t\telif name in (\"macStyle\", \"flags\"):\n\t\t\t\tvalue = num2binary(value, 16)\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tvalue = attrs[\"value\"]\n\t\tif name in (\"created\", \"modified\"):\n\t\t\tvalue = timestampFromString(value)\n\t\telif name in (\"macStyle\", \"flags\"):\n\t\t\tvalue = binary2num(value)\n\t\telse:\n\t\t\tvalue = safeEval(value)\n\t\tsetattr(self, name, value)\n", "path": "Lib/fontTools/ttLib/tables/_h_e_a_d.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval, num2binary, binary2num\nfrom fontTools.misc.timeTools import timestampFromString, timestampToString, timestampNow\nfrom fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat\nfrom fontTools.misc.arrayTools 
import intRect\nfrom . import DefaultTable\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\nheadFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion: 16.16F\n\t\tfontRevision: 16.16F\n\t\tcheckSumAdjustment: I\n\t\tmagicNumber: I\n\t\tflags: H\n\t\tunitsPerEm: H\n\t\tcreated: Q\n\t\tmodified: Q\n\t\txMin: h\n\t\tyMin: h\n\t\txMax: h\n\t\tyMax: h\n\t\tmacStyle: H\n\t\tlowestRecPPEM: H\n\t\tfontDirectionHint: h\n\t\tindexToLocFormat: h\n\t\tglyphDataFormat: h\n\"\"\"\n\nclass table__h_e_a_d(DefaultTable.DefaultTable):\n\n\tdependencies = ['maxp', 'loca', 'CFF ']\n\n\tdef decompile(self, data, ttFont):\n\t\tdummy, rest = sstruct.unpack2(headFormat, data, self)\n\t\tif rest:\n\t\t\t# this is quite illegal, but there seem to be fonts out there that do this\n\t\t\tlog.warning(\"extra bytes at the end of 'head' table\")\n\t\t\tassert rest == \"\\0\\0\"\n\n\t\t# For timestamp fields, ignore the top four bytes. Some fonts have\n\t\t# bogus values there. Since till 2038 those bytes only can be zero,\n\t\t# ignore them.\n\t\t#\n\t\t# https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810\n\t\tfor stamp in 'created', 'modified':\n\t\t\tvalue = getattr(self, stamp)\n\t\t\tif value > 0xFFFFFFFF:\n\t\t\t\tlog.warning(\"'%s' timestamp out of range; ignoring top bytes\", stamp)\n\t\t\t\tvalue &= 0xFFFFFFFF\n\t\t\t\tsetattr(self, stamp, value)\n\t\t\tif value < 0x7C259DC0: # January 1, 1970 00:00:00\n\t\t\t\tlog.warning(\"'%s' timestamp seems very low; regarding as unix timestamp\", stamp)\n\t\t\t\tvalue += 0x7C259DC0\n\t\t\t\tsetattr(self, stamp, value)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.recalcBBoxes:\n\t\t\t# For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().\n\t\t\tif 'CFF ' in ttFont:\n\t\t\t\ttopDict = ttFont['CFF '].cff.topDictIndex[0]\n\t\t\t\tself.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)\n\t\tif ttFont.recalcTimestamp:\n\t\t\tself.modified = timestampNow()\n\t\tdata = sstruct.pack(headFormat, self)\n\t\treturn data\n\n\tdef toXML(self, writer, ttFont):\n\t\twriter.comment(\"Most of this table will be recalculated by the compiler\")\n\t\twriter.newline()\n\t\tformatstring, names, fixes = sstruct.getformat(headFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\tif name in (\"created\", \"modified\"):\n\t\t\t\tvalue = timestampToString(value)\n\t\t\tif name in (\"magicNumber\", \"checkSumAdjustment\"):\n\t\t\t\tif value < 0:\n\t\t\t\t\tvalue = value + 0x100000000\n\t\t\t\tvalue = hex(value)\n\t\t\t\tif value[-1:] == \"L\":\n\t\t\t\t\tvalue = value[:-1]\n\t\t\telif name in (\"macStyle\", \"flags\"):\n\t\t\t\tvalue = num2binary(value, 16)\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tvalue = attrs[\"value\"]\n\t\tif name in (\"created\", \"modified\"):\n\t\t\tvalue = timestampFromString(value)\n\t\telif name in (\"macStyle\", \"flags\"):\n\t\t\tvalue = binary2num(value)\n\t\telse:\n\t\t\tvalue = safeEval(value)\n\t\tsetattr(self, name, value)\n", "path": "Lib/fontTools/ttLib/tables/_h_e_a_d.py"}]}
| 1,866 | 302 |
gh_patches_debug_23708
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-1421
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Revisit TPU training
## 🚀 Feature
just curious, have you used TPU on Kaggle recently?
https://www.kaggle.com/code/jirkaborovec/demo-flash-image-classification-on-tpu
### Motivation
Kaggle has offered 20 hours of training, which is mostly used with TF or Keras as any other lib is rather difficult, so there is huge potential for Flash to take this space...
Also, with Flash it shall be trivial to start your work on CPU for exploration and then alternate between the GPU and TPU offerings to finish with the best-trained model :rabbit:
### Pitch
<!-- A clear and concise description of what you want to happen. -->
### Alternatives
Reason: https://pytorch-lightning.readthedocs.io/en/latest/accelerators/tpu_faq.html#unsupported-datatype-transfer-to-tpus
Unsupported data type transfer to TPUs?
```
File "/usr/local/lib/python3.8/dist-packages/torch_xla/utils/utils.py", line 205, in _for_each_instance_rewrite
v = _for_each_instance_rewrite(result.__dict__[k], select_fn, fn, rwmap)
File "/usr/local/lib/python3.8/dist-packages/torch_xla/utils/utils.py", line 206, in _for_each_instance_rewrite
result.__dict__[k] = v
TypeError: 'mappingproxy' object does not support item assignment
```
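For context, Python exposes class namespaces as read-only `mappingproxy` objects, which is why the assignment in the last frame above blows up; a minimal sketch of the same failure:

```python
class Sample:
    pass

# A class __dict__ is a mappingproxy: reading works, item assignment does not,
# which is the operation `result.__dict__[k] = v` performs in the traceback.
print(type(Sample.__dict__))  # <class 'mappingproxy'>
try:
    Sample.__dict__["x"] = 1
except TypeError as err:
    print(err)  # 'mappingproxy' object does not support item assignment
```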
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/core/data/io/input.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import functools
15 import os
16 import sys
17 from copy import deepcopy
18 from typing import Any, cast, Dict, Iterable, List, Sequence, Tuple, Union
19
20 from pytorch_lightning.utilities.enums import LightningEnum
21 from pytorch_lightning.utilities.exceptions import MisconfigurationException
22 from torch.utils.data import Dataset
23
24 from flash.core.data.properties import Properties
25 from flash.core.data.utils import _STAGES_PREFIX
26 from flash.core.utilities.stages import RunningStage
27
28 if sys.version_info < (3, 7):
29 from typing import GenericMeta
30 else:
31 GenericMeta = type
32
33
34 if not os.environ.get("READTHEDOCS", False):
35 from torch.utils.data import IterableDataset
36 else:
37 # ReadTheDocs mocks the `IterableDataset` import so it's type cannot be used as a base for a metaclass, so we
38 # replace it here.
39 IterableDataset = object
40
41
42 class InputFormat(LightningEnum):
43 """The ``InputFormat`` enum contains the data source names used by all of the default ``from_*`` methods in
44 :class:`~flash.core.data.data_module.DataModule`."""
45
46 FOLDERS = "folders"
47 FILES = "files"
48 NUMPY = "numpy"
49 TENSORS = "tensors"
50 CSV = "csv"
51 JSON = "json"
52 PARQUET = "parquet"
53 DATASETS = "datasets"
54 HUGGINGFACE_DATASET = "hf_datasets"
55 FIFTYONE = "fiftyone"
56 DATAFRAME = "data_frame"
57 LISTS = "lists"
58 LABELSTUDIO = "labelstudio"
59
60 # TODO: Create a FlashEnum class???
61 def __hash__(self) -> int:
62 return hash(self.value)
63
64
65 class DataKeys(LightningEnum):
66 """The ``DataKeys`` enum contains the keys that are used by built-in data sources to refer to inputs and
67 targets."""
68
69 INPUT = "input"
70 PREDS = "preds"
71 TARGET = "target"
72 METADATA = "metadata"
73
74 # TODO: Create a FlashEnum class???
75 def __hash__(self) -> int:
76 return hash(self.value)
77
78
79 class BaseDataFormat(LightningEnum):
80 """The base class for creating ``data_format`` for :class:`~flash.core.data.io.input.Input`."""
81
82 def __hash__(self) -> int:
83 return hash(self.value)
84
85
86 def _has_len(data: Union[Sequence, Iterable]) -> bool:
87 """Duck typing check to see if the argument supports getting the length.
88
89 Args:
90 data: The object to check for length support.
91 """
92 try:
93 len(data)
94 return True
95 except (TypeError, NotImplementedError):
96 return False
97
98
99 def _validate_input(input: "InputBase") -> None:
100 """Helper function to validate that the type of an ``InputBase.data`` is appropriate for the type of
101 ``InputBase`` being used.
102
103 Args:
104 input: The ``InputBase`` instance to validate.
105
106 Raises:
107 RuntimeError: If the ``input`` is of type ``Input`` and it's ``data`` attribute does not support ``len``.
108 RuntimeError: If the ``input`` is of type ``IterableInput`` and it's ``data`` attribute does support ``len``.
109 """
110 if input.data is not None:
111 if isinstance(input, Input) and not _has_len(input.data):
112 raise RuntimeError("`Input.data` is not a sequence with a defined length. Use `IterableInput` instead.")
113 elif isinstance(input, IterableInput) and _has_len(input.data):
114 raise RuntimeError("`IterableInput.data` is a sequence with a defined length. Use `Input` instead.")
115
116
117 def _wrap_init(class_dict: Dict[str, Any]) -> None:
118 """Helper function to wrap the ``__init__`` (if present) from a class construction dict to apply the
119 ``_validate_input`` function after instantiation. Modifies the dict inplace.
120
121 Args:
122 class_dict: The class construction dict, optionally containing an init to wrap.
123 """
124 if "__init__" in class_dict:
125 fn = class_dict["__init__"]
126
127 @functools.wraps(fn)
128 def wrapper(self, *args, **kwargs):
129 fn(self, *args, **kwargs)
130 _validate_input(self)
131
132 class_dict["__init__"] = wrapper
133
134
135 class _InputMeta(GenericMeta):
136 """Metaclass for the ``InputBase`` which wraps any init defined in a subclass with the ``_validate_input``
137 helper."""
138
139 def __new__(mcs, name: str, bases: Tuple, class_dict: Dict[str, Any]) -> "_InputMeta":
140 _wrap_init(class_dict)
141 return cast(_InputMeta, super().__new__(mcs, name, bases, class_dict))
142
143
144 class _IterableInputMeta(_InputMeta, type(IterableDataset)):
145 """Metaclass for the ``IterableInput`` which extends ``_InputMeta`` and avoids metaclass conflict with
146 ``IterableDataset``."""
147
148 def __new__(mcs, name: str, bases: Tuple, class_dict: Dict[str, Any]) -> "_IterableInputMeta":
149 return cast(_IterableInputMeta, super().__new__(mcs, name, bases, class_dict))
150
151
152 class InputBase(Properties, metaclass=_InputMeta):
153 """``InputBase`` is the base class for the :class:`~flash.core.data.io.input.Input` and
154 :class:`~flash.core.data.io.input.IterableInput` dataset implementations in Flash. These datasets are
155 constructed via the ``load_data`` and ``load_sample`` hooks, which allow a single dataset object to include custom
156 loading logic according to the running stage (e.g. train, validate, test, predict).
157
158 Args:
159 running_stage: The running stage for which the input will be used.
160 *args: Any arguments that are to be passed to the ``load_data`` hook.
161 **kwargs: Any additional keyword arguments to pass to the ``load_data`` hook.
162 """
163
164 def __init__(self, running_stage: RunningStage, *args: Any, **kwargs: Any) -> None:
165
166 super().__init__(running_stage=running_stage)
167
168 self.data = None
169 if len(args) >= 1 and args[0] is not None:
170 self.data = getattr(self, f"{_STAGES_PREFIX[running_stage]}_load_data")(*args, **kwargs)
171
172 def _call_load_sample(self, sample: Any) -> Any:
173 # Deepcopy the sample to avoid leaks with complex data structures
174 return getattr(self, f"{_STAGES_PREFIX[self.running_stage]}_load_sample")(deepcopy(sample))
175
176 @staticmethod
177 def load_data(*args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:
178 """The ``load_data`` hook should return a collection of samples. To reduce the memory footprint, these
179 samples should typically not have been loaded. For example, an input which loads images from disk would
180 only return the list of filenames here rather than the loaded images.
181
182 Args:
183 *args: Any arguments that the input requires.
184 **kwargs: Any additional keyword arguments that the input requires.
185 """
186 return args[0]
187
188 def train_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:
189 """Override the ``train_load_data`` hook with data loading logic that is only required during training.
190
191 Args:
192 *args: Any arguments that the input requires.
193 **kwargs: Any additional keyword arguments that the input requires.
194 """
195 return self.load_data(*args, **kwargs)
196
197 def val_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:
198 """Override the ``val_load_data`` hook with data loading logic that is only required during validating.
199
200 Args:
201 *args: Any arguments that the input requires.
202 **kwargs: Any additional keyword arguments that the input requires.
203 """
204 return self.load_data(*args, **kwargs)
205
206 def test_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:
207 """Override the ``test_load_data`` hook with data loading logic that is only required during testing.
208
209 Args:
210 *args: Any arguments that the input requires.
211 **kwargs: Any additional keyword arguments that the input requires.
212 """
213 return self.load_data(*args, **kwargs)
214
215 def predict_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:
216 """Override the ``predict_load_data`` hook with data loading logic that is only required during predicting.
217
218 Args:
219 *args: Any arguments that the input requires.
220 **kwargs: Any additional keyword arguments that the input requires.
221 """
222 return self.load_data(*args, **kwargs)
223
224 @staticmethod
225 def load_sample(sample: Dict[str, Any]) -> Any:
226 """The ``load_sample`` hook is called for each ``__getitem__`` or ``__next__`` call to the dataset with a
227 single sample from the output of the ``load_data`` hook as input.
228
229 Args:
230 sample: A single sample from the output of the ``load_data`` hook.
231 """
232 return sample
233
234 def train_load_sample(self, sample: Dict[str, Any]) -> Any:
235 """Override the ``train_load_sample`` hook with data loading logic that is only required during training.
236
237 Args:
238 sample: A single sample from the output of the ``load_data`` hook.
239 """
240 return self.load_sample(sample)
241
242 def val_load_sample(self, sample: Dict[str, Any]) -> Any:
243 """Override the ``val_load_sample`` hook with data loading logic that is only required during validating.
244
245 Args:
246 sample: A single sample from the output of the ``load_data`` hook.
247 """
248 return self.load_sample(sample)
249
250 def test_load_sample(self, sample: Dict[str, Any]) -> Any:
251 """Override the ``test_load_sample`` hook with data loading logic that is only required during testing.
252
253 Args:
254 sample: A single sample from the output of the ``load_data`` hook.
255 """
256 return self.load_sample(sample)
257
258 def predict_load_sample(self, sample: Dict[str, Any]) -> Any:
259 """Override the ``predict_load_sample`` hook with data loading logic that is only required during
260 predicting.
261
262 Args:
263 sample: A single sample from the output of the ``load_data`` hook.
264 """
265 return self.load_sample(sample)
266
267 def __bool__(self):
268 """If ``self.data`` is ``None`` then the ``InputBase`` is considered falsey.
269
270 This allows for quickly checking whether or not the ``InputBase`` is populated with data.
271 """
272 return self.data is not None
273
274
275 class Input(InputBase, Dataset):
276 def __getitem__(self, index: int) -> Any:
277 return self._call_load_sample(self.data[index])
278
279 def __len__(self) -> int:
280 return len(self.data) if self.data is not None else 0
281
282
283 class IterableInput(InputBase, IterableDataset, metaclass=_IterableInputMeta):
284 def __iter__(self):
285 self.data_iter = iter(self.data)
286 return self
287
288 def __next__(self) -> Any:
289 return self._call_load_sample(next(self.data_iter))
290
291
292 class ServeInput(Input):
293 def __init__(self) -> None:
294 if hasattr(self, "serve_load_data"):
295 raise MisconfigurationException("`serve_load_data` shouldn't be implemented.")
296
297 super().__init__(RunningStage.SERVING)
298
299 def serve_load_sample(self, sample: Any) -> List[Any]:
300 raise NotImplementedError
301
302 def example_input(self) -> str:
303 raise NotImplementedError
304
305 def __bool__(self):
306 return True
307
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flash/core/data/io/input.py b/flash/core/data/io/input.py
--- a/flash/core/data/io/input.py
+++ b/flash/core/data/io/input.py
@@ -15,6 +15,7 @@
import os
import sys
from copy import deepcopy
+from enum import Enum
from typing import Any, cast, Dict, Iterable, List, Sequence, Tuple, Union
from pytorch_lightning.utilities.enums import LightningEnum
@@ -171,7 +172,18 @@
def _call_load_sample(self, sample: Any) -> Any:
# Deepcopy the sample to avoid leaks with complex data structures
- return getattr(self, f"{_STAGES_PREFIX[self.running_stage]}_load_sample")(deepcopy(sample))
+ sample_output = getattr(self, f"{_STAGES_PREFIX[self.running_stage]}_load_sample")(deepcopy(sample))
+
+ # Change DataKeys Enum to strings
+ if isinstance(sample_output, dict):
+ output_dict = {}
+ for key, val in sample_output.items():
+ if isinstance(key, Enum) and hasattr(key, "value"):
+ output_dict[key.value] = val
+ else:
+ output_dict[key] = val
+ return output_dict
+ return sample_output
@staticmethod
def load_data(*args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:
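A minimal standalone sketch of the key conversion this hunk adds, with a stand-in enum so it runs outside Flash:

```python
from enum import Enum

class DataKeys(Enum):  # stand-in for flash.core.data.io.input.DataKeys
    INPUT = "input"
    TARGET = "target"

def stringify_keys(sample):
    # Mirror the patched _call_load_sample post-processing: Enum keys become
    # their .value so samples only carry plain types into collation/transfer.
    if isinstance(sample, dict):
        return {
            (key.value if isinstance(key, Enum) and hasattr(key, "value") else key): val
            for key, val in sample.items()
        }
    return sample

print(stringify_keys({DataKeys.INPUT: [1.0, 2.0], DataKeys.TARGET: 3, "meta": None}))
# {'input': [1.0, 2.0], 'target': 3, 'meta': None}
```

Doing this inside `_call_load_sample` means every stage's samples are normalised before they reach the dataloader, which is the route the patch takes to avoid the mappingproxy error quoted in the issue.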
|
{"golden_diff": "diff --git a/flash/core/data/io/input.py b/flash/core/data/io/input.py\n--- a/flash/core/data/io/input.py\n+++ b/flash/core/data/io/input.py\n@@ -15,6 +15,7 @@\n import os\n import sys\n from copy import deepcopy\n+from enum import Enum\n from typing import Any, cast, Dict, Iterable, List, Sequence, Tuple, Union\n \n from pytorch_lightning.utilities.enums import LightningEnum\n@@ -171,7 +172,18 @@\n \n def _call_load_sample(self, sample: Any) -> Any:\n # Deepcopy the sample to avoid leaks with complex data structures\n- return getattr(self, f\"{_STAGES_PREFIX[self.running_stage]}_load_sample\")(deepcopy(sample))\n+ sample_output = getattr(self, f\"{_STAGES_PREFIX[self.running_stage]}_load_sample\")(deepcopy(sample))\n+\n+ # Change DataKeys Enum to strings\n+ if isinstance(sample_output, dict):\n+ output_dict = {}\n+ for key, val in sample_output.items():\n+ if isinstance(key, Enum) and hasattr(key, \"value\"):\n+ output_dict[key.value] = val\n+ else:\n+ output_dict[key] = val\n+ return output_dict\n+ return sample_output\n \n @staticmethod\n def load_data(*args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n", "issue": "Revisit TPU training\n## \ud83d\ude80 Feature\r\n\r\njust curious, have you used TPU on Kaggle recently?\r\nhttps://www.kaggle.com/code/jirkaborovec/demo-flash-image-classification-on-tpu\r\n\r\n### Motivation\r\n\r\nKaggle has offered 20 hours of training which is mostly used with TF of Keras as any other lib is rather difficult, so there is huge potential to take this space by Flash...\r\nAlso With Flash, it shall be trivial to start your work with CPU for exploration and then alternate between GPU and TPU offering to finish with the best-trained model :rabbit: \r\n\r\n### Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n### Alternatives\r\n\r\nReason: https://pytorch-lightning.readthedocs.io/en/latest/accelerators/tpu_faq.html#unsupported-datatype-transfer-to-tpus\r\n\r\nUnsupported data type transfer to TPUs?\r\n```\r\nFile \"/usr/local/lib/python3.8/dist-packages/torch_xla/utils/utils.py\", line 205, in _for_each_instance_rewrite\r\n v = _for_each_instance_rewrite(result.__dict__[k], select_fn, fn, rwmap)\r\nFile \"/usr/local/lib/python3.8/dist-packages/torch_xla/utils/utils.py\", line 206, in _for_each_instance_rewrite\r\n result.__dict__[k] = v\r\nTypeError: 'mappingproxy' object does not support item assignment\r\n```\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport functools\nimport os\nimport sys\nfrom copy import deepcopy\nfrom typing import Any, cast, Dict, Iterable, List, Sequence, Tuple, Union\n\nfrom pytorch_lightning.utilities.enums import LightningEnum\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import Dataset\n\nfrom flash.core.data.properties import Properties\nfrom flash.core.data.utils import _STAGES_PREFIX\nfrom flash.core.utilities.stages import RunningStage\n\nif sys.version_info < (3, 7):\n from typing import GenericMeta\nelse:\n GenericMeta = type\n\n\nif not os.environ.get(\"READTHEDOCS\", False):\n from torch.utils.data import IterableDataset\nelse:\n # ReadTheDocs mocks the `IterableDataset` import so it's type cannot be used as a base for a metaclass, so we\n # replace it here.\n IterableDataset = object\n\n\nclass InputFormat(LightningEnum):\n \"\"\"The ``InputFormat`` enum contains the data source names used by all of the default ``from_*`` methods in\n :class:`~flash.core.data.data_module.DataModule`.\"\"\"\n\n FOLDERS = \"folders\"\n FILES = \"files\"\n NUMPY = \"numpy\"\n TENSORS = \"tensors\"\n CSV = \"csv\"\n JSON = \"json\"\n PARQUET = \"parquet\"\n DATASETS = \"datasets\"\n HUGGINGFACE_DATASET = \"hf_datasets\"\n FIFTYONE = \"fiftyone\"\n DATAFRAME = \"data_frame\"\n LISTS = \"lists\"\n LABELSTUDIO = \"labelstudio\"\n\n # TODO: Create a FlashEnum class???\n def __hash__(self) -> int:\n return hash(self.value)\n\n\nclass DataKeys(LightningEnum):\n \"\"\"The ``DataKeys`` enum contains the keys that are used by built-in data sources to refer to inputs and\n targets.\"\"\"\n\n INPUT = \"input\"\n PREDS = \"preds\"\n TARGET = \"target\"\n METADATA = \"metadata\"\n\n # TODO: Create a FlashEnum class???\n def __hash__(self) -> int:\n return hash(self.value)\n\n\nclass BaseDataFormat(LightningEnum):\n \"\"\"The base class for creating ``data_format`` for :class:`~flash.core.data.io.input.Input`.\"\"\"\n\n def __hash__(self) -> int:\n return hash(self.value)\n\n\ndef _has_len(data: Union[Sequence, Iterable]) -> bool:\n \"\"\"Duck typing check to see if the argument supports getting the length.\n\n Args:\n data: The object to check for length support.\n \"\"\"\n try:\n len(data)\n return True\n except (TypeError, NotImplementedError):\n return False\n\n\ndef _validate_input(input: \"InputBase\") -> None:\n \"\"\"Helper function to validate that the type of an ``InputBase.data`` is appropriate for the type of\n ``InputBase`` being used.\n\n Args:\n input: The ``InputBase`` instance to validate.\n\n Raises:\n RuntimeError: If the ``input`` is of type ``Input`` and it's ``data`` attribute does not support ``len``.\n RuntimeError: If the ``input`` is of type ``IterableInput`` and it's ``data`` attribute does support ``len``.\n \"\"\"\n if input.data is not None:\n if isinstance(input, Input) and not _has_len(input.data):\n raise 
RuntimeError(\"`Input.data` is not a sequence with a defined length. Use `IterableInput` instead.\")\n elif isinstance(input, IterableInput) and _has_len(input.data):\n raise RuntimeError(\"`IterableInput.data` is a sequence with a defined length. Use `Input` instead.\")\n\n\ndef _wrap_init(class_dict: Dict[str, Any]) -> None:\n \"\"\"Helper function to wrap the ``__init__`` (if present) from a class construction dict to apply the\n ``_validate_input`` function after instantiation. Modifies the dict inplace.\n\n Args:\n class_dict: The class construction dict, optionally containing an init to wrap.\n \"\"\"\n if \"__init__\" in class_dict:\n fn = class_dict[\"__init__\"]\n\n @functools.wraps(fn)\n def wrapper(self, *args, **kwargs):\n fn(self, *args, **kwargs)\n _validate_input(self)\n\n class_dict[\"__init__\"] = wrapper\n\n\nclass _InputMeta(GenericMeta):\n \"\"\"Metaclass for the ``InputBase`` which wraps any init defined in a subclass with the ``_validate_input``\n helper.\"\"\"\n\n def __new__(mcs, name: str, bases: Tuple, class_dict: Dict[str, Any]) -> \"_InputMeta\":\n _wrap_init(class_dict)\n return cast(_InputMeta, super().__new__(mcs, name, bases, class_dict))\n\n\nclass _IterableInputMeta(_InputMeta, type(IterableDataset)):\n \"\"\"Metaclass for the ``IterableInput`` which extends ``_InputMeta`` and avoids metaclass conflict with\n ``IterableDataset``.\"\"\"\n\n def __new__(mcs, name: str, bases: Tuple, class_dict: Dict[str, Any]) -> \"_IterableInputMeta\":\n return cast(_IterableInputMeta, super().__new__(mcs, name, bases, class_dict))\n\n\nclass InputBase(Properties, metaclass=_InputMeta):\n \"\"\"``InputBase`` is the base class for the :class:`~flash.core.data.io.input.Input` and\n :class:`~flash.core.data.io.input.IterableInput` dataset implementations in Flash. These datasets are\n constructed via the ``load_data`` and ``load_sample`` hooks, which allow a single dataset object to include custom\n loading logic according to the running stage (e.g. train, validate, test, predict).\n\n Args:\n running_stage: The running stage for which the input will be used.\n *args: Any arguments that are to be passed to the ``load_data`` hook.\n **kwargs: Any additional keyword arguments to pass to the ``load_data`` hook.\n \"\"\"\n\n def __init__(self, running_stage: RunningStage, *args: Any, **kwargs: Any) -> None:\n\n super().__init__(running_stage=running_stage)\n\n self.data = None\n if len(args) >= 1 and args[0] is not None:\n self.data = getattr(self, f\"{_STAGES_PREFIX[running_stage]}_load_data\")(*args, **kwargs)\n\n def _call_load_sample(self, sample: Any) -> Any:\n # Deepcopy the sample to avoid leaks with complex data structures\n return getattr(self, f\"{_STAGES_PREFIX[self.running_stage]}_load_sample\")(deepcopy(sample))\n\n @staticmethod\n def load_data(*args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"The ``load_data`` hook should return a collection of samples. To reduce the memory footprint, these\n samples should typically not have been loaded. 
For example, an input which loads images from disk would\n only return the list of filenames here rather than the loaded images.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return args[0]\n\n def train_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``train_load_data`` hook with data loading logic that is only required during training.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n def val_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``val_load_data`` hook with data loading logic that is only required during validating.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n def test_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``test_load_data`` hook with data loading logic that is only required during testing.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n def predict_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``predict_load_data`` hook with data loading logic that is only required during predicting.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n @staticmethod\n def load_sample(sample: Dict[str, Any]) -> Any:\n \"\"\"The ``load_sample`` hook is called for each ``__getitem__`` or ``__next__`` call to the dataset with a\n single sample from the output of the ``load_data`` hook as input.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return sample\n\n def train_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``train_load_sample`` hook with data loading logic that is only required during training.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def val_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``val_load_sample`` hook with data loading logic that is only required during validating.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def test_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``test_load_sample`` hook with data loading logic that is only required during testing.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def predict_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``predict_load_sample`` hook with data loading logic that is only required during\n predicting.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def __bool__(self):\n \"\"\"If ``self.data`` is ``None`` then the ``InputBase`` is considered falsey.\n\n This allows for quickly checking whether or not the ``InputBase`` is populated with data.\n \"\"\"\n return self.data is 
not None\n\n\nclass Input(InputBase, Dataset):\n def __getitem__(self, index: int) -> Any:\n return self._call_load_sample(self.data[index])\n\n def __len__(self) -> int:\n return len(self.data) if self.data is not None else 0\n\n\nclass IterableInput(InputBase, IterableDataset, metaclass=_IterableInputMeta):\n def __iter__(self):\n self.data_iter = iter(self.data)\n return self\n\n def __next__(self) -> Any:\n return self._call_load_sample(next(self.data_iter))\n\n\nclass ServeInput(Input):\n def __init__(self) -> None:\n if hasattr(self, \"serve_load_data\"):\n raise MisconfigurationException(\"`serve_load_data` shouldn't be implemented.\")\n\n super().__init__(RunningStage.SERVING)\n\n def serve_load_sample(self, sample: Any) -> List[Any]:\n raise NotImplementedError\n\n def example_input(self) -> str:\n raise NotImplementedError\n\n def __bool__(self):\n return True\n", "path": "flash/core/data/io/input.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport functools\nimport os\nimport sys\nfrom copy import deepcopy\nfrom enum import Enum\nfrom typing import Any, cast, Dict, Iterable, List, Sequence, Tuple, Union\n\nfrom pytorch_lightning.utilities.enums import LightningEnum\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import Dataset\n\nfrom flash.core.data.properties import Properties\nfrom flash.core.data.utils import _STAGES_PREFIX\nfrom flash.core.utilities.stages import RunningStage\n\nif sys.version_info < (3, 7):\n from typing import GenericMeta\nelse:\n GenericMeta = type\n\n\nif not os.environ.get(\"READTHEDOCS\", False):\n from torch.utils.data import IterableDataset\nelse:\n # ReadTheDocs mocks the `IterableDataset` import so it's type cannot be used as a base for a metaclass, so we\n # replace it here.\n IterableDataset = object\n\n\nclass InputFormat(LightningEnum):\n \"\"\"The ``InputFormat`` enum contains the data source names used by all of the default ``from_*`` methods in\n :class:`~flash.core.data.data_module.DataModule`.\"\"\"\n\n FOLDERS = \"folders\"\n FILES = \"files\"\n NUMPY = \"numpy\"\n TENSORS = \"tensors\"\n CSV = \"csv\"\n JSON = \"json\"\n PARQUET = \"parquet\"\n DATASETS = \"datasets\"\n HUGGINGFACE_DATASET = \"hf_datasets\"\n FIFTYONE = \"fiftyone\"\n DATAFRAME = \"data_frame\"\n LISTS = \"lists\"\n LABELSTUDIO = \"labelstudio\"\n\n # TODO: Create a FlashEnum class???\n def __hash__(self) -> int:\n return hash(self.value)\n\n\nclass DataKeys(LightningEnum):\n \"\"\"The ``DataKeys`` enum contains the keys that are used by built-in data sources to refer to inputs and\n targets.\"\"\"\n\n INPUT = \"input\"\n PREDS = \"preds\"\n TARGET = \"target\"\n METADATA = \"metadata\"\n\n # TODO: Create a FlashEnum class???\n def __hash__(self) -> int:\n return hash(self.value)\n\n\nclass BaseDataFormat(LightningEnum):\n \"\"\"The base class for creating ``data_format`` for :class:`~flash.core.data.io.input.Input`.\"\"\"\n\n def 
__hash__(self) -> int:\n return hash(self.value)\n\n\ndef _has_len(data: Union[Sequence, Iterable]) -> bool:\n \"\"\"Duck typing check to see if the argument supports getting the length.\n\n Args:\n data: The object to check for length support.\n \"\"\"\n try:\n len(data)\n return True\n except (TypeError, NotImplementedError):\n return False\n\n\ndef _validate_input(input: \"InputBase\") -> None:\n \"\"\"Helper function to validate that the type of an ``InputBase.data`` is appropriate for the type of\n ``InputBase`` being used.\n\n Args:\n input: The ``InputBase`` instance to validate.\n\n Raises:\n RuntimeError: If the ``input`` is of type ``Input`` and it's ``data`` attribute does not support ``len``.\n RuntimeError: If the ``input`` is of type ``IterableInput`` and it's ``data`` attribute does support ``len``.\n \"\"\"\n if input.data is not None:\n if isinstance(input, Input) and not _has_len(input.data):\n raise RuntimeError(\"`Input.data` is not a sequence with a defined length. Use `IterableInput` instead.\")\n elif isinstance(input, IterableInput) and _has_len(input.data):\n raise RuntimeError(\"`IterableInput.data` is a sequence with a defined length. Use `Input` instead.\")\n\n\ndef _wrap_init(class_dict: Dict[str, Any]) -> None:\n \"\"\"Helper function to wrap the ``__init__`` (if present) from a class construction dict to apply the\n ``_validate_input`` function after instantiation. Modifies the dict inplace.\n\n Args:\n class_dict: The class construction dict, optionally containing an init to wrap.\n \"\"\"\n if \"__init__\" in class_dict:\n fn = class_dict[\"__init__\"]\n\n @functools.wraps(fn)\n def wrapper(self, *args, **kwargs):\n fn(self, *args, **kwargs)\n _validate_input(self)\n\n class_dict[\"__init__\"] = wrapper\n\n\nclass _InputMeta(GenericMeta):\n \"\"\"Metaclass for the ``InputBase`` which wraps any init defined in a subclass with the ``_validate_input``\n helper.\"\"\"\n\n def __new__(mcs, name: str, bases: Tuple, class_dict: Dict[str, Any]) -> \"_InputMeta\":\n _wrap_init(class_dict)\n return cast(_InputMeta, super().__new__(mcs, name, bases, class_dict))\n\n\nclass _IterableInputMeta(_InputMeta, type(IterableDataset)):\n \"\"\"Metaclass for the ``IterableInput`` which extends ``_InputMeta`` and avoids metaclass conflict with\n ``IterableDataset``.\"\"\"\n\n def __new__(mcs, name: str, bases: Tuple, class_dict: Dict[str, Any]) -> \"_IterableInputMeta\":\n return cast(_IterableInputMeta, super().__new__(mcs, name, bases, class_dict))\n\n\nclass InputBase(Properties, metaclass=_InputMeta):\n \"\"\"``InputBase`` is the base class for the :class:`~flash.core.data.io.input.Input` and\n :class:`~flash.core.data.io.input.IterableInput` dataset implementations in Flash. These datasets are\n constructed via the ``load_data`` and ``load_sample`` hooks, which allow a single dataset object to include custom\n loading logic according to the running stage (e.g. 
train, validate, test, predict).\n\n Args:\n running_stage: The running stage for which the input will be used.\n *args: Any arguments that are to be passed to the ``load_data`` hook.\n **kwargs: Any additional keyword arguments to pass to the ``load_data`` hook.\n \"\"\"\n\n def __init__(self, running_stage: RunningStage, *args: Any, **kwargs: Any) -> None:\n\n super().__init__(running_stage=running_stage)\n\n self.data = None\n if len(args) >= 1 and args[0] is not None:\n self.data = getattr(self, f\"{_STAGES_PREFIX[running_stage]}_load_data\")(*args, **kwargs)\n\n def _call_load_sample(self, sample: Any) -> Any:\n # Deepcopy the sample to avoid leaks with complex data structures\n sample_output = getattr(self, f\"{_STAGES_PREFIX[self.running_stage]}_load_sample\")(deepcopy(sample))\n\n # Change DataKeys Enum to strings\n if isinstance(sample_output, dict):\n output_dict = {}\n for key, val in sample_output.items():\n if isinstance(key, Enum) and hasattr(key, \"value\"):\n output_dict[key.value] = val\n else:\n output_dict[key] = val\n return output_dict\n return sample_output\n\n @staticmethod\n def load_data(*args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"The ``load_data`` hook should return a collection of samples. To reduce the memory footprint, these\n samples should typically not have been loaded. For example, an input which loads images from disk would\n only return the list of filenames here rather than the loaded images.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return args[0]\n\n def train_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``train_load_data`` hook with data loading logic that is only required during training.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n def val_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``val_load_data`` hook with data loading logic that is only required during validating.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n def test_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``test_load_data`` hook with data loading logic that is only required during testing.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n def predict_load_data(self, *args: Any, **kwargs: Any) -> Union[Sequence, Iterable]:\n \"\"\"Override the ``predict_load_data`` hook with data loading logic that is only required during predicting.\n\n Args:\n *args: Any arguments that the input requires.\n **kwargs: Any additional keyword arguments that the input requires.\n \"\"\"\n return self.load_data(*args, **kwargs)\n\n @staticmethod\n def load_sample(sample: Dict[str, Any]) -> Any:\n \"\"\"The ``load_sample`` hook is called for each ``__getitem__`` or ``__next__`` call to the dataset with a\n single sample from the output of the ``load_data`` hook as input.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return sample\n\n def train_load_sample(self, sample: Dict[str, Any]) -> Any:\n 
\"\"\"Override the ``train_load_sample`` hook with data loading logic that is only required during training.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def val_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``val_load_sample`` hook with data loading logic that is only required during validating.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def test_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``test_load_sample`` hook with data loading logic that is only required during testing.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def predict_load_sample(self, sample: Dict[str, Any]) -> Any:\n \"\"\"Override the ``predict_load_sample`` hook with data loading logic that is only required during\n predicting.\n\n Args:\n sample: A single sample from the output of the ``load_data`` hook.\n \"\"\"\n return self.load_sample(sample)\n\n def __bool__(self):\n \"\"\"If ``self.data`` is ``None`` then the ``InputBase`` is considered falsey.\n\n This allows for quickly checking whether or not the ``InputBase`` is populated with data.\n \"\"\"\n return self.data is not None\n\n\nclass Input(InputBase, Dataset):\n def __getitem__(self, index: int) -> Any:\n return self._call_load_sample(self.data[index])\n\n def __len__(self) -> int:\n return len(self.data) if self.data is not None else 0\n\n\nclass IterableInput(InputBase, IterableDataset, metaclass=_IterableInputMeta):\n def __iter__(self):\n self.data_iter = iter(self.data)\n return self\n\n def __next__(self) -> Any:\n return self._call_load_sample(next(self.data_iter))\n\n\nclass ServeInput(Input):\n def __init__(self) -> None:\n if hasattr(self, \"serve_load_data\"):\n raise MisconfigurationException(\"`serve_load_data` shouldn't be implemented.\")\n\n super().__init__(RunningStage.SERVING)\n\n def serve_load_sample(self, sample: Any) -> List[Any]:\n raise NotImplementedError\n\n def example_input(self) -> str:\n raise NotImplementedError\n\n def __bool__(self):\n return True\n", "path": "flash/core/data/io/input.py"}]}
| 4,093 | 306 |
gh_patches_debug_58005 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1042 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
{FEAT}: Automated testing with actions.
### Idea summary
Usage of GitHub Actions.
### Further details
We can use GitHub Actions to check/test the code that is pushed upstream via PRs, so it is tested automatically before merging (technically, this is Continuous Integration).
--- END ISSUE ---
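For context on what such a workflow would actually run: the job reduces to executing the project's own checks and test suite on every pull request. Below is a minimal sketch of a CI entry point; the `project/manage.py` path and the exact commands are assumptions inferred from the file paths in this record, not something specified in the issue.
```python
#!/usr/bin/env python
"""Hypothetical CI entry point: run Django system checks and the test suite.

Assumes a standard Django layout with `project/manage.py`; adjust the path
and the commands to match the real repository.
"""
import subprocess
import sys


def run(cmd):
    # Echo the command so CI logs show exactly what was executed.
    print("+", " ".join(cmd), flush=True)
    return subprocess.call(cmd)


if __name__ == "__main__":
    rc = 0
    rc |= run([sys.executable, "project/manage.py", "check"])
    rc |= run([sys.executable, "project/manage.py", "test", "--noinput"])
    sys.exit(1 if rc else 0)
```
A GitHub Actions workflow would then simply check out the repository, install the dependencies, and invoke a script like this (or call `manage.py` directly) on every pull request.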
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `project/accounts/models.py`
Content:
```
1 from django.contrib.auth.models import AbstractUser
2 import os
3 import io
4 from django.core.files.storage import default_storage
5 from django.conf import settings
6 from django.db import models
7 from PIL import Image, ImageOps
8 from django.core.files.uploadedfile import InMemoryUploadedFile
9
10 from taggit.managers import TaggableManager
11
12 from api.models.category import Category
13 from common.utils import PathAndRename
14
15
16 class User(AbstractUser):
17 """
18 A new custom User model for any functionality needed in the future. Extending AbstractUser
19 allows for adding new fields to the user model as needed.
20 """
21
22 class Meta:
23 db_table = "users"
24
25
26 # Image manipulation constants
27 PROFILE_IMG_SIZE = (171, 171)
28 PROFILE_IMG_THUMB_SIZE = (40, 40)
29 WHITE_BG = (255, 255, 255)
30
31
32 class ProfileManager(models.Manager):
33 def summarize(self, profile):
34 from api.models.civi import Civi
35
36 data = {
37 "username": profile.user.username,
38 "first_name": profile.first_name,
39 "last_name": profile.last_name,
40 "about_me": profile.about_me,
41 "history": [
42 Civi.objects.serialize(c)
43 for c in Civi.objects.filter(author_id=profile.id).order_by("-created")
44 ],
45 "profile_image": profile.profile_image_url,
46 "followers": self.followers(profile),
47 "following": self.following(profile),
48 }
49 return data
50
51 def chip_summarize(self, profile):
52 data = {
53 "username": profile.user.username,
54 "first_name": profile.first_name,
55 "last_name": profile.last_name,
56 "profile_image": profile.profile_image_url,
57 }
58 return data
59
60 def card_summarize(self, profile, request_profile):
61 # Length at which to truncate 'about me' text
62 about_me_truncate_length = 150
63
64 # If 'about me' text is longer than 150 characters... add elipsis (truncate)
65 ellipsis_if_too_long = (
66 "" if len(profile.about_me) <= about_me_truncate_length else "..."
67 )
68
69 data = {
70 "id": profile.user.id,
71 "username": profile.user.username,
72 "first_name": profile.first_name,
73 "last_name": profile.last_name,
74 "about_me": profile.about_me[:about_me_truncate_length] + ellipsis_if_too_long,
75 "profile_image": profile.profile_image_url,
76 "follow_state": True
77 if profile in request_profile.following.all()
78 else False,
79 "request_profile": request_profile.first_name,
80 }
81 return data
82
83 def followers(self, profile):
84 return [self.chip_summarize(follower) for follower in profile.followers.all()]
85
86 def following(self, profile):
87 return [self.chip_summarize(following) for following in profile.following.all()]
88
89
90 profile_upload_path = PathAndRename("")
91
92
93 class Profile(models.Model):
94 user = models.ForeignKey(User, on_delete=models.CASCADE)
95 first_name = models.CharField(max_length=63, blank=False)
96 last_name = models.CharField(max_length=63, blank=False)
97 about_me = models.CharField(max_length=511, blank=True)
98
99 categories = models.ManyToManyField(
100 Category, related_name="user_categories", symmetrical=False
101 )
102 tags = TaggableManager()
103
104 followers = models.ManyToManyField(
105 "self", related_name="follower", symmetrical=False
106 )
107 following = models.ManyToManyField(
108 "self", related_name="followings", symmetrical=False
109 )
110
111 is_verified = models.BooleanField(default=False)
112 full_profile = models.BooleanField(default=False)
113
114 objects = ProfileManager()
115 profile_image = models.ImageField(
116 upload_to=profile_upload_path, blank=True, null=True
117 )
118 profile_image_thumb = models.ImageField(
119 upload_to=profile_upload_path, blank=True, null=True
120 )
121
122 @property
123 def full_name(self):
124 """Returns the person's full name."""
125
126 return f"{self.first_name} {self.last_name}"
127
128 @property
129 def profile_image_url(self):
130 """Return placeholder profile image if user didn't upload one"""
131
132 if self.profile_image:
133 file_exists = default_storage.exists(
134 os.path.join(settings.MEDIA_ROOT, self.profile_image.name)
135 )
136 if file_exists:
137 return self.profile_image.url
138
139 return "/static/img/no_image_md.png"
140
141 @property
142 def profile_image_thumb_url(self):
143 """Return placeholder profile image if user didn't upload one"""
144
145 if self.profile_image_thumb:
146 file_exists = default_storage.exists(
147 os.path.join(settings.MEDIA_ROOT, self.profile_image_thumb.name)
148 )
149 if file_exists:
150 return self.profile_image_thumb.url
151
152 return "/static/img/no_image_md.png"
153
154 def __init__(self, *args, **kwargs):
155 super(Profile, self).__init__(*args, **kwargs)
156
157 def save(self, *args, **kwargs):
158 """ Image crop/resize and thumbnail creation """
159
160 # New Profile image --
161 if self.profile_image:
162 self.resize_profile_image()
163
164 self.full_profile = self.is_full_profile()
165
166 super(Profile, self).save(*args, **kwargs)
167
168 def resize_profile_image(self):
169 """
170 Resizes and crops the user uploaded image and creates a thumbnail version of it
171 """
172 profile_image_field = self.profile_image
173 image_file = io.StringIO(profile_image_field.read())
174 profile_image = Image.open(image_file)
175 profile_image.load()
176
177 # Resize image
178 profile_image = ImageOps.fit(
179 profile_image, PROFILE_IMG_SIZE, Image.ANTIALIAS, centering=(0.5, 0.5)
180 )
181
182 # Convert to JPG image format with white background
183 if profile_image.mode not in ("L", "RGB"):
184 white_bg_img = Image.new("RGB", PROFILE_IMG_SIZE, WHITE_BG)
185 white_bg_img.paste(profile_image, mask=profile_image.split()[3])
186 profile_image = white_bg_img
187
188 # Save new cropped image
189 tmp_image_file = io.StringIO()
190 profile_image.save(tmp_image_file, "JPEG", quality=90)
191 tmp_image_file.seek(0)
192 self.profile_image = InMemoryUploadedFile(
193 tmp_image_file,
194 "ImageField",
195 self.profile_image.name,
196 "image/jpeg",
197 tmp_image_file.len,
198 None,
199 )
200 # Make a Thumbnail Image for the new resized image
201 thumb_image = profile_image.copy()
202 thumb_image.thumbnail(PROFILE_IMG_THUMB_SIZE, resample=Image.ANTIALIAS)
203 tmp_image_file = io.StringIO()
204 thumb_image.save(tmp_image_file, "JPEG", quality=90)
205 tmp_image_file.seek(0)
206 self.profile_image_thumb = InMemoryUploadedFile(
207 tmp_image_file,
208 "ImageField",
209 self.profile_image.name,
210 "image/jpeg",
211 tmp_image_file.len,
212 None,
213 )
214
215 def is_full_profile(self):
216 if self.first_name and self.last_name:
217 return True
218 else:
219 return False
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/project/accounts/models.py b/project/accounts/models.py
--- a/project/accounts/models.py
+++ b/project/accounts/models.py
@@ -9,7 +9,7 @@
from taggit.managers import TaggableManager
-from api.models.category import Category
+from api.models import Category
from common.utils import PathAndRename
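A note on why the one-line change works: it relies on `Category` being importable from the `api.models` package itself rather than from a `category` submodule. The sketch below shows the usual re-export pattern that makes such a package-level import stable; the module name `.base` is purely an assumption for illustration, since the real `api/models/__init__.py` is not shown in this record.
```python
# Hypothetical api/models/__init__.py, for illustration only.
# Re-exporting model classes at the package level lets callers write
# `from api.models import Category`, which keeps working even if the module
# that defines Category is renamed, merged, or removed later on.
from .base import Category  # noqa: F401  (defining module name is assumed)

__all__ = ["Category"]
```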
|
{"golden_diff": "diff --git a/project/accounts/models.py b/project/accounts/models.py\n--- a/project/accounts/models.py\n+++ b/project/accounts/models.py\n@@ -9,7 +9,7 @@\n \n from taggit.managers import TaggableManager\n \n-from api.models.category import Category\n+from api.models import Category\n from common.utils import PathAndRename\n", "issue": "{FEAT}: Automated testing with actions.\n### Idea summary\n\nUsage of GitHub actions.\n\n### Further details\n\nWe can use GitHub Actions to check/test the code that is being pushed upstream via PRs and it can be tested before merging automatically (Technically it is Continuous Integration).\n", "before_files": [{"content": "from django.contrib.auth.models import AbstractUser\nimport os\nimport io\nfrom django.core.files.storage import default_storage\nfrom django.conf import settings\nfrom django.db import models\nfrom PIL import Image, ImageOps\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom taggit.managers import TaggableManager\n\nfrom api.models.category import Category\nfrom common.utils import PathAndRename\n\n\nclass User(AbstractUser):\n \"\"\"\n A new custom User model for any functionality needed in the future. Extending AbstractUser\n allows for adding new fields to the user model as needed.\n \"\"\"\n\n class Meta:\n db_table = \"users\"\n\n\n# Image manipulation constants\nPROFILE_IMG_SIZE = (171, 171)\nPROFILE_IMG_THUMB_SIZE = (40, 40)\nWHITE_BG = (255, 255, 255)\n\n\nclass ProfileManager(models.Manager):\n def summarize(self, profile):\n from api.models.civi import Civi\n\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me,\n \"history\": [\n Civi.objects.serialize(c)\n for c in Civi.objects.filter(author_id=profile.id).order_by(\"-created\")\n ],\n \"profile_image\": profile.profile_image_url,\n \"followers\": self.followers(profile),\n \"following\": self.following(profile),\n }\n return data\n\n def chip_summarize(self, profile):\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"profile_image\": profile.profile_image_url,\n }\n return data\n\n def card_summarize(self, profile, request_profile):\n # Length at which to truncate 'about me' text\n about_me_truncate_length = 150\n\n # If 'about me' text is longer than 150 characters... 
add elipsis (truncate)\n ellipsis_if_too_long = (\n \"\" if len(profile.about_me) <= about_me_truncate_length else \"...\"\n )\n\n data = {\n \"id\": profile.user.id,\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me[:about_me_truncate_length] + ellipsis_if_too_long,\n \"profile_image\": profile.profile_image_url,\n \"follow_state\": True\n if profile in request_profile.following.all()\n else False,\n \"request_profile\": request_profile.first_name,\n }\n return data\n\n def followers(self, profile):\n return [self.chip_summarize(follower) for follower in profile.followers.all()]\n\n def following(self, profile):\n return [self.chip_summarize(following) for following in profile.following.all()]\n\n\nprofile_upload_path = PathAndRename(\"\")\n\n\nclass Profile(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=63, blank=False)\n last_name = models.CharField(max_length=63, blank=False)\n about_me = models.CharField(max_length=511, blank=True)\n\n categories = models.ManyToManyField(\n Category, related_name=\"user_categories\", symmetrical=False\n )\n tags = TaggableManager()\n\n followers = models.ManyToManyField(\n \"self\", related_name=\"follower\", symmetrical=False\n )\n following = models.ManyToManyField(\n \"self\", related_name=\"followings\", symmetrical=False\n )\n\n is_verified = models.BooleanField(default=False)\n full_profile = models.BooleanField(default=False)\n\n objects = ProfileManager()\n profile_image = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n profile_image_thumb = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n\n @property\n def full_name(self):\n \"\"\"Returns the person's full name.\"\"\"\n\n return f\"{self.first_name} {self.last_name}\"\n\n @property\n def profile_image_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image.name)\n )\n if file_exists:\n return self.profile_image.url\n\n return \"/static/img/no_image_md.png\"\n\n @property\n def profile_image_thumb_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image_thumb:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image_thumb.name)\n )\n if file_exists:\n return self.profile_image_thumb.url\n\n return \"/static/img/no_image_md.png\"\n\n def __init__(self, *args, **kwargs):\n super(Profile, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n \"\"\" Image crop/resize and thumbnail creation \"\"\"\n\n # New Profile image --\n if self.profile_image:\n self.resize_profile_image()\n\n self.full_profile = self.is_full_profile()\n\n super(Profile, self).save(*args, **kwargs)\n\n def resize_profile_image(self):\n \"\"\"\n Resizes and crops the user uploaded image and creates a thumbnail version of it\n \"\"\"\n profile_image_field = self.profile_image\n image_file = io.StringIO(profile_image_field.read())\n profile_image = Image.open(image_file)\n profile_image.load()\n\n # Resize image\n profile_image = ImageOps.fit(\n profile_image, PROFILE_IMG_SIZE, Image.ANTIALIAS, centering=(0.5, 0.5)\n )\n\n # Convert to JPG image format with white background\n if profile_image.mode not in (\"L\", \"RGB\"):\n white_bg_img = 
Image.new(\"RGB\", PROFILE_IMG_SIZE, WHITE_BG)\n white_bg_img.paste(profile_image, mask=profile_image.split()[3])\n profile_image = white_bg_img\n\n # Save new cropped image\n tmp_image_file = io.StringIO()\n profile_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n # Make a Thumbnail Image for the new resized image\n thumb_image = profile_image.copy()\n thumb_image.thumbnail(PROFILE_IMG_THUMB_SIZE, resample=Image.ANTIALIAS)\n tmp_image_file = io.StringIO()\n thumb_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image_thumb = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n\n def is_full_profile(self):\n if self.first_name and self.last_name:\n return True\n else:\n return False\n", "path": "project/accounts/models.py"}], "after_files": [{"content": "from django.contrib.auth.models import AbstractUser\nimport os\nimport io\nfrom django.core.files.storage import default_storage\nfrom django.conf import settings\nfrom django.db import models\nfrom PIL import Image, ImageOps\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\n\nfrom taggit.managers import TaggableManager\n\nfrom api.models import Category\nfrom common.utils import PathAndRename\n\n\nclass User(AbstractUser):\n \"\"\"\n A new custom User model for any functionality needed in the future. Extending AbstractUser\n allows for adding new fields to the user model as needed.\n \"\"\"\n\n class Meta:\n db_table = \"users\"\n\n\n# Image manipulation constants\nPROFILE_IMG_SIZE = (171, 171)\nPROFILE_IMG_THUMB_SIZE = (40, 40)\nWHITE_BG = (255, 255, 255)\n\n\nclass ProfileManager(models.Manager):\n def summarize(self, profile):\n from api.models.civi import Civi\n\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me,\n \"history\": [\n Civi.objects.serialize(c)\n for c in Civi.objects.filter(author_id=profile.id).order_by(\"-created\")\n ],\n \"profile_image\": profile.profile_image_url,\n \"followers\": self.followers(profile),\n \"following\": self.following(profile),\n }\n return data\n\n def chip_summarize(self, profile):\n data = {\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"profile_image\": profile.profile_image_url,\n }\n return data\n\n def card_summarize(self, profile, request_profile):\n # Length at which to truncate 'about me' text\n about_me_truncate_length = 150\n\n # If 'about me' text is longer than 150 characters... 
add elipsis (truncate)\n ellipsis_if_too_long = (\n \"\" if len(profile.about_me) <= about_me_truncate_length else \"...\"\n )\n\n data = {\n \"id\": profile.user.id,\n \"username\": profile.user.username,\n \"first_name\": profile.first_name,\n \"last_name\": profile.last_name,\n \"about_me\": profile.about_me[:about_me_truncate_length] + ellipsis_if_too_long,\n \"profile_image\": profile.profile_image_url,\n \"follow_state\": True\n if profile in request_profile.following.all()\n else False,\n \"request_profile\": request_profile.first_name,\n }\n return data\n\n def followers(self, profile):\n return [self.chip_summarize(follower) for follower in profile.followers.all()]\n\n def following(self, profile):\n return [self.chip_summarize(following) for following in profile.following.all()]\n\n\nprofile_upload_path = PathAndRename(\"\")\n\n\nclass Profile(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=63, blank=False)\n last_name = models.CharField(max_length=63, blank=False)\n about_me = models.CharField(max_length=511, blank=True)\n\n categories = models.ManyToManyField(\n Category, related_name=\"user_categories\", symmetrical=False\n )\n tags = TaggableManager()\n\n followers = models.ManyToManyField(\n \"self\", related_name=\"follower\", symmetrical=False\n )\n following = models.ManyToManyField(\n \"self\", related_name=\"followings\", symmetrical=False\n )\n\n is_verified = models.BooleanField(default=False)\n full_profile = models.BooleanField(default=False)\n\n objects = ProfileManager()\n profile_image = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n profile_image_thumb = models.ImageField(\n upload_to=profile_upload_path, blank=True, null=True\n )\n\n @property\n def full_name(self):\n \"\"\"Returns the person's full name.\"\"\"\n\n return f\"{self.first_name} {self.last_name}\"\n\n @property\n def profile_image_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image.name)\n )\n if file_exists:\n return self.profile_image.url\n\n return \"/static/img/no_image_md.png\"\n\n @property\n def profile_image_thumb_url(self):\n \"\"\"Return placeholder profile image if user didn't upload one\"\"\"\n\n if self.profile_image_thumb:\n file_exists = default_storage.exists(\n os.path.join(settings.MEDIA_ROOT, self.profile_image_thumb.name)\n )\n if file_exists:\n return self.profile_image_thumb.url\n\n return \"/static/img/no_image_md.png\"\n\n def __init__(self, *args, **kwargs):\n super(Profile, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n \"\"\" Image crop/resize and thumbnail creation \"\"\"\n\n # New Profile image --\n if self.profile_image:\n self.resize_profile_image()\n\n self.full_profile = self.is_full_profile()\n\n super(Profile, self).save(*args, **kwargs)\n\n def resize_profile_image(self):\n \"\"\"\n Resizes and crops the user uploaded image and creates a thumbnail version of it\n \"\"\"\n profile_image_field = self.profile_image\n image_file = io.StringIO(profile_image_field.read())\n profile_image = Image.open(image_file)\n profile_image.load()\n\n # Resize image\n profile_image = ImageOps.fit(\n profile_image, PROFILE_IMG_SIZE, Image.ANTIALIAS, centering=(0.5, 0.5)\n )\n\n # Convert to JPG image format with white background\n if profile_image.mode not in (\"L\", \"RGB\"):\n white_bg_img = 
Image.new(\"RGB\", PROFILE_IMG_SIZE, WHITE_BG)\n white_bg_img.paste(profile_image, mask=profile_image.split()[3])\n profile_image = white_bg_img\n\n # Save new cropped image\n tmp_image_file = io.StringIO()\n profile_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n # Make a Thumbnail Image for the new resized image\n thumb_image = profile_image.copy()\n thumb_image.thumbnail(PROFILE_IMG_THUMB_SIZE, resample=Image.ANTIALIAS)\n tmp_image_file = io.StringIO()\n thumb_image.save(tmp_image_file, \"JPEG\", quality=90)\n tmp_image_file.seek(0)\n self.profile_image_thumb = InMemoryUploadedFile(\n tmp_image_file,\n \"ImageField\",\n self.profile_image.name,\n \"image/jpeg\",\n tmp_image_file.len,\n None,\n )\n\n def is_full_profile(self):\n if self.first_name and self.last_name:\n return True\n else:\n return False\n", "path": "project/accounts/models.py"}]}
| 2,413 | 71 |
gh_patches_debug_6860 | rasdani/github-patches | git_diff | scrapy__scrapy-5858 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TLS logging broken with new cryptography
https://github.com/pyca/cryptography/pull/8391 dropped `SSL_get_server_tmp_key()` so we need to disable the code that uses it if it's not available.
--- END ISSUE ---
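Concretely, "disable the code that uses it" means feature-detecting the binding before calling it. Here is a minimal sketch of that guard, written independently of Scrapy's helper shown below (the same `hasattr` check is what the accepted patch adds):
```python
# Sketch: only use SSL_get_server_tmp_key() when the cffi binding exposes it.
# The symbol was removed from cryptography's bindings in 40.0.0, so on newer
# releases we skip temporary-key logging instead of crashing.
import OpenSSL._util as pyOpenSSLutil


def get_server_tmp_key(ssl_object):
    if not hasattr(pyOpenSSLutil.lib, "SSL_get_server_tmp_key"):
        return None  # new cryptography: gracefully skip TLS temp-key logging
    temp_key_p = pyOpenSSLutil.ffi.new("EVP_PKEY **")
    if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):
        return None
    return temp_key_p[0]
```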
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/ssl.py`
Content:
```
1 import OpenSSL._util as pyOpenSSLutil
2 import OpenSSL.SSL
3
4 from scrapy.utils.python import to_unicode
5
6
7 def ffi_buf_to_string(buf):
8 return to_unicode(pyOpenSSLutil.ffi.string(buf))
9
10
11 def x509name_to_string(x509name):
12 # from OpenSSL.crypto.X509Name.__repr__
13 result_buffer = pyOpenSSLutil.ffi.new("char[]", 512)
14 pyOpenSSLutil.lib.X509_NAME_oneline(
15 x509name._name, result_buffer, len(result_buffer)
16 )
17
18 return ffi_buf_to_string(result_buffer)
19
20
21 def get_temp_key_info(ssl_object):
22 # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()
23 temp_key_p = pyOpenSSLutil.ffi.new("EVP_PKEY **")
24 if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):
25 return None
26 temp_key = temp_key_p[0]
27 if temp_key == pyOpenSSLutil.ffi.NULL:
28 return None
29 temp_key = pyOpenSSLutil.ffi.gc(temp_key, pyOpenSSLutil.lib.EVP_PKEY_free)
30 key_info = []
31 key_type = pyOpenSSLutil.lib.EVP_PKEY_id(temp_key)
32 if key_type == pyOpenSSLutil.lib.EVP_PKEY_RSA:
33 key_info.append("RSA")
34 elif key_type == pyOpenSSLutil.lib.EVP_PKEY_DH:
35 key_info.append("DH")
36 elif key_type == pyOpenSSLutil.lib.EVP_PKEY_EC:
37 key_info.append("ECDH")
38 ec_key = pyOpenSSLutil.lib.EVP_PKEY_get1_EC_KEY(temp_key)
39 ec_key = pyOpenSSLutil.ffi.gc(ec_key, pyOpenSSLutil.lib.EC_KEY_free)
40 nid = pyOpenSSLutil.lib.EC_GROUP_get_curve_name(
41 pyOpenSSLutil.lib.EC_KEY_get0_group(ec_key)
42 )
43 cname = pyOpenSSLutil.lib.EC_curve_nid2nist(nid)
44 if cname == pyOpenSSLutil.ffi.NULL:
45 cname = pyOpenSSLutil.lib.OBJ_nid2sn(nid)
46 key_info.append(ffi_buf_to_string(cname))
47 else:
48 key_info.append(ffi_buf_to_string(pyOpenSSLutil.lib.OBJ_nid2sn(key_type)))
49 key_info.append(f"{pyOpenSSLutil.lib.EVP_PKEY_bits(temp_key)} bits")
50 return ", ".join(key_info)
51
52
53 def get_openssl_version():
54 system_openssl = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION).decode(
55 "ascii", errors="replace"
56 )
57 return f"{OpenSSL.version.__version__} ({system_openssl})"
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/ssl.py b/scrapy/utils/ssl.py
--- a/scrapy/utils/ssl.py
+++ b/scrapy/utils/ssl.py
@@ -20,6 +20,9 @@
def get_temp_key_info(ssl_object):
# adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()
+ if not hasattr(pyOpenSSLutil.lib, "SSL_get_server_tmp_key"):
+ # removed in cryptography 40.0.0
+ return None
temp_key_p = pyOpenSSLutil.ffi.new("EVP_PKEY **")
if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):
return None
|
{"golden_diff": "diff --git a/scrapy/utils/ssl.py b/scrapy/utils/ssl.py\n--- a/scrapy/utils/ssl.py\n+++ b/scrapy/utils/ssl.py\n@@ -20,6 +20,9 @@\n \n def get_temp_key_info(ssl_object):\n # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()\n+ if not hasattr(pyOpenSSLutil.lib, \"SSL_get_server_tmp_key\"):\n+ # removed in cryptography 40.0.0\n+ return None\n temp_key_p = pyOpenSSLutil.ffi.new(\"EVP_PKEY **\")\n if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):\n return None\n", "issue": "TLS logging broken with new cryptography\nhttps://github.com/pyca/cryptography/pull/8391 dropped `SSL_get_server_tmp_key()` so we need to disable the code that uses it if it's not available.\n", "before_files": [{"content": "import OpenSSL._util as pyOpenSSLutil\nimport OpenSSL.SSL\n\nfrom scrapy.utils.python import to_unicode\n\n\ndef ffi_buf_to_string(buf):\n return to_unicode(pyOpenSSLutil.ffi.string(buf))\n\n\ndef x509name_to_string(x509name):\n # from OpenSSL.crypto.X509Name.__repr__\n result_buffer = pyOpenSSLutil.ffi.new(\"char[]\", 512)\n pyOpenSSLutil.lib.X509_NAME_oneline(\n x509name._name, result_buffer, len(result_buffer)\n )\n\n return ffi_buf_to_string(result_buffer)\n\n\ndef get_temp_key_info(ssl_object):\n # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()\n temp_key_p = pyOpenSSLutil.ffi.new(\"EVP_PKEY **\")\n if not pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):\n return None\n temp_key = temp_key_p[0]\n if temp_key == pyOpenSSLutil.ffi.NULL:\n return None\n temp_key = pyOpenSSLutil.ffi.gc(temp_key, pyOpenSSLutil.lib.EVP_PKEY_free)\n key_info = []\n key_type = pyOpenSSLutil.lib.EVP_PKEY_id(temp_key)\n if key_type == pyOpenSSLutil.lib.EVP_PKEY_RSA:\n key_info.append(\"RSA\")\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_DH:\n key_info.append(\"DH\")\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_EC:\n key_info.append(\"ECDH\")\n ec_key = pyOpenSSLutil.lib.EVP_PKEY_get1_EC_KEY(temp_key)\n ec_key = pyOpenSSLutil.ffi.gc(ec_key, pyOpenSSLutil.lib.EC_KEY_free)\n nid = pyOpenSSLutil.lib.EC_GROUP_get_curve_name(\n pyOpenSSLutil.lib.EC_KEY_get0_group(ec_key)\n )\n cname = pyOpenSSLutil.lib.EC_curve_nid2nist(nid)\n if cname == pyOpenSSLutil.ffi.NULL:\n cname = pyOpenSSLutil.lib.OBJ_nid2sn(nid)\n key_info.append(ffi_buf_to_string(cname))\n else:\n key_info.append(ffi_buf_to_string(pyOpenSSLutil.lib.OBJ_nid2sn(key_type)))\n key_info.append(f\"{pyOpenSSLutil.lib.EVP_PKEY_bits(temp_key)} bits\")\n return \", \".join(key_info)\n\n\ndef get_openssl_version():\n system_openssl = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION).decode(\n \"ascii\", errors=\"replace\"\n )\n return f\"{OpenSSL.version.__version__} ({system_openssl})\"\n", "path": "scrapy/utils/ssl.py"}], "after_files": [{"content": "import OpenSSL._util as pyOpenSSLutil\nimport OpenSSL.SSL\n\nfrom scrapy.utils.python import to_unicode\n\n\ndef ffi_buf_to_string(buf):\n return to_unicode(pyOpenSSLutil.ffi.string(buf))\n\n\ndef x509name_to_string(x509name):\n # from OpenSSL.crypto.X509Name.__repr__\n result_buffer = pyOpenSSLutil.ffi.new(\"char[]\", 512)\n pyOpenSSLutil.lib.X509_NAME_oneline(\n x509name._name, result_buffer, len(result_buffer)\n )\n\n return ffi_buf_to_string(result_buffer)\n\n\ndef get_temp_key_info(ssl_object):\n # adapted from OpenSSL apps/s_cb.c::ssl_print_tmp_key()\n if not hasattr(pyOpenSSLutil.lib, \"SSL_get_server_tmp_key\"):\n # removed in cryptography 40.0.0\n return None\n temp_key_p = pyOpenSSLutil.ffi.new(\"EVP_PKEY **\")\n if not 
pyOpenSSLutil.lib.SSL_get_server_tmp_key(ssl_object, temp_key_p):\n return None\n temp_key = temp_key_p[0]\n if temp_key == pyOpenSSLutil.ffi.NULL:\n return None\n temp_key = pyOpenSSLutil.ffi.gc(temp_key, pyOpenSSLutil.lib.EVP_PKEY_free)\n key_info = []\n key_type = pyOpenSSLutil.lib.EVP_PKEY_id(temp_key)\n if key_type == pyOpenSSLutil.lib.EVP_PKEY_RSA:\n key_info.append(\"RSA\")\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_DH:\n key_info.append(\"DH\")\n elif key_type == pyOpenSSLutil.lib.EVP_PKEY_EC:\n key_info.append(\"ECDH\")\n ec_key = pyOpenSSLutil.lib.EVP_PKEY_get1_EC_KEY(temp_key)\n ec_key = pyOpenSSLutil.ffi.gc(ec_key, pyOpenSSLutil.lib.EC_KEY_free)\n nid = pyOpenSSLutil.lib.EC_GROUP_get_curve_name(\n pyOpenSSLutil.lib.EC_KEY_get0_group(ec_key)\n )\n cname = pyOpenSSLutil.lib.EC_curve_nid2nist(nid)\n if cname == pyOpenSSLutil.ffi.NULL:\n cname = pyOpenSSLutil.lib.OBJ_nid2sn(nid)\n key_info.append(ffi_buf_to_string(cname))\n else:\n key_info.append(ffi_buf_to_string(pyOpenSSLutil.lib.OBJ_nid2sn(key_type)))\n key_info.append(f\"{pyOpenSSLutil.lib.EVP_PKEY_bits(temp_key)} bits\")\n return \", \".join(key_info)\n\n\ndef get_openssl_version():\n system_openssl = OpenSSL.SSL.SSLeay_version(OpenSSL.SSL.SSLEAY_VERSION).decode(\n \"ascii\", errors=\"replace\"\n )\n return f\"{OpenSSL.version.__version__} ({system_openssl})\"\n", "path": "scrapy/utils/ssl.py"}]}
| 1,019 | 155 |
gh_patches_debug_50223 | rasdani/github-patches | git_diff | pex-tool__pex-916 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.6
On the docket:
+ [x] Don't delete the root `__init__.py` when devendoring. #915
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.5'
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.5'
+__version__ = '2.1.6'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.5'\n+__version__ = '2.1.6'\n", "issue": "Release 2.1.6\nOn the docket:\r\n+ [x] Don't delete the root `__init__.py` when devendoring. #915\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.5'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.6'\n", "path": "pex/version.py"}]}
| 345 | 94 |
gh_patches_debug_14868 | rasdani/github-patches | git_diff | ray-project__ray-6571 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tune] Ray Tune fails to parse typing hints of the function for experiment
### What is the problem?
If the function for the experiment has a [typing hint](https://docs.python.org/3/library/typing.html) on its argument `config`, then Ray Tune fails to inspect the function's signature and assumes that a reporter argument is expected. The cause of this problem is in this source code:
https://github.com/ray-project/ray/blob/1eaa57c98f8870a43e1ea14ec011b6bd4be97c8d/python/ray/tune/function_runner.py#L250-L257
Changing
`func_args = inspect.getargspec(train_func).args`
to
`func_args = inspect.getfullargspec(train_func).args`
might solve the problem.
*Ray version and other system information (Python version, TensorFlow version, OS):*
Ray: 0.8.0
Python: 3.7.5
OS: Ubuntu 18.04
*Does the problem occur on the [latest wheels](https://ray.readthedocs.io/en/latest/installation.html)?*
I couldn't install the latest wheel, so I can't confirm it.
### Reproduction
The following is a modification of the first example in the [Ray Tune Documentation](https://ray.readthedocs.io/en/latest/tune.html#quick-start), where I added a typing hint, `config: Dict[str, Any]`, to the argument of the function `train_mnist`.
```
from typing import Dict, Any
import torch.optim as optim
from ray import tune
from ray.tune.examples.mnist_pytorch import get_data_loaders, ConvNet, train, test
def train_mnist(config: Dict[str, Any]):
train_loader, test_loader = get_data_loaders()
model = ConvNet()
optimizer = optim.SGD(model.parameters(), lr=config["lr"])
for i in range(10):
train(model, optimizer, train_loader)
acc = test(model, test_loader)
tune.track.log(mean_accuracy=acc)
analysis = tune.run(train_mnist, config={"lr": tune.grid_search([0.001, 0.01, 0.1])})
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))
# Get a dataframe for analyzing trial results.
df = analysis.dataframe()
```
When running the code you get this error message:
**TypeError: train_mnist() takes 1 positional argument but 2 were given**
--- END ISSUE ---
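The behaviour is easy to confirm outside Tune: `inspect.getargspec` raises `ValueError` for any function whose parameters carry annotations (and it was removed entirely in Python 3.11), while `inspect.getfullargspec` handles them. A small standalone check, independent of Ray:
```python
import inspect
from typing import Any, Dict


def with_hint(config: Dict[str, Any]):
    return config


def without_hint(config):
    return config


# getargspec() rejects annotated signatures, so Tune's `except Exception`
# branch fires and it falls back to assuming a (config, reporter) signature.
try:
    inspect.getargspec(with_hint)
except (ValueError, AttributeError) as exc:  # AttributeError on Python >= 3.11
    print("getargspec failed:", exc)

# getfullargspec() copes with annotations, so the one-argument form is detected.
print(inspect.getfullargspec(with_hint).args)     # ['config']
print(inspect.getfullargspec(without_hint).args)  # ['config']
```
Because the `ValueError` is swallowed by the `except Exception` block, `use_track` stays `False` and the wrapped function is later called as `train_func(config, reporter)`, which produces exactly the `TypeError` shown above.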
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/tune/function_runner.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import logging
6 import time
7 import inspect
8 import threading
9 import traceback
10 from six.moves import queue
11
12 from ray.tune import track
13 from ray.tune import TuneError
14 from ray.tune.trainable import Trainable
15 from ray.tune.result import TIME_THIS_ITER_S, RESULT_DUPLICATE
16
17 logger = logging.getLogger(__name__)
18
19 # Time between FunctionRunner checks when fetching
20 # new results after signaling the reporter to continue
21 RESULT_FETCH_TIMEOUT = 0.2
22
23 ERROR_REPORT_TIMEOUT = 10
24 ERROR_FETCH_TIMEOUT = 1
25
26
27 class StatusReporter(object):
28 """Object passed into your function that you can report status through.
29
30 Example:
31 >>> def trainable_function(config, reporter):
32 >>> assert isinstance(reporter, StatusReporter)
33 >>> reporter(timesteps_this_iter=1)
34 """
35
36 def __init__(self, result_queue, continue_semaphore, logdir=None):
37 self._queue = result_queue
38 self._last_report_time = None
39 self._continue_semaphore = continue_semaphore
40 self._logdir = logdir
41
42 def __call__(self, **kwargs):
43 """Report updated training status.
44
45 Pass in `done=True` when the training job is completed.
46
47 Args:
48 kwargs: Latest training result status.
49
50 Example:
51 >>> reporter(mean_accuracy=1, training_iteration=4)
52 >>> reporter(mean_accuracy=1, training_iteration=4, done=True)
53
54 Raises:
55 StopIteration: A StopIteration exception is raised if the trial has
56 been signaled to stop.
57 """
58
59 assert self._last_report_time is not None, (
60 "StatusReporter._start() must be called before the first "
61 "report __call__ is made to ensure correct runtime metrics.")
62
63 # time per iteration is recorded directly in the reporter to ensure
64 # any delays in logging results aren't counted
65 report_time = time.time()
66 if TIME_THIS_ITER_S not in kwargs:
67 kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time
68 self._last_report_time = report_time
69
70 # add results to a thread-safe queue
71 self._queue.put(kwargs.copy(), block=True)
72
73 # This blocks until notification from the FunctionRunner that the last
74 # result has been returned to Tune and that the function is safe to
75 # resume training.
76 self._continue_semaphore.acquire()
77
78 def _start(self):
79 self._last_report_time = time.time()
80
81 @property
82 def logdir(self):
83 return self._logdir
84
85
86 class _RunnerThread(threading.Thread):
87 """Supervisor thread that runs your script."""
88
89 def __init__(self, entrypoint, error_queue):
90 threading.Thread.__init__(self)
91 self._entrypoint = entrypoint
92 self._error_queue = error_queue
93 self.daemon = True
94
95 def run(self):
96 try:
97 self._entrypoint()
98 except StopIteration:
99 logger.debug(
100 ("Thread runner raised StopIteration. Interperting it as a "
101 "signal to terminate the thread without error."))
102 except Exception as e:
103 logger.exception("Runner Thread raised error.")
104 try:
105 # report the error but avoid indefinite blocking which would
106 # prevent the exception from being propagated in the unlikely
107 # case that something went terribly wrong
108 err_tb_str = traceback.format_exc()
109 self._error_queue.put(
110 err_tb_str, block=True, timeout=ERROR_REPORT_TIMEOUT)
111 except queue.Full:
112 logger.critical(
113 ("Runner Thread was unable to report error to main "
114 "function runner thread. This means a previous error "
115 "was not processed. This should never happen."))
116 raise e
117
118
119 class FunctionRunner(Trainable):
120 """Trainable that runs a user function reporting results.
121
122 This mode of execution does not support checkpoint/restore."""
123
124 _name = "func"
125
126 def _setup(self, config):
127 # Semaphore for notifying the reporter to continue with the computation
128 # and to generate the next result.
129 self._continue_semaphore = threading.Semaphore(0)
130
131 # Queue for passing results between threads
132 self._results_queue = queue.Queue(1)
133
134 # Queue for passing errors back from the thread runner. The error queue
135 # has a max size of one to prevent stacking error and force error
136 # reporting to block until finished.
137 self._error_queue = queue.Queue(1)
138
139 self._status_reporter = StatusReporter(
140 self._results_queue, self._continue_semaphore, self.logdir)
141 self._last_result = {}
142 config = config.copy()
143
144 def entrypoint():
145 return self._trainable_func(config, self._status_reporter)
146
147 # the runner thread is not started until the first call to _train
148 self._runner = _RunnerThread(entrypoint, self._error_queue)
149
150 def _trainable_func(self):
151 """Subclasses can override this to set the trainable func."""
152
153 raise NotImplementedError
154
155 def _train(self):
156 """Implements train() for a Function API.
157
158 If the RunnerThread finishes without reporting "done",
159 Tune will automatically provide a magic keyword __duplicate__
160 along with a result with "done=True". The TrialRunner will handle the
161 result accordingly (see tune/trial_runner.py).
162 """
163 if self._runner.is_alive():
164 # if started and alive, inform the reporter to continue and
165 # generate the next result
166 self._continue_semaphore.release()
167 else:
168 # if not alive, try to start
169 self._status_reporter._start()
170 try:
171 self._runner.start()
172 except RuntimeError:
173 # If this is reached, it means the thread was started and is
174 # now done or has raised an exception.
175 pass
176
177 result = None
178 while result is None and self._runner.is_alive():
179 # fetch the next produced result
180 try:
181 result = self._results_queue.get(
182 block=True, timeout=RESULT_FETCH_TIMEOUT)
183 except queue.Empty:
184 pass
185
186 # if no result were found, then the runner must no longer be alive
187 if result is None:
188 # Try one last time to fetch results in case results were reported
189 # in between the time of the last check and the termination of the
190 # thread runner.
191 try:
192 result = self._results_queue.get(block=False)
193 except queue.Empty:
194 pass
195
196 # check if error occured inside the thread runner
197 if result is None:
198 # only raise an error from the runner if all results are consumed
199 self._report_thread_runner_error(block=True)
200
201 # Under normal conditions, this code should never be reached since
202 # this branch should only be visited if the runner thread raised
203 # an exception. If no exception were raised, it means that the
204 # runner thread never reported any results which should not be
205 # possible when wrapping functions with `wrap_function`.
206 raise TuneError(
207 ("Wrapped function ran until completion without reporting "
208 "results or raising an exception."))
209
210 else:
211 if not self._error_queue.empty():
212 logger.warning(
213 ("Runner error waiting to be raised in main thread. "
214 "Logging all available results first."))
215
216 # This keyword appears if the train_func using the Function API
217 # finishes without "done=True". This duplicates the last result, but
218 # the TrialRunner will not log this result again.
219 if "__duplicate__" in result:
220 new_result = self._last_result.copy()
221 new_result.update(result)
222 result = new_result
223
224 self._last_result = result
225 return result
226
227 def _stop(self):
228 # If everything stayed in synch properly, this should never happen.
229 if not self._results_queue.empty():
230 logger.warning(
231 ("Some results were added after the trial stop condition. "
232 "These results won't be logged."))
233
234 # Check for any errors that might have been missed.
235 self._report_thread_runner_error()
236
237 def _report_thread_runner_error(self, block=False):
238 try:
239 err_tb_str = self._error_queue.get(
240 block=block, timeout=ERROR_FETCH_TIMEOUT)
241 raise TuneError(("Trial raised an exception. Traceback:\n{}"
242 .format(err_tb_str)))
243 except queue.Empty:
244 pass
245
246
247 def wrap_function(train_func):
248
249 use_track = False
250 try:
251 func_args = inspect.getargspec(train_func).args
252 use_track = ("reporter" not in func_args and len(func_args) == 1)
253 if use_track:
254 logger.info("tune.track signature detected.")
255 except Exception:
256 logger.info(
257 "Function inspection failed - assuming reporter signature.")
258
259 class WrappedFunc(FunctionRunner):
260 def _trainable_func(self, config, reporter):
261 output = train_func(config, reporter)
262 # If train_func returns, we need to notify the main event loop
263 # of the last result while avoiding double logging. This is done
264 # with the keyword RESULT_DUPLICATE -- see tune/trial_runner.py.
265 reporter(**{RESULT_DUPLICATE: True})
266 return output
267
268 class WrappedTrackFunc(FunctionRunner):
269 def _trainable_func(self, config, reporter):
270 track.init(_tune_reporter=reporter)
271 output = train_func(config)
272 reporter(**{RESULT_DUPLICATE: True})
273 track.shutdown()
274 return output
275
276 return WrappedTrackFunc if use_track else WrappedFunc
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/ray/tune/function_runner.py b/python/ray/tune/function_runner.py
--- a/python/ray/tune/function_runner.py
+++ b/python/ray/tune/function_runner.py
@@ -7,6 +7,7 @@
import inspect
import threading
import traceback
+import sys
from six.moves import queue
from ray.tune import track
@@ -248,7 +249,10 @@
use_track = False
try:
- func_args = inspect.getargspec(train_func).args
+ if sys.version_info >= (3, 3):
+ func_args = inspect.getfullargspec(train_func).args
+ else:
+ func_args = inspect.getargspec(train_func).args
use_track = ("reporter" not in func_args and len(func_args) == 1)
if use_track:
logger.info("tune.track signature detected.")
|
{"golden_diff": "diff --git a/python/ray/tune/function_runner.py b/python/ray/tune/function_runner.py\n--- a/python/ray/tune/function_runner.py\n+++ b/python/ray/tune/function_runner.py\n@@ -7,6 +7,7 @@\n import inspect\n import threading\n import traceback\n+import sys\n from six.moves import queue\n \n from ray.tune import track\n@@ -248,7 +249,10 @@\n \n use_track = False\n try:\n- func_args = inspect.getargspec(train_func).args\n+ if sys.version_info >= (3, 3):\n+ func_args = inspect.getfullargspec(train_func).args\n+ else:\n+ func_args = inspect.getargspec(train_func).args\n use_track = (\"reporter\" not in func_args and len(func_args) == 1)\n if use_track:\n logger.info(\"tune.track signature detected.\")\n", "issue": "[tune] Ray Tune fails to parse typing hints of the function for experiment\n### What is the problem?\r\nIf the function for experiment has a [typing hint](https://docs.python.org/3/library/typing.html) for its argument `config`, then Ray Tune fails to parse the argument and assumes that there is a reporter signature. The cause of this problem is in this source code:\r\nhttps://github.com/ray-project/ray/blob/1eaa57c98f8870a43e1ea14ec011b6bd4be97c8d/python/ray/tune/function_runner.py#L250-L257\r\nChanging \r\n`func_args = inspect.getargspec(train_func).args` \r\nto\r\n`func_args = inspect.getfullargspec(train_func).args` \r\nmight solve the problem.\r\n\r\n*Ray version and other system information (Python version, TensorFlow version, OS):*\r\nRay: 0.8.0\r\nPython: 3.7.5\r\nOS: Ubuntu 18.04\r\n\r\n*Does the problem occur on the [latest wheels](https://ray.readthedocs.io/en/latest/installation.html)?*\r\nI couldn't install the latest wheel. So I can't confirm it.\r\n\r\n### Reproduction\r\nThe following is a modification of the first examples in the [Ray Tune Documentation](https://ray.readthedocs.io/en/latest/tune.html#quick-start), where I added a typing hint `config: Dict[str, Any]` for the argument of function `train_mnist`.\r\n\r\n```\r\nfrom typing import Dict, Any\r\n\r\nimport torch.optim as optim\r\nfrom ray import tune\r\nfrom ray.tune.examples.mnist_pytorch import get_data_loaders, ConvNet, train, test\r\n\r\n\r\ndef train_mnist(config: Dict[str, Any]):\r\n train_loader, test_loader = get_data_loaders()\r\n model = ConvNet()\r\n optimizer = optim.SGD(model.parameters(), lr=config[\"lr\"])\r\n for i in range(10):\r\n train(model, optimizer, train_loader)\r\n acc = test(model, test_loader)\r\n tune.track.log(mean_accuracy=acc)\r\n\r\n\r\nanalysis = tune.run(train_mnist, config={\"lr\": tune.grid_search([0.001, 0.01, 0.1])})\r\n\r\nprint(\"Best config: \", analysis.get_best_config(metric=\"mean_accuracy\"))\r\n\r\n# Get a dataframe for analyzing trial results.\r\ndf = analysis.dataframe()\r\n```\r\nWhen running the code you get this error message:\r\n**TypeError: train_mnist() takes 1 positional argument but 2 were given**\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport time\nimport inspect\nimport threading\nimport traceback\nfrom six.moves import queue\n\nfrom ray.tune import track\nfrom ray.tune import TuneError\nfrom ray.tune.trainable import Trainable\nfrom ray.tune.result import TIME_THIS_ITER_S, RESULT_DUPLICATE\n\nlogger = logging.getLogger(__name__)\n\n# Time between FunctionRunner checks when fetching\n# new results after signaling the reporter to continue\nRESULT_FETCH_TIMEOUT = 0.2\n\nERROR_REPORT_TIMEOUT = 
10\nERROR_FETCH_TIMEOUT = 1\n\n\nclass StatusReporter(object):\n \"\"\"Object passed into your function that you can report status through.\n\n Example:\n >>> def trainable_function(config, reporter):\n >>> assert isinstance(reporter, StatusReporter)\n >>> reporter(timesteps_this_iter=1)\n \"\"\"\n\n def __init__(self, result_queue, continue_semaphore, logdir=None):\n self._queue = result_queue\n self._last_report_time = None\n self._continue_semaphore = continue_semaphore\n self._logdir = logdir\n\n def __call__(self, **kwargs):\n \"\"\"Report updated training status.\n\n Pass in `done=True` when the training job is completed.\n\n Args:\n kwargs: Latest training result status.\n\n Example:\n >>> reporter(mean_accuracy=1, training_iteration=4)\n >>> reporter(mean_accuracy=1, training_iteration=4, done=True)\n\n Raises:\n StopIteration: A StopIteration exception is raised if the trial has\n been signaled to stop.\n \"\"\"\n\n assert self._last_report_time is not None, (\n \"StatusReporter._start() must be called before the first \"\n \"report __call__ is made to ensure correct runtime metrics.\")\n\n # time per iteration is recorded directly in the reporter to ensure\n # any delays in logging results aren't counted\n report_time = time.time()\n if TIME_THIS_ITER_S not in kwargs:\n kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time\n self._last_report_time = report_time\n\n # add results to a thread-safe queue\n self._queue.put(kwargs.copy(), block=True)\n\n # This blocks until notification from the FunctionRunner that the last\n # result has been returned to Tune and that the function is safe to\n # resume training.\n self._continue_semaphore.acquire()\n\n def _start(self):\n self._last_report_time = time.time()\n\n @property\n def logdir(self):\n return self._logdir\n\n\nclass _RunnerThread(threading.Thread):\n \"\"\"Supervisor thread that runs your script.\"\"\"\n\n def __init__(self, entrypoint, error_queue):\n threading.Thread.__init__(self)\n self._entrypoint = entrypoint\n self._error_queue = error_queue\n self.daemon = True\n\n def run(self):\n try:\n self._entrypoint()\n except StopIteration:\n logger.debug(\n (\"Thread runner raised StopIteration. Interperting it as a \"\n \"signal to terminate the thread without error.\"))\n except Exception as e:\n logger.exception(\"Runner Thread raised error.\")\n try:\n # report the error but avoid indefinite blocking which would\n # prevent the exception from being propagated in the unlikely\n # case that something went terribly wrong\n err_tb_str = traceback.format_exc()\n self._error_queue.put(\n err_tb_str, block=True, timeout=ERROR_REPORT_TIMEOUT)\n except queue.Full:\n logger.critical(\n (\"Runner Thread was unable to report error to main \"\n \"function runner thread. This means a previous error \"\n \"was not processed. This should never happen.\"))\n raise e\n\n\nclass FunctionRunner(Trainable):\n \"\"\"Trainable that runs a user function reporting results.\n\n This mode of execution does not support checkpoint/restore.\"\"\"\n\n _name = \"func\"\n\n def _setup(self, config):\n # Semaphore for notifying the reporter to continue with the computation\n # and to generate the next result.\n self._continue_semaphore = threading.Semaphore(0)\n\n # Queue for passing results between threads\n self._results_queue = queue.Queue(1)\n\n # Queue for passing errors back from the thread runner. 
The error queue\n # has a max size of one to prevent stacking error and force error\n # reporting to block until finished.\n self._error_queue = queue.Queue(1)\n\n self._status_reporter = StatusReporter(\n self._results_queue, self._continue_semaphore, self.logdir)\n self._last_result = {}\n config = config.copy()\n\n def entrypoint():\n return self._trainable_func(config, self._status_reporter)\n\n # the runner thread is not started until the first call to _train\n self._runner = _RunnerThread(entrypoint, self._error_queue)\n\n def _trainable_func(self):\n \"\"\"Subclasses can override this to set the trainable func.\"\"\"\n\n raise NotImplementedError\n\n def _train(self):\n \"\"\"Implements train() for a Function API.\n\n If the RunnerThread finishes without reporting \"done\",\n Tune will automatically provide a magic keyword __duplicate__\n along with a result with \"done=True\". The TrialRunner will handle the\n result accordingly (see tune/trial_runner.py).\n \"\"\"\n if self._runner.is_alive():\n # if started and alive, inform the reporter to continue and\n # generate the next result\n self._continue_semaphore.release()\n else:\n # if not alive, try to start\n self._status_reporter._start()\n try:\n self._runner.start()\n except RuntimeError:\n # If this is reached, it means the thread was started and is\n # now done or has raised an exception.\n pass\n\n result = None\n while result is None and self._runner.is_alive():\n # fetch the next produced result\n try:\n result = self._results_queue.get(\n block=True, timeout=RESULT_FETCH_TIMEOUT)\n except queue.Empty:\n pass\n\n # if no result were found, then the runner must no longer be alive\n if result is None:\n # Try one last time to fetch results in case results were reported\n # in between the time of the last check and the termination of the\n # thread runner.\n try:\n result = self._results_queue.get(block=False)\n except queue.Empty:\n pass\n\n # check if error occured inside the thread runner\n if result is None:\n # only raise an error from the runner if all results are consumed\n self._report_thread_runner_error(block=True)\n\n # Under normal conditions, this code should never be reached since\n # this branch should only be visited if the runner thread raised\n # an exception. If no exception were raised, it means that the\n # runner thread never reported any results which should not be\n # possible when wrapping functions with `wrap_function`.\n raise TuneError(\n (\"Wrapped function ran until completion without reporting \"\n \"results or raising an exception.\"))\n\n else:\n if not self._error_queue.empty():\n logger.warning(\n (\"Runner error waiting to be raised in main thread. \"\n \"Logging all available results first.\"))\n\n # This keyword appears if the train_func using the Function API\n # finishes without \"done=True\". This duplicates the last result, but\n # the TrialRunner will not log this result again.\n if \"__duplicate__\" in result:\n new_result = self._last_result.copy()\n new_result.update(result)\n result = new_result\n\n self._last_result = result\n return result\n\n def _stop(self):\n # If everything stayed in synch properly, this should never happen.\n if not self._results_queue.empty():\n logger.warning(\n (\"Some results were added after the trial stop condition. 
\"\n \"These results won't be logged.\"))\n\n # Check for any errors that might have been missed.\n self._report_thread_runner_error()\n\n def _report_thread_runner_error(self, block=False):\n try:\n err_tb_str = self._error_queue.get(\n block=block, timeout=ERROR_FETCH_TIMEOUT)\n raise TuneError((\"Trial raised an exception. Traceback:\\n{}\"\n .format(err_tb_str)))\n except queue.Empty:\n pass\n\n\ndef wrap_function(train_func):\n\n use_track = False\n try:\n func_args = inspect.getargspec(train_func).args\n use_track = (\"reporter\" not in func_args and len(func_args) == 1)\n if use_track:\n logger.info(\"tune.track signature detected.\")\n except Exception:\n logger.info(\n \"Function inspection failed - assuming reporter signature.\")\n\n class WrappedFunc(FunctionRunner):\n def _trainable_func(self, config, reporter):\n output = train_func(config, reporter)\n # If train_func returns, we need to notify the main event loop\n # of the last result while avoiding double logging. This is done\n # with the keyword RESULT_DUPLICATE -- see tune/trial_runner.py.\n reporter(**{RESULT_DUPLICATE: True})\n return output\n\n class WrappedTrackFunc(FunctionRunner):\n def _trainable_func(self, config, reporter):\n track.init(_tune_reporter=reporter)\n output = train_func(config)\n reporter(**{RESULT_DUPLICATE: True})\n track.shutdown()\n return output\n\n return WrappedTrackFunc if use_track else WrappedFunc\n", "path": "python/ray/tune/function_runner.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport time\nimport inspect\nimport threading\nimport traceback\nimport sys\nfrom six.moves import queue\n\nfrom ray.tune import track\nfrom ray.tune import TuneError\nfrom ray.tune.trainable import Trainable\nfrom ray.tune.result import TIME_THIS_ITER_S, RESULT_DUPLICATE\n\nlogger = logging.getLogger(__name__)\n\n# Time between FunctionRunner checks when fetching\n# new results after signaling the reporter to continue\nRESULT_FETCH_TIMEOUT = 0.2\n\nERROR_REPORT_TIMEOUT = 10\nERROR_FETCH_TIMEOUT = 1\n\n\nclass StatusReporter(object):\n \"\"\"Object passed into your function that you can report status through.\n\n Example:\n >>> def trainable_function(config, reporter):\n >>> assert isinstance(reporter, StatusReporter)\n >>> reporter(timesteps_this_iter=1)\n \"\"\"\n\n def __init__(self, result_queue, continue_semaphore, logdir=None):\n self._queue = result_queue\n self._last_report_time = None\n self._continue_semaphore = continue_semaphore\n self._logdir = logdir\n\n def __call__(self, **kwargs):\n \"\"\"Report updated training status.\n\n Pass in `done=True` when the training job is completed.\n\n Args:\n kwargs: Latest training result status.\n\n Example:\n >>> reporter(mean_accuracy=1, training_iteration=4)\n >>> reporter(mean_accuracy=1, training_iteration=4, done=True)\n\n Raises:\n StopIteration: A StopIteration exception is raised if the trial has\n been signaled to stop.\n \"\"\"\n\n assert self._last_report_time is not None, (\n \"StatusReporter._start() must be called before the first \"\n \"report __call__ is made to ensure correct runtime metrics.\")\n\n # time per iteration is recorded directly in the reporter to ensure\n # any delays in logging results aren't counted\n report_time = time.time()\n if TIME_THIS_ITER_S not in kwargs:\n kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time\n self._last_report_time = report_time\n\n # add results to a thread-safe 
queue\n self._queue.put(kwargs.copy(), block=True)\n\n # This blocks until notification from the FunctionRunner that the last\n # result has been returned to Tune and that the function is safe to\n # resume training.\n self._continue_semaphore.acquire()\n\n def _start(self):\n self._last_report_time = time.time()\n\n @property\n def logdir(self):\n return self._logdir\n\n\nclass _RunnerThread(threading.Thread):\n \"\"\"Supervisor thread that runs your script.\"\"\"\n\n def __init__(self, entrypoint, error_queue):\n threading.Thread.__init__(self)\n self._entrypoint = entrypoint\n self._error_queue = error_queue\n self.daemon = True\n\n def run(self):\n try:\n self._entrypoint()\n except StopIteration:\n logger.debug(\n (\"Thread runner raised StopIteration. Interperting it as a \"\n \"signal to terminate the thread without error.\"))\n except Exception as e:\n logger.exception(\"Runner Thread raised error.\")\n try:\n # report the error but avoid indefinite blocking which would\n # prevent the exception from being propagated in the unlikely\n # case that something went terribly wrong\n err_tb_str = traceback.format_exc()\n self._error_queue.put(\n err_tb_str, block=True, timeout=ERROR_REPORT_TIMEOUT)\n except queue.Full:\n logger.critical(\n (\"Runner Thread was unable to report error to main \"\n \"function runner thread. This means a previous error \"\n \"was not processed. This should never happen.\"))\n raise e\n\n\nclass FunctionRunner(Trainable):\n \"\"\"Trainable that runs a user function reporting results.\n\n This mode of execution does not support checkpoint/restore.\"\"\"\n\n _name = \"func\"\n\n def _setup(self, config):\n # Semaphore for notifying the reporter to continue with the computation\n # and to generate the next result.\n self._continue_semaphore = threading.Semaphore(0)\n\n # Queue for passing results between threads\n self._results_queue = queue.Queue(1)\n\n # Queue for passing errors back from the thread runner. The error queue\n # has a max size of one to prevent stacking error and force error\n # reporting to block until finished.\n self._error_queue = queue.Queue(1)\n\n self._status_reporter = StatusReporter(\n self._results_queue, self._continue_semaphore, self.logdir)\n self._last_result = {}\n config = config.copy()\n\n def entrypoint():\n return self._trainable_func(config, self._status_reporter)\n\n # the runner thread is not started until the first call to _train\n self._runner = _RunnerThread(entrypoint, self._error_queue)\n\n def _trainable_func(self):\n \"\"\"Subclasses can override this to set the trainable func.\"\"\"\n\n raise NotImplementedError\n\n def _train(self):\n \"\"\"Implements train() for a Function API.\n\n If the RunnerThread finishes without reporting \"done\",\n Tune will automatically provide a magic keyword __duplicate__\n along with a result with \"done=True\". 
The TrialRunner will handle the\n result accordingly (see tune/trial_runner.py).\n \"\"\"\n if self._runner.is_alive():\n # if started and alive, inform the reporter to continue and\n # generate the next result\n self._continue_semaphore.release()\n else:\n # if not alive, try to start\n self._status_reporter._start()\n try:\n self._runner.start()\n except RuntimeError:\n # If this is reached, it means the thread was started and is\n # now done or has raised an exception.\n pass\n\n result = None\n while result is None and self._runner.is_alive():\n # fetch the next produced result\n try:\n result = self._results_queue.get(\n block=True, timeout=RESULT_FETCH_TIMEOUT)\n except queue.Empty:\n pass\n\n # if no result were found, then the runner must no longer be alive\n if result is None:\n # Try one last time to fetch results in case results were reported\n # in between the time of the last check and the termination of the\n # thread runner.\n try:\n result = self._results_queue.get(block=False)\n except queue.Empty:\n pass\n\n # check if error occured inside the thread runner\n if result is None:\n # only raise an error from the runner if all results are consumed\n self._report_thread_runner_error(block=True)\n\n # Under normal conditions, this code should never be reached since\n # this branch should only be visited if the runner thread raised\n # an exception. If no exception were raised, it means that the\n # runner thread never reported any results which should not be\n # possible when wrapping functions with `wrap_function`.\n raise TuneError(\n (\"Wrapped function ran until completion without reporting \"\n \"results or raising an exception.\"))\n\n else:\n if not self._error_queue.empty():\n logger.warning(\n (\"Runner error waiting to be raised in main thread. \"\n \"Logging all available results first.\"))\n\n # This keyword appears if the train_func using the Function API\n # finishes without \"done=True\". This duplicates the last result, but\n # the TrialRunner will not log this result again.\n if \"__duplicate__\" in result:\n new_result = self._last_result.copy()\n new_result.update(result)\n result = new_result\n\n self._last_result = result\n return result\n\n def _stop(self):\n # If everything stayed in synch properly, this should never happen.\n if not self._results_queue.empty():\n logger.warning(\n (\"Some results were added after the trial stop condition. \"\n \"These results won't be logged.\"))\n\n # Check for any errors that might have been missed.\n self._report_thread_runner_error()\n\n def _report_thread_runner_error(self, block=False):\n try:\n err_tb_str = self._error_queue.get(\n block=block, timeout=ERROR_FETCH_TIMEOUT)\n raise TuneError((\"Trial raised an exception. Traceback:\\n{}\"\n .format(err_tb_str)))\n except queue.Empty:\n pass\n\n\ndef wrap_function(train_func):\n\n use_track = False\n try:\n if sys.version_info >= (3, 3):\n func_args = inspect.getfullargspec(train_func).args\n else:\n func_args = inspect.getargspec(train_func).args\n use_track = (\"reporter\" not in func_args and len(func_args) == 1)\n if use_track:\n logger.info(\"tune.track signature detected.\")\n except Exception:\n logger.info(\n \"Function inspection failed - assuming reporter signature.\")\n\n class WrappedFunc(FunctionRunner):\n def _trainable_func(self, config, reporter):\n output = train_func(config, reporter)\n # If train_func returns, we need to notify the main event loop\n # of the last result while avoiding double logging. 
This is done\n # with the keyword RESULT_DUPLICATE -- see tune/trial_runner.py.\n reporter(**{RESULT_DUPLICATE: True})\n return output\n\n class WrappedTrackFunc(FunctionRunner):\n def _trainable_func(self, config, reporter):\n track.init(_tune_reporter=reporter)\n output = train_func(config)\n reporter(**{RESULT_DUPLICATE: True})\n track.shutdown()\n return output\n\n return WrappedTrackFunc if use_track else WrappedFunc\n", "path": "python/ray/tune/function_runner.py"}]}
| 3,591 | 201 |
gh_patches_debug_236
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-2042
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken build due to failed `linkcheck` job
I've noticed that the Matrix badges are frequently inaccessible; see the README:
<img width="893" alt="image" src="https://github.com/jazzband/pip-tools/assets/7377671/94c2d45a-12ef-4237-8a85-434ee1bd7c05">
Sometimes, a certain issue even results in CI builds [breaking](https://github.com/jazzband/pip-tools/actions/runs/5920050370/job/16051009863#step:10:446) (caught in #1973):
```
broken https://img.shields.io/matrix/pip-tools:matrix.org?label=Discuss%20on%20Matrix%20at%20%23pip-tools%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat - 408 Client Error: Request Timeout for url: https://img.shields.io/matrix/pip-tools:matrix.org?label=Discuss%20on%20Matrix%20at%20%23pip-tools%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat
```
Perhaps we should consider [ignoring](https://github.com/jazzband/pip-tools/blob/04d2235716bc43cad3c10288081a4d2b7ee56944/docs/conf.py#L55-L57) `https://img.shields.io/matrix` as well?
/cc @webknjaz
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # https://www.sphinx-doc.org/en/master/usage/configuration.html
2 """Configuration file for the Sphinx documentation builder."""
3
4 from __future__ import annotations
5
6 from importlib.metadata import version as get_version
7 from pathlib import Path
8
9 from sphinx.util import logging
10 from sphinx.util.console import bold
11
12 logger = logging.getLogger(__name__)
13
14 # -- Path setup --------------------------------------------------------------
15
16 PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
17
18
19 # -- Project information -----------------------------------------------------
20
21 project = "pip-tools"
22 author = f"{project} Contributors"
23 copyright = f"The {author}"
24
25 # The full version, including alpha/beta/rc tags
26 release = get_version(project)
27
28 # The short X.Y version
29 version = ".".join(release.split(".")[:3])
30
31 logger.info(bold("%s version: %s"), project, version)
32 logger.info(bold("%s release: %s"), project, release)
33
34 # -- General configuration ---------------------------------------------------
35
36 # Add any Sphinx extension module names here, as strings. They can be
37 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
38 # ones.
39 extensions = ["myst_parser", "sphinxcontrib.programoutput"]
40
41
42 # -- Options for HTML output -------------------------------------------------
43
44 # The theme to use for HTML and HTML Help pages. See the documentation for
45 # a list of builtin themes.
46 #
47 html_theme = "furo"
48 html_title = f"<nobr>{project}</nobr> documentation v{release}"
49
50
51 # -------------------------------------------------------------------------
52 default_role = "any"
53 nitpicky = True
54
55 linkcheck_ignore = [
56 r"^https://matrix\.to/#",
57 ]
58
59 suppress_warnings = ["myst.xref_missing"]
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -54,6 +54,7 @@
linkcheck_ignore = [
r"^https://matrix\.to/#",
+ r"^https://img.shields.io/matrix",
]
suppress_warnings = ["myst.xref_missing"]
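As a quick sanity check of the pattern added above, the snippet below matches it against the exact badge URL from the CI failure. It is a standalone sketch using Python's `re` module and assumes Sphinx applies `linkcheck_ignore` entries as start-anchored regular expressions; it is not part of the pip-tools change itself.

```python
# Hedged sketch: confirm the new ignore pattern covers the timing-out badge URL.
import re

badge_url = (
    "https://img.shields.io/matrix/pip-tools:matrix.org"
    "?label=Discuss%20on%20Matrix%20at%20%23pip-tools%3Amatrix.org"
    "&logo=matrix&server_fqdn=matrix.org&style=flat"
)

linkcheck_ignore = [
    r"^https://matrix\.to/#",
    r"^https://img.shields.io/matrix",  # the pattern added by the diff above
]

# re.match anchors at the start of the string, mirroring how the ignore list is
# assumed to be applied; the badge URL is skipped, ordinary links are not.
assert any(re.match(pattern, badge_url) for pattern in linkcheck_ignore)
assert not any(re.match(pattern, "https://pip.pypa.io/") for pattern in linkcheck_ignore)
```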
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -54,6 +54,7 @@\n \n linkcheck_ignore = [\n r\"^https://matrix\\.to/#\",\n+ r\"^https://img.shields.io/matrix\",\n ]\n \n suppress_warnings = [\"myst.xref_missing\"]\n", "issue": "Broken build due to failed `linkcheck` job\nI've noticed that matrix badges are frequently inaccessible, see README:\r\n<img width=\"893\" alt=\"image\" src=\"https://github.com/jazzband/pip-tools/assets/7377671/94c2d45a-12ef-4237-8a85-434ee1bd7c05\">\r\n\r\nSometimes, a certain issue even results in CI builds [breaking](https://github.com/jazzband/pip-tools/actions/runs/5920050370/job/16051009863#step:10:446) (caught in #1973):\r\n\r\n```\r\nbroken https://img.shields.io/matrix/pip-tools:matrix.org?label=Discuss%20on%20Matrix%20at%20%23pip-tools%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat - 408 Client Error: Request Timeout for url: https://img.shields.io/matrix/pip-tools:matrix.org?label=Discuss%20on%20Matrix%20at%20%23pip-tools%3Amatrix.org&logo=matrix&server_fqdn=matrix.org&style=flat\r\n```\r\n\r\nPerhaps we should consider [ignoring](https://github.com/jazzband/pip-tools/blob/04d2235716bc43cad3c10288081a4d2b7ee56944/docs/conf.py#L55-L57) `https://img.shields.io/matrix` as well?\r\n\r\n/cc @webknjaz \r\n\n", "before_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom importlib.metadata import version as get_version\nfrom pathlib import Path\n\nfrom sphinx.util import logging\nfrom sphinx.util.console import bold\n\nlogger = logging.getLogger(__name__)\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The full version, including alpha/beta/rc tags\nrelease = get_version(project)\n\n# The short X.Y version\nversion = \".\".join(release.split(\".\")[:3])\n\nlogger.info(bold(\"%s version: %s\"), project, version)\nlogger.info(bold(\"%s release: %s\"), project, release)\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\", \"sphinxcontrib.programoutput\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_title = f\"<nobr>{project}</nobr> documentation v{release}\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n\nlinkcheck_ignore = [\n r\"^https://matrix\\.to/#\",\n]\n\nsuppress_warnings = [\"myst.xref_missing\"]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom importlib.metadata import version as get_version\nfrom pathlib import Path\n\nfrom sphinx.util import logging\nfrom sphinx.util.console import bold\n\nlogger = logging.getLogger(__name__)\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The full version, including alpha/beta/rc tags\nrelease = get_version(project)\n\n# The short X.Y version\nversion = \".\".join(release.split(\".\")[:3])\n\nlogger.info(bold(\"%s version: %s\"), project, version)\nlogger.info(bold(\"%s release: %s\"), project, release)\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\", \"sphinxcontrib.programoutput\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_title = f\"<nobr>{project}</nobr> documentation v{release}\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n\nlinkcheck_ignore = [\n r\"^https://matrix\\.to/#\",\n r\"^https://img.shields.io/matrix\",\n]\n\nsuppress_warnings = [\"myst.xref_missing\"]\n", "path": "docs/conf.py"}]}
| 1,114 | 77 |
gh_patches_debug_34818
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-728
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use the `SOFTMAX_LOG` API of cudnn v3 in `softmax_cross_entropy`
`SOFTMAX_LOG` is supported in cudnn v3. It helps `softmax_cross_entropy` in #712.
We need to check the version of cudnn. CuPy doesn't support it yet.
--- END ISSUE ---
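A minimal sketch of the version gate the issue asks for is below. It reuses the attribute names that appear in this entry's golden diff (`cuda.cudnn_enabled`, `cuda.cudnn.cudnn`, `libcudnn.getVersion()`); those names are tied to the Chainer/CuPy releases of that era and should be read as assumptions rather than a current API reference.

```python
# Hedged sketch: only take the CUDNN_SOFTMAX_LOG path when cuDNN v3+ is available.
from chainer import cuda

_CUDNN_V3 = 3000  # the golden diff below compares libcudnn.getVersion() against 3000


def cudnn_log_softmax_available(use_cudnn=True):
    """Return True only when the cuDNN log-softmax code path can be used."""
    if not (use_cudnn and cuda.cudnn_enabled):
        return False
    libcudnn = cuda.cudnn.cudnn  # assumed wrapper layout, mirroring the diff
    return libcudnn.getVersion() >= _CUDNN_V3
```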
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/loss/softmax_cross_entropy.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import function
6 from chainer.utils import type_check
7
8
9 def logsumexp(x):
10 xp = cuda.get_array_module(x)
11 m = x.max(axis=1, keepdims=True)
12 y = x - m
13 xp.exp(y, out=y)
14 return xp.log(y.sum(axis=1, keepdims=True)) + m
15
16
17 def softmax_log(x):
18 # TODO(unno): Use cudnn (cudnn v2 doesn't support CUDNN_SOFTMAX_LOG)
19 log_z = logsumexp(x)
20 return x - log_z
21
22
23 class SoftmaxCrossEntropy(function.Function):
24
25 """Softmax activation followed by a cross entropy loss."""
26
27 ignore_label = -1
28
29 def __init__(self, use_cudnn=True, normalize=True):
30 self.use_cudnn = use_cudnn
31 self.normalize = normalize
32
33 def check_type_forward(self, in_types):
34 type_check.expect(in_types.size() == 2)
35 x_type, t_type = in_types
36
37 type_check.expect(
38 x_type.dtype == numpy.float32,
39 t_type.dtype == numpy.int32,
40 t_type.ndim == x_type.ndim - 1,
41
42 x_type.shape[0] == t_type.shape[0],
43 x_type.shape[2:] == t_type.shape[1:],
44 )
45
46 def forward_cpu(self, inputs):
47 x, t = inputs
48 log_y = softmax_log(x)
49 self.y = numpy.exp(log_y)
50 log_yd = numpy.rollaxis(log_y, 1)
51 log_yd = log_yd.reshape(len(log_yd), -1)
52
53 log_p = log_yd[numpy.maximum(t.flat, 0), six.moves.range(t.size)]
54 # deal with the case where the SoftmaxCrossEntropy is
55 # unpickled from the old version
56 if getattr(self, 'normalize', True):
57 count = (t != self.ignore_label).sum()
58 else:
59 count = x.shape[0]
60 self.count = count
61
62 if count == 0:
63 return numpy.zeros((), dtype=x.dtype),
64
65 y = (log_p * (t.flat != self.ignore_label)).sum(keepdims=True) \
66 * (-1.0 / count)
67 return y.reshape(()),
68
69 def forward_gpu(self, inputs):
70 cupy = cuda.cupy
71 x, t = inputs
72 log_y = softmax_log(x)
73 self.y = cupy.exp(log_y)
74 if getattr(self, 'normalize', True):
75 count = float((t != self.ignore_label).sum())
76 else:
77 count = t.shape[0]
78 self.count = count
79
80 if count == 0:
81 return cupy.zeros((), dtype=x.dtype),
82
83 log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
84 ret = cuda.reduce(
85 'S t, raw T log_y, int32 n_channel, T inv_count', 'T out',
86 't == -1 ? 0 : log_y[_j * n_channel + t]',
87 'a + b', 'out = a * inv_count', '0', 'crossent_fwd'
88 )(t, log_y.reduced_view(), log_y.shape[-1], -1.0 / count)
89 return ret,
90
91 def backward_cpu(self, inputs, grad_outputs):
92 x, t = inputs
93 if self.count == 0:
94 return numpy.zeros_like(x), None
95
96 gloss = grad_outputs[0]
97 n_unit = t.size // t.shape[0]
98 if self.y.ndim == 2:
99 gx = self.y.copy()
100 gx[six.moves.xrange(len(t)), numpy.maximum(t, 0)] -= 1
101 gx *= (t != self.ignore_label).reshape((len(t), 1))
102 else:
103 # in the case where y.ndim is higher than 2,
104 # we think that a current implementation is inefficient
105 # because it yields two provisional arrays for indexing.
106 gx = self.y.copy().reshape(self.y.shape[0], self.y.shape[1], -1)
107 fst_index = numpy.arange(t.size) // n_unit
108 trd_index = numpy.arange(t.size) % n_unit
109 gx[fst_index, numpy.maximum(t.flat, 0), trd_index] -= 1
110 gx *= (t != self.ignore_label).reshape((len(t), 1, -1))
111 gx = gx.reshape(self.y.shape)
112
113 gx *= gloss / self.count
114 return gx, None
115
116 def backward_gpu(self, inputs, grad_outputs):
117 cupy = cuda.cupy
118 x, t = inputs
119 if self.count == 0:
120 return cupy.zeros_like(x), None
121
122 gloss = grad_outputs[0]
123 n_unit = t.size // t.shape[0]
124 coeff = cuda.cupy.divide(gloss, self.count, dtype=gloss.dtype)
125 gx = cuda.elementwise(
126 'T y, S t, raw T coeff, S n_channel, S n_unit',
127 'T gx',
128 '''
129 const int c = (i / n_unit % n_channel);
130 if (t == -1) {
131 gx = 0;
132 } else {
133 gx = coeff[0] * (y - (c == t));
134 }
135 ''',
136 'softmax_crossent_bwd')(
137 self.y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)
138 return gx, None
139
140
141 def softmax_cross_entropy(x, t, use_cudnn=True, normalize=True):
142 """Computes cross entropy loss for pre-softmax activations.
143
144 Args:
145 x (Variable): Variable holding a multidimensional array whose element
146 indicates unnormalized log probability: the first axis of the
147 variable represents the number of samples, and the second axis
148 represents the number of classes. While this function computes
149 a usual softmax cross entropy if the number of dimensions is equal
150 to 2, it computes a cross entropy of the replicated softmax if the
151 number of dimensions is greater than 2.
152 t (Variable): Variable holding an int32 vector of groundtruth labels.
153 If ``t[i] == -1``, correspondig ``x[i]`` is ignored.
154 normalize (Variable): Variable holding a boolean value which
155 determines the normalization constant. If true, this function
156 normalizes the cross entropy loss across all instances. If else,
157 it only normalizes along a batch size.
158
159 Returns:
160 Variable: A variable holding a scalar array of the cross entropy loss.
161
162 .. note::
163
164 This function is differentiable only by ``x``.
165
166 """
167 return SoftmaxCrossEntropy(use_cudnn, normalize)(x, t)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/loss/softmax_cross_entropy.py b/chainer/functions/loss/softmax_cross_entropy.py
--- a/chainer/functions/loss/softmax_cross_entropy.py
+++ b/chainer/functions/loss/softmax_cross_entropy.py
@@ -6,6 +6,14 @@
from chainer.utils import type_check
+if cuda.cudnn_enabled:
+ cudnn = cuda.cudnn
+ libcudnn = cudnn.cudnn
+ _algorithm = libcudnn.CUDNN_SOFTMAX_LOG
+ _mode = libcudnn.CUDNN_SOFTMAX_MODE_CHANNEL
+ _cudnn_version = libcudnn.getVersion()
+
+
def logsumexp(x):
xp = cuda.get_array_module(x)
m = x.max(axis=1, keepdims=True)
@@ -14,10 +22,26 @@
return xp.log(y.sum(axis=1, keepdims=True)) + m
-def softmax_log(x):
- # TODO(unno): Use cudnn (cudnn v2 doesn't support CUDNN_SOFTMAX_LOG)
- log_z = logsumexp(x)
- return x - log_z
+def softmax_log(x, use_cudnn):
+ xp = cuda.get_array_module(x)
+ if xp != numpy and cuda.cudnn_enabled and use_cudnn \
+ and _cudnn_version >= 3000:
+ dtype = x.dtype
+ one = numpy.array(1, dtype=dtype).ctypes
+ zero = numpy.array(0, dtype=dtype).ctypes
+ handle = cudnn.get_handle()
+ x_cube = x.reshape(x.shape[:2] + (-1, 1))
+ desc = cudnn.create_tensor_descriptor(x_cube)
+ y = xp.empty_like(x)
+ libcudnn.softmaxForward(
+ handle, _algorithm, _mode, one.data, desc.value,
+ x_cube.data.ptr, zero.data, desc.value,
+ y.data.ptr)
+ return y
+
+ else:
+ log_z = logsumexp(x)
+ return x - log_z
class SoftmaxCrossEntropy(function.Function):
@@ -45,7 +69,7 @@
def forward_cpu(self, inputs):
x, t = inputs
- log_y = softmax_log(x)
+ log_y = softmax_log(x, False)
self.y = numpy.exp(log_y)
log_yd = numpy.rollaxis(log_y, 1)
log_yd = log_yd.reshape(len(log_yd), -1)
@@ -69,7 +93,7 @@
def forward_gpu(self, inputs):
cupy = cuda.cupy
x, t = inputs
- log_y = softmax_log(x)
+ log_y = softmax_log(x, self.use_cudnn)
self.y = cupy.exp(log_y)
if getattr(self, 'normalize', True):
count = float((t != self.ignore_label).sum())
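When cuDNN is missing or older than v3, the patch keeps the existing `logsumexp` fallback. The snippet below is a CPU-only NumPy check that this fallback really computes a log-softmax and stays finite where a naive `log(softmax(x))` overflows; it mirrors the `logsumexp` helper from the file above and does not touch the cuDNN path.

```python
import numpy as np


def logsumexp(x):
    # Same shift-by-max trick as the helper in softmax_cross_entropy.py.
    m = x.max(axis=1, keepdims=True)
    return np.log(np.exp(x - m).sum(axis=1, keepdims=True)) + m


x = np.array([[1000.0, 1001.0, 1002.0],   # huge logits: naive softmax overflows
              [-1.0, 0.0, 1.0]])

log_softmax = x - logsumexp(x)            # the fallback path of softmax_log()

with np.errstate(over="ignore", invalid="ignore"):
    naive = np.log(np.exp(x) / np.exp(x).sum(axis=1, keepdims=True))

assert np.allclose(log_softmax[1], naive[1])   # agree where the naive form is finite
assert np.isfinite(log_softmax).all()          # stable even for the huge logits
assert not np.isfinite(naive[0]).all()         # the naive form breaks down
```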
|
{"golden_diff": "diff --git a/chainer/functions/loss/softmax_cross_entropy.py b/chainer/functions/loss/softmax_cross_entropy.py\n--- a/chainer/functions/loss/softmax_cross_entropy.py\n+++ b/chainer/functions/loss/softmax_cross_entropy.py\n@@ -6,6 +6,14 @@\n from chainer.utils import type_check\n \n \n+if cuda.cudnn_enabled:\n+ cudnn = cuda.cudnn\n+ libcudnn = cudnn.cudnn\n+ _algorithm = libcudnn.CUDNN_SOFTMAX_LOG\n+ _mode = libcudnn.CUDNN_SOFTMAX_MODE_CHANNEL\n+ _cudnn_version = libcudnn.getVersion()\n+\n+\n def logsumexp(x):\n xp = cuda.get_array_module(x)\n m = x.max(axis=1, keepdims=True)\n@@ -14,10 +22,26 @@\n return xp.log(y.sum(axis=1, keepdims=True)) + m\n \n \n-def softmax_log(x):\n- # TODO(unno): Use cudnn (cudnn v2 doesn't support CUDNN_SOFTMAX_LOG)\n- log_z = logsumexp(x)\n- return x - log_z\n+def softmax_log(x, use_cudnn):\n+ xp = cuda.get_array_module(x)\n+ if xp != numpy and cuda.cudnn_enabled and use_cudnn \\\n+ and _cudnn_version >= 3000:\n+ dtype = x.dtype\n+ one = numpy.array(1, dtype=dtype).ctypes\n+ zero = numpy.array(0, dtype=dtype).ctypes\n+ handle = cudnn.get_handle()\n+ x_cube = x.reshape(x.shape[:2] + (-1, 1))\n+ desc = cudnn.create_tensor_descriptor(x_cube)\n+ y = xp.empty_like(x)\n+ libcudnn.softmaxForward(\n+ handle, _algorithm, _mode, one.data, desc.value,\n+ x_cube.data.ptr, zero.data, desc.value,\n+ y.data.ptr)\n+ return y\n+\n+ else:\n+ log_z = logsumexp(x)\n+ return x - log_z\n \n \n class SoftmaxCrossEntropy(function.Function):\n@@ -45,7 +69,7 @@\n \n def forward_cpu(self, inputs):\n x, t = inputs\n- log_y = softmax_log(x)\n+ log_y = softmax_log(x, False)\n self.y = numpy.exp(log_y)\n log_yd = numpy.rollaxis(log_y, 1)\n log_yd = log_yd.reshape(len(log_yd), -1)\n@@ -69,7 +93,7 @@\n def forward_gpu(self, inputs):\n cupy = cuda.cupy\n x, t = inputs\n- log_y = softmax_log(x)\n+ log_y = softmax_log(x, self.use_cudnn)\n self.y = cupy.exp(log_y)\n if getattr(self, 'normalize', True):\n count = float((t != self.ignore_label).sum())\n", "issue": "Use `SOFTMAX_LOG` API of cudnn v3 in `softmax_corss_entropy`\n`SOFTMAX_LOG` is supported in cudnn v3. It helps `softmax_cross_entropy` in #712 \nWe need to check version of cudnn. 
CuPy doesn't support it now.\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef logsumexp(x):\n xp = cuda.get_array_module(x)\n m = x.max(axis=1, keepdims=True)\n y = x - m\n xp.exp(y, out=y)\n return xp.log(y.sum(axis=1, keepdims=True)) + m\n\n\ndef softmax_log(x):\n # TODO(unno): Use cudnn (cudnn v2 doesn't support CUDNN_SOFTMAX_LOG)\n log_z = logsumexp(x)\n return x - log_z\n\n\nclass SoftmaxCrossEntropy(function.Function):\n\n \"\"\"Softmax activation followed by a cross entropy loss.\"\"\"\n\n ignore_label = -1\n\n def __init__(self, use_cudnn=True, normalize=True):\n self.use_cudnn = use_cudnn\n self.normalize = normalize\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n t_type.dtype == numpy.int32,\n t_type.ndim == x_type.ndim - 1,\n\n x_type.shape[0] == t_type.shape[0],\n x_type.shape[2:] == t_type.shape[1:],\n )\n\n def forward_cpu(self, inputs):\n x, t = inputs\n log_y = softmax_log(x)\n self.y = numpy.exp(log_y)\n log_yd = numpy.rollaxis(log_y, 1)\n log_yd = log_yd.reshape(len(log_yd), -1)\n\n log_p = log_yd[numpy.maximum(t.flat, 0), six.moves.range(t.size)]\n # deal with the case where the SoftmaxCrossEntropy is\n # unpickled from the old version\n if getattr(self, 'normalize', True):\n count = (t != self.ignore_label).sum()\n else:\n count = x.shape[0]\n self.count = count\n\n if count == 0:\n return numpy.zeros((), dtype=x.dtype),\n\n y = (log_p * (t.flat != self.ignore_label)).sum(keepdims=True) \\\n * (-1.0 / count)\n return y.reshape(()),\n\n def forward_gpu(self, inputs):\n cupy = cuda.cupy\n x, t = inputs\n log_y = softmax_log(x)\n self.y = cupy.exp(log_y)\n if getattr(self, 'normalize', True):\n count = float((t != self.ignore_label).sum())\n else:\n count = t.shape[0]\n self.count = count\n\n if count == 0:\n return cupy.zeros((), dtype=x.dtype),\n\n log_y = cupy.rollaxis(log_y, 1, log_y.ndim)\n ret = cuda.reduce(\n 'S t, raw T log_y, int32 n_channel, T inv_count', 'T out',\n 't == -1 ? 
0 : log_y[_j * n_channel + t]',\n 'a + b', 'out = a * inv_count', '0', 'crossent_fwd'\n )(t, log_y.reduced_view(), log_y.shape[-1], -1.0 / count)\n return ret,\n\n def backward_cpu(self, inputs, grad_outputs):\n x, t = inputs\n if self.count == 0:\n return numpy.zeros_like(x), None\n\n gloss = grad_outputs[0]\n n_unit = t.size // t.shape[0]\n if self.y.ndim == 2:\n gx = self.y.copy()\n gx[six.moves.xrange(len(t)), numpy.maximum(t, 0)] -= 1\n gx *= (t != self.ignore_label).reshape((len(t), 1))\n else:\n # in the case where y.ndim is higher than 2,\n # we think that a current implementation is inefficient\n # because it yields two provisional arrays for indexing.\n gx = self.y.copy().reshape(self.y.shape[0], self.y.shape[1], -1)\n fst_index = numpy.arange(t.size) // n_unit\n trd_index = numpy.arange(t.size) % n_unit\n gx[fst_index, numpy.maximum(t.flat, 0), trd_index] -= 1\n gx *= (t != self.ignore_label).reshape((len(t), 1, -1))\n gx = gx.reshape(self.y.shape)\n\n gx *= gloss / self.count\n return gx, None\n\n def backward_gpu(self, inputs, grad_outputs):\n cupy = cuda.cupy\n x, t = inputs\n if self.count == 0:\n return cupy.zeros_like(x), None\n\n gloss = grad_outputs[0]\n n_unit = t.size // t.shape[0]\n coeff = cuda.cupy.divide(gloss, self.count, dtype=gloss.dtype)\n gx = cuda.elementwise(\n 'T y, S t, raw T coeff, S n_channel, S n_unit',\n 'T gx',\n '''\n const int c = (i / n_unit % n_channel);\n if (t == -1) {\n gx = 0;\n } else {\n gx = coeff[0] * (y - (c == t));\n }\n ''',\n 'softmax_crossent_bwd')(\n self.y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)\n return gx, None\n\n\ndef softmax_cross_entropy(x, t, use_cudnn=True, normalize=True):\n \"\"\"Computes cross entropy loss for pre-softmax activations.\n\n Args:\n x (Variable): Variable holding a multidimensional array whose element\n indicates unnormalized log probability: the first axis of the\n variable represents the number of samples, and the second axis\n represents the number of classes. While this function computes\n a usual softmax cross entropy if the number of dimensions is equal\n to 2, it computes a cross entropy of the replicated softmax if the\n number of dimensions is greater than 2.\n t (Variable): Variable holding an int32 vector of groundtruth labels.\n If ``t[i] == -1``, correspondig ``x[i]`` is ignored.\n normalize (Variable): Variable holding a boolean value which\n determines the normalization constant. If true, this function\n normalizes the cross entropy loss across all instances. If else,\n it only normalizes along a batch size.\n\n Returns:\n Variable: A variable holding a scalar array of the cross entropy loss.\n\n .. 
note::\n\n This function is differentiable only by ``x``.\n\n \"\"\"\n return SoftmaxCrossEntropy(use_cudnn, normalize)(x, t)\n", "path": "chainer/functions/loss/softmax_cross_entropy.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nif cuda.cudnn_enabled:\n cudnn = cuda.cudnn\n libcudnn = cudnn.cudnn\n _algorithm = libcudnn.CUDNN_SOFTMAX_LOG\n _mode = libcudnn.CUDNN_SOFTMAX_MODE_CHANNEL\n _cudnn_version = libcudnn.getVersion()\n\n\ndef logsumexp(x):\n xp = cuda.get_array_module(x)\n m = x.max(axis=1, keepdims=True)\n y = x - m\n xp.exp(y, out=y)\n return xp.log(y.sum(axis=1, keepdims=True)) + m\n\n\ndef softmax_log(x, use_cudnn):\n xp = cuda.get_array_module(x)\n if xp != numpy and cuda.cudnn_enabled and use_cudnn \\\n and _cudnn_version >= 3000:\n dtype = x.dtype\n one = numpy.array(1, dtype=dtype).ctypes\n zero = numpy.array(0, dtype=dtype).ctypes\n handle = cudnn.get_handle()\n x_cube = x.reshape(x.shape[:2] + (-1, 1))\n desc = cudnn.create_tensor_descriptor(x_cube)\n y = xp.empty_like(x)\n libcudnn.softmaxForward(\n handle, _algorithm, _mode, one.data, desc.value,\n x_cube.data.ptr, zero.data, desc.value,\n y.data.ptr)\n return y\n\n else:\n log_z = logsumexp(x)\n return x - log_z\n\n\nclass SoftmaxCrossEntropy(function.Function):\n\n \"\"\"Softmax activation followed by a cross entropy loss.\"\"\"\n\n ignore_label = -1\n\n def __init__(self, use_cudnn=True, normalize=True):\n self.use_cudnn = use_cudnn\n self.normalize = normalize\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n t_type.dtype == numpy.int32,\n t_type.ndim == x_type.ndim - 1,\n\n x_type.shape[0] == t_type.shape[0],\n x_type.shape[2:] == t_type.shape[1:],\n )\n\n def forward_cpu(self, inputs):\n x, t = inputs\n log_y = softmax_log(x, False)\n self.y = numpy.exp(log_y)\n log_yd = numpy.rollaxis(log_y, 1)\n log_yd = log_yd.reshape(len(log_yd), -1)\n\n log_p = log_yd[numpy.maximum(t.flat, 0), six.moves.range(t.size)]\n # deal with the case where the SoftmaxCrossEntropy is\n # unpickled from the old version\n if getattr(self, 'normalize', True):\n count = (t != self.ignore_label).sum()\n else:\n count = x.shape[0]\n self.count = count\n\n if count == 0:\n return numpy.zeros((), dtype=x.dtype),\n\n y = (log_p * (t.flat != self.ignore_label)).sum(keepdims=True) \\\n * (-1.0 / count)\n return y.reshape(()),\n\n def forward_gpu(self, inputs):\n cupy = cuda.cupy\n x, t = inputs\n log_y = softmax_log(x, self.use_cudnn)\n self.y = cupy.exp(log_y)\n if getattr(self, 'normalize', True):\n count = float((t != self.ignore_label).sum())\n else:\n count = t.shape[0]\n self.count = count\n\n if count == 0:\n return cupy.zeros((), dtype=x.dtype),\n\n log_y = cupy.rollaxis(log_y, 1, log_y.ndim)\n ret = cuda.reduce(\n 'S t, raw T log_y, int32 n_channel, T inv_count', 'T out',\n 't == -1 ? 
0 : log_y[_j * n_channel + t]',\n 'a + b', 'out = a * inv_count', '0', 'crossent_fwd'\n )(t, log_y.reduced_view(), log_y.shape[-1], -1.0 / count)\n return ret,\n\n def backward_cpu(self, inputs, grad_outputs):\n x, t = inputs\n if self.count == 0:\n return numpy.zeros_like(x), None\n\n gloss = grad_outputs[0]\n n_unit = t.size // t.shape[0]\n if self.y.ndim == 2:\n gx = self.y.copy()\n gx[six.moves.xrange(len(t)), numpy.maximum(t, 0)] -= 1\n gx *= (t != self.ignore_label).reshape((len(t), 1))\n else:\n # in the case where y.ndim is higher than 2,\n # we think that a current implementation is inefficient\n # because it yields two provisional arrays for indexing.\n gx = self.y.copy().reshape(self.y.shape[0], self.y.shape[1], -1)\n fst_index = numpy.arange(t.size) // n_unit\n trd_index = numpy.arange(t.size) % n_unit\n gx[fst_index, numpy.maximum(t.flat, 0), trd_index] -= 1\n gx *= (t != self.ignore_label).reshape((len(t), 1, -1))\n gx = gx.reshape(self.y.shape)\n\n gx *= gloss / self.count\n return gx, None\n\n def backward_gpu(self, inputs, grad_outputs):\n cupy = cuda.cupy\n x, t = inputs\n if self.count == 0:\n return cupy.zeros_like(x), None\n\n gloss = grad_outputs[0]\n n_unit = t.size // t.shape[0]\n coeff = cuda.cupy.divide(gloss, self.count, dtype=gloss.dtype)\n gx = cuda.elementwise(\n 'T y, S t, raw T coeff, S n_channel, S n_unit',\n 'T gx',\n '''\n const int c = (i / n_unit % n_channel);\n if (t == -1) {\n gx = 0;\n } else {\n gx = coeff[0] * (y - (c == t));\n }\n ''',\n 'softmax_crossent_bwd')(\n self.y, cupy.expand_dims(t, 1), coeff, x.shape[1], n_unit)\n return gx, None\n\n\ndef softmax_cross_entropy(x, t, use_cudnn=True, normalize=True):\n \"\"\"Computes cross entropy loss for pre-softmax activations.\n\n Args:\n x (Variable): Variable holding a multidimensional array whose element\n indicates unnormalized log probability: the first axis of the\n variable represents the number of samples, and the second axis\n represents the number of classes. While this function computes\n a usual softmax cross entropy if the number of dimensions is equal\n to 2, it computes a cross entropy of the replicated softmax if the\n number of dimensions is greater than 2.\n t (Variable): Variable holding an int32 vector of groundtruth labels.\n If ``t[i] == -1``, correspondig ``x[i]`` is ignored.\n normalize (Variable): Variable holding a boolean value which\n determines the normalization constant. If true, this function\n normalizes the cross entropy loss across all instances. If else,\n it only normalizes along a batch size.\n\n Returns:\n Variable: A variable holding a scalar array of the cross entropy loss.\n\n .. note::\n\n This function is differentiable only by ``x``.\n\n \"\"\"\n return SoftmaxCrossEntropy(use_cudnn, normalize)(x, t)\n", "path": "chainer/functions/loss/softmax_cross_entropy.py"}]}
| 2,204 | 659 |
gh_patches_debug_23420
|
rasdani/github-patches
|
git_diff
|
pypa__pipenv-1536
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
prettytoml deletes unrelated elements when removing items
From #1507.
prettytoml has a bug in `table.py::TableElement.__delitem__`: it deletes a key when the line before it contains an inline comment. A minimal example:
```python
import pipenv # noqa
from prettytoml import lexer
from prettytoml.elements.atomic import AtomicElement
from prettytoml.elements.metadata import (
WhitespaceElement, PunctuationElement, CommentElement,
)
from prettytoml.elements.table import TableElement
def test_table():
initial_toml = """id=42 # My id\nage=14"""
tokens = tuple(lexer.tokenize(initial_toml))
table = TableElement([
AtomicElement(tokens[0:1]),
PunctuationElement(tokens[1:2]),
AtomicElement(tokens[2:3]),
WhitespaceElement(tokens[3:4]),
CommentElement(tokens[4:6]),
AtomicElement(tokens[6:7]),
PunctuationElement(tokens[7:8]),
AtomicElement(tokens[8:9]),
])
assert set(table.items()) == {('id', 42), ('age', 14)}
del table['id']
assert set(table.items()) == {('age', 14)}
```
This test case would fail on the final assertion. `table` at this point would be empty, but it should not.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/patched/prettytoml/elements/table.py`
Content:
```
1 from prettytoml.elements import abstracttable, factory
2 from prettytoml.elements.errors import InvalidElementError
3 from prettytoml.elements.common import Element
4 from prettytoml.elements.metadata import CommentElement, NewlineElement, WhitespaceElement
5 from . import common
6
7
8 class TableElement(abstracttable.AbstractTable):
9 """
10 An Element containing an unnamed top-level table.
11
12 Implements dict-like interface.
13
14 Assumes input sub_elements are correct.
15
16 Raises InvalidElementError on duplicate keys.
17 """
18
19 def __init__(self, sub_elements):
20 abstracttable.AbstractTable.__init__(self, sub_elements)
21
22 self._check_for_duplicate_keys()
23
24 def _check_for_duplicate_keys(self):
25 if len(set(self.keys())) < len(self.keys()):
26 raise InvalidElementError('Duplicate keys found')
27
28 def __setitem__(self, key, value):
29 if key in self:
30 self._update(key, value)
31 else:
32 self._insert(key, value)
33
34 def _update(self, key, value):
35 _, value_i = self._find_key_and_value(key)
36 self._sub_elements[value_i] = value if isinstance(value, Element) else factory.create_element(value)
37
38 def _find_insertion_index(self):
39 """
40 Returns the self.sub_elements index in which new entries should be inserted.
41 """
42
43 non_metadata_elements = tuple(self._enumerate_non_metadata_sub_elements())
44
45 if not non_metadata_elements:
46 return 0
47
48 last_entry_i = non_metadata_elements[-1][0]
49 following_newline_i = self._find_following_line_terminator(last_entry_i)
50
51 return following_newline_i + 1
52
53 def _detect_indentation_size(self):
54 """
55 Detects the level of indentation used in this table.
56 """
57
58 def lines():
59 # Returns a sequence of sequences of elements belonging to each line
60 start = 0
61 for i, element in enumerate(self.elements):
62 if isinstance(element, (CommentElement, NewlineElement)):
63 yield self.elements[start:i+1]
64 start = i+1
65
66 def indentation(line):
67 # Counts the number of whitespace tokens at the beginning of this line
68 try:
69 first_non_whitespace_i = next(i for (i, e) in enumerate(line) if not isinstance(e, WhitespaceElement))
70 return sum(space.length for space in line[:first_non_whitespace_i])
71 except StopIteration:
72 return 0
73
74 def is_empty_line(line):
75 return all(e.type == common.TYPE_METADATA for e in line)
76
77 try:
78 return min(indentation(line) for line in lines() if len(line) > 1 and not is_empty_line(line))
79 except ValueError: # Raised by ValueError when no matching lines found
80 return 0
81
82 def _insert(self, key, value):
83
84 value_element = value if isinstance(value, Element) else factory.create_element(value)
85
86 indentation_size = self._detect_indentation_size()
87 indentation = [factory.create_whitespace_element(self._detect_indentation_size())] if indentation_size else []
88
89 inserted_elements = indentation + [
90 factory.create_string_element(key, bare_allowed=True),
91 factory.create_whitespace_element(),
92 factory.create_operator_element('='),
93 factory.create_whitespace_element(),
94 value_element,
95 factory.create_newline_element(),
96 ]
97
98 insertion_index = self._find_insertion_index()
99
100 self._sub_elements = \
101 self.sub_elements[:insertion_index] + inserted_elements + self.sub_elements[insertion_index:]
102
103 def __delitem__(self, key):
104 begin, _ = self._find_key_and_value(key)
105 preceding_newline = self._find_preceding_newline(begin)
106 if preceding_newline >= 0:
107 begin = preceding_newline
108 end = self._find_following_newline(begin)
109 if end < 0:
110 end = len(tuple(self._sub_elements))
111 self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:]
112
113 def pop(self, key):
114 v = self[key]
115 del self[key]
116 return v
117
118 def value(self):
119 return self
120
121 def __str__(self):
122 return str(self.primitive_value)
123
```
Path: `pipenv/patched/prettytoml/elements/traversal/__init__.py`
Content:
```
1 from prettytoml import tokens
2 from prettytoml.elements import common
3 from prettytoml.elements.metadata import PunctuationElement, NewlineElement
4 from prettytoml.elements.traversal import predicates
5
6
7 class TraversalMixin:
8 """
9 A mix-in that provides convenient sub-element traversal to any class with
10 an `elements` member that is a sequence of Element instances
11 """
12
13 def __find_following_element(self, index, predicate):
14 """
15 Finds and returns the index of element in self.elements that evaluates the given predicate to True
16 and whose index is higher than the given index, or returns -Infinity on failure.
17 """
18 return find_following(self.elements, predicate, index)
19
20 def __find_preceding_element(self, index, predicate):
21 """
22 Finds and returns the index of the element in self.elements that evaluates the given predicate to True
23 and whose index is lower than the given index.
24 """
25 i = find_previous(self.elements, predicate, index)
26 if i == float('inf'):
27 return float('-inf')
28 return i
29
30 def __must_find_following_element(self, predicate):
31 """
32 Finds and returns the index to the element in self.elements that evaluatest the predicate to True, or raises
33 an error.
34 """
35 i = self.__find_following_element(-1, predicate)
36 if i < 0:
37 raise RuntimeError('Could not find non-optional element')
38 return i
39
40 def _enumerate_non_metadata_sub_elements(self):
41 """
42 Returns a sequence of of (index, sub_element) of the non-metadata sub-elements.
43 """
44 return ((i, element) for i, element in enumerate(self.elements) if element.type != common.TYPE_METADATA)
45
46 def _find_preceding_comma(self, index):
47 """
48 Returns the index of the preceding comma element to the given index, or -Infinity.
49 """
50 return self.__find_preceding_element(index, predicates.op_comma)
51
52 def _find_following_comma(self, index):
53 """
54 Returns the index of the following comma element after the given index, or -Infinity.
55 """
56 def predicate(element):
57 return isinstance(element, PunctuationElement) and element.token.type == tokens.TYPE_OP_COMMA
58 return self.__find_following_element(index, predicate)
59
60 def _find_following_newline(self, index):
61 """
62 Returns the index of the following newline element after the given index, or -Infinity.
63 """
64 return self.__find_following_element(index, lambda e: isinstance(e, NewlineElement))
65
66 def _find_following_comment(self, index):
67 """
68 Returns the index of the following comment element after the given index, or -Infinity.
69 """
70 return self.__find_following_element(index, predicates.comment)
71
72 def _find_following_line_terminator(self, index):
73 """
74 Returns the index of the following comment or newline element after the given index, or -Infinity.
75 """
76 following_comment = self._find_following_comment(index)
77 following_newline = self._find_following_newline(index)
78
79 if following_comment == float('-inf'):
80 return following_newline
81 if following_newline == float('inf'):
82 return following_comment
83
84 if following_newline < following_comment:
85 return following_newline
86 else:
87 return following_comment
88
89 def _find_preceding_newline(self, index):
90 """
91 Returns the index of the preceding newline element to the given index, or -Infinity.
92 """
93 return self.__find_preceding_element(index, predicates.newline)
94
95 def _find_following_non_metadata(self, index):
96 """
97 Returns the index to the following non-metadata element after the given index, or -Infinity.
98 """
99 return self.__find_following_element(index, predicates.non_metadata)
100
101 def _find_closing_square_bracket(self):
102 """
103 Returns the index to the closing square bracket, or raises an Error.
104 """
105
106 return self.__must_find_following_element(predicates.closing_square_bracket)
107
108 def _find_following_opening_square_bracket(self, index):
109 """
110 Returns the index to the opening square bracket, or -Infinity.
111 """
112 return self.__find_following_element(index, predicates.opening_square_bracket)
113
114 def _find_following_closing_square_bracket(self, index):
115 """
116 Returns the index to the closing square bracket, or -Infinity.
117 """
118 return self.__find_following_element(index, predicates.closing_square_bracket)
119
120 def _find_following_table(self, index):
121 """
122 Returns the index to the next TableElement after the specified index, or -Infinity.
123 """
124 return self.__find_following_element(index, predicates.table)
125
126 def _find_preceding_table(self, index):
127 """
128 Returns the index to the preceding TableElement to the specified index, or -Infinity.
129 """
130 return self.__find_preceding_element(index,predicates.table)
131
132 def _find_closing_curly_bracket(self):
133 """
134 Returns the index to the closing curly bracket, or raises an Error.
135 """
136 def predicate(element):
137 return isinstance(element, PunctuationElement) and element.token.type == tokens.TYPE_OP_CURLY_RIGHT_BRACKET
138 return self.__must_find_following_element(predicate)
139
140 def _find_following_table_header(self, index):
141 """
142 Returns the index to the table header after the given element index, or -Infinity.
143 """
144 return self.__find_following_element(index, predicates.table_header)
145
146
147 def find_following(element_seq, predicate, index=None):
148 """
149 Finds and returns the index of the next element fulfilling the specified predicate after the specified
150 index, or -Infinity.
151
152 Starts searching linearly from the start_from index.
153 """
154
155 if isinstance(index, (int, float)) and index < 0:
156 index = None
157
158 for i, element in tuple(enumerate(element_seq))[index+1 if index is not None else index:]:
159 if predicate(element):
160 return i
161 return float('-inf')
162
163
164 def find_previous(element_seq, predicate, index=None):
165 """
166 Finds and returns the index of the previous element fulfilling the specified predicate preceding to the specified
167 index, or Infinity.
168 """
169 if isinstance(index, (int, float)) and index >= len(element_seq):
170 index = None
171
172 for i, element in reversed(tuple(enumerate(element_seq))[:index]):
173 if predicate(element):
174 return i
175 return float('inf')
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pipenv/patched/prettytoml/elements/table.py b/pipenv/patched/prettytoml/elements/table.py
--- a/pipenv/patched/prettytoml/elements/table.py
+++ b/pipenv/patched/prettytoml/elements/table.py
@@ -105,7 +105,7 @@
preceding_newline = self._find_preceding_newline(begin)
if preceding_newline >= 0:
begin = preceding_newline
- end = self._find_following_newline(begin)
+ end = self._find_following_line_terminator(begin)
if end < 0:
end = len(tuple(self._sub_elements))
self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:]
diff --git a/pipenv/patched/prettytoml/elements/traversal/__init__.py b/pipenv/patched/prettytoml/elements/traversal/__init__.py
--- a/pipenv/patched/prettytoml/elements/traversal/__init__.py
+++ b/pipenv/patched/prettytoml/elements/traversal/__init__.py
@@ -76,9 +76,9 @@
following_comment = self._find_following_comment(index)
following_newline = self._find_following_newline(index)
- if following_comment == float('-inf'):
+ if following_comment == float('inf'):
return following_newline
- if following_newline == float('inf'):
+ if following_newline == float('-inf'):
return following_comment
if following_newline < following_comment:
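The companion change in `table.py` makes `__delitem__` search for a line terminator (comment or newline) instead of a bare newline. The sketch below reduces the issue's example to plain strings to show why that matters: the comment element carries the line break, so no standalone newline element follows `id=42 # My id`, and a newline-only search falls through to the end-of-table fallback. This is a schematic illustration, not prettytoml's real element classes.

```python
# Schematic stand-ins for the elements in the issue's `id=42 # My id\nage=14` table;
# in prettytoml the trailing newline is part of the CommentElement, so there is
# no separate newline element between the two entries.
elements = ["id", "=", "42", " ", "# My id\n", "age", "=", "14"]


def find_following(seq, predicate, index):
    for i in range(index + 1, len(seq)):
        if predicate(seq[i]):
            return i
    return float("-inf")                 # prettytoml's "not found" sentinel


newline_i = find_following(elements, lambda e: e == "\n", 0)
terminator_i = find_following(elements, lambda e: e.endswith("\n"), 0)

assert newline_i == float("-inf")        # old search: nothing found, so __delitem__
                                         # deleted through to the end of the table
assert terminator_i == 4                 # new search: the comment ends the line, so
                                         # deletion stops there and `age` survives
```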
|
{"golden_diff": "diff --git a/pipenv/patched/prettytoml/elements/table.py b/pipenv/patched/prettytoml/elements/table.py\n--- a/pipenv/patched/prettytoml/elements/table.py\n+++ b/pipenv/patched/prettytoml/elements/table.py\n@@ -105,7 +105,7 @@\n preceding_newline = self._find_preceding_newline(begin)\n if preceding_newline >= 0:\n begin = preceding_newline\n- end = self._find_following_newline(begin)\n+ end = self._find_following_line_terminator(begin)\n if end < 0:\n end = len(tuple(self._sub_elements))\n self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:]\ndiff --git a/pipenv/patched/prettytoml/elements/traversal/__init__.py b/pipenv/patched/prettytoml/elements/traversal/__init__.py\n--- a/pipenv/patched/prettytoml/elements/traversal/__init__.py\n+++ b/pipenv/patched/prettytoml/elements/traversal/__init__.py\n@@ -76,9 +76,9 @@\n following_comment = self._find_following_comment(index)\n following_newline = self._find_following_newline(index)\n \n- if following_comment == float('-inf'):\n+ if following_comment == float('inf'):\n return following_newline\n- if following_newline == float('inf'):\n+ if following_newline == float('-inf'):\n return following_comment\n \n if following_newline < following_comment:\n", "issue": "prettytoml deletes unrelated elements when removing items\nFrom #1507.\r\n\r\nprettytoml has a bug in `table.py::TableElement.__delitem__` that it deletes a key when the line before it contains inline comment. A minimal example:\r\n\r\n```python\r\nimport pipenv # noqa\r\n\r\nfrom prettytoml import lexer\r\nfrom prettytoml.elements.atomic import AtomicElement\r\nfrom prettytoml.elements.metadata import (\r\n WhitespaceElement, PunctuationElement, CommentElement,\r\n)\r\nfrom prettytoml.elements.table import TableElement\r\n\r\n\r\ndef test_table():\r\n\r\n initial_toml = \"\"\"id=42 # My id\\nage=14\"\"\"\r\n tokens = tuple(lexer.tokenize(initial_toml))\r\n table = TableElement([\r\n AtomicElement(tokens[0:1]),\r\n PunctuationElement(tokens[1:2]),\r\n AtomicElement(tokens[2:3]),\r\n WhitespaceElement(tokens[3:4]),\r\n CommentElement(tokens[4:6]),\r\n\r\n AtomicElement(tokens[6:7]),\r\n PunctuationElement(tokens[7:8]),\r\n AtomicElement(tokens[8:9]),\r\n ])\r\n\r\n assert set(table.items()) == {('id', 42), ('age', 14)}\r\n\r\n del table['id']\r\n assert set(table.items()) == {('age', 14)}\r\n```\r\n\r\nThis test case would fail on the final assertion. `table` at this point would be empty, but it should not.\n", "before_files": [{"content": "from prettytoml.elements import abstracttable, factory\nfrom prettytoml.elements.errors import InvalidElementError\nfrom prettytoml.elements.common import Element\nfrom prettytoml.elements.metadata import CommentElement, NewlineElement, WhitespaceElement\nfrom . 
import common\n\n\nclass TableElement(abstracttable.AbstractTable):\n \"\"\"\n An Element containing an unnamed top-level table.\n\n Implements dict-like interface.\n\n Assumes input sub_elements are correct.\n\n Raises InvalidElementError on duplicate keys.\n \"\"\"\n\n def __init__(self, sub_elements):\n abstracttable.AbstractTable.__init__(self, sub_elements)\n\n self._check_for_duplicate_keys()\n\n def _check_for_duplicate_keys(self):\n if len(set(self.keys())) < len(self.keys()):\n raise InvalidElementError('Duplicate keys found')\n\n def __setitem__(self, key, value):\n if key in self:\n self._update(key, value)\n else:\n self._insert(key, value)\n\n def _update(self, key, value):\n _, value_i = self._find_key_and_value(key)\n self._sub_elements[value_i] = value if isinstance(value, Element) else factory.create_element(value)\n\n def _find_insertion_index(self):\n \"\"\"\n Returns the self.sub_elements index in which new entries should be inserted.\n \"\"\"\n\n non_metadata_elements = tuple(self._enumerate_non_metadata_sub_elements())\n\n if not non_metadata_elements:\n return 0\n\n last_entry_i = non_metadata_elements[-1][0]\n following_newline_i = self._find_following_line_terminator(last_entry_i)\n\n return following_newline_i + 1\n\n def _detect_indentation_size(self):\n \"\"\"\n Detects the level of indentation used in this table.\n \"\"\"\n\n def lines():\n # Returns a sequence of sequences of elements belonging to each line\n start = 0\n for i, element in enumerate(self.elements):\n if isinstance(element, (CommentElement, NewlineElement)):\n yield self.elements[start:i+1]\n start = i+1\n\n def indentation(line):\n # Counts the number of whitespace tokens at the beginning of this line\n try:\n first_non_whitespace_i = next(i for (i, e) in enumerate(line) if not isinstance(e, WhitespaceElement))\n return sum(space.length for space in line[:first_non_whitespace_i])\n except StopIteration:\n return 0\n\n def is_empty_line(line):\n return all(e.type == common.TYPE_METADATA for e in line)\n\n try:\n return min(indentation(line) for line in lines() if len(line) > 1 and not is_empty_line(line))\n except ValueError: # Raised by ValueError when no matching lines found\n return 0\n\n def _insert(self, key, value):\n\n value_element = value if isinstance(value, Element) else factory.create_element(value)\n\n indentation_size = self._detect_indentation_size()\n indentation = [factory.create_whitespace_element(self._detect_indentation_size())] if indentation_size else []\n\n inserted_elements = indentation + [\n factory.create_string_element(key, bare_allowed=True),\n factory.create_whitespace_element(),\n factory.create_operator_element('='),\n factory.create_whitespace_element(),\n value_element,\n factory.create_newline_element(),\n ]\n\n insertion_index = self._find_insertion_index()\n\n self._sub_elements = \\\n self.sub_elements[:insertion_index] + inserted_elements + self.sub_elements[insertion_index:]\n\n def __delitem__(self, key):\n begin, _ = self._find_key_and_value(key)\n preceding_newline = self._find_preceding_newline(begin)\n if preceding_newline >= 0:\n begin = preceding_newline\n end = self._find_following_newline(begin)\n if end < 0:\n end = len(tuple(self._sub_elements))\n self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:]\n\n def pop(self, key):\n v = self[key]\n del self[key]\n return v\n\n def value(self):\n return self\n\n def __str__(self):\n return str(self.primitive_value)\n", "path": "pipenv/patched/prettytoml/elements/table.py"}, 
{"content": "from prettytoml import tokens\nfrom prettytoml.elements import common\nfrom prettytoml.elements.metadata import PunctuationElement, NewlineElement\nfrom prettytoml.elements.traversal import predicates\n\n\nclass TraversalMixin:\n \"\"\"\n A mix-in that provides convenient sub-element traversal to any class with\n an `elements` member that is a sequence of Element instances\n \"\"\"\n\n def __find_following_element(self, index, predicate):\n \"\"\"\n Finds and returns the index of element in self.elements that evaluates the given predicate to True\n and whose index is higher than the given index, or returns -Infinity on failure.\n \"\"\"\n return find_following(self.elements, predicate, index)\n\n def __find_preceding_element(self, index, predicate):\n \"\"\"\n Finds and returns the index of the element in self.elements that evaluates the given predicate to True\n and whose index is lower than the given index.\n \"\"\"\n i = find_previous(self.elements, predicate, index)\n if i == float('inf'):\n return float('-inf')\n return i\n\n def __must_find_following_element(self, predicate):\n \"\"\"\n Finds and returns the index to the element in self.elements that evaluatest the predicate to True, or raises\n an error.\n \"\"\"\n i = self.__find_following_element(-1, predicate)\n if i < 0:\n raise RuntimeError('Could not find non-optional element')\n return i\n\n def _enumerate_non_metadata_sub_elements(self):\n \"\"\"\n Returns a sequence of of (index, sub_element) of the non-metadata sub-elements.\n \"\"\"\n return ((i, element) for i, element in enumerate(self.elements) if element.type != common.TYPE_METADATA)\n\n def _find_preceding_comma(self, index):\n \"\"\"\n Returns the index of the preceding comma element to the given index, or -Infinity.\n \"\"\"\n return self.__find_preceding_element(index, predicates.op_comma)\n\n def _find_following_comma(self, index):\n \"\"\"\n Returns the index of the following comma element after the given index, or -Infinity.\n \"\"\"\n def predicate(element):\n return isinstance(element, PunctuationElement) and element.token.type == tokens.TYPE_OP_COMMA\n return self.__find_following_element(index, predicate)\n\n def _find_following_newline(self, index):\n \"\"\"\n Returns the index of the following newline element after the given index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, lambda e: isinstance(e, NewlineElement))\n\n def _find_following_comment(self, index):\n \"\"\"\n Returns the index of the following comment element after the given index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.comment)\n\n def _find_following_line_terminator(self, index):\n \"\"\"\n Returns the index of the following comment or newline element after the given index, or -Infinity.\n \"\"\"\n following_comment = self._find_following_comment(index)\n following_newline = self._find_following_newline(index)\n\n if following_comment == float('-inf'):\n return following_newline\n if following_newline == float('inf'):\n return following_comment\n\n if following_newline < following_comment:\n return following_newline\n else:\n return following_comment\n\n def _find_preceding_newline(self, index):\n \"\"\"\n Returns the index of the preceding newline element to the given index, or -Infinity.\n \"\"\"\n return self.__find_preceding_element(index, predicates.newline)\n\n def _find_following_non_metadata(self, index):\n \"\"\"\n Returns the index to the following non-metadata element after the given index, or 
-Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.non_metadata)\n\n def _find_closing_square_bracket(self):\n \"\"\"\n Returns the index to the closing square bracket, or raises an Error.\n \"\"\"\n\n return self.__must_find_following_element(predicates.closing_square_bracket)\n\n def _find_following_opening_square_bracket(self, index):\n \"\"\"\n Returns the index to the opening square bracket, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.opening_square_bracket)\n\n def _find_following_closing_square_bracket(self, index):\n \"\"\"\n Returns the index to the closing square bracket, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.closing_square_bracket)\n\n def _find_following_table(self, index):\n \"\"\"\n Returns the index to the next TableElement after the specified index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.table)\n\n def _find_preceding_table(self, index):\n \"\"\"\n Returns the index to the preceding TableElement to the specified index, or -Infinity.\n \"\"\"\n return self.__find_preceding_element(index,predicates.table)\n\n def _find_closing_curly_bracket(self):\n \"\"\"\n Returns the index to the closing curly bracket, or raises an Error.\n \"\"\"\n def predicate(element):\n return isinstance(element, PunctuationElement) and element.token.type == tokens.TYPE_OP_CURLY_RIGHT_BRACKET\n return self.__must_find_following_element(predicate)\n\n def _find_following_table_header(self, index):\n \"\"\"\n Returns the index to the table header after the given element index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.table_header)\n\n\ndef find_following(element_seq, predicate, index=None):\n \"\"\"\n Finds and returns the index of the next element fulfilling the specified predicate after the specified\n index, or -Infinity.\n\n Starts searching linearly from the start_from index.\n \"\"\"\n\n if isinstance(index, (int, float)) and index < 0:\n index = None\n\n for i, element in tuple(enumerate(element_seq))[index+1 if index is not None else index:]:\n if predicate(element):\n return i\n return float('-inf')\n\n\ndef find_previous(element_seq, predicate, index=None):\n \"\"\"\n Finds and returns the index of the previous element fulfilling the specified predicate preceding to the specified\n index, or Infinity.\n \"\"\"\n if isinstance(index, (int, float)) and index >= len(element_seq):\n index = None\n\n for i, element in reversed(tuple(enumerate(element_seq))[:index]):\n if predicate(element):\n return i\n return float('inf')\n", "path": "pipenv/patched/prettytoml/elements/traversal/__init__.py"}], "after_files": [{"content": "from prettytoml.elements import abstracttable, factory\nfrom prettytoml.elements.errors import InvalidElementError\nfrom prettytoml.elements.common import Element\nfrom prettytoml.elements.metadata import CommentElement, NewlineElement, WhitespaceElement\nfrom . 
import common\n\n\nclass TableElement(abstracttable.AbstractTable):\n \"\"\"\n An Element containing an unnamed top-level table.\n\n Implements dict-like interface.\n\n Assumes input sub_elements are correct.\n\n Raises InvalidElementError on duplicate keys.\n \"\"\"\n\n def __init__(self, sub_elements):\n abstracttable.AbstractTable.__init__(self, sub_elements)\n\n self._check_for_duplicate_keys()\n\n def _check_for_duplicate_keys(self):\n if len(set(self.keys())) < len(self.keys()):\n raise InvalidElementError('Duplicate keys found')\n\n def __setitem__(self, key, value):\n if key in self:\n self._update(key, value)\n else:\n self._insert(key, value)\n\n def _update(self, key, value):\n _, value_i = self._find_key_and_value(key)\n self._sub_elements[value_i] = value if isinstance(value, Element) else factory.create_element(value)\n\n def _find_insertion_index(self):\n \"\"\"\n Returns the self.sub_elements index in which new entries should be inserted.\n \"\"\"\n\n non_metadata_elements = tuple(self._enumerate_non_metadata_sub_elements())\n\n if not non_metadata_elements:\n return 0\n\n last_entry_i = non_metadata_elements[-1][0]\n following_newline_i = self._find_following_line_terminator(last_entry_i)\n\n return following_newline_i + 1\n\n def _detect_indentation_size(self):\n \"\"\"\n Detects the level of indentation used in this table.\n \"\"\"\n\n def lines():\n # Returns a sequence of sequences of elements belonging to each line\n start = 0\n for i, element in enumerate(self.elements):\n if isinstance(element, (CommentElement, NewlineElement)):\n yield self.elements[start:i+1]\n start = i+1\n\n def indentation(line):\n # Counts the number of whitespace tokens at the beginning of this line\n try:\n first_non_whitespace_i = next(i for (i, e) in enumerate(line) if not isinstance(e, WhitespaceElement))\n return sum(space.length for space in line[:first_non_whitespace_i])\n except StopIteration:\n return 0\n\n def is_empty_line(line):\n return all(e.type == common.TYPE_METADATA for e in line)\n\n try:\n return min(indentation(line) for line in lines() if len(line) > 1 and not is_empty_line(line))\n except ValueError: # Raised by ValueError when no matching lines found\n return 0\n\n def _insert(self, key, value):\n\n value_element = value if isinstance(value, Element) else factory.create_element(value)\n\n indentation_size = self._detect_indentation_size()\n indentation = [factory.create_whitespace_element(self._detect_indentation_size())] if indentation_size else []\n\n inserted_elements = indentation + [\n factory.create_string_element(key, bare_allowed=True),\n factory.create_whitespace_element(),\n factory.create_operator_element('='),\n factory.create_whitespace_element(),\n value_element,\n factory.create_newline_element(),\n ]\n\n insertion_index = self._find_insertion_index()\n\n self._sub_elements = \\\n self.sub_elements[:insertion_index] + inserted_elements + self.sub_elements[insertion_index:]\n\n def __delitem__(self, key):\n begin, _ = self._find_key_and_value(key)\n preceding_newline = self._find_preceding_newline(begin)\n if preceding_newline >= 0:\n begin = preceding_newline\n end = self._find_following_line_terminator(begin)\n if end < 0:\n end = len(tuple(self._sub_elements))\n self._sub_elements = self.sub_elements[:begin] + self.sub_elements[end:]\n\n def pop(self, key):\n v = self[key]\n del self[key]\n return v\n\n def value(self):\n return self\n\n def __str__(self):\n return str(self.primitive_value)\n", "path": "pipenv/patched/prettytoml/elements/table.py"}, 
{"content": "from prettytoml import tokens\nfrom prettytoml.elements import common\nfrom prettytoml.elements.metadata import PunctuationElement, NewlineElement\nfrom prettytoml.elements.traversal import predicates\n\n\nclass TraversalMixin:\n \"\"\"\n A mix-in that provides convenient sub-element traversal to any class with\n an `elements` member that is a sequence of Element instances\n \"\"\"\n\n def __find_following_element(self, index, predicate):\n \"\"\"\n Finds and returns the index of element in self.elements that evaluates the given predicate to True\n and whose index is higher than the given index, or returns -Infinity on failure.\n \"\"\"\n return find_following(self.elements, predicate, index)\n\n def __find_preceding_element(self, index, predicate):\n \"\"\"\n Finds and returns the index of the element in self.elements that evaluates the given predicate to True\n and whose index is lower than the given index.\n \"\"\"\n i = find_previous(self.elements, predicate, index)\n if i == float('inf'):\n return float('-inf')\n return i\n\n def __must_find_following_element(self, predicate):\n \"\"\"\n Finds and returns the index to the element in self.elements that evaluatest the predicate to True, or raises\n an error.\n \"\"\"\n i = self.__find_following_element(-1, predicate)\n if i < 0:\n raise RuntimeError('Could not find non-optional element')\n return i\n\n def _enumerate_non_metadata_sub_elements(self):\n \"\"\"\n Returns a sequence of of (index, sub_element) of the non-metadata sub-elements.\n \"\"\"\n return ((i, element) for i, element in enumerate(self.elements) if element.type != common.TYPE_METADATA)\n\n def _find_preceding_comma(self, index):\n \"\"\"\n Returns the index of the preceding comma element to the given index, or -Infinity.\n \"\"\"\n return self.__find_preceding_element(index, predicates.op_comma)\n\n def _find_following_comma(self, index):\n \"\"\"\n Returns the index of the following comma element after the given index, or -Infinity.\n \"\"\"\n def predicate(element):\n return isinstance(element, PunctuationElement) and element.token.type == tokens.TYPE_OP_COMMA\n return self.__find_following_element(index, predicate)\n\n def _find_following_newline(self, index):\n \"\"\"\n Returns the index of the following newline element after the given index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, lambda e: isinstance(e, NewlineElement))\n\n def _find_following_comment(self, index):\n \"\"\"\n Returns the index of the following comment element after the given index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.comment)\n\n def _find_following_line_terminator(self, index):\n \"\"\"\n Returns the index of the following comment or newline element after the given index, or -Infinity.\n \"\"\"\n following_comment = self._find_following_comment(index)\n following_newline = self._find_following_newline(index)\n\n if following_comment == float('inf'):\n return following_newline\n if following_newline == float('-inf'):\n return following_comment\n\n if following_newline < following_comment:\n return following_newline\n else:\n return following_comment\n\n def _find_preceding_newline(self, index):\n \"\"\"\n Returns the index of the preceding newline element to the given index, or -Infinity.\n \"\"\"\n return self.__find_preceding_element(index, predicates.newline)\n\n def _find_following_non_metadata(self, index):\n \"\"\"\n Returns the index to the following non-metadata element after the given index, or 
-Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.non_metadata)\n\n def _find_closing_square_bracket(self):\n \"\"\"\n Returns the index to the closing square bracket, or raises an Error.\n \"\"\"\n\n return self.__must_find_following_element(predicates.closing_square_bracket)\n\n def _find_following_opening_square_bracket(self, index):\n \"\"\"\n Returns the index to the opening square bracket, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.opening_square_bracket)\n\n def _find_following_closing_square_bracket(self, index):\n \"\"\"\n Returns the index to the closing square bracket, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.closing_square_bracket)\n\n def _find_following_table(self, index):\n \"\"\"\n Returns the index to the next TableElement after the specified index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.table)\n\n def _find_preceding_table(self, index):\n \"\"\"\n Returns the index to the preceding TableElement to the specified index, or -Infinity.\n \"\"\"\n return self.__find_preceding_element(index,predicates.table)\n\n def _find_closing_curly_bracket(self):\n \"\"\"\n Returns the index to the closing curly bracket, or raises an Error.\n \"\"\"\n def predicate(element):\n return isinstance(element, PunctuationElement) and element.token.type == tokens.TYPE_OP_CURLY_RIGHT_BRACKET\n return self.__must_find_following_element(predicate)\n\n def _find_following_table_header(self, index):\n \"\"\"\n Returns the index to the table header after the given element index, or -Infinity.\n \"\"\"\n return self.__find_following_element(index, predicates.table_header)\n\n\ndef find_following(element_seq, predicate, index=None):\n \"\"\"\n Finds and returns the index of the next element fulfilling the specified predicate after the specified\n index, or -Infinity.\n\n Starts searching linearly from the start_from index.\n \"\"\"\n\n if isinstance(index, (int, float)) and index < 0:\n index = None\n\n for i, element in tuple(enumerate(element_seq))[index+1 if index is not None else index:]:\n if predicate(element):\n return i\n return float('-inf')\n\n\ndef find_previous(element_seq, predicate, index=None):\n \"\"\"\n Finds and returns the index of the previous element fulfilling the specified predicate preceding to the specified\n index, or Infinity.\n \"\"\"\n if isinstance(index, (int, float)) and index >= len(element_seq):\n index = None\n\n for i, element in reversed(tuple(enumerate(element_seq))[:index]):\n if predicate(element):\n return i\n return float('inf')\n", "path": "pipenv/patched/prettytoml/elements/traversal/__init__.py"}]}
| 3,579 | 361 |
gh_patches_debug_65929
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-985
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Trouble installing dvc with pip: No matching distribution found for futures>=3.2.0 (from dvc)
I'm on a fresh ubuntu 18.04 and I want to install DVC. But I run into some dependency problems. Never had that problem before.
```
➤ virtualenv -p python3 .venv
➤ source .venv/bin/activate.fish
➤ pip install dvc
Collecting dvc
Using cached https://files.pythonhosted.org/packages/d2/2d/117b6e99f4e7f0760d99944919d9dcaaeabfb6c6182a9c890b7260eec697/dvc-0.15.2-py2.py3-none-any.whl
Collecting pyasn1>=0.4.1 (from dvc)
Using cached https://files.pythonhosted.org/packages/d1/a1/7790cc85db38daa874f6a2e6308131b9953feb1367f2ae2d1123bb93a9f5/pyasn1-0.4.4-py2.py3-none-any.whl
Collecting ply>=3.9 (from dvc)
Using cached https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl
Collecting futures>=3.2.0 (from dvc)
Could not find a version that satisfies the requirement futures>=3.2.0 (from dvc) (from versions: 0.2.python3, 0.1, 0.2, 1.0, 2.0, 2.1, 2.1.1, 2.1.2, 2.1.3, 2.1.4, 2.1.5, 2.1.6, 2.2.0, 3.0.0, 3.0.1, 3.0.2, 3.0.3, 3.0.4, 3.0.5, 3.1.0, 3.1.1)
No matching distribution found for futures>=3.2.0 (from dvc)
```
Here are all relevant version
```
➤ pip --version
pip 18.0 from /home/PATH/.venv/lib/python3.6/site-packages/pip (python 3.6)
➤ python --version
Python 3.6.5
➤ virtualenv --version
16.0.0
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import sys
2 import platform
3 from setuptools import setup, find_packages
4 from distutils.errors import DistutilsPlatformError
5 from dvc import VERSION
6
7
8 install_requires = [
9 "ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
10 "configparser>=3.5.0",
11 "zc.lockfile>=1.2.1",
12 "future>=0.16.0",
13 "colorama>=0.3.9",
14 "configobj>=5.0.6",
15 "networkx==2.1",
16 "pyyaml>=3.12",
17 "gitpython>=2.1.8",
18 "ntfsutils>=0.1.4",
19 "setuptools>=34.0.0",
20 "nanotime>=0.5.2",
21 "pyasn1>=0.4.1",
22 "schema>=0.6.7",
23 "jsonpath-rw==1.4.0",
24 "reflink==0.2.0",
25 "requests>=2.18.4",
26 ]
27
28 if sys.version_info[0] == 2:
29 install_requires.append("futures>=3.2.0")
30
31 # Extra dependencies for remote integrations
32 gs = [
33 "google-cloud==0.32.0",
34 ]
35 s3 = [
36 "boto3==1.7.4",
37 ]
38 azure = [
39 "azure-storage-blob==1.3.0"
40 ]
41 ssh = [
42 "paramiko>=2.4.1",
43 ]
44 all_remotes = gs + s3 + azure + ssh
45
46 setup(
47 name='dvc',
48 version=VERSION,
49 description='Git for data scientists - manage your code and data together',
50 long_description=open('README.rst', 'r').read(),
51 author='Dmitry Petrov',
52 author_email='[email protected]',
53 download_url='https://github.com/iterative/dvc',
54 license='Apache License 2.0',
55 install_requires=install_requires,
56 extras_require={
57 'all': all_remotes,
58 'gs': gs,
59 's3': s3,
60 'azure': azure,
61 'ssh': ssh,
62 },
63 keywords='data science, data version control, machine learning',
64 classifiers=[
65 'Development Status :: 4 - Beta',
66 'Programming Language :: Python :: 2',
67 'Programming Language :: Python :: 3',
68 ],
69 packages=find_packages(exclude=['bin', 'tests', 'functests']),
70 include_package_data=True,
71 url='http://dataversioncontrol.com',
72 entry_points={
73 'console_scripts': ['dvc = dvc.main:main']
74 },
75 zip_safe=False
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,11 +23,9 @@
"jsonpath-rw==1.4.0",
"reflink==0.2.0",
"requests>=2.18.4",
+ 'futures; python_version == "2.7"',
]
-if sys.version_info[0] == 2:
- install_requires.append("futures>=3.2.0")
-
# Extra dependencies for remote integrations
gs = [
"google-cloud==0.32.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,11 +23,9 @@\n \"jsonpath-rw==1.4.0\",\n \"reflink==0.2.0\",\n \"requests>=2.18.4\",\n+ 'futures; python_version == \"2.7\"',\n ]\n \n-if sys.version_info[0] == 2:\n- install_requires.append(\"futures>=3.2.0\")\n-\n # Extra dependencies for remote integrations\n gs = [\n \"google-cloud==0.32.0\",\n", "issue": "Trouble installing dvc with pip: No matching distribution found for futures>=3.2.0 (from dvc)\nI'm on a fresh ubuntu 18.04 and I want to install DVC. But I run into some dependency problems. Never had that problem before.\r\n```\r\n\u27a4 virtualenv -p python3 .venv\r\n\u27a4 source .venv/bin/activate.fish\r\n\u27a4 pip install dvc\r\nCollecting dvc\r\n Using cached https://files.pythonhosted.org/packages/d2/2d/117b6e99f4e7f0760d99944919d9dcaaeabfb6c6182a9c890b7260eec697/dvc-0.15.2-py2.py3-none-any.whl\r\nCollecting pyasn1>=0.4.1 (from dvc)\r\n Using cached https://files.pythonhosted.org/packages/d1/a1/7790cc85db38daa874f6a2e6308131b9953feb1367f2ae2d1123bb93a9f5/pyasn1-0.4.4-py2.py3-none-any.whl\r\nCollecting ply>=3.9 (from dvc)\r\n Using cached https://files.pythonhosted.org/packages/a3/58/35da89ee790598a0700ea49b2a66594140f44dec458c07e8e3d4979137fc/ply-3.11-py2.py3-none-any.whl\r\nCollecting futures>=3.2.0 (from dvc)\r\n Could not find a version that satisfies the requirement futures>=3.2.0 (from dvc) (from versions: 0.2.python3, 0.1, 0.2, 1.0, 2.0, 2.1, 2.1.1, 2.1.2, 2.1.3, 2.1.4, 2.1.5, 2.1.6, 2.2.0, 3.0.0, 3.0.1, 3.0.2, 3.0.3, 3.0.4, 3.0.5, 3.1.0, 3.1.1)\r\nNo matching distribution found for futures>=3.2.0 (from dvc)\r\n```\r\nHere are all relevant version\r\n```\r\n\u27a4 pip --version\r\npip 18.0 from /home/PATH/.venv/lib/python3.6/site-packages/pip (python 3.6)\r\n\u27a4 python --version\r\nPython 3.6.5\r\n\u27a4 virtualenv --version\r\n16.0.0\r\n```\n", "before_files": [{"content": "import sys\nimport platform\nfrom setuptools import setup, find_packages\nfrom distutils.errors import DistutilsPlatformError\nfrom dvc import VERSION\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"networkx==2.1\",\n \"pyyaml>=3.12\",\n \"gitpython>=2.1.8\",\n \"ntfsutils>=0.1.4\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-rw==1.4.0\",\n \"reflink==0.2.0\",\n \"requests>=2.18.4\",\n]\n\nif sys.version_info[0] == 2:\n install_requires.append(\"futures>=3.2.0\")\n\n# Extra dependencies for remote integrations\ngs = [\n \"google-cloud==0.32.0\",\n]\ns3 = [\n \"boto3==1.7.4\",\n]\nazure = [\n \"azure-storage-blob==1.3.0\"\n]\nssh = [\n \"paramiko>=2.4.1\",\n]\nall_remotes = gs + s3 + azure + ssh\n\nsetup(\n name='dvc',\n version=VERSION,\n description='Git for data scientists - manage your code and data together',\n long_description=open('README.rst', 'r').read(),\n author='Dmitry Petrov',\n author_email='[email protected]',\n download_url='https://github.com/iterative/dvc',\n license='Apache License 2.0',\n install_requires=install_requires,\n extras_require={\n 'all': all_remotes,\n 'gs': gs,\n 's3': s3,\n 'azure': azure,\n 'ssh': ssh,\n },\n keywords='data science, data version control, machine learning',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n ],\n 
packages=find_packages(exclude=['bin', 'tests', 'functests']),\n include_package_data=True,\n url='http://dataversioncontrol.com',\n entry_points={\n 'console_scripts': ['dvc = dvc.main:main']\n },\n zip_safe=False\n)\n", "path": "setup.py"}], "after_files": [{"content": "import sys\nimport platform\nfrom setuptools import setup, find_packages\nfrom distutils.errors import DistutilsPlatformError\nfrom dvc import VERSION\n\n\ninstall_requires = [\n \"ply>=3.9\", # See https://github.com/pyinstaller/pyinstaller/issues/1945\n \"configparser>=3.5.0\",\n \"zc.lockfile>=1.2.1\",\n \"future>=0.16.0\",\n \"colorama>=0.3.9\",\n \"configobj>=5.0.6\",\n \"networkx==2.1\",\n \"pyyaml>=3.12\",\n \"gitpython>=2.1.8\",\n \"ntfsutils>=0.1.4\",\n \"setuptools>=34.0.0\",\n \"nanotime>=0.5.2\",\n \"pyasn1>=0.4.1\",\n \"schema>=0.6.7\",\n \"jsonpath-rw==1.4.0\",\n \"reflink==0.2.0\",\n \"requests>=2.18.4\",\n 'futures; python_version == \"2.7\"',\n]\n\n# Extra dependencies for remote integrations\ngs = [\n \"google-cloud==0.32.0\",\n]\ns3 = [\n \"boto3==1.7.4\",\n]\nazure = [\n \"azure-storage-blob==1.3.0\"\n]\nssh = [\n \"paramiko>=2.4.1\",\n]\nall_remotes = gs + s3 + azure + ssh\n\nsetup(\n name='dvc',\n version=VERSION,\n description='Git for data scientists - manage your code and data together',\n long_description=open('README.rst', 'r').read(),\n author='Dmitry Petrov',\n author_email='[email protected]',\n download_url='https://github.com/iterative/dvc',\n license='Apache License 2.0',\n install_requires=install_requires,\n extras_require={\n 'all': all_remotes,\n 'gs': gs,\n 's3': s3,\n 'azure': azure,\n 'ssh': ssh,\n },\n keywords='data science, data version control, machine learning',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n ],\n packages=find_packages(exclude=['bin', 'tests', 'functests']),\n include_package_data=True,\n url='http://dataversioncontrol.com',\n entry_points={\n 'console_scripts': ['dvc = dvc.main:main']\n },\n zip_safe=False\n)\n", "path": "setup.py"}]}
| 1,655 | 134 |
gh_patches_debug_8904
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-3852
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Azure - c7n-Mailer Errors
About 50% of the time mailer runs, the following error results and messages aren't picked up, delivered:
```
Traceback (most recent call last):
File "/usr/local/bin/c7n-mailer", line 10, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/c7n_mailer/cli.py", line 227, in main
processor.run()
File "/usr/local/lib/python3.7/site-packages/c7n_mailer/azure/azure_queue_processor.py", line 62, in run
if (self.process_azure_queue_message(queue_message) or
File "/usr/local/lib/python3.7/site-packages/c7n_mailer/azure/azure_queue_processor.py", line 89, in process_azure_queue_message
SendGridDelivery(self.config, self.logger))
File "/usr/local/lib/python3.7/site-packages/c7n_mailer/azure/sendgrid_delivery.py", line 29, in __init__
sendgrid.SendGridAPIClient(apikey=self.config.get('sendgrid_api_key', ''))
TypeError: __init__() got an unexpected keyword argument 'apikey'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py`
Content:
```
1 # Copyright 2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sendgrid
16 import six
17 from c7n_mailer.utils import (get_message_subject, get_rendered_jinja)
18 from c7n_mailer.utils_email import is_email
19 from python_http_client import exceptions
20 from sendgrid.helpers.mail import Email, Content, Mail
21
22
23 class SendGridDelivery(object):
24
25 def __init__(self, config, logger):
26 self.config = config
27 self.logger = logger
28 self.sendgrid_client = \
29 sendgrid.SendGridAPIClient(apikey=self.config.get('sendgrid_api_key', ''))
30
31 def get_to_addrs_sendgrid_messages_map(self, queue_message):
32 # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }
33 to_addrs_to_resources_map = self.get_email_to_addrs_to_resources_map(queue_message)
34
35 to_addrs_to_content_map = {}
36 for to_addrs, resources in six.iteritems(to_addrs_to_resources_map):
37 to_addrs_to_content_map[to_addrs] = self.get_message_content(
38 queue_message,
39 resources,
40 list(to_addrs)
41 )
42 # eg: { ('[email protected]', '[email protected]'): message }
43 return to_addrs_to_content_map
44
45 # this function returns a dictionary with a tuple of emails as the key
46 # and the list of resources as the value. This helps ensure minimal emails
47 # are sent, while only ever sending emails to the respective parties.
48 def get_email_to_addrs_to_resources_map(self, queue_message):
49 email_to_addrs_to_resources_map = {}
50 targets = queue_message['action']['to']
51
52 for resource in queue_message['resources']:
53 # this is the list of emails that will be sent for this resource
54 resource_emails = []
55
56 for target in targets:
57 if target.startswith('tag:') and 'tags' in resource:
58 tag_name = target.split(':', 1)[1]
59 result = resource.get('tags', {}).get(tag_name, None)
60 if is_email(result):
61 resource_emails.append(result)
62 elif is_email(target):
63 resource_emails.append(target)
64
65 resource_emails = tuple(sorted(set(resource_emails)))
66
67 if resource_emails:
68 email_to_addrs_to_resources_map.setdefault(resource_emails, []).append(resource)
69
70 if email_to_addrs_to_resources_map == {}:
71 self.logger.debug('Found no email addresses, sending no emails.')
72 # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }
73 return email_to_addrs_to_resources_map
74
75 def get_message_content(self, queue_message, resources, to_addrs):
76 return get_rendered_jinja(
77 to_addrs, queue_message, resources, self.logger,
78 'template', 'default', self.config['templates_folders'])
79
80 def sendgrid_handler(self, queue_message, to_addrs_to_email_messages_map):
81 self.logger.info("Sending account:%s policy:%s %s:%s email:%s to %s" % (
82 queue_message.get('account', ''),
83 queue_message['policy']['name'],
84 queue_message['policy']['resource'],
85 str(len(queue_message['resources'])),
86 queue_message['action'].get('template', 'default'),
87 to_addrs_to_email_messages_map))
88
89 from_email = Email(self.config.get('from_address', ''))
90 subject = get_message_subject(queue_message)
91 email_format = queue_message['action'].get('template_format', None)
92 if not email_format:
93 email_format = queue_message['action'].get(
94 'template', 'default').endswith('html') and 'html' or 'plain'
95
96 for email_to_addrs, email_content in six.iteritems(to_addrs_to_email_messages_map):
97 for to_address in email_to_addrs:
98 to_email = Email(to_address)
99 content = Content("text/" + email_format, email_content)
100 mail = Mail(from_email, subject, to_email, content)
101 try:
102 self.sendgrid_client.client.mail.send.post(request_body=mail.get())
103 except (exceptions.UnauthorizedError, exceptions.BadRequestsError) as e:
104 self.logger.warning(
105 "\n**Error \nPolicy:%s \nAccount:%s \nSending to:%s \n\nRequest body:"
106 "\n%s\n\nRequest headers:\n%s\n\n mailer.yml: %s" % (
107 queue_message['policy'],
108 queue_message.get('account', ''),
109 email_to_addrs,
110 e.body,
111 e.headers,
112 self.config
113 )
114 )
115 return False
116 return True
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py b/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py
--- a/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py
+++ b/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py
@@ -26,7 +26,7 @@
self.config = config
self.logger = logger
self.sendgrid_client = \
- sendgrid.SendGridAPIClient(apikey=self.config.get('sendgrid_api_key', ''))
+ sendgrid.SendGridAPIClient(self.config.get('sendgrid_api_key', ''))
def get_to_addrs_sendgrid_messages_map(self, queue_message):
# eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }
|
{"golden_diff": "diff --git a/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py b/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py\n--- a/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py\n+++ b/tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py\n@@ -26,7 +26,7 @@\n self.config = config\n self.logger = logger\n self.sendgrid_client = \\\n- sendgrid.SendGridAPIClient(apikey=self.config.get('sendgrid_api_key', ''))\n+ sendgrid.SendGridAPIClient(self.config.get('sendgrid_api_key', ''))\n \n def get_to_addrs_sendgrid_messages_map(self, queue_message):\n # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }\n", "issue": "Azure - c7n-Mailer Errors\nAbout 50% of the time mailer runs, the following error results and messages aren't picked up, delivered:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/c7n-mailer\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.7/site-packages/c7n_mailer/cli.py\", line 227, in main\r\n processor.run()\r\n File \"/usr/local/lib/python3.7/site-packages/c7n_mailer/azure/azure_queue_processor.py\", line 62, in run\r\n if (self.process_azure_queue_message(queue_message) or\r\n File \"/usr/local/lib/python3.7/site-packages/c7n_mailer/azure/azure_queue_processor.py\", line 89, in process_azure_queue_message\r\n SendGridDelivery(self.config, self.logger))\r\n File \"/usr/local/lib/python3.7/site-packages/c7n_mailer/azure/sendgrid_delivery.py\", line 29, in __init__\r\n sendgrid.SendGridAPIClient(apikey=self.config.get('sendgrid_api_key', ''))\r\nTypeError: __init__() got an unexpected keyword argument 'apikey'\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sendgrid\nimport six\nfrom c7n_mailer.utils import (get_message_subject, get_rendered_jinja)\nfrom c7n_mailer.utils_email import is_email\nfrom python_http_client import exceptions\nfrom sendgrid.helpers.mail import Email, Content, Mail\n\n\nclass SendGridDelivery(object):\n\n def __init__(self, config, logger):\n self.config = config\n self.logger = logger\n self.sendgrid_client = \\\n sendgrid.SendGridAPIClient(apikey=self.config.get('sendgrid_api_key', ''))\n\n def get_to_addrs_sendgrid_messages_map(self, queue_message):\n # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }\n to_addrs_to_resources_map = self.get_email_to_addrs_to_resources_map(queue_message)\n\n to_addrs_to_content_map = {}\n for to_addrs, resources in six.iteritems(to_addrs_to_resources_map):\n to_addrs_to_content_map[to_addrs] = self.get_message_content(\n queue_message,\n resources,\n list(to_addrs)\n )\n # eg: { ('[email protected]', '[email protected]'): message }\n return to_addrs_to_content_map\n\n # this function returns a dictionary with a tuple of emails as the key\n # and the list of resources as the value. 
This helps ensure minimal emails\n # are sent, while only ever sending emails to the respective parties.\n def get_email_to_addrs_to_resources_map(self, queue_message):\n email_to_addrs_to_resources_map = {}\n targets = queue_message['action']['to']\n\n for resource in queue_message['resources']:\n # this is the list of emails that will be sent for this resource\n resource_emails = []\n\n for target in targets:\n if target.startswith('tag:') and 'tags' in resource:\n tag_name = target.split(':', 1)[1]\n result = resource.get('tags', {}).get(tag_name, None)\n if is_email(result):\n resource_emails.append(result)\n elif is_email(target):\n resource_emails.append(target)\n\n resource_emails = tuple(sorted(set(resource_emails)))\n\n if resource_emails:\n email_to_addrs_to_resources_map.setdefault(resource_emails, []).append(resource)\n\n if email_to_addrs_to_resources_map == {}:\n self.logger.debug('Found no email addresses, sending no emails.')\n # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }\n return email_to_addrs_to_resources_map\n\n def get_message_content(self, queue_message, resources, to_addrs):\n return get_rendered_jinja(\n to_addrs, queue_message, resources, self.logger,\n 'template', 'default', self.config['templates_folders'])\n\n def sendgrid_handler(self, queue_message, to_addrs_to_email_messages_map):\n self.logger.info(\"Sending account:%s policy:%s %s:%s email:%s to %s\" % (\n queue_message.get('account', ''),\n queue_message['policy']['name'],\n queue_message['policy']['resource'],\n str(len(queue_message['resources'])),\n queue_message['action'].get('template', 'default'),\n to_addrs_to_email_messages_map))\n\n from_email = Email(self.config.get('from_address', ''))\n subject = get_message_subject(queue_message)\n email_format = queue_message['action'].get('template_format', None)\n if not email_format:\n email_format = queue_message['action'].get(\n 'template', 'default').endswith('html') and 'html' or 'plain'\n\n for email_to_addrs, email_content in six.iteritems(to_addrs_to_email_messages_map):\n for to_address in email_to_addrs:\n to_email = Email(to_address)\n content = Content(\"text/\" + email_format, email_content)\n mail = Mail(from_email, subject, to_email, content)\n try:\n self.sendgrid_client.client.mail.send.post(request_body=mail.get())\n except (exceptions.UnauthorizedError, exceptions.BadRequestsError) as e:\n self.logger.warning(\n \"\\n**Error \\nPolicy:%s \\nAccount:%s \\nSending to:%s \\n\\nRequest body:\"\n \"\\n%s\\n\\nRequest headers:\\n%s\\n\\n mailer.yml: %s\" % (\n queue_message['policy'],\n queue_message.get('account', ''),\n email_to_addrs,\n e.body,\n e.headers,\n self.config\n )\n )\n return False\n return True\n", "path": "tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sendgrid\nimport six\nfrom c7n_mailer.utils import (get_message_subject, get_rendered_jinja)\nfrom 
c7n_mailer.utils_email import is_email\nfrom python_http_client import exceptions\nfrom sendgrid.helpers.mail import Email, Content, Mail\n\n\nclass SendGridDelivery(object):\n\n def __init__(self, config, logger):\n self.config = config\n self.logger = logger\n self.sendgrid_client = \\\n sendgrid.SendGridAPIClient(self.config.get('sendgrid_api_key', ''))\n\n def get_to_addrs_sendgrid_messages_map(self, queue_message):\n # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }\n to_addrs_to_resources_map = self.get_email_to_addrs_to_resources_map(queue_message)\n\n to_addrs_to_content_map = {}\n for to_addrs, resources in six.iteritems(to_addrs_to_resources_map):\n to_addrs_to_content_map[to_addrs] = self.get_message_content(\n queue_message,\n resources,\n list(to_addrs)\n )\n # eg: { ('[email protected]', '[email protected]'): message }\n return to_addrs_to_content_map\n\n # this function returns a dictionary with a tuple of emails as the key\n # and the list of resources as the value. This helps ensure minimal emails\n # are sent, while only ever sending emails to the respective parties.\n def get_email_to_addrs_to_resources_map(self, queue_message):\n email_to_addrs_to_resources_map = {}\n targets = queue_message['action']['to']\n\n for resource in queue_message['resources']:\n # this is the list of emails that will be sent for this resource\n resource_emails = []\n\n for target in targets:\n if target.startswith('tag:') and 'tags' in resource:\n tag_name = target.split(':', 1)[1]\n result = resource.get('tags', {}).get(tag_name, None)\n if is_email(result):\n resource_emails.append(result)\n elif is_email(target):\n resource_emails.append(target)\n\n resource_emails = tuple(sorted(set(resource_emails)))\n\n if resource_emails:\n email_to_addrs_to_resources_map.setdefault(resource_emails, []).append(resource)\n\n if email_to_addrs_to_resources_map == {}:\n self.logger.debug('Found no email addresses, sending no emails.')\n # eg: { ('[email protected]', '[email protected]'): [resource1, resource2, etc] }\n return email_to_addrs_to_resources_map\n\n def get_message_content(self, queue_message, resources, to_addrs):\n return get_rendered_jinja(\n to_addrs, queue_message, resources, self.logger,\n 'template', 'default', self.config['templates_folders'])\n\n def sendgrid_handler(self, queue_message, to_addrs_to_email_messages_map):\n self.logger.info(\"Sending account:%s policy:%s %s:%s email:%s to %s\" % (\n queue_message.get('account', ''),\n queue_message['policy']['name'],\n queue_message['policy']['resource'],\n str(len(queue_message['resources'])),\n queue_message['action'].get('template', 'default'),\n to_addrs_to_email_messages_map))\n\n from_email = Email(self.config.get('from_address', ''))\n subject = get_message_subject(queue_message)\n email_format = queue_message['action'].get('template_format', None)\n if not email_format:\n email_format = queue_message['action'].get(\n 'template', 'default').endswith('html') and 'html' or 'plain'\n\n for email_to_addrs, email_content in six.iteritems(to_addrs_to_email_messages_map):\n for to_address in email_to_addrs:\n to_email = Email(to_address)\n content = Content(\"text/\" + email_format, email_content)\n mail = Mail(from_email, subject, to_email, content)\n try:\n self.sendgrid_client.client.mail.send.post(request_body=mail.get())\n except (exceptions.UnauthorizedError, exceptions.BadRequestsError) as e:\n self.logger.warning(\n \"\\n**Error \\nPolicy:%s \\nAccount:%s \\nSending to:%s \\n\\nRequest body:\"\n 
\"\\n%s\\n\\nRequest headers:\\n%s\\n\\n mailer.yml: %s\" % (\n queue_message['policy'],\n queue_message.get('account', ''),\n email_to_addrs,\n e.body,\n e.headers,\n self.config\n )\n )\n return False\n return True\n", "path": "tools/c7n_mailer/c7n_mailer/azure/sendgrid_delivery.py"}]}
| 1,901 | 198 |
gh_patches_debug_18314
|
rasdani/github-patches
|
git_diff
|
pymodbus-dev__pymodbus-532
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python modbus Unit decode
https://github.com/riptideio/pymodbus/blob/fbdc470ae3e138c50e3659ec4ec8ebf39df58936/pymodbus/client/asynchronous/twisted/__init__.py#L101
Always be 0 because all framers not return 'uid' but 'unit'
Create pythonpackage.yml
<!-- Please raise your PR's against the `dev` branch instead of `master` -->
Another typo length lenght
Apart the typos carried by my PR #480, I just noticed another one in pymodbus/framer/socket_framer.py (dev branch):
```
return dict(tid=tid, pid=pid, lenght=length, unit=uid, fcode=fcode)
```
Read RTU Holding Register through Serial Forwarder/TCP.
I have Energy Meter connected through RTU and able to get holding registers data through simple RTU Code.
Now i want to make Convert this RTU to TCP through Forwarder. I want to send data to TCP which forwards the command to RTU and fetches data for me.
I have implement the Forwarder code just dont know how to fetch the holding register of RTU through it.
**Code for Simple RTU Read**
> import pymodbus
> from pymodbus.pdu import ModbusRequest
> from pymodbus.client.sync import ModbusSerialClient as ModbusClient
> #initialize a serial RTU client instance
> from pymodbus.transaction import ModbusRtuFramer
>
> #count= the number of registers to read
> #unit= the slave unit this request is targeting
> #address= the starting address to read from
>
> client = ModbusClient(method = 'rtu', port='/dev/ttyUSB0', baudrate= 9600)
>
> #Connect to the serial modbus server
> connection = client.connect()
> print(connection)
>
> #Starting add, num of reg to read, slave unit.
> read = client.read_holding_registers(address = 0x01,count =2, unit=1)
> data = read.registers
>
> print(data)
>
> #Closes the underlying socket connection
> client.close()
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Installs pymodbus using distutils
4
5 Run:
6 python setup.py install
7 to install the package from the source archive.
8
9 For information about setuptools
10 http://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords
11 """
12
13 # --------------------------------------------------------------------------- #
14 # initialization
15 # --------------------------------------------------------------------------- #
16 try: # if not installed, install and proceed
17 from setuptools import setup, find_packages
18 except ImportError:
19 from ez_setup import use_setuptools
20 use_setuptools()
21 from setuptools import setup, find_packages
22
23 try:
24 from setup_commands import command_classes
25 except ImportError:
26 command_classes={}
27 from pymodbus import __version__, __author__, __maintainer__
28
29 with open('requirements.txt') as reqs:
30 install_requires = [
31 line for line in reqs.read().split('\n')
32 if (line and not line.startswith('--'))
33 ]
34 install_requires.append("pyserial >= 3.4")
35 # --------------------------------------------------------------------------- #
36 # configuration
37 # --------------------------------------------------------------------------- #
38 setup(
39 name="pymodbus",
40 version=__version__,
41 description="A fully featured modbus protocol stack in python",
42 long_description="""
43 Pymodbus aims to be a fully implemented modbus protocol stack
44 implemented using twisted/asyncio/tornado.
45 Its orignal goal was to allow simulation of thousands of modbus devices
46 on a single machine for monitoring software testing.
47 """,
48 classifiers=[
49 'Development Status :: 4 - Beta',
50 'Environment :: Console',
51 'Environment :: X11 Applications :: GTK',
52 'Framework :: Twisted',
53 'Intended Audience :: Developers',
54 'License :: OSI Approved :: BSD License',
55 'Operating System :: POSIX :: Linux',
56 'Operating System :: Unix',
57 'Programming Language :: Python',
58 'Programming Language :: Python :: 3',
59 'Topic :: System :: Networking',
60 'Topic :: Utilities'
61 ],
62 keywords='modbus, twisted, scada',
63 author=__author__,
64 author_email='[email protected]',
65 maintainer=__maintainer__,
66 maintainer_email='[email protected]',
67 url='https://github.com/riptideio/pymodbus/',
68 license='BSD-3-Clause',
69 packages=find_packages(exclude=['examples', 'test']),
70 exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},
71 py_modules=['ez_setup'],
72 platforms=['Linux', 'Mac OS X', 'Win'],
73 include_package_data=True,
74 zip_safe=True,
75 install_requires=install_requires,
76 extras_require={
77 'quality': [
78 'coverage >= 3.5.3',
79 'nose >= 1.2.1',
80 'mock >= 1.0.0',
81 'pep8 >= 1.3.3'
82 ],
83 'documents': ['sphinx >= 1.1.3',
84 'sphinx_rtd_theme',
85 'humanfriendly'],
86 'twisted': [
87 'twisted >= 12.2.0',
88 'pyasn1 >= 0.1.4',
89 ],
90 'tornado': [
91 'tornado >= 4.5.3'
92 ],
93 'repl': [
94 'click>=6.7',
95 'prompt-toolkit==2.0.4',
96 'pygments==2.2.0'
97 ]
98 },
99 entry_points={
100 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],
101 },
102 test_suite='nose.collector',
103 cmdclass=command_classes,
104 )
105
106
```
Path: `pymodbus/version.py`
Content:
```
1 """
2 Handle the version information here; you should only have to
3 change the version tuple.
4
5 Since we are using twisted's version class, we can also query
6 the svn version as well using the local .entries file.
7 """
8
9
10 class Version(object):
11
12 def __init__(self, package, major, minor, micro, pre=None):
13 """
14
15 :param package: Name of the package that this is a version of.
16 :param major: The major version number.
17 :param minor: The minor version number.
18 :param micro: The micro version number.
19 :param pre: The pre release tag
20 """
21 self.package = package
22 self.major = major
23 self.minor = minor
24 self.micro = micro
25 self.pre = pre
26
27 def short(self):
28 """ Return a string in canonical short version format
29 <major>.<minor>.<micro>.<pre>
30 """
31 if self.pre:
32 return '%d.%d.%d.%s' % (self.major, self.minor, self.micro, self.pre)
33 else:
34 return '%d.%d.%d' % (self.major, self.minor, self.micro)
35
36 def __str__(self):
37 """ Returns a string representation of the object
38
39 :returns: A string representation of this object
40 """
41 return '[%s, version %s]' % (self.package, self.short())
42
43
44 version = Version('pymodbus', 2, 4, 0, 'rc2')
45
46
47
48 version.__name__ = 'pymodbus' # fix epydoc error
49
50 # --------------------------------------------------------------------------- #
51 # Exported symbols
52 # --------------------------------------------------------------------------- #
53
54 __all__ = ["version"]
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pymodbus/version.py b/pymodbus/version.py
--- a/pymodbus/version.py
+++ b/pymodbus/version.py
@@ -41,9 +41,7 @@
return '[%s, version %s]' % (self.package, self.short())
-version = Version('pymodbus', 2, 4, 0, 'rc2')
-
-
+version = Version('pymodbus', 2, 4, 0)
version.__name__ = 'pymodbus' # fix epydoc error
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -84,14 +84,14 @@
'sphinx_rtd_theme',
'humanfriendly'],
'twisted': [
- 'twisted >= 12.2.0',
+ 'twisted >= 20.3.0',
'pyasn1 >= 0.1.4',
],
'tornado': [
- 'tornado >= 4.5.3'
+ 'tornado == 4.5.3'
],
'repl': [
- 'click>=6.7',
+ 'click>=7.0',
'prompt-toolkit==2.0.4',
'pygments==2.2.0'
]
|
{"golden_diff": "diff --git a/pymodbus/version.py b/pymodbus/version.py\n--- a/pymodbus/version.py\n+++ b/pymodbus/version.py\n@@ -41,9 +41,7 @@\n return '[%s, version %s]' % (self.package, self.short())\n \n \n-version = Version('pymodbus', 2, 4, 0, 'rc2')\n-\n-\n+version = Version('pymodbus', 2, 4, 0)\n \n version.__name__ = 'pymodbus' # fix epydoc error\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -84,14 +84,14 @@\n 'sphinx_rtd_theme',\n 'humanfriendly'],\n 'twisted': [\n- 'twisted >= 12.2.0',\n+ 'twisted >= 20.3.0',\n 'pyasn1 >= 0.1.4',\n ],\n 'tornado': [\n- 'tornado >= 4.5.3'\n+ 'tornado == 4.5.3'\n ],\n 'repl': [\n- 'click>=6.7',\n+ 'click>=7.0',\n 'prompt-toolkit==2.0.4',\n 'pygments==2.2.0'\n ]\n", "issue": "Python modbus Unit decode\nhttps://github.com/riptideio/pymodbus/blob/fbdc470ae3e138c50e3659ec4ec8ebf39df58936/pymodbus/client/asynchronous/twisted/__init__.py#L101\r\n\r\nAlways be 0 because all framers not return 'uid' but 'unit'\r\n\nCreate pythonpackage.yml\n<!-- Please raise your PR's against the `dev` branch instead of `master` -->\r\n\nAnother typo length lenght\nApart the typos carried by my PR #480, I just noticed another one in pymodbus/framer/socket_framer.py (dev branch):\r\n```\r\nreturn dict(tid=tid, pid=pid, lenght=length, unit=uid, fcode=fcode)\r\n```\nRead RTU Holding Register through Serial Forwarder/TCP.\nI have Energy Meter connected through RTU and able to get holding registers data through simple RTU Code. \r\nNow i want to make Convert this RTU to TCP through Forwarder. I want to send data to TCP which forwards the command to RTU and fetches data for me.\r\n\r\nI have implement the Forwarder code just dont know how to fetch the holding register of RTU through it.\r\n\r\n**Code for Simple RTU Read**\r\n\r\n> import pymodbus\r\n> from pymodbus.pdu import ModbusRequest\r\n> from pymodbus.client.sync import ModbusSerialClient as ModbusClient \r\n> #initialize a serial RTU client instance\r\n> from pymodbus.transaction import ModbusRtuFramer\r\n> \r\n> #count= the number of registers to read\r\n> #unit= the slave unit this request is targeting\r\n> #address= the starting address to read from\r\n> \r\n> client = ModbusClient(method = 'rtu', port='/dev/ttyUSB0', baudrate= 9600)\r\n> \r\n> #Connect to the serial modbus server\r\n> connection = client.connect()\r\n> print(connection)\r\n> \r\n> #Starting add, num of reg to read, slave unit.\r\n> read = client.read_holding_registers(address = 0x01,count =2, unit=1)\r\n> data = read.registers\r\n> \r\n> print(data)\r\n> \r\n> #Closes the underlying socket connection\r\n> client.close()\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls pymodbus using distutils\n\nRun:\n python setup.py install\nto install the package from the source archive.\n\nFor information about setuptools\nhttp://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords\n\"\"\"\n\n# --------------------------------------------------------------------------- #\n# initialization\n# --------------------------------------------------------------------------- #\ntry: # if not installed, install and proceed\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\ntry:\n from setup_commands import command_classes\nexcept ImportError:\n command_classes={}\nfrom pymodbus import __version__, __author__, __maintainer__\n\nwith open('requirements.txt') as reqs:\n 
install_requires = [\n line for line in reqs.read().split('\\n')\n if (line and not line.startswith('--'))\n ]\n install_requires.append(\"pyserial >= 3.4\")\n# --------------------------------------------------------------------------- #\n# configuration\n# --------------------------------------------------------------------------- #\nsetup(\n name=\"pymodbus\",\n version=__version__,\n description=\"A fully featured modbus protocol stack in python\",\n long_description=\"\"\"\n Pymodbus aims to be a fully implemented modbus protocol stack\n implemented using twisted/asyncio/tornado.\n Its orignal goal was to allow simulation of thousands of modbus devices\n on a single machine for monitoring software testing.\n \"\"\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: X11 Applications :: GTK',\n 'Framework :: Twisted',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: System :: Networking',\n 'Topic :: Utilities'\n ],\n keywords='modbus, twisted, scada',\n author=__author__,\n author_email='[email protected]',\n maintainer=__maintainer__,\n maintainer_email='[email protected]',\n url='https://github.com/riptideio/pymodbus/',\n license='BSD-3-Clause',\n packages=find_packages(exclude=['examples', 'test']),\n exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},\n py_modules=['ez_setup'],\n platforms=['Linux', 'Mac OS X', 'Win'],\n include_package_data=True,\n zip_safe=True,\n install_requires=install_requires,\n extras_require={\n 'quality': [\n 'coverage >= 3.5.3',\n 'nose >= 1.2.1',\n 'mock >= 1.0.0',\n 'pep8 >= 1.3.3'\n ],\n 'documents': ['sphinx >= 1.1.3',\n 'sphinx_rtd_theme',\n 'humanfriendly'],\n 'twisted': [\n 'twisted >= 12.2.0',\n 'pyasn1 >= 0.1.4',\n ],\n 'tornado': [\n 'tornado >= 4.5.3'\n ],\n 'repl': [\n 'click>=6.7',\n 'prompt-toolkit==2.0.4',\n 'pygments==2.2.0'\n ]\n },\n entry_points={\n 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],\n },\n test_suite='nose.collector',\n cmdclass=command_classes,\n)\n\n", "path": "setup.py"}, {"content": "\"\"\"\nHandle the version information here; you should only have to\nchange the version tuple.\n\nSince we are using twisted's version class, we can also query\nthe svn version as well using the local .entries file.\n\"\"\"\n\n\nclass Version(object):\n\n def __init__(self, package, major, minor, micro, pre=None):\n \"\"\"\n\n :param package: Name of the package that this is a version of.\n :param major: The major version number.\n :param minor: The minor version number.\n :param micro: The micro version number.\n :param pre: The pre release tag\n \"\"\"\n self.package = package\n self.major = major\n self.minor = minor\n self.micro = micro\n self.pre = pre\n\n def short(self):\n \"\"\" Return a string in canonical short version format\n <major>.<minor>.<micro>.<pre>\n \"\"\"\n if self.pre:\n return '%d.%d.%d.%s' % (self.major, self.minor, self.micro, self.pre)\n else:\n return '%d.%d.%d' % (self.major, self.minor, self.micro)\n\n def __str__(self):\n \"\"\" Returns a string representation of the object\n\n :returns: A string representation of this object\n \"\"\"\n return '[%s, version %s]' % (self.package, self.short())\n\n\nversion = Version('pymodbus', 2, 4, 0, 'rc2')\n\n\n\nversion.__name__ = 'pymodbus' # fix epydoc error\n\n# 
--------------------------------------------------------------------------- #\n# Exported symbols\n# --------------------------------------------------------------------------- #\n\n__all__ = [\"version\"]\n", "path": "pymodbus/version.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls pymodbus using distutils\n\nRun:\n python setup.py install\nto install the package from the source archive.\n\nFor information about setuptools\nhttp://peak.telecommunity.com/DevCenter/setuptools#new-and-changed-setup-keywords\n\"\"\"\n\n# --------------------------------------------------------------------------- #\n# initialization\n# --------------------------------------------------------------------------- #\ntry: # if not installed, install and proceed\n from setuptools import setup, find_packages\nexcept ImportError:\n from ez_setup import use_setuptools\n use_setuptools()\n from setuptools import setup, find_packages\n\ntry:\n from setup_commands import command_classes\nexcept ImportError:\n command_classes={}\nfrom pymodbus import __version__, __author__, __maintainer__\n\nwith open('requirements.txt') as reqs:\n install_requires = [\n line for line in reqs.read().split('\\n')\n if (line and not line.startswith('--'))\n ]\n install_requires.append(\"pyserial >= 3.4\")\n# --------------------------------------------------------------------------- #\n# configuration\n# --------------------------------------------------------------------------- #\nsetup(\n name=\"pymodbus\",\n version=__version__,\n description=\"A fully featured modbus protocol stack in python\",\n long_description=\"\"\"\n Pymodbus aims to be a fully implemented modbus protocol stack\n implemented using twisted/asyncio/tornado.\n Its orignal goal was to allow simulation of thousands of modbus devices\n on a single machine for monitoring software testing.\n \"\"\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Environment :: X11 Applications :: GTK',\n 'Framework :: Twisted',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: Unix',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: System :: Networking',\n 'Topic :: Utilities'\n ],\n keywords='modbus, twisted, scada',\n author=__author__,\n author_email='[email protected]',\n maintainer=__maintainer__,\n maintainer_email='[email protected]',\n url='https://github.com/riptideio/pymodbus/',\n license='BSD-3-Clause',\n packages=find_packages(exclude=['examples', 'test']),\n exclude_package_data={'': ['examples', 'test', 'tools', 'doc']},\n py_modules=['ez_setup'],\n platforms=['Linux', 'Mac OS X', 'Win'],\n include_package_data=True,\n zip_safe=True,\n install_requires=install_requires,\n extras_require={\n 'quality': [\n 'coverage >= 3.5.3',\n 'nose >= 1.2.1',\n 'mock >= 1.0.0',\n 'pep8 >= 1.3.3'\n ],\n 'documents': ['sphinx >= 1.1.3',\n 'sphinx_rtd_theme',\n 'humanfriendly'],\n 'twisted': [\n 'twisted >= 20.3.0',\n 'pyasn1 >= 0.1.4',\n ],\n 'tornado': [\n 'tornado == 4.5.3'\n ],\n 'repl': [\n 'click>=7.0',\n 'prompt-toolkit==2.0.4',\n 'pygments==2.2.0'\n ]\n },\n entry_points={\n 'console_scripts': ['pymodbus.console=pymodbus.repl.main:main'],\n },\n test_suite='nose.collector',\n cmdclass=command_classes,\n)\n\n", "path": "setup.py"}, {"content": "\"\"\"\nHandle the version information here; you should only have to\nchange the version tuple.\n\nSince we are using twisted's version class, we can 
also query\nthe svn version as well using the local .entries file.\n\"\"\"\n\n\nclass Version(object):\n\n def __init__(self, package, major, minor, micro, pre=None):\n \"\"\"\n\n :param package: Name of the package that this is a version of.\n :param major: The major version number.\n :param minor: The minor version number.\n :param micro: The micro version number.\n :param pre: The pre release tag\n \"\"\"\n self.package = package\n self.major = major\n self.minor = minor\n self.micro = micro\n self.pre = pre\n\n def short(self):\n \"\"\" Return a string in canonical short version format\n <major>.<minor>.<micro>.<pre>\n \"\"\"\n if self.pre:\n return '%d.%d.%d.%s' % (self.major, self.minor, self.micro, self.pre)\n else:\n return '%d.%d.%d' % (self.major, self.minor, self.micro)\n\n def __str__(self):\n \"\"\" Returns a string representation of the object\n\n :returns: A string representation of this object\n \"\"\"\n return '[%s, version %s]' % (self.package, self.short())\n\n\nversion = Version('pymodbus', 2, 4, 0)\n\nversion.__name__ = 'pymodbus' # fix epydoc error\n\n# --------------------------------------------------------------------------- #\n# Exported symbols\n# --------------------------------------------------------------------------- #\n\n__all__ = [\"version\"]\n", "path": "pymodbus/version.py"}]}
| 2,222 | 309 |
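The practical effect of the version change in the pymodbus row above is easiest to see through `Version.short()`; a minimal sketch, assuming the `Version` class from the quoted `pymodbus/version.py` can be imported as shown:

```python
from pymodbus.version import Version  # class defined in the pymodbus/version.py quoted above

# With a pre-release tag short() appends it; dropping the 'rc2' argument,
# as the golden diff does, yields a plain release string.
print(Version('pymodbus', 2, 4, 0, 'rc2').short())  # '2.4.0.rc2'
print(Version('pymodbus', 2, 4, 0).short())         # '2.4.0'
```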
gh_patches_debug_59175 | rasdani/github-patches | git_diff | PaddlePaddle__models-2832 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Several problems in icnet
There are quite a few problems in [icnet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/icnet):
1. The documentation is wrong: --model_path="./cnkpnt/100" should be --model_path="./chkpnt/100"
2. Training prints no intermediate progress information; only a few loss values are printed at the end
3. The pretrained model given in the documentation cannot be used for infer; could you provide the trained cnkpnt/100 model?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PaddleCV/icnet/train.py`
Content:
```
1 """Trainer for ICNet model."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5 from icnet import icnet
6 import cityscape
7 import argparse
8 import functools
9 import sys
10 import os
11 import time
12 import paddle.fluid as fluid
13 import numpy as np
14 from utils import add_arguments, print_arguments, get_feeder_data, check_gpu
15 from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter
16 from paddle.fluid.initializer import init_on_cpu
17
18 if 'ce_mode' in os.environ:
19 np.random.seed(10)
20 fluid.default_startup_program().random_seed = 90
21
22 parser = argparse.ArgumentParser(description=__doc__)
23 add_arg = functools.partial(add_arguments, argparser=parser)
24 # yapf: disable
25 add_arg('batch_size', int, 16, "Minibatch size.")
26 add_arg('checkpoint_path', str, None, "Checkpoint svae path.")
27 add_arg('init_model', str, None, "Pretrain model path.")
28 add_arg('use_gpu', bool, True, "Whether use GPU to train.")
29 add_arg('random_mirror', bool, True, "Whether prepare by random mirror.")
30 add_arg('random_scaling', bool, True, "Whether prepare by random scaling.")
31 # yapf: enable
32
33 LAMBDA1 = 0.16
34 LAMBDA2 = 0.4
35 LAMBDA3 = 1.0
36 LEARNING_RATE = 0.003
37 POWER = 0.9
38 LOG_PERIOD = 100
39 CHECKPOINT_PERIOD = 100
40 TOTAL_STEP = 100
41
42 no_grad_set = []
43
44
45 def create_loss(predict, label, mask, num_classes):
46 predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])
47 predict = fluid.layers.reshape(predict, shape=[-1, num_classes])
48 label = fluid.layers.reshape(label, shape=[-1, 1])
49 predict = fluid.layers.gather(predict, mask)
50 label = fluid.layers.gather(label, mask)
51 label = fluid.layers.cast(label, dtype="int64")
52 loss = fluid.layers.softmax_with_cross_entropy(predict, label)
53 no_grad_set.append(label.name)
54 return fluid.layers.reduce_mean(loss)
55
56
57 def poly_decay():
58 global_step = _decay_step_counter()
59 with init_on_cpu():
60 decayed_lr = LEARNING_RATE * (fluid.layers.pow(
61 (1 - global_step / TOTAL_STEP), POWER))
62 return decayed_lr
63
64
65 def train(args):
66 data_shape = cityscape.train_data_shape()
67 num_classes = cityscape.num_classes()
68 # define network
69 images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')
70 label_sub1 = fluid.layers.data(name='label_sub1', shape=[1], dtype='int32')
71 label_sub2 = fluid.layers.data(name='label_sub2', shape=[1], dtype='int32')
72 label_sub4 = fluid.layers.data(name='label_sub4', shape=[1], dtype='int32')
73 mask_sub1 = fluid.layers.data(name='mask_sub1', shape=[-1], dtype='int32')
74 mask_sub2 = fluid.layers.data(name='mask_sub2', shape=[-1], dtype='int32')
75 mask_sub4 = fluid.layers.data(name='mask_sub4', shape=[-1], dtype='int32')
76
77 sub4_out, sub24_out, sub124_out = icnet(
78 images, num_classes, np.array(data_shape[1:]).astype("float32"))
79 loss_sub4 = create_loss(sub4_out, label_sub4, mask_sub4, num_classes)
80 loss_sub24 = create_loss(sub24_out, label_sub2, mask_sub2, num_classes)
81 loss_sub124 = create_loss(sub124_out, label_sub1, mask_sub1, num_classes)
82 reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124
83
84 regularizer = fluid.regularizer.L2Decay(0.0001)
85 optimizer = fluid.optimizer.Momentum(
86 learning_rate=poly_decay(), momentum=0.9, regularization=regularizer)
87 _, params_grads = optimizer.minimize(reduced_loss, no_grad_set=no_grad_set)
88
89 # prepare environment
90 place = fluid.CPUPlace()
91 if args.use_gpu:
92 place = fluid.CUDAPlace(0)
93 exe = fluid.Executor(place)
94
95 exe.run(fluid.default_startup_program())
96
97 if args.init_model is not None:
98 print("load model from: %s" % args.init_model)
99
100 def if_exist(var):
101 return os.path.exists(os.path.join(args.init_model, var.name))
102
103 fluid.io.load_vars(exe, args.init_model, predicate=if_exist)
104
105 iter_id = 0
106 t_loss = 0.
107 sub4_loss = 0.
108 sub24_loss = 0.
109 sub124_loss = 0.
110 train_reader = cityscape.train(
111 args.batch_size, flip=args.random_mirror, scaling=args.random_scaling)
112 start_time = time.time()
113 while True:
114 # train a pass
115 for data in train_reader():
116 if iter_id > TOTAL_STEP:
117 end_time = time.time()
118 print("kpis train_duration %f" % (end_time - start_time))
119 return
120 iter_id += 1
121 results = exe.run(
122 feed=get_feeder_data(data, place),
123 fetch_list=[reduced_loss, loss_sub4, loss_sub24, loss_sub124])
124 t_loss += results[0]
125 sub4_loss += results[1]
126 sub24_loss += results[2]
127 sub124_loss += results[3]
128 # training log
129 if iter_id % LOG_PERIOD == 0:
130 print(
131 "Iter[%d]; train loss: %.3f; sub4_loss: %.3f; sub24_loss: %.3f; sub124_loss: %.3f"
132 % (iter_id, t_loss / LOG_PERIOD, sub4_loss / LOG_PERIOD,
133 sub24_loss / LOG_PERIOD, sub124_loss / LOG_PERIOD))
134 print("kpis train_cost %f" % (t_loss / LOG_PERIOD))
135
136 t_loss = 0.
137 sub4_loss = 0.
138 sub24_loss = 0.
139 sub124_loss = 0.
140 sys.stdout.flush()
141
142 if iter_id % CHECKPOINT_PERIOD == 0 and args.checkpoint_path is not None:
143 dir_name = args.checkpoint_path + "/" + str(iter_id)
144 fluid.io.save_persistables(exe, dirname=dir_name)
145 print("Saved checkpoint: %s" % (dir_name))
146
147
148 def main():
149 args = parser.parse_args()
150 print_arguments(args)
151 check_gpu(args.use_gpu)
152 train(args)
153
154
155 if __name__ == "__main__":
156 main()
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PaddleCV/icnet/train.py b/PaddleCV/icnet/train.py
--- a/PaddleCV/icnet/train.py
+++ b/PaddleCV/icnet/train.py
@@ -35,9 +35,11 @@
LAMBDA3 = 1.0
LEARNING_RATE = 0.003
POWER = 0.9
-LOG_PERIOD = 100
-CHECKPOINT_PERIOD = 100
-TOTAL_STEP = 100
+LOG_PERIOD = 1
+CHECKPOINT_PERIOD = 1000
+TOTAL_STEP = 60000
+if 'ce_mode' in os.environ:
+ TOTAL_STEP = 100
no_grad_set = []
|
{"golden_diff": "diff --git a/PaddleCV/icnet/train.py b/PaddleCV/icnet/train.py\n--- a/PaddleCV/icnet/train.py\n+++ b/PaddleCV/icnet/train.py\n@@ -35,9 +35,11 @@\n LAMBDA3 = 1.0\n LEARNING_RATE = 0.003\n POWER = 0.9\n-LOG_PERIOD = 100\n-CHECKPOINT_PERIOD = 100\n-TOTAL_STEP = 100\n+LOG_PERIOD = 1\n+CHECKPOINT_PERIOD = 1000\n+TOTAL_STEP = 60000\n+if 'ce_mode' in os.environ:\n+ TOTAL_STEP = 100\n \n no_grad_set = []\n", "issue": "icnet \u5b58\u5728\u7684\u51e0\u4e2a\u95ee\u9898\n[icnet](https://github.com/PaddlePaddle/models/tree/develop/PaddleCV/icnet)\u91cc\u5b58\u5728\u8bf8\u591a\u95ee\u9898:\r\n1.\u6587\u6863\u6709\u8bef\uff0c--model_path=\"./cnkpnt/100\"\u5e94\u8be5\u662f--model_path=\"./chkpnt/100\"\r\n2.\u8bad\u7ec3\u65f6\u6ca1\u6709\u8f93\u51fa\u4e2d\u95f4\u8fc7\u7a0b\u4fe1\u606f\uff0c\u4ec5\u5728\u6700\u540e\u8f93\u51fa\u51e0\u4e2aloss\u4fe1\u606f\r\n3.\u6587\u6863\u4e2d\u7ed9\u7684\u9884\u8bad\u7ec3\u6a21\u578b\u65e0\u6cd5\u7528\u4e8einfer\uff0c\u80fd\u63d0\u4f9b\u4e0b\u8bad\u597d\u7684cnkpnt/100\u6a21\u578b\u5417\uff1f\n", "before_files": [{"content": "\"\"\"Trainer for ICNet model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom icnet import icnet\nimport cityscape\nimport argparse\nimport functools\nimport sys\nimport os\nimport time\nimport paddle.fluid as fluid\nimport numpy as np\nfrom utils import add_arguments, print_arguments, get_feeder_data, check_gpu\nfrom paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter\nfrom paddle.fluid.initializer import init_on_cpu\n\nif 'ce_mode' in os.environ:\n np.random.seed(10)\n fluid.default_startup_program().random_seed = 90\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('batch_size', int, 16, \"Minibatch size.\")\nadd_arg('checkpoint_path', str, None, \"Checkpoint svae path.\")\nadd_arg('init_model', str, None, \"Pretrain model path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to train.\")\nadd_arg('random_mirror', bool, True, \"Whether prepare by random mirror.\")\nadd_arg('random_scaling', bool, True, \"Whether prepare by random scaling.\")\n# yapf: enable\n\nLAMBDA1 = 0.16\nLAMBDA2 = 0.4\nLAMBDA3 = 1.0\nLEARNING_RATE = 0.003\nPOWER = 0.9\nLOG_PERIOD = 100\nCHECKPOINT_PERIOD = 100\nTOTAL_STEP = 100\n\nno_grad_set = []\n\n\ndef create_loss(predict, label, mask, num_classes):\n predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])\n predict = fluid.layers.reshape(predict, shape=[-1, num_classes])\n label = fluid.layers.reshape(label, shape=[-1, 1])\n predict = fluid.layers.gather(predict, mask)\n label = fluid.layers.gather(label, mask)\n label = fluid.layers.cast(label, dtype=\"int64\")\n loss = fluid.layers.softmax_with_cross_entropy(predict, label)\n no_grad_set.append(label.name)\n return fluid.layers.reduce_mean(loss)\n\n\ndef poly_decay():\n global_step = _decay_step_counter()\n with init_on_cpu():\n decayed_lr = LEARNING_RATE * (fluid.layers.pow(\n (1 - global_step / TOTAL_STEP), POWER))\n return decayed_lr\n\n\ndef train(args):\n data_shape = cityscape.train_data_shape()\n num_classes = cityscape.num_classes()\n # define network\n images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')\n label_sub1 = fluid.layers.data(name='label_sub1', shape=[1], dtype='int32')\n label_sub2 = fluid.layers.data(name='label_sub2', shape=[1], dtype='int32')\n label_sub4 = fluid.layers.data(name='label_sub4', shape=[1], 
dtype='int32')\n mask_sub1 = fluid.layers.data(name='mask_sub1', shape=[-1], dtype='int32')\n mask_sub2 = fluid.layers.data(name='mask_sub2', shape=[-1], dtype='int32')\n mask_sub4 = fluid.layers.data(name='mask_sub4', shape=[-1], dtype='int32')\n\n sub4_out, sub24_out, sub124_out = icnet(\n images, num_classes, np.array(data_shape[1:]).astype(\"float32\"))\n loss_sub4 = create_loss(sub4_out, label_sub4, mask_sub4, num_classes)\n loss_sub24 = create_loss(sub24_out, label_sub2, mask_sub2, num_classes)\n loss_sub124 = create_loss(sub124_out, label_sub1, mask_sub1, num_classes)\n reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124\n\n regularizer = fluid.regularizer.L2Decay(0.0001)\n optimizer = fluid.optimizer.Momentum(\n learning_rate=poly_decay(), momentum=0.9, regularization=regularizer)\n _, params_grads = optimizer.minimize(reduced_loss, no_grad_set=no_grad_set)\n\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n if args.init_model is not None:\n print(\"load model from: %s\" % args.init_model)\n\n def if_exist(var):\n return os.path.exists(os.path.join(args.init_model, var.name))\n\n fluid.io.load_vars(exe, args.init_model, predicate=if_exist)\n\n iter_id = 0\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n train_reader = cityscape.train(\n args.batch_size, flip=args.random_mirror, scaling=args.random_scaling)\n start_time = time.time()\n while True:\n # train a pass\n for data in train_reader():\n if iter_id > TOTAL_STEP:\n end_time = time.time()\n print(\"kpis\ttrain_duration\t%f\" % (end_time - start_time))\n return\n iter_id += 1\n results = exe.run(\n feed=get_feeder_data(data, place),\n fetch_list=[reduced_loss, loss_sub4, loss_sub24, loss_sub124])\n t_loss += results[0]\n sub4_loss += results[1]\n sub24_loss += results[2]\n sub124_loss += results[3]\n # training log\n if iter_id % LOG_PERIOD == 0:\n print(\n \"Iter[%d]; train loss: %.3f; sub4_loss: %.3f; sub24_loss: %.3f; sub124_loss: %.3f\"\n % (iter_id, t_loss / LOG_PERIOD, sub4_loss / LOG_PERIOD,\n sub24_loss / LOG_PERIOD, sub124_loss / LOG_PERIOD))\n print(\"kpis\ttrain_cost\t%f\" % (t_loss / LOG_PERIOD))\n\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n sys.stdout.flush()\n\n if iter_id % CHECKPOINT_PERIOD == 0 and args.checkpoint_path is not None:\n dir_name = args.checkpoint_path + \"/\" + str(iter_id)\n fluid.io.save_persistables(exe, dirname=dir_name)\n print(\"Saved checkpoint: %s\" % (dir_name))\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n check_gpu(args.use_gpu)\n train(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/icnet/train.py"}], "after_files": [{"content": "\"\"\"Trainer for ICNet model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom icnet import icnet\nimport cityscape\nimport argparse\nimport functools\nimport sys\nimport os\nimport time\nimport paddle.fluid as fluid\nimport numpy as np\nfrom utils import add_arguments, print_arguments, get_feeder_data, check_gpu\nfrom paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter\nfrom paddle.fluid.initializer import init_on_cpu\n\nif 'ce_mode' in os.environ:\n np.random.seed(10)\n fluid.default_startup_program().random_seed = 90\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = 
functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('batch_size', int, 16, \"Minibatch size.\")\nadd_arg('checkpoint_path', str, None, \"Checkpoint svae path.\")\nadd_arg('init_model', str, None, \"Pretrain model path.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to train.\")\nadd_arg('random_mirror', bool, True, \"Whether prepare by random mirror.\")\nadd_arg('random_scaling', bool, True, \"Whether prepare by random scaling.\")\n# yapf: enable\n\nLAMBDA1 = 0.16\nLAMBDA2 = 0.4\nLAMBDA3 = 1.0\nLEARNING_RATE = 0.003\nPOWER = 0.9\nLOG_PERIOD = 1\nCHECKPOINT_PERIOD = 1000\nTOTAL_STEP = 60000\nif 'ce_mode' in os.environ:\n TOTAL_STEP = 100\n\nno_grad_set = []\n\n\ndef create_loss(predict, label, mask, num_classes):\n predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])\n predict = fluid.layers.reshape(predict, shape=[-1, num_classes])\n label = fluid.layers.reshape(label, shape=[-1, 1])\n predict = fluid.layers.gather(predict, mask)\n label = fluid.layers.gather(label, mask)\n label = fluid.layers.cast(label, dtype=\"int64\")\n loss = fluid.layers.softmax_with_cross_entropy(predict, label)\n no_grad_set.append(label.name)\n return fluid.layers.reduce_mean(loss)\n\n\ndef poly_decay():\n global_step = _decay_step_counter()\n with init_on_cpu():\n decayed_lr = LEARNING_RATE * (fluid.layers.pow(\n (1 - global_step / TOTAL_STEP), POWER))\n return decayed_lr\n\n\ndef train(args):\n data_shape = cityscape.train_data_shape()\n num_classes = cityscape.num_classes()\n # define network\n images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')\n label_sub1 = fluid.layers.data(name='label_sub1', shape=[1], dtype='int32')\n label_sub2 = fluid.layers.data(name='label_sub2', shape=[1], dtype='int32')\n label_sub4 = fluid.layers.data(name='label_sub4', shape=[1], dtype='int32')\n mask_sub1 = fluid.layers.data(name='mask_sub1', shape=[-1], dtype='int32')\n mask_sub2 = fluid.layers.data(name='mask_sub2', shape=[-1], dtype='int32')\n mask_sub4 = fluid.layers.data(name='mask_sub4', shape=[-1], dtype='int32')\n\n sub4_out, sub24_out, sub124_out = icnet(\n images, num_classes, np.array(data_shape[1:]).astype(\"float32\"))\n loss_sub4 = create_loss(sub4_out, label_sub4, mask_sub4, num_classes)\n loss_sub24 = create_loss(sub24_out, label_sub2, mask_sub2, num_classes)\n loss_sub124 = create_loss(sub124_out, label_sub1, mask_sub1, num_classes)\n reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124\n\n regularizer = fluid.regularizer.L2Decay(0.0001)\n optimizer = fluid.optimizer.Momentum(\n learning_rate=poly_decay(), momentum=0.9, regularization=regularizer)\n _, params_grads = optimizer.minimize(reduced_loss, no_grad_set=no_grad_set)\n\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n if args.init_model is not None:\n print(\"load model from: %s\" % args.init_model)\n\n def if_exist(var):\n return os.path.exists(os.path.join(args.init_model, var.name))\n\n fluid.io.load_vars(exe, args.init_model, predicate=if_exist)\n\n iter_id = 0\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n train_reader = cityscape.train(\n args.batch_size, flip=args.random_mirror, scaling=args.random_scaling)\n start_time = time.time()\n while True:\n # train a pass\n for data in train_reader():\n if iter_id > TOTAL_STEP:\n end_time = time.time()\n print(\"kpis\ttrain_duration\t%f\" % (end_time - start_time))\n 
return\n iter_id += 1\n results = exe.run(\n feed=get_feeder_data(data, place),\n fetch_list=[reduced_loss, loss_sub4, loss_sub24, loss_sub124])\n t_loss += results[0]\n sub4_loss += results[1]\n sub24_loss += results[2]\n sub124_loss += results[3]\n # training log\n if iter_id % LOG_PERIOD == 0:\n print(\n \"Iter[%d]; train loss: %.3f; sub4_loss: %.3f; sub24_loss: %.3f; sub124_loss: %.3f\"\n % (iter_id, t_loss / LOG_PERIOD, sub4_loss / LOG_PERIOD,\n sub24_loss / LOG_PERIOD, sub124_loss / LOG_PERIOD))\n print(\"kpis\ttrain_cost\t%f\" % (t_loss / LOG_PERIOD))\n\n t_loss = 0.\n sub4_loss = 0.\n sub24_loss = 0.\n sub124_loss = 0.\n sys.stdout.flush()\n\n if iter_id % CHECKPOINT_PERIOD == 0 and args.checkpoint_path is not None:\n dir_name = args.checkpoint_path + \"/\" + str(iter_id)\n fluid.io.save_persistables(exe, dirname=dir_name)\n print(\"Saved checkpoint: %s\" % (dir_name))\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n check_gpu(args.use_gpu)\n train(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/icnet/train.py"}]}
| 2,291 | 163 |
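The golden diff above raises `TOTAL_STEP` from 100 to 60000 and keeps the short 100-step run only under `ce_mode`; a minimal float-only sketch of the `poly_decay()` schedule from the quoted `train.py`, showing why that constant has to match the intended training length (the step values below are arbitrary examples):

```python
LEARNING_RATE = 0.003  # constants as defined in PaddleCV/icnet/train.py above
POWER = 0.9

def poly_decay_value(global_step, total_step):
    # same polynomial-decay formula as poly_decay(), evaluated with plain floats
    return LEARNING_RATE * (1 - global_step / total_step) ** POWER

print(poly_decay_value(50, 100))    # ~0.0016: half the schedule is already spent after 50 steps
print(poly_decay_value(50, 60000))  # ~0.0030: almost no decay on a 60000-step schedule
```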
gh_patches_debug_25861 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-503 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How to support PPL
Can you add the PPL measurement?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/evaluation.py`
Content:
```
1 """Evaluation related classes and functions."""
2
3 import collections
4 import os
5 import six
6
7 import tensorflow as tf
8
9 from opennmt.data import dataset as dataset_lib
10 from opennmt.utils import misc
11 from opennmt.utils import scorers as scorers_lib
12
13
14 _SUMMARIES_SCOPE = "metrics"
15
16
17 class EarlyStopping(
18 collections.namedtuple("EarlyStopping",
19 ("metric", "min_improvement", "steps"))):
20 """Conditions for early stopping."""
21
22
23 class Evaluator(object):
24 """Model evaluator."""
25
26 def __init__(self,
27 model,
28 features_file,
29 labels_file,
30 batch_size,
31 scorers=None,
32 save_predictions=False,
33 early_stopping=None,
34 eval_dir=None):
35 """Initializes the evaluator.
36
37 Args:
38 model: A :class:`opennmt.models.model.Model` to evaluate.
39 features_file: Path to the evaluation features.
40 labels_file: Path to the evaluation labels.
41 batch_size: The evaluation batch size.
42 scorers: A list of scorers, callables taking the path to the reference and
43 the hypothesis and return one or more scores.
44 save_predictions: Save evaluation predictions to a file. This is ``True``
45 when :obj:`external_evaluator` is set.
46 early_stopping: An ``EarlyStopping`` instance.
47 eval_dir: Directory where predictions can be saved.
48
49 Raises:
50 ValueError: If predictions should be saved but the model is not compatible.
51 ValueError: If predictions should be saved but :obj:`eval_dir` is ``None``.
52 ValueError: If the :obj:`early_stopping` configuration is invalid.
53 """
54 if scorers is None:
55 scorers = []
56 if scorers:
57 save_predictions = True
58 if save_predictions:
59 if model.unsupervised:
60 raise ValueError("This model does not support saving evaluation predictions")
61 if eval_dir is None:
62 raise ValueError("Saving evaluation predictions requires eval_dir to be set")
63 if not tf.io.gfile.exists(eval_dir):
64 tf.io.gfile.makedirs(eval_dir)
65 self._model = model
66 self._labels_file = labels_file
67 self._save_predictions = save_predictions
68 self._scorers = scorers
69 self._eval_dir = eval_dir
70 self._metrics_history = []
71 if eval_dir is not None:
72 self._summary_writer = tf.summary.create_file_writer(eval_dir)
73 summaries = misc.read_summaries(eval_dir)
74 for step, values in summaries:
75 metrics = misc.extract_prefixed_keys(values, _SUMMARIES_SCOPE + "/")
76 self._metrics_history.append((step, metrics))
77 else:
78 self._summary_writer = tf.summary.create_noop_writer()
79 dataset = model.examples_inputter.make_evaluation_dataset(
80 features_file,
81 labels_file,
82 batch_size,
83 num_threads=1,
84 prefetch_buffer_size=1)
85
86 @dataset_lib.function_on_next(dataset)
87 def _eval(next_fn):
88 source, target = next_fn()
89 outputs, predictions = model(source, labels=target)
90 loss = model.compute_loss(outputs, target, training=False)
91 return loss, predictions, target
92
93 self._eval = _eval
94
95 self._metrics_name = {"loss"}
96 for scorer in self._scorers:
97 self._metrics_name.update(scorer.scores_name)
98 model_metrics = self._model.get_metrics()
99 if model_metrics:
100 self._metrics_name.update(set(six.iterkeys(model_metrics)))
101
102 if early_stopping is not None:
103 if early_stopping.metric not in self._metrics_name:
104 raise ValueError("Invalid early stopping metric '%s', expected one in %s" % (
105 early_stopping.metric, str(self._metrics_name)))
106 if early_stopping.steps <= 0:
107 raise ValueError("Early stopping steps should greater than 0")
108 self._early_stopping = early_stopping
109
110 @classmethod
111 def from_config(cls, model, config, features_file=None, labels_file=None):
112 """Creates an evaluator from the configuration.
113
114 Args:
115 model: A :class:`opennmt.models.model.Model` to evaluate.
116 config: The global user configuration.
117 features_file: Optional input features file to evaluate. If not set, will
118 load ``eval_features_file`` from the data configuration.
119 labels_file: Optional output labels file to evaluate. If not set, will load
120 ``eval_labels_file`` from the data configuration.
121
122 Returns:
123 A :class:`opennmt.evaluation.Evaluator` instance.
124
125 Raises:
126 ValueError: if one of :obj:`features_file` and :obj:`labels_file` is set
127 but not the other.
128 """
129 if (features_file is None) != (labels_file is None):
130 raise ValueError("features_file and labels_file should be both set for evaluation")
131 scorers = config["eval"].get("external_evaluators")
132 if scorers is not None:
133 scorers = scorers_lib.make_scorers(scorers)
134 early_stopping_config = config["eval"].get("early_stopping")
135 if early_stopping_config is not None:
136 early_stopping = EarlyStopping(
137 metric=early_stopping_config.get("metric", "loss"),
138 min_improvement=early_stopping_config.get("min_improvement", 0),
139 steps=early_stopping_config["steps"])
140 else:
141 early_stopping = None
142 return cls(
143 model,
144 features_file or config["data"]["eval_features_file"],
145 labels_file or config["data"].get("eval_labels_file"),
146 config["eval"]["batch_size"],
147 scorers=scorers,
148 save_predictions=config["eval"].get("save_eval_predictions", False),
149 early_stopping=early_stopping,
150 eval_dir=os.path.join(config["model_dir"], "eval"))
151
152 @property
153 def metrics_name(self):
154 """The name of the metrics returned by this evaluator."""
155 return self._metrics_name
156
157 @property
158 def metrics_history(self):
159 """The history of metrics result per evaluation step."""
160 return self._metrics_history
161
162 def should_stop(self):
163 """Returns ``True`` if early stopping conditions are met."""
164 if self._early_stopping is None:
165 return False
166 target_metric = self._early_stopping.metric
167 higher_is_better = None
168 # Look if target_metric is produced by a scorer as they define the scores order.
169 for scorer in self._scorers:
170 if target_metric in scorer.scores_name:
171 higher_is_better = scorer.higher_is_better()
172 break
173 if higher_is_better is None:
174 # TODO: the condition below is not always true, find a way to set it
175 # correctly for Keras metrics.
176 higher_is_better = target_metric != "loss"
177 metrics = [values[target_metric] for _, values in self._metrics_history]
178 should_stop = early_stop(
179 metrics,
180 self._early_stopping.steps,
181 min_improvement=self._early_stopping.min_improvement,
182 higher_is_better=higher_is_better)
183 if should_stop:
184 tf.get_logger().warning(
185 "Evaluation metric '%s' did not improve more than %f in the last %d evaluations",
186 target_metric,
187 self._early_stopping.min_improvement,
188 self._early_stopping.steps)
189 return should_stop
190
191 def __call__(self, step):
192 """Runs the evaluator.
193
194 Args:
195 step: The current training step.
196
197 Returns:
198 A dictionary of evaluation metrics.
199 """
200 tf.get_logger().info("Running evaluation for step %d", step)
201 output_file = None
202 output_path = None
203 if self._save_predictions:
204 output_path = os.path.join(self._eval_dir, "predictions.txt.%d" % step)
205 output_file = tf.io.gfile.GFile(output_path, "w")
206
207 loss_num = 0
208 loss_den = 0
209 metrics = self._model.get_metrics()
210 for loss, predictions, target in self._eval(): # pylint: disable=no-value-for-parameter
211 if isinstance(loss, tuple):
212 loss_num += loss[0]
213 loss_den += loss[1]
214 else:
215 loss_num += loss
216 loss_den += 1
217 if metrics:
218 self._model.update_metrics(metrics, predictions, target)
219 if output_file is not None:
220 predictions = {k:v.numpy() for k, v in six.iteritems(predictions)}
221 for prediction in misc.extract_batches(predictions):
222 self._model.print_prediction(prediction, stream=output_file)
223 if loss_den == 0:
224 raise RuntimeError("No examples were evaluated")
225 loss = loss_num / loss_den
226
227 results = dict(loss=loss)
228 if metrics:
229 for name, metric in six.iteritems(metrics):
230 results[name] = metric.result()
231 if self._save_predictions:
232 tf.get_logger().info("Evaluation predictions saved to %s", output_path)
233 output_file.close()
234 for scorer in self._scorers:
235 score = scorer(self._labels_file, output_path)
236 if isinstance(score, dict):
237 results.update(score)
238 else:
239 results[scorer.name] = score
240
241 return self._record_results(step, results)
242
243 def _record_results(self, step, results):
244 for name, value in six.iteritems(results):
245 if isinstance(value, tf.Tensor):
246 results[name] = value.numpy()
247 # Clear history for steps that are greater than step.
248 while self._metrics_history and self._metrics_history[-1][0] > step:
249 self._metrics_history.pop()
250 self._metrics_history.append((step, dict(results)))
251 tf.get_logger().info(
252 "Evaluation result for step %d: %s",
253 step,
254 " ; ".join("%s = %f" % (k, v) for k, v in six.iteritems(results)))
255 with self._summary_writer.as_default():
256 for key, value in six.iteritems(results):
257 tf.summary.scalar("%s/%s" % (_SUMMARIES_SCOPE, key), value, step=step)
258 self._summary_writer.flush()
259 return results
260
261
262 def early_stop(metrics, steps, min_improvement=0, higher_is_better=False):
263 """Early stopping condition.
264
265 Args:
266 metrics: A list of metric values.
267 steps: Consider the improvement over this many steps.
268 min_improvement: Continue if the metric improved less than this value:
269 higher_is_better: Whether a higher value is better for this metric.
270
271 Returns:
272 A boolean.
273 """
274 if len(metrics) < steps + 1:
275 return False
276
277 def _did_improve(ref, new):
278 # Returns True if new is improving on ref.
279 if higher_is_better:
280 return new > ref + min_improvement
281 else:
282 return new < ref - min_improvement
283
284 samples = metrics[-steps - 1:]
285
286 # First check if the boundaries show an improvement.
287 if _did_improve(samples[0], samples[-1]):
288 return False
289
290 # If not, only early stop if each successive evaluation did not improve.
291 for metric, next_metric in zip(samples[:-1], samples[1:]):
292 if _did_improve(metric, next_metric):
293 return False
294 return True
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opennmt/evaluation.py b/opennmt/evaluation.py
--- a/opennmt/evaluation.py
+++ b/opennmt/evaluation.py
@@ -92,7 +92,7 @@
self._eval = _eval
- self._metrics_name = {"loss"}
+ self._metrics_name = {"loss", "perplexity"}
for scorer in self._scorers:
self._metrics_name.update(scorer.scores_name)
model_metrics = self._model.get_metrics()
@@ -173,7 +173,7 @@
if higher_is_better is None:
# TODO: the condition below is not always true, find a way to set it
# correctly for Keras metrics.
- higher_is_better = target_metric != "loss"
+ higher_is_better = target_metric not in ("loss", "perplexity")
metrics = [values[target_metric] for _, values in self._metrics_history]
should_stop = early_stop(
metrics,
@@ -224,7 +224,7 @@
raise RuntimeError("No examples were evaluated")
loss = loss_num / loss_den
- results = dict(loss=loss)
+ results = dict(loss=loss, perplexity=tf.math.exp(loss))
if metrics:
for name, metric in six.iteritems(metrics):
results[name] = metric.result()
|
{"golden_diff": "diff --git a/opennmt/evaluation.py b/opennmt/evaluation.py\n--- a/opennmt/evaluation.py\n+++ b/opennmt/evaluation.py\n@@ -92,7 +92,7 @@\n \n self._eval = _eval\n \n- self._metrics_name = {\"loss\"}\n+ self._metrics_name = {\"loss\", \"perplexity\"}\n for scorer in self._scorers:\n self._metrics_name.update(scorer.scores_name)\n model_metrics = self._model.get_metrics()\n@@ -173,7 +173,7 @@\n if higher_is_better is None:\n # TODO: the condition below is not always true, find a way to set it\n # correctly for Keras metrics.\n- higher_is_better = target_metric != \"loss\"\n+ higher_is_better = target_metric not in (\"loss\", \"perplexity\")\n metrics = [values[target_metric] for _, values in self._metrics_history]\n should_stop = early_stop(\n metrics,\n@@ -224,7 +224,7 @@\n raise RuntimeError(\"No examples were evaluated\")\n loss = loss_num / loss_den\n \n- results = dict(loss=loss)\n+ results = dict(loss=loss, perplexity=tf.math.exp(loss))\n if metrics:\n for name, metric in six.iteritems(metrics):\n results[name] = metric.result()\n", "issue": "How to support PPL\nCan you add the PPL measurement?\n", "before_files": [{"content": "\"\"\"Evaluation related classes and functions.\"\"\"\n\nimport collections\nimport os\nimport six\n\nimport tensorflow as tf\n\nfrom opennmt.data import dataset as dataset_lib\nfrom opennmt.utils import misc\nfrom opennmt.utils import scorers as scorers_lib\n\n\n_SUMMARIES_SCOPE = \"metrics\"\n\n\nclass EarlyStopping(\n collections.namedtuple(\"EarlyStopping\",\n (\"metric\", \"min_improvement\", \"steps\"))):\n \"\"\"Conditions for early stopping.\"\"\"\n\n\nclass Evaluator(object):\n \"\"\"Model evaluator.\"\"\"\n\n def __init__(self,\n model,\n features_file,\n labels_file,\n batch_size,\n scorers=None,\n save_predictions=False,\n early_stopping=None,\n eval_dir=None):\n \"\"\"Initializes the evaluator.\n\n Args:\n model: A :class:`opennmt.models.model.Model` to evaluate.\n features_file: Path to the evaluation features.\n labels_file: Path to the evaluation labels.\n batch_size: The evaluation batch size.\n scorers: A list of scorers, callables taking the path to the reference and\n the hypothesis and return one or more scores.\n save_predictions: Save evaluation predictions to a file. 
This is ``True``\n when :obj:`external_evaluator` is set.\n early_stopping: An ``EarlyStopping`` instance.\n eval_dir: Directory where predictions can be saved.\n\n Raises:\n ValueError: If predictions should be saved but the model is not compatible.\n ValueError: If predictions should be saved but :obj:`eval_dir` is ``None``.\n ValueError: If the :obj:`early_stopping` configuration is invalid.\n \"\"\"\n if scorers is None:\n scorers = []\n if scorers:\n save_predictions = True\n if save_predictions:\n if model.unsupervised:\n raise ValueError(\"This model does not support saving evaluation predictions\")\n if eval_dir is None:\n raise ValueError(\"Saving evaluation predictions requires eval_dir to be set\")\n if not tf.io.gfile.exists(eval_dir):\n tf.io.gfile.makedirs(eval_dir)\n self._model = model\n self._labels_file = labels_file\n self._save_predictions = save_predictions\n self._scorers = scorers\n self._eval_dir = eval_dir\n self._metrics_history = []\n if eval_dir is not None:\n self._summary_writer = tf.summary.create_file_writer(eval_dir)\n summaries = misc.read_summaries(eval_dir)\n for step, values in summaries:\n metrics = misc.extract_prefixed_keys(values, _SUMMARIES_SCOPE + \"/\")\n self._metrics_history.append((step, metrics))\n else:\n self._summary_writer = tf.summary.create_noop_writer()\n dataset = model.examples_inputter.make_evaluation_dataset(\n features_file,\n labels_file,\n batch_size,\n num_threads=1,\n prefetch_buffer_size=1)\n\n @dataset_lib.function_on_next(dataset)\n def _eval(next_fn):\n source, target = next_fn()\n outputs, predictions = model(source, labels=target)\n loss = model.compute_loss(outputs, target, training=False)\n return loss, predictions, target\n\n self._eval = _eval\n\n self._metrics_name = {\"loss\"}\n for scorer in self._scorers:\n self._metrics_name.update(scorer.scores_name)\n model_metrics = self._model.get_metrics()\n if model_metrics:\n self._metrics_name.update(set(six.iterkeys(model_metrics)))\n\n if early_stopping is not None:\n if early_stopping.metric not in self._metrics_name:\n raise ValueError(\"Invalid early stopping metric '%s', expected one in %s\" % (\n early_stopping.metric, str(self._metrics_name)))\n if early_stopping.steps <= 0:\n raise ValueError(\"Early stopping steps should greater than 0\")\n self._early_stopping = early_stopping\n\n @classmethod\n def from_config(cls, model, config, features_file=None, labels_file=None):\n \"\"\"Creates an evaluator from the configuration.\n\n Args:\n model: A :class:`opennmt.models.model.Model` to evaluate.\n config: The global user configuration.\n features_file: Optional input features file to evaluate. If not set, will\n load ``eval_features_file`` from the data configuration.\n labels_file: Optional output labels file to evaluate. 
If not set, will load\n ``eval_labels_file`` from the data configuration.\n\n Returns:\n A :class:`opennmt.evaluation.Evaluator` instance.\n\n Raises:\n ValueError: if one of :obj:`features_file` and :obj:`labels_file` is set\n but not the other.\n \"\"\"\n if (features_file is None) != (labels_file is None):\n raise ValueError(\"features_file and labels_file should be both set for evaluation\")\n scorers = config[\"eval\"].get(\"external_evaluators\")\n if scorers is not None:\n scorers = scorers_lib.make_scorers(scorers)\n early_stopping_config = config[\"eval\"].get(\"early_stopping\")\n if early_stopping_config is not None:\n early_stopping = EarlyStopping(\n metric=early_stopping_config.get(\"metric\", \"loss\"),\n min_improvement=early_stopping_config.get(\"min_improvement\", 0),\n steps=early_stopping_config[\"steps\"])\n else:\n early_stopping = None\n return cls(\n model,\n features_file or config[\"data\"][\"eval_features_file\"],\n labels_file or config[\"data\"].get(\"eval_labels_file\"),\n config[\"eval\"][\"batch_size\"],\n scorers=scorers,\n save_predictions=config[\"eval\"].get(\"save_eval_predictions\", False),\n early_stopping=early_stopping,\n eval_dir=os.path.join(config[\"model_dir\"], \"eval\"))\n\n @property\n def metrics_name(self):\n \"\"\"The name of the metrics returned by this evaluator.\"\"\"\n return self._metrics_name\n\n @property\n def metrics_history(self):\n \"\"\"The history of metrics result per evaluation step.\"\"\"\n return self._metrics_history\n\n def should_stop(self):\n \"\"\"Returns ``True`` if early stopping conditions are met.\"\"\"\n if self._early_stopping is None:\n return False\n target_metric = self._early_stopping.metric\n higher_is_better = None\n # Look if target_metric is produced by a scorer as they define the scores order.\n for scorer in self._scorers:\n if target_metric in scorer.scores_name:\n higher_is_better = scorer.higher_is_better()\n break\n if higher_is_better is None:\n # TODO: the condition below is not always true, find a way to set it\n # correctly for Keras metrics.\n higher_is_better = target_metric != \"loss\"\n metrics = [values[target_metric] for _, values in self._metrics_history]\n should_stop = early_stop(\n metrics,\n self._early_stopping.steps,\n min_improvement=self._early_stopping.min_improvement,\n higher_is_better=higher_is_better)\n if should_stop:\n tf.get_logger().warning(\n \"Evaluation metric '%s' did not improve more than %f in the last %d evaluations\",\n target_metric,\n self._early_stopping.min_improvement,\n self._early_stopping.steps)\n return should_stop\n\n def __call__(self, step):\n \"\"\"Runs the evaluator.\n\n Args:\n step: The current training step.\n\n Returns:\n A dictionary of evaluation metrics.\n \"\"\"\n tf.get_logger().info(\"Running evaluation for step %d\", step)\n output_file = None\n output_path = None\n if self._save_predictions:\n output_path = os.path.join(self._eval_dir, \"predictions.txt.%d\" % step)\n output_file = tf.io.gfile.GFile(output_path, \"w\")\n\n loss_num = 0\n loss_den = 0\n metrics = self._model.get_metrics()\n for loss, predictions, target in self._eval(): # pylint: disable=no-value-for-parameter\n if isinstance(loss, tuple):\n loss_num += loss[0]\n loss_den += loss[1]\n else:\n loss_num += loss\n loss_den += 1\n if metrics:\n self._model.update_metrics(metrics, predictions, target)\n if output_file is not None:\n predictions = {k:v.numpy() for k, v in six.iteritems(predictions)}\n for prediction in misc.extract_batches(predictions):\n 
self._model.print_prediction(prediction, stream=output_file)\n if loss_den == 0:\n raise RuntimeError(\"No examples were evaluated\")\n loss = loss_num / loss_den\n\n results = dict(loss=loss)\n if metrics:\n for name, metric in six.iteritems(metrics):\n results[name] = metric.result()\n if self._save_predictions:\n tf.get_logger().info(\"Evaluation predictions saved to %s\", output_path)\n output_file.close()\n for scorer in self._scorers:\n score = scorer(self._labels_file, output_path)\n if isinstance(score, dict):\n results.update(score)\n else:\n results[scorer.name] = score\n\n return self._record_results(step, results)\n\n def _record_results(self, step, results):\n for name, value in six.iteritems(results):\n if isinstance(value, tf.Tensor):\n results[name] = value.numpy()\n # Clear history for steps that are greater than step.\n while self._metrics_history and self._metrics_history[-1][0] > step:\n self._metrics_history.pop()\n self._metrics_history.append((step, dict(results)))\n tf.get_logger().info(\n \"Evaluation result for step %d: %s\",\n step,\n \" ; \".join(\"%s = %f\" % (k, v) for k, v in six.iteritems(results)))\n with self._summary_writer.as_default():\n for key, value in six.iteritems(results):\n tf.summary.scalar(\"%s/%s\" % (_SUMMARIES_SCOPE, key), value, step=step)\n self._summary_writer.flush()\n return results\n\n\ndef early_stop(metrics, steps, min_improvement=0, higher_is_better=False):\n \"\"\"Early stopping condition.\n\n Args:\n metrics: A list of metric values.\n steps: Consider the improvement over this many steps.\n min_improvement: Continue if the metric improved less than this value:\n higher_is_better: Whether a higher value is better for this metric.\n\n Returns:\n A boolean.\n \"\"\"\n if len(metrics) < steps + 1:\n return False\n\n def _did_improve(ref, new):\n # Returns True if new is improving on ref.\n if higher_is_better:\n return new > ref + min_improvement\n else:\n return new < ref - min_improvement\n\n samples = metrics[-steps - 1:]\n\n # First check if the boundaries show an improvement.\n if _did_improve(samples[0], samples[-1]):\n return False\n\n # If not, only early stop if each successive evaluation did not improve.\n for metric, next_metric in zip(samples[:-1], samples[1:]):\n if _did_improve(metric, next_metric):\n return False\n return True\n", "path": "opennmt/evaluation.py"}], "after_files": [{"content": "\"\"\"Evaluation related classes and functions.\"\"\"\n\nimport collections\nimport os\nimport six\n\nimport tensorflow as tf\n\nfrom opennmt.data import dataset as dataset_lib\nfrom opennmt.utils import misc\nfrom opennmt.utils import scorers as scorers_lib\n\n\n_SUMMARIES_SCOPE = \"metrics\"\n\n\nclass EarlyStopping(\n collections.namedtuple(\"EarlyStopping\",\n (\"metric\", \"min_improvement\", \"steps\"))):\n \"\"\"Conditions for early stopping.\"\"\"\n\n\nclass Evaluator(object):\n \"\"\"Model evaluator.\"\"\"\n\n def __init__(self,\n model,\n features_file,\n labels_file,\n batch_size,\n scorers=None,\n save_predictions=False,\n early_stopping=None,\n eval_dir=None):\n \"\"\"Initializes the evaluator.\n\n Args:\n model: A :class:`opennmt.models.model.Model` to evaluate.\n features_file: Path to the evaluation features.\n labels_file: Path to the evaluation labels.\n batch_size: The evaluation batch size.\n scorers: A list of scorers, callables taking the path to the reference and\n the hypothesis and return one or more scores.\n save_predictions: Save evaluation predictions to a file. 
This is ``True``\n when :obj:`external_evaluator` is set.\n early_stopping: An ``EarlyStopping`` instance.\n eval_dir: Directory where predictions can be saved.\n\n Raises:\n ValueError: If predictions should be saved but the model is not compatible.\n ValueError: If predictions should be saved but :obj:`eval_dir` is ``None``.\n ValueError: If the :obj:`early_stopping` configuration is invalid.\n \"\"\"\n if scorers is None:\n scorers = []\n if scorers:\n save_predictions = True\n if save_predictions:\n if model.unsupervised:\n raise ValueError(\"This model does not support saving evaluation predictions\")\n if eval_dir is None:\n raise ValueError(\"Saving evaluation predictions requires eval_dir to be set\")\n if not tf.io.gfile.exists(eval_dir):\n tf.io.gfile.makedirs(eval_dir)\n self._model = model\n self._labels_file = labels_file\n self._save_predictions = save_predictions\n self._scorers = scorers\n self._eval_dir = eval_dir\n self._metrics_history = []\n if eval_dir is not None:\n self._summary_writer = tf.summary.create_file_writer(eval_dir)\n summaries = misc.read_summaries(eval_dir)\n for step, values in summaries:\n metrics = misc.extract_prefixed_keys(values, _SUMMARIES_SCOPE + \"/\")\n self._metrics_history.append((step, metrics))\n else:\n self._summary_writer = tf.summary.create_noop_writer()\n dataset = model.examples_inputter.make_evaluation_dataset(\n features_file,\n labels_file,\n batch_size,\n num_threads=1,\n prefetch_buffer_size=1)\n\n @dataset_lib.function_on_next(dataset)\n def _eval(next_fn):\n source, target = next_fn()\n outputs, predictions = model(source, labels=target)\n loss = model.compute_loss(outputs, target, training=False)\n return loss, predictions, target\n\n self._eval = _eval\n\n self._metrics_name = {\"loss\", \"perplexity\"}\n for scorer in self._scorers:\n self._metrics_name.update(scorer.scores_name)\n model_metrics = self._model.get_metrics()\n if model_metrics:\n self._metrics_name.update(set(six.iterkeys(model_metrics)))\n\n if early_stopping is not None:\n if early_stopping.metric not in self._metrics_name:\n raise ValueError(\"Invalid early stopping metric '%s', expected one in %s\" % (\n early_stopping.metric, str(self._metrics_name)))\n if early_stopping.steps <= 0:\n raise ValueError(\"Early stopping steps should greater than 0\")\n self._early_stopping = early_stopping\n\n @classmethod\n def from_config(cls, model, config, features_file=None, labels_file=None):\n \"\"\"Creates an evaluator from the configuration.\n\n Args:\n model: A :class:`opennmt.models.model.Model` to evaluate.\n config: The global user configuration.\n features_file: Optional input features file to evaluate. If not set, will\n load ``eval_features_file`` from the data configuration.\n labels_file: Optional output labels file to evaluate. 
If not set, will load\n ``eval_labels_file`` from the data configuration.\n\n Returns:\n A :class:`opennmt.evaluation.Evaluator` instance.\n\n Raises:\n ValueError: if one of :obj:`features_file` and :obj:`labels_file` is set\n but not the other.\n \"\"\"\n if (features_file is None) != (labels_file is None):\n raise ValueError(\"features_file and labels_file should be both set for evaluation\")\n scorers = config[\"eval\"].get(\"external_evaluators\")\n if scorers is not None:\n scorers = scorers_lib.make_scorers(scorers)\n early_stopping_config = config[\"eval\"].get(\"early_stopping\")\n if early_stopping_config is not None:\n early_stopping = EarlyStopping(\n metric=early_stopping_config.get(\"metric\", \"loss\"),\n min_improvement=early_stopping_config.get(\"min_improvement\", 0),\n steps=early_stopping_config[\"steps\"])\n else:\n early_stopping = None\n return cls(\n model,\n features_file or config[\"data\"][\"eval_features_file\"],\n labels_file or config[\"data\"].get(\"eval_labels_file\"),\n config[\"eval\"][\"batch_size\"],\n scorers=scorers,\n save_predictions=config[\"eval\"].get(\"save_eval_predictions\", False),\n early_stopping=early_stopping,\n eval_dir=os.path.join(config[\"model_dir\"], \"eval\"))\n\n @property\n def metrics_name(self):\n \"\"\"The name of the metrics returned by this evaluator.\"\"\"\n return self._metrics_name\n\n @property\n def metrics_history(self):\n \"\"\"The history of metrics result per evaluation step.\"\"\"\n return self._metrics_history\n\n def should_stop(self):\n \"\"\"Returns ``True`` if early stopping conditions are met.\"\"\"\n if self._early_stopping is None:\n return False\n target_metric = self._early_stopping.metric\n higher_is_better = None\n # Look if target_metric is produced by a scorer as they define the scores order.\n for scorer in self._scorers:\n if target_metric in scorer.scores_name:\n higher_is_better = scorer.higher_is_better()\n break\n if higher_is_better is None:\n # TODO: the condition below is not always true, find a way to set it\n # correctly for Keras metrics.\n higher_is_better = target_metric not in (\"loss\", \"perplexity\")\n metrics = [values[target_metric] for _, values in self._metrics_history]\n should_stop = early_stop(\n metrics,\n self._early_stopping.steps,\n min_improvement=self._early_stopping.min_improvement,\n higher_is_better=higher_is_better)\n if should_stop:\n tf.get_logger().warning(\n \"Evaluation metric '%s' did not improve more than %f in the last %d evaluations\",\n target_metric,\n self._early_stopping.min_improvement,\n self._early_stopping.steps)\n return should_stop\n\n def __call__(self, step):\n \"\"\"Runs the evaluator.\n\n Args:\n step: The current training step.\n\n Returns:\n A dictionary of evaluation metrics.\n \"\"\"\n tf.get_logger().info(\"Running evaluation for step %d\", step)\n output_file = None\n output_path = None\n if self._save_predictions:\n output_path = os.path.join(self._eval_dir, \"predictions.txt.%d\" % step)\n output_file = tf.io.gfile.GFile(output_path, \"w\")\n\n loss_num = 0\n loss_den = 0\n metrics = self._model.get_metrics()\n for loss, predictions, target in self._eval(): # pylint: disable=no-value-for-parameter\n if isinstance(loss, tuple):\n loss_num += loss[0]\n loss_den += loss[1]\n else:\n loss_num += loss\n loss_den += 1\n if metrics:\n self._model.update_metrics(metrics, predictions, target)\n if output_file is not None:\n predictions = {k:v.numpy() for k, v in six.iteritems(predictions)}\n for prediction in misc.extract_batches(predictions):\n 
self._model.print_prediction(prediction, stream=output_file)\n if loss_den == 0:\n raise RuntimeError(\"No examples were evaluated\")\n loss = loss_num / loss_den\n\n results = dict(loss=loss, perplexity=tf.math.exp(loss))\n if metrics:\n for name, metric in six.iteritems(metrics):\n results[name] = metric.result()\n if self._save_predictions:\n tf.get_logger().info(\"Evaluation predictions saved to %s\", output_path)\n output_file.close()\n for scorer in self._scorers:\n score = scorer(self._labels_file, output_path)\n if isinstance(score, dict):\n results.update(score)\n else:\n results[scorer.name] = score\n\n return self._record_results(step, results)\n\n def _record_results(self, step, results):\n for name, value in six.iteritems(results):\n if isinstance(value, tf.Tensor):\n results[name] = value.numpy()\n # Clear history for steps that are greater than step.\n while self._metrics_history and self._metrics_history[-1][0] > step:\n self._metrics_history.pop()\n self._metrics_history.append((step, dict(results)))\n tf.get_logger().info(\n \"Evaluation result for step %d: %s\",\n step,\n \" ; \".join(\"%s = %f\" % (k, v) for k, v in six.iteritems(results)))\n with self._summary_writer.as_default():\n for key, value in six.iteritems(results):\n tf.summary.scalar(\"%s/%s\" % (_SUMMARIES_SCOPE, key), value, step=step)\n self._summary_writer.flush()\n return results\n\n\ndef early_stop(metrics, steps, min_improvement=0, higher_is_better=False):\n \"\"\"Early stopping condition.\n\n Args:\n metrics: A list of metric values.\n steps: Consider the improvement over this many steps.\n min_improvement: Continue if the metric improved less than this value:\n higher_is_better: Whether a higher value is better for this metric.\n\n Returns:\n A boolean.\n \"\"\"\n if len(metrics) < steps + 1:\n return False\n\n def _did_improve(ref, new):\n # Returns True if new is improving on ref.\n if higher_is_better:\n return new > ref + min_improvement\n else:\n return new < ref - min_improvement\n\n samples = metrics[-steps - 1:]\n\n # First check if the boundaries show an improvement.\n if _did_improve(samples[0], samples[-1]):\n return False\n\n # If not, only early stop if each successive evaluation did not improve.\n for metric, next_metric in zip(samples[:-1], samples[1:]):\n if _did_improve(metric, next_metric):\n return False\n return True\n", "path": "opennmt/evaluation.py"}]}
| 3,483 | 303 |
gh_patches_debug_15035
|
rasdani/github-patches
|
git_diff
|
encode__httpx-1537
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Too much data for declared Content-Length when passing string with non-ascii characters via files parameter
### Checklist
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
`h11._util.LocalProtocolError: Too much data for declared Content-Length` is raised when passing a string with non-ASCII characters via the `files` parameter.
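For illustration, the mismatch comes from counting characters instead of encoded bytes. The snippet below is a standalone sketch (not httpx internals) showing that a one-character string occupies two bytes once UTF-8 encoded, which is exactly the gap between the declared Content-Length and the body that gets written:
```
# Standalone sketch: character count vs. UTF-8 byte count for the value above.
value = '\u00E9'  # "é", the string passed in the files tuple

print(len(value))                  # 1 character
print(len(value.encode('utf-8')))  # 2 bytes actually sent on the wire
```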
### To reproduce
```
import httpx
response = httpx.post(
"https://httpbin.org/post",
files={
'upload-file': ('example.txt', '\u00E9', 'text/plain; charset=utf-8')
}
)
response.raise_for_status()
print(response.read())
```
### Expected behavior
No exception should be raised, and the content length should be computed correctly.
### Actual behavior
An exception is raised.
### Debugging material
```
Traceback (most recent call last):
File "/***redacted***/httpx_bug.py", line 5, in <module>
response = httpx.post(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_api.py", line 296, in post
return request(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_api.py", line 93, in request
return client.request(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py", line 733, in request
return self.send(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py", line 767, in send
response = self._send_handling_auth(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py", line 805, in _send_handling_auth
response = self._send_handling_redirects(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py", line 837, in _send_handling_redirects
response = self._send_single_request(request, timeout)
File "/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py", line 861, in _send_single_request
(status_code, headers, stream, ext) = transport.request(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py", line 218, in request
response = connection.request(
File "/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/connection.py", line 106, in request
return self.connection.request(method, url, headers, stream, ext)
File "/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 66, in request
self._send_request_body(stream, timeout)
File "/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 112, in _send_request_body
self._send_event(event, timeout)
File "/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/http11.py", line 123, in _send_event
bytes_to_send = self.h11_state.send(event)
File "/***redacted***/.venv/lib/python3.9/site-packages/h11/_connection.py", line 468, in send
data_list = self.send_with_data_passthrough(event)
File "/***redacted***/.venv/lib/python3.9/site-packages/h11/_connection.py", line 501, in send_with_data_passthrough
writer(event, data_list.append)
File "/***redacted***/.venv/lib/python3.9/site-packages/h11/_writers.py", line 58, in __call__
self.send_data(event.data, write)
File "/***redacted***/.venv/lib/python3.9/site-packages/h11/_writers.py", line 78, in send_data
raise LocalProtocolError("Too much data for declared Content-Length")
h11._util.LocalProtocolError: Too much data for declared Content-Length
```
### Environment
- OS: macOS
- Python version: `Python 3.9.1`
- HTTPX version: `0.16.1`
- Async environment: n/a
- HTTP proxy: no
- Custom certificates: no
### Additional context
n/a
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/_multipart.py`
Content:
```
1 import binascii
2 import os
3 import typing
4 from pathlib import Path
5
6 from ._types import FileContent, FileTypes, RequestFiles
7 from ._utils import (
8 format_form_param,
9 guess_content_type,
10 peek_filelike_length,
11 to_bytes,
12 )
13
14
15 class DataField:
16 """
17 A single form field item, within a multipart form field.
18 """
19
20 def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:
21 if not isinstance(name, str):
22 raise TypeError(
23 f"Invalid type for name. Expected str, got {type(name)}: {name!r}"
24 )
25 if not isinstance(value, (str, bytes)):
26 raise TypeError(
27 f"Invalid type for value. Expected str or bytes, got {type(value)}: {value!r}"
28 )
29 self.name = name
30 self.value = value
31
32 def render_headers(self) -> bytes:
33 if not hasattr(self, "_headers"):
34 name = format_form_param("name", self.name)
35 self._headers = b"".join(
36 [b"Content-Disposition: form-data; ", name, b"\r\n\r\n"]
37 )
38
39 return self._headers
40
41 def render_data(self) -> bytes:
42 if not hasattr(self, "_data"):
43 self._data = (
44 self.value
45 if isinstance(self.value, bytes)
46 else self.value.encode("utf-8")
47 )
48
49 return self._data
50
51 def get_length(self) -> int:
52 headers = self.render_headers()
53 data = self.render_data()
54 return len(headers) + len(data)
55
56 def render(self) -> typing.Iterator[bytes]:
57 yield self.render_headers()
58 yield self.render_data()
59
60
61 class FileField:
62 """
63 A single file field item, within a multipart form field.
64 """
65
66 def __init__(self, name: str, value: FileTypes) -> None:
67 self.name = name
68
69 fileobj: FileContent
70
71 if isinstance(value, tuple):
72 try:
73 filename, fileobj, content_type = value # type: ignore
74 except ValueError:
75 filename, fileobj = value # type: ignore
76 content_type = guess_content_type(filename)
77 else:
78 filename = Path(str(getattr(value, "name", "upload"))).name
79 fileobj = value
80 content_type = guess_content_type(filename)
81
82 self.filename = filename
83 self.file = fileobj
84 self.content_type = content_type
85 self._consumed = False
86
87 def get_length(self) -> int:
88 headers = self.render_headers()
89
90 if isinstance(self.file, (str, bytes)):
91 return len(headers) + len(self.file)
92
93 # Let's do our best not to read `file` into memory.
94 try:
95 file_length = peek_filelike_length(self.file)
96 except OSError:
97 # As a last resort, read file and cache contents for later.
98 assert not hasattr(self, "_data")
99 self._data = to_bytes(self.file.read())
100 file_length = len(self._data)
101
102 return len(headers) + file_length
103
104 def render_headers(self) -> bytes:
105 if not hasattr(self, "_headers"):
106 parts = [
107 b"Content-Disposition: form-data; ",
108 format_form_param("name", self.name),
109 ]
110 if self.filename:
111 filename = format_form_param("filename", self.filename)
112 parts.extend([b"; ", filename])
113 if self.content_type is not None:
114 content_type = self.content_type.encode()
115 parts.extend([b"\r\nContent-Type: ", content_type])
116 parts.append(b"\r\n\r\n")
117 self._headers = b"".join(parts)
118
119 return self._headers
120
121 def render_data(self) -> typing.Iterator[bytes]:
122 if isinstance(self.file, (str, bytes)):
123 yield to_bytes(self.file)
124 return
125
126 if hasattr(self, "_data"):
127 # Already rendered.
128 yield self._data
129 return
130
131 if self._consumed: # pragma: nocover
132 self.file.seek(0)
133 self._consumed = True
134
135 for chunk in self.file:
136 yield to_bytes(chunk)
137
138 def render(self) -> typing.Iterator[bytes]:
139 yield self.render_headers()
140 yield from self.render_data()
141
142
143 class MultipartStream:
144 """
145 Request content as streaming multipart encoded form data.
146 """
147
148 def __init__(self, data: dict, files: RequestFiles, boundary: bytes = None) -> None:
149 if boundary is None:
150 boundary = binascii.hexlify(os.urandom(16))
151
152 self.boundary = boundary
153 self.content_type = "multipart/form-data; boundary=%s" % boundary.decode(
154 "ascii"
155 )
156 self.fields = list(self._iter_fields(data, files))
157
158 def _iter_fields(
159 self, data: dict, files: RequestFiles
160 ) -> typing.Iterator[typing.Union[FileField, DataField]]:
161 for name, value in data.items():
162 if isinstance(value, list):
163 for item in value:
164 yield DataField(name=name, value=item)
165 else:
166 yield DataField(name=name, value=value)
167
168 file_items = files.items() if isinstance(files, typing.Mapping) else files
169 for name, value in file_items:
170 yield FileField(name=name, value=value)
171
172 def iter_chunks(self) -> typing.Iterator[bytes]:
173 for field in self.fields:
174 yield b"--%s\r\n" % self.boundary
175 yield from field.render()
176 yield b"\r\n"
177 yield b"--%s--\r\n" % self.boundary
178
179 def iter_chunks_lengths(self) -> typing.Iterator[int]:
180 boundary_length = len(self.boundary)
181 # Follow closely what `.iter_chunks()` does.
182 for field in self.fields:
183 yield 2 + boundary_length + 2
184 yield field.get_length()
185 yield 2
186 yield 2 + boundary_length + 4
187
188 def get_content_length(self) -> int:
189 return sum(self.iter_chunks_lengths())
190
191 # Content stream interface.
192
193 def get_headers(self) -> typing.Dict[str, str]:
194 content_length = str(self.get_content_length())
195 content_type = self.content_type
196 return {"Content-Length": content_length, "Content-Type": content_type}
197
198 def __iter__(self) -> typing.Iterator[bytes]:
199 for chunk in self.iter_chunks():
200 yield chunk
201
202 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
203 for chunk in self.iter_chunks():
204 yield chunk
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/httpx/_multipart.py b/httpx/_multipart.py
--- a/httpx/_multipart.py
+++ b/httpx/_multipart.py
@@ -40,11 +40,7 @@
def render_data(self) -> bytes:
if not hasattr(self, "_data"):
- self._data = (
- self.value
- if isinstance(self.value, bytes)
- else self.value.encode("utf-8")
- )
+ self._data = to_bytes(self.value)
return self._data
@@ -88,7 +84,7 @@
headers = self.render_headers()
if isinstance(self.file, (str, bytes)):
- return len(headers) + len(self.file)
+ return len(headers) + len(to_bytes(self.file))
# Let's do our best not to read `file` into memory.
try:
|
{"golden_diff": "diff --git a/httpx/_multipart.py b/httpx/_multipart.py\n--- a/httpx/_multipart.py\n+++ b/httpx/_multipart.py\n@@ -40,11 +40,7 @@\n \n def render_data(self) -> bytes:\n if not hasattr(self, \"_data\"):\n- self._data = (\n- self.value\n- if isinstance(self.value, bytes)\n- else self.value.encode(\"utf-8\")\n- )\n+ self._data = to_bytes(self.value)\n \n return self._data\n \n@@ -88,7 +84,7 @@\n headers = self.render_headers()\n \n if isinstance(self.file, (str, bytes)):\n- return len(headers) + len(self.file)\n+ return len(headers) + len(to_bytes(self.file))\n \n # Let's do our best not to read `file` into memory.\n try:\n", "issue": "Too much data for declared Content-Length when passing string with non-ascii characters via files parameter\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x ] The bug is reproducible against the latest release and/or `master`.\r\n- [ x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\n`h11._util.LocalProtocolError: Too much data for declared Content-Length` is raised when passing string with non-ascii characters via files parameter.\r\n\r\n### To reproduce\r\n\r\n```\r\nimport httpx\r\n\r\nresponse = httpx.post(\r\n \"https://httpbin.org/post\",\r\n files={\r\n 'upload-file': ('example.txt', '\\u00E9', 'text/plain; charset=utf-8')\r\n }\r\n)\r\nresponse.raise_for_status()\r\nprint(response.read())\r\n```\r\n-->\r\n\r\n### Expected behavior\r\n\r\nAn exception not be raised and the content length properly computed\r\n\r\n### Actual behavior\r\n\r\nAn exception is raised.\r\n\r\n### Debugging material\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/***redacted***/httpx_bug.py\", line 5, in <module>\r\n response = httpx.post(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_api.py\", line 296, in post\r\n return request(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_api.py\", line 93, in request\r\n return client.request(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py\", line 733, in request\r\n return self.send(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py\", line 767, in send\r\n response = self._send_handling_auth(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py\", line 805, in _send_handling_auth\r\n response = self._send_handling_redirects(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py\", line 837, in _send_handling_redirects\r\n response = self._send_single_request(request, timeout)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpx/_client.py\", line 861, in _send_single_request\r\n (status_code, headers, stream, ext) = transport.request(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/connection_pool.py\", line 218, in request\r\n response = connection.request(\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/connection.py\", line 106, in request\r\n return self.connection.request(method, url, headers, stream, ext)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/http11.py\", line 66, in request\r\n self._send_request_body(stream, timeout)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/http11.py\", line 112, in _send_request_body\r\n self._send_event(event, timeout)\r\n File 
\"/***redacted***/.venv/lib/python3.9/site-packages/httpcore/_sync/http11.py\", line 123, in _send_event\r\n bytes_to_send = self.h11_state.send(event)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/h11/_connection.py\", line 468, in send\r\n data_list = self.send_with_data_passthrough(event)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/h11/_connection.py\", line 501, in send_with_data_passthrough\r\n writer(event, data_list.append)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/h11/_writers.py\", line 58, in __call__\r\n self.send_data(event.data, write)\r\n File \"/***redacted***/.venv/lib/python3.9/site-packages/h11/_writers.py\", line 78, in send_data\r\n raise LocalProtocolError(\"Too much data for declared Content-Length\")\r\nh11._util.LocalProtocolError: Too much data for declared Content-Length\r\n```\r\n-->\r\n\r\n### Environment\r\n\r\n- OS: macOS\r\n- Python version: `Python 3.9.1`\r\n- HTTPX version: `0.16.1`\r\n- Async environment: n/a\r\n- HTTP proxy: no\r\n- Custom certificates: no\r\n\r\n### Additional context\r\nn/a\r\n\n", "before_files": [{"content": "import binascii\nimport os\nimport typing\nfrom pathlib import Path\n\nfrom ._types import FileContent, FileTypes, RequestFiles\nfrom ._utils import (\n format_form_param,\n guess_content_type,\n peek_filelike_length,\n to_bytes,\n)\n\n\nclass DataField:\n \"\"\"\n A single form field item, within a multipart form field.\n \"\"\"\n\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, str):\n raise TypeError(\n f\"Invalid type for name. Expected str, got {type(name)}: {name!r}\"\n )\n if not isinstance(value, (str, bytes)):\n raise TypeError(\n f\"Invalid type for value. Expected str or bytes, got {type(value)}: {value!r}\"\n )\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n if not hasattr(self, \"_headers\"):\n name = format_form_param(\"name\", self.name)\n self._headers = b\"\".join(\n [b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"]\n )\n\n return self._headers\n\n def render_data(self) -> bytes:\n if not hasattr(self, \"_data\"):\n self._data = (\n self.value\n if isinstance(self.value, bytes)\n else self.value.encode(\"utf-8\")\n )\n\n return self._data\n\n def get_length(self) -> int:\n headers = self.render_headers()\n data = self.render_data()\n return len(headers) + len(data)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield self.render_data()\n\n\nclass FileField:\n \"\"\"\n A single file field item, within a multipart form field.\n \"\"\"\n\n def __init__(self, name: str, value: FileTypes) -> None:\n self.name = name\n\n fileobj: FileContent\n\n if isinstance(value, tuple):\n try:\n filename, fileobj, content_type = value # type: ignore\n except ValueError:\n filename, fileobj = value # type: ignore\n content_type = guess_content_type(filename)\n else:\n filename = Path(str(getattr(value, \"name\", \"upload\"))).name\n fileobj = value\n content_type = guess_content_type(filename)\n\n self.filename = filename\n self.file = fileobj\n self.content_type = content_type\n self._consumed = False\n\n def get_length(self) -> int:\n headers = self.render_headers()\n\n if isinstance(self.file, (str, bytes)):\n return len(headers) + len(self.file)\n\n # Let's do our best not to read `file` into memory.\n try:\n file_length = peek_filelike_length(self.file)\n except OSError:\n # As a last resort, read file and cache contents for later.\n assert not 
hasattr(self, \"_data\")\n self._data = to_bytes(self.file.read())\n file_length = len(self._data)\n\n return len(headers) + file_length\n\n def render_headers(self) -> bytes:\n if not hasattr(self, \"_headers\"):\n parts = [\n b\"Content-Disposition: form-data; \",\n format_form_param(\"name\", self.name),\n ]\n if self.filename:\n filename = format_form_param(\"filename\", self.filename)\n parts.extend([b\"; \", filename])\n if self.content_type is not None:\n content_type = self.content_type.encode()\n parts.extend([b\"\\r\\nContent-Type: \", content_type])\n parts.append(b\"\\r\\n\\r\\n\")\n self._headers = b\"\".join(parts)\n\n return self._headers\n\n def render_data(self) -> typing.Iterator[bytes]:\n if isinstance(self.file, (str, bytes)):\n yield to_bytes(self.file)\n return\n\n if hasattr(self, \"_data\"):\n # Already rendered.\n yield self._data\n return\n\n if self._consumed: # pragma: nocover\n self.file.seek(0)\n self._consumed = True\n\n for chunk in self.file:\n yield to_bytes(chunk)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield from self.render_data()\n\n\nclass MultipartStream:\n \"\"\"\n Request content as streaming multipart encoded form data.\n \"\"\"\n\n def __init__(self, data: dict, files: RequestFiles, boundary: bytes = None) -> None:\n if boundary is None:\n boundary = binascii.hexlify(os.urandom(16))\n\n self.boundary = boundary\n self.content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\n \"ascii\"\n )\n self.fields = list(self._iter_fields(data, files))\n\n def _iter_fields(\n self, data: dict, files: RequestFiles\n ) -> typing.Iterator[typing.Union[FileField, DataField]]:\n for name, value in data.items():\n if isinstance(value, list):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n file_items = files.items() if isinstance(files, typing.Mapping) else files\n for name, value in file_items:\n yield FileField(name=name, value=value)\n\n def iter_chunks(self) -> typing.Iterator[bytes]:\n for field in self.fields:\n yield b\"--%s\\r\\n\" % self.boundary\n yield from field.render()\n yield b\"\\r\\n\"\n yield b\"--%s--\\r\\n\" % self.boundary\n\n def iter_chunks_lengths(self) -> typing.Iterator[int]:\n boundary_length = len(self.boundary)\n # Follow closely what `.iter_chunks()` does.\n for field in self.fields:\n yield 2 + boundary_length + 2\n yield field.get_length()\n yield 2\n yield 2 + boundary_length + 4\n\n def get_content_length(self) -> int:\n return sum(self.iter_chunks_lengths())\n\n # Content stream interface.\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(self.get_content_length())\n content_type = self.content_type\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n", "path": "httpx/_multipart.py"}], "after_files": [{"content": "import binascii\nimport os\nimport typing\nfrom pathlib import Path\n\nfrom ._types import FileContent, FileTypes, RequestFiles\nfrom ._utils import (\n format_form_param,\n guess_content_type,\n peek_filelike_length,\n to_bytes,\n)\n\n\nclass DataField:\n \"\"\"\n A single form field item, within a multipart form field.\n \"\"\"\n\n def __init__(self, name: str, value: typing.Union[str, bytes]) -> None:\n if not isinstance(name, str):\n raise 
TypeError(\n f\"Invalid type for name. Expected str, got {type(name)}: {name!r}\"\n )\n if not isinstance(value, (str, bytes)):\n raise TypeError(\n f\"Invalid type for value. Expected str or bytes, got {type(value)}: {value!r}\"\n )\n self.name = name\n self.value = value\n\n def render_headers(self) -> bytes:\n if not hasattr(self, \"_headers\"):\n name = format_form_param(\"name\", self.name)\n self._headers = b\"\".join(\n [b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"]\n )\n\n return self._headers\n\n def render_data(self) -> bytes:\n if not hasattr(self, \"_data\"):\n self._data = to_bytes(self.value)\n\n return self._data\n\n def get_length(self) -> int:\n headers = self.render_headers()\n data = self.render_data()\n return len(headers) + len(data)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield self.render_data()\n\n\nclass FileField:\n \"\"\"\n A single file field item, within a multipart form field.\n \"\"\"\n\n def __init__(self, name: str, value: FileTypes) -> None:\n self.name = name\n\n fileobj: FileContent\n\n if isinstance(value, tuple):\n try:\n filename, fileobj, content_type = value # type: ignore\n except ValueError:\n filename, fileobj = value # type: ignore\n content_type = guess_content_type(filename)\n else:\n filename = Path(str(getattr(value, \"name\", \"upload\"))).name\n fileobj = value\n content_type = guess_content_type(filename)\n\n self.filename = filename\n self.file = fileobj\n self.content_type = content_type\n self._consumed = False\n\n def get_length(self) -> int:\n headers = self.render_headers()\n\n if isinstance(self.file, (str, bytes)):\n return len(headers) + len(to_bytes(self.file))\n\n # Let's do our best not to read `file` into memory.\n try:\n file_length = peek_filelike_length(self.file)\n except OSError:\n # As a last resort, read file and cache contents for later.\n assert not hasattr(self, \"_data\")\n self._data = to_bytes(self.file.read())\n file_length = len(self._data)\n\n return len(headers) + file_length\n\n def render_headers(self) -> bytes:\n if not hasattr(self, \"_headers\"):\n parts = [\n b\"Content-Disposition: form-data; \",\n format_form_param(\"name\", self.name),\n ]\n if self.filename:\n filename = format_form_param(\"filename\", self.filename)\n parts.extend([b\"; \", filename])\n if self.content_type is not None:\n content_type = self.content_type.encode()\n parts.extend([b\"\\r\\nContent-Type: \", content_type])\n parts.append(b\"\\r\\n\\r\\n\")\n self._headers = b\"\".join(parts)\n\n return self._headers\n\n def render_data(self) -> typing.Iterator[bytes]:\n if isinstance(self.file, (str, bytes)):\n yield to_bytes(self.file)\n return\n\n if hasattr(self, \"_data\"):\n # Already rendered.\n yield self._data\n return\n\n if self._consumed: # pragma: nocover\n self.file.seek(0)\n self._consumed = True\n\n for chunk in self.file:\n yield to_bytes(chunk)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield from self.render_data()\n\n\nclass MultipartStream:\n \"\"\"\n Request content as streaming multipart encoded form data.\n \"\"\"\n\n def __init__(self, data: dict, files: RequestFiles, boundary: bytes = None) -> None:\n if boundary is None:\n boundary = binascii.hexlify(os.urandom(16))\n\n self.boundary = boundary\n self.content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\n \"ascii\"\n )\n self.fields = list(self._iter_fields(data, files))\n\n def _iter_fields(\n self, data: dict, files: RequestFiles\n ) -> 
typing.Iterator[typing.Union[FileField, DataField]]:\n for name, value in data.items():\n if isinstance(value, list):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n file_items = files.items() if isinstance(files, typing.Mapping) else files\n for name, value in file_items:\n yield FileField(name=name, value=value)\n\n def iter_chunks(self) -> typing.Iterator[bytes]:\n for field in self.fields:\n yield b\"--%s\\r\\n\" % self.boundary\n yield from field.render()\n yield b\"\\r\\n\"\n yield b\"--%s--\\r\\n\" % self.boundary\n\n def iter_chunks_lengths(self) -> typing.Iterator[int]:\n boundary_length = len(self.boundary)\n # Follow closely what `.iter_chunks()` does.\n for field in self.fields:\n yield 2 + boundary_length + 2\n yield field.get_length()\n yield 2\n yield 2 + boundary_length + 4\n\n def get_content_length(self) -> int:\n return sum(self.iter_chunks_lengths())\n\n # Content stream interface.\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(self.get_content_length())\n content_type = self.content_type\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n", "path": "httpx/_multipart.py"}]}
| 3,242 | 192 |
gh_patches_debug_23587
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-359
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
McDonald's
JSON endpoint: http://rl.mcdonalds.com/googleapps/GoogleSearchUSAction.do?method=searchLocation&searchTxtLatlng=(43.1272254%2C-87.9432837)&actionType=searchRestaurant&language=en&country=us
Search by lat/lon only? It looks like they geocode the address using the Google Maps API and then call this endpoint with a lat/lon.
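An untested sketch of what calling that endpoint directly might look like, based only on the URL quoted above (the lat/lon pair is the one from the example, and the JSON assumption comes from the issue text):
```
# Untested sketch: query the locator endpoint above for a given lat/lon.
import requests

BASE_URL = "http://rl.mcdonalds.com/googleapps/GoogleSearchUSAction.do"
params = {
    "method": "searchLocation",
    "searchTxtLatlng": "(43.1272254,-87.9432837)",  # lat/lon pair to search around
    "actionType": "searchRestaurant",
    "language": "en",
    "country": "us",
}

response = requests.get(BASE_URL, params=params, timeout=30)
response.raise_for_status()
print(response.json())  # assumes the endpoint returns JSON, as described above
```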
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/mcdonalds_localizer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 from locations.items import GeojsonPointItem
5
6
7 class McLocalizer(scrapy.Spider):
8
9 name = "mclocalizer"
10 allowed_domains = ["www.mcdonalds.com", "www.mcdonalds.com.pr", "www.mcdonalds.co.cr", "www.mcdonalds.com.ar"]
11 start_urls = (
12 'http://www.mcdonalds.com.pr/api/restaurantsByCountry?country=PR',
13 'http://www.mcdonalds.co.cr/api/restaurantsByCountry?country=CR',
14 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR'
15 )
16
17 def parse(self, response):
18 data = response.body_as_unicode()
19 data.replace('" ', '"')
20 data.replace(' "', '"')
21 results = json.loads(data)
22 results = results["content"]["restaurants"]
23 for data in results:
24 properties = {
25 'ref': data['id'],
26 'lon': float(data['longitude']),
27 'lat': float(data['latitude']),
28
29 }
30
31 contact_info = data['name'][:data['name'].find("<br")]
32 name = contact_info[:contact_info.find("</br")]
33
34 properties["name"] = name
35 properties["addr_full"] = data['name'][data['name'].find("<small>"):-8][8:]
36 # = address[8:]
37
38 yield GeojsonPointItem(**properties)
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/mcdonalds_localizer.py b/locations/spiders/mcdonalds_localizer.py
--- a/locations/spiders/mcdonalds_localizer.py
+++ b/locations/spiders/mcdonalds_localizer.py
@@ -7,11 +7,12 @@
class McLocalizer(scrapy.Spider):
name = "mclocalizer"
- allowed_domains = ["www.mcdonalds.com", "www.mcdonalds.com.pr", "www.mcdonalds.co.cr", "www.mcdonalds.com.ar"]
+ allowed_domains = ["www.mcdonalds.com", "www.mcdonalds.com.pr", "www.mcdonalds.co.cr", "www.mcdonalds.com.ar", "www.mcdonalds.com.pa"]
start_urls = (
'http://www.mcdonalds.com.pr/api/restaurantsByCountry?country=PR',
'http://www.mcdonalds.co.cr/api/restaurantsByCountry?country=CR',
- 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR'
+ 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR',
+ 'http://www.mcdonalds.com.pa/api/restaurantsByCountry?country=PA'
)
def parse(self, response):
@@ -33,6 +34,5 @@
properties["name"] = name
properties["addr_full"] = data['name'][data['name'].find("<small>"):-8][8:]
- # = address[8:]
yield GeojsonPointItem(**properties)
\ No newline at end of file
|
{"golden_diff": "diff --git a/locations/spiders/mcdonalds_localizer.py b/locations/spiders/mcdonalds_localizer.py\n--- a/locations/spiders/mcdonalds_localizer.py\n+++ b/locations/spiders/mcdonalds_localizer.py\n@@ -7,11 +7,12 @@\n class McLocalizer(scrapy.Spider):\n \n name = \"mclocalizer\"\n- allowed_domains = [\"www.mcdonalds.com\", \"www.mcdonalds.com.pr\", \"www.mcdonalds.co.cr\", \"www.mcdonalds.com.ar\"]\n+ allowed_domains = [\"www.mcdonalds.com\", \"www.mcdonalds.com.pr\", \"www.mcdonalds.co.cr\", \"www.mcdonalds.com.ar\", \"www.mcdonalds.com.pa\"]\n start_urls = (\n 'http://www.mcdonalds.com.pr/api/restaurantsByCountry?country=PR',\n 'http://www.mcdonalds.co.cr/api/restaurantsByCountry?country=CR',\n- 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR'\n+ 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR',\n+ 'http://www.mcdonalds.com.pa/api/restaurantsByCountry?country=PA'\n )\n \n def parse(self, response):\n@@ -33,6 +34,5 @@\n \n properties[\"name\"] = name\n properties[\"addr_full\"] = data['name'][data['name'].find(\"<small>\"):-8][8:]\n- # = address[8:]\n \n yield GeojsonPointItem(**properties)\n\\ No newline at end of file\n", "issue": "McDonald's\nJSON endpoint: http://rl.mcdonalds.com/googleapps/GoogleSearchUSAction.do?method=searchLocation&searchTxtLatlng=(43.1272254%2C-87.9432837)&actionType=searchRestaurant&language=en&country=us\n\nSearch by lat/lon only? Looks like they geocode using Google Maps API and then call this endpoint with a lat/lon.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\n\nclass McLocalizer(scrapy.Spider):\n\n name = \"mclocalizer\"\n allowed_domains = [\"www.mcdonalds.com\", \"www.mcdonalds.com.pr\", \"www.mcdonalds.co.cr\", \"www.mcdonalds.com.ar\"]\n start_urls = (\n 'http://www.mcdonalds.com.pr/api/restaurantsByCountry?country=PR',\n 'http://www.mcdonalds.co.cr/api/restaurantsByCountry?country=CR',\n 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR'\n )\n\n def parse(self, response):\n data = response.body_as_unicode()\n data.replace('\" ', '\"')\n data.replace(' \"', '\"')\n results = json.loads(data)\n results = results[\"content\"][\"restaurants\"]\n for data in results:\n properties = {\n 'ref': data['id'],\n 'lon': float(data['longitude']),\n 'lat': float(data['latitude']),\n \n }\n\n contact_info = data['name'][:data['name'].find(\"<br\")]\n name = contact_info[:contact_info.find(\"</br\")]\n\n properties[\"name\"] = name\n properties[\"addr_full\"] = data['name'][data['name'].find(\"<small>\"):-8][8:]\n # = address[8:]\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/mcdonalds_localizer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nfrom locations.items import GeojsonPointItem\n\n\nclass McLocalizer(scrapy.Spider):\n\n name = \"mclocalizer\"\n allowed_domains = [\"www.mcdonalds.com\", \"www.mcdonalds.com.pr\", \"www.mcdonalds.co.cr\", \"www.mcdonalds.com.ar\", \"www.mcdonalds.com.pa\"]\n start_urls = (\n 'http://www.mcdonalds.com.pr/api/restaurantsByCountry?country=PR',\n 'http://www.mcdonalds.co.cr/api/restaurantsByCountry?country=CR',\n 'http://www.mcdonalds.com.ar/api/restaurantsByCountry?country=AR',\n 'http://www.mcdonalds.com.pa/api/restaurantsByCountry?country=PA'\n )\n\n def parse(self, response):\n data = response.body_as_unicode()\n data.replace('\" ', '\"')\n data.replace(' \"', '\"')\n results = json.loads(data)\n results = 
results[\"content\"][\"restaurants\"]\n for data in results:\n properties = {\n 'ref': data['id'],\n 'lon': float(data['longitude']),\n 'lat': float(data['latitude']),\n \n }\n\n contact_info = data['name'][:data['name'].find(\"<br\")]\n name = contact_info[:contact_info.find(\"</br\")]\n\n properties[\"name\"] = name\n properties[\"addr_full\"] = data['name'][data['name'].find(\"<small>\"):-8][8:]\n\n yield GeojsonPointItem(**properties)", "path": "locations/spiders/mcdonalds_localizer.py"}]}
| 745 | 370 |
gh_patches_debug_39954
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-8268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dns-rfc2136 challenges run infinitely for invalid DNS servers
1. Set `dns_rfc2136_server` to something invalid, e.g. `dns_rfc2136_server = 2001:db8::` (see the credentials sketch after this list).
2. Try to obtain certificates.
3. Challenges run forever.
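For reference, the credentials INI file in question might look roughly like this (placeholder key material; only the server value needs to be unreachable to trigger the hang):
```
dns_rfc2136_server = 2001:db8::
dns_rfc2136_port = 53
dns_rfc2136_name = keyname.
dns_rfc2136_secret = <base64-encoded TSIG key secret>
dns_rfc2136_algorithm = HMAC-SHA512
```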
CTRL+C during a challenge:
```
[... usual]
Performing the following challenges:
dns-01 challenge for [...]
dns-01 challenge for [...]
Encountered exception:
Traceback (most recent call last):
File "/usr/lib/python3.7/site-packages/certbot/auth_handler.py", line 75, in handle_authorizations
resp = self._solve_challenges(aauthzrs)
File "/usr/lib/python3.7/site-packages/certbot/auth_handler.py", line 139, in _solve_challenges
resp = self.auth.perform(all_achalls)
File "/usr/lib/python3.7/site-packages/certbot/plugins/dns_common.py", line 57, in perform
self._perform(domain, validation_domain_name, validation)
File "/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py", line 76, in _perform
self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)
File "/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py", line 112, in add_txt_record
domain = self._find_domain(record_name)
File "/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py", line 186, in _find_domain
if self._query_soa(guess):
File "/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py", line 209, in _query_soa
response = dns.query.udp(request, self.server, port=self.port)
File "/usr/lib/python3.7/site-packages/dns/query.py", line 240, in udp
_wait_for_readable(s, expiration)
File "/usr/lib/python3.7/site-packages/dns/query.py", line 157, in _wait_for_readable
_wait_for(s, True, False, True, expiration)
File "/usr/lib/python3.7/site-packages/dns/query.py", line 131, in _wait_for
if not _polling_backend(fd, readable, writable, error, timeout):
File "/usr/lib/python3.7/site-packages/dns/query.py", line 87, in _poll_for
event_list = pollable.poll()
KeyboardInterrupt
```
**Expected:**
Certbot should set a reasonable query timeout ([this is supported by `dnspython`](https://stackoverflow.com/questions/8989457/dnspython-setting-query-timeout-lifetime)) and alert the user of an issue with the DNS server.
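A minimal sketch of the bounded query being asked for, assuming dnspython's `timeout` argument to `dns.query.udp`/`dns.query.tcp` (the 45-second value and the error wording are illustrative, not existing plugin behaviour):
```
# Sketch: probe for an SOA record with a bounded timeout instead of blocking forever.
import dns.exception
import dns.message
import dns.name
import dns.query
import dns.rdataclass
import dns.rdatatype

QUERY_TIMEOUT = 45  # seconds; an assumed value, not a constant the plugin defines today


def query_soa_with_timeout(server, port, domain_name):
    """Return the rcode of an SOA query, or raise if the server never answers."""
    domain = dns.name.from_text(domain_name)
    request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)
    try:
        response = dns.query.udp(request, server, timeout=QUERY_TIMEOUT, port=port)
    except dns.exception.Timeout:
        raise RuntimeError(
            "DNS server %s:%d did not respond within %d seconds"
            % (server, port, QUERY_TIMEOUT)
        )
    return response.rcode()
```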
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py`
Content:
```
1 """DNS Authenticator using RFC 2136 Dynamic Updates."""
2 import logging
3
4 import dns.flags
5 import dns.message
6 import dns.name
7 import dns.query
8 import dns.rdataclass
9 import dns.rdatatype
10 import dns.tsig
11 import dns.tsigkeyring
12 import dns.update
13 import zope.interface
14
15 from certbot import errors
16 from certbot import interfaces
17 from certbot.plugins import dns_common
18
19 logger = logging.getLogger(__name__)
20
21
22 @zope.interface.implementer(interfaces.IAuthenticator)
23 @zope.interface.provider(interfaces.IPluginFactory)
24 class Authenticator(dns_common.DNSAuthenticator):
25 """DNS Authenticator using RFC 2136 Dynamic Updates
26
27 This Authenticator uses RFC 2136 Dynamic Updates to fulfull a dns-01 challenge.
28 """
29
30 ALGORITHMS = {
31 'HMAC-MD5': dns.tsig.HMAC_MD5,
32 'HMAC-SHA1': dns.tsig.HMAC_SHA1,
33 'HMAC-SHA224': dns.tsig.HMAC_SHA224,
34 'HMAC-SHA256': dns.tsig.HMAC_SHA256,
35 'HMAC-SHA384': dns.tsig.HMAC_SHA384,
36 'HMAC-SHA512': dns.tsig.HMAC_SHA512
37 }
38
39 PORT = 53
40
41 description = 'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).'
42 ttl = 120
43
44 def __init__(self, *args, **kwargs):
45 super(Authenticator, self).__init__(*args, **kwargs)
46 self.credentials = None
47
48 @classmethod
49 def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
50 super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60)
51 add('credentials', help='RFC 2136 credentials INI file.')
52
53 def more_info(self): # pylint: disable=missing-function-docstring
54 return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
55 'RFC 2136 Dynamic Updates.'
56
57 def _validate_algorithm(self, credentials):
58 algorithm = credentials.conf('algorithm')
59 if algorithm:
60 if not self.ALGORITHMS.get(algorithm.upper()):
61 raise errors.PluginError("Unknown algorithm: {0}.".format(algorithm))
62
63 def _setup_credentials(self):
64 self.credentials = self._configure_credentials(
65 'credentials',
66 'RFC 2136 credentials INI file',
67 {
68 'name': 'TSIG key name',
69 'secret': 'TSIG key secret',
70 'server': 'The target DNS server'
71 },
72 self._validate_algorithm
73 )
74
75 def _perform(self, _domain, validation_name, validation):
76 self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)
77
78 def _cleanup(self, _domain, validation_name, validation):
79 self._get_rfc2136_client().del_txt_record(validation_name, validation)
80
81 def _get_rfc2136_client(self):
82 return _RFC2136Client(self.credentials.conf('server'),
83 int(self.credentials.conf('port') or self.PORT),
84 self.credentials.conf('name'),
85 self.credentials.conf('secret'),
86 self.ALGORITHMS.get(self.credentials.conf('algorithm'),
87 dns.tsig.HMAC_MD5))
88
89
90 class _RFC2136Client(object):
91 """
92 Encapsulates all communication with the target DNS server.
93 """
94 def __init__(self, server, port, key_name, key_secret, key_algorithm):
95 self.server = server
96 self.port = port
97 self.keyring = dns.tsigkeyring.from_text({
98 key_name: key_secret
99 })
100 self.algorithm = key_algorithm
101
102 def add_txt_record(self, record_name, record_content, record_ttl):
103 """
104 Add a TXT record using the supplied information.
105
106 :param str record_name: The record name (typically beginning with '_acme-challenge.').
107 :param str record_content: The record content (typically the challenge validation).
108 :param int record_ttl: The record TTL (number of seconds that the record may be cached).
109 :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server
110 """
111
112 domain = self._find_domain(record_name)
113
114 n = dns.name.from_text(record_name)
115 o = dns.name.from_text(domain)
116 rel = n.relativize(o)
117
118 update = dns.update.Update(
119 domain,
120 keyring=self.keyring,
121 keyalgorithm=self.algorithm)
122 update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)
123
124 try:
125 response = dns.query.tcp(update, self.server, port=self.port)
126 except Exception as e:
127 raise errors.PluginError('Encountered error adding TXT record: {0}'
128 .format(e))
129 rcode = response.rcode()
130
131 if rcode == dns.rcode.NOERROR:
132 logger.debug('Successfully added TXT record %s', record_name)
133 else:
134 raise errors.PluginError('Received response from server: {0}'
135 .format(dns.rcode.to_text(rcode)))
136
137 def del_txt_record(self, record_name, record_content):
138 """
139 Delete a TXT record using the supplied information.
140
141 :param str record_name: The record name (typically beginning with '_acme-challenge.').
142 :param str record_content: The record content (typically the challenge validation).
143 :param int record_ttl: The record TTL (number of seconds that the record may be cached).
144 :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server
145 """
146
147 domain = self._find_domain(record_name)
148
149 n = dns.name.from_text(record_name)
150 o = dns.name.from_text(domain)
151 rel = n.relativize(o)
152
153 update = dns.update.Update(
154 domain,
155 keyring=self.keyring,
156 keyalgorithm=self.algorithm)
157 update.delete(rel, dns.rdatatype.TXT, record_content)
158
159 try:
160 response = dns.query.tcp(update, self.server, port=self.port)
161 except Exception as e:
162 raise errors.PluginError('Encountered error deleting TXT record: {0}'
163 .format(e))
164 rcode = response.rcode()
165
166 if rcode == dns.rcode.NOERROR:
167 logger.debug('Successfully deleted TXT record %s', record_name)
168 else:
169 raise errors.PluginError('Received response from server: {0}'
170 .format(dns.rcode.to_text(rcode)))
171
172 def _find_domain(self, record_name):
173 """
174 Find the closest domain with an SOA record for a given domain name.
175
176 :param str record_name: The record name for which to find the closest SOA record.
177 :returns: The domain, if found.
178 :rtype: str
179 :raises certbot.errors.PluginError: if no SOA record can be found.
180 """
181
182 domain_name_guesses = dns_common.base_domain_name_guesses(record_name)
183
184 # Loop through until we find an authoritative SOA record
185 for guess in domain_name_guesses:
186 if self._query_soa(guess):
187 return guess
188
189 raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'
190 .format(record_name, domain_name_guesses))
191
192 def _query_soa(self, domain_name):
193 """
194 Query a domain name for an authoritative SOA record.
195
196 :param str domain_name: The domain name to query for an SOA record.
197 :returns: True if found, False otherwise.
198 :rtype: bool
199 :raises certbot.errors.PluginError: if no response is received.
200 """
201
202 domain = dns.name.from_text(domain_name)
203
204 request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)
205 # Turn off Recursion Desired bit in query
206 request.flags ^= dns.flags.RD
207
208 try:
209 try:
210 response = dns.query.tcp(request, self.server, port=self.port)
211 except OSError as e:
212 logger.debug('TCP query failed, fallback to UDP: %s', e)
213 response = dns.query.udp(request, self.server, port=self.port)
214 rcode = response.rcode()
215
216 # Authoritative Answer bit should be set
217 if (rcode == dns.rcode.NOERROR and response.get_rrset(response.answer,
218 domain, dns.rdataclass.IN, dns.rdatatype.SOA) and response.flags & dns.flags.AA):
219 logger.debug('Received authoritative SOA response for %s', domain_name)
220 return True
221
222 logger.debug('No authoritative SOA record found for %s', domain_name)
223 return False
224 except Exception as e:
225 raise errors.PluginError('Encountered error when making query: {0}'
226 .format(e))
227
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py
--- a/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py
+++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py
@@ -18,6 +18,7 @@
logger = logging.getLogger(__name__)
+DEFAULT_NETWORK_TIMEOUT = 45
@zope.interface.implementer(interfaces.IAuthenticator)
@zope.interface.provider(interfaces.IPluginFactory)
@@ -91,13 +92,15 @@
"""
Encapsulates all communication with the target DNS server.
"""
- def __init__(self, server, port, key_name, key_secret, key_algorithm):
+ def __init__(self, server, port, key_name, key_secret, key_algorithm,
+ timeout=DEFAULT_NETWORK_TIMEOUT):
self.server = server
self.port = port
self.keyring = dns.tsigkeyring.from_text({
key_name: key_secret
})
self.algorithm = key_algorithm
+ self._default_timeout = timeout
def add_txt_record(self, record_name, record_content, record_ttl):
"""
@@ -122,7 +125,7 @@
update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)
try:
- response = dns.query.tcp(update, self.server, port=self.port)
+ response = dns.query.tcp(update, self.server, self._default_timeout, self.port)
except Exception as e:
raise errors.PluginError('Encountered error adding TXT record: {0}'
.format(e))
@@ -157,7 +160,7 @@
update.delete(rel, dns.rdatatype.TXT, record_content)
try:
- response = dns.query.tcp(update, self.server, port=self.port)
+ response = dns.query.tcp(update, self.server, self._default_timeout, self.port)
except Exception as e:
raise errors.PluginError('Encountered error deleting TXT record: {0}'
.format(e))
@@ -207,10 +210,10 @@
try:
try:
- response = dns.query.tcp(request, self.server, port=self.port)
- except OSError as e:
+ response = dns.query.tcp(request, self.server, self._default_timeout, self.port)
+ except (OSError, dns.exception.Timeout) as e:
logger.debug('TCP query failed, fallback to UDP: %s', e)
- response = dns.query.udp(request, self.server, port=self.port)
+ response = dns.query.udp(request, self.server, self._default_timeout, self.port)
rcode = response.rcode()
# Authoritative Answer bit should be set
|
{"golden_diff": "diff --git a/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py b/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py\n--- a/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py\n+++ b/certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py\n@@ -18,6 +18,7 @@\n \n logger = logging.getLogger(__name__)\n \n+DEFAULT_NETWORK_TIMEOUT = 45\n \n @zope.interface.implementer(interfaces.IAuthenticator)\n @zope.interface.provider(interfaces.IPluginFactory)\n@@ -91,13 +92,15 @@\n \"\"\"\n Encapsulates all communication with the target DNS server.\n \"\"\"\n- def __init__(self, server, port, key_name, key_secret, key_algorithm):\n+ def __init__(self, server, port, key_name, key_secret, key_algorithm,\n+ timeout=DEFAULT_NETWORK_TIMEOUT):\n self.server = server\n self.port = port\n self.keyring = dns.tsigkeyring.from_text({\n key_name: key_secret\n })\n self.algorithm = key_algorithm\n+ self._default_timeout = timeout\n \n def add_txt_record(self, record_name, record_content, record_ttl):\n \"\"\"\n@@ -122,7 +125,7 @@\n update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)\n \n try:\n- response = dns.query.tcp(update, self.server, port=self.port)\n+ response = dns.query.tcp(update, self.server, self._default_timeout, self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error adding TXT record: {0}'\n .format(e))\n@@ -157,7 +160,7 @@\n update.delete(rel, dns.rdatatype.TXT, record_content)\n \n try:\n- response = dns.query.tcp(update, self.server, port=self.port)\n+ response = dns.query.tcp(update, self.server, self._default_timeout, self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error deleting TXT record: {0}'\n .format(e))\n@@ -207,10 +210,10 @@\n \n try:\n try:\n- response = dns.query.tcp(request, self.server, port=self.port)\n- except OSError as e:\n+ response = dns.query.tcp(request, self.server, self._default_timeout, self.port)\n+ except (OSError, dns.exception.Timeout) as e:\n logger.debug('TCP query failed, fallback to UDP: %s', e)\n- response = dns.query.udp(request, self.server, port=self.port)\n+ response = dns.query.udp(request, self.server, self._default_timeout, self.port)\n rcode = response.rcode()\n \n # Authoritative Answer bit should be set\n", "issue": "dns-rfc2136 challenges run infinitely for invalid DNS servers\n1. Set `dns_rfc2136_server` to something invalid, e.g. `dns_rfc2136_server = 2001:db8::`.\r\n2. Try to obtain certificates.\r\n3. Challenges run forever.\r\n\r\nCTRL+C during a challenge:\r\n```\r\n[... 
usual]\r\nPerforming the following challenges:\r\ndns-01 challenge for [...]\r\ndns-01 challenge for [...]\r\nEncountered exception:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.7/site-packages/certbot/auth_handler.py\", line 75, in handle_authorizations\r\n resp = self._solve_challenges(aauthzrs)\r\n File \"/usr/lib/python3.7/site-packages/certbot/auth_handler.py\", line 139, in _solve_challenges\r\n resp = self.auth.perform(all_achalls)\r\n File \"/usr/lib/python3.7/site-packages/certbot/plugins/dns_common.py\", line 57, in perform\r\n self._perform(domain, validation_domain_name, validation)\r\n File \"/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py\", line 76, in _perform\r\n self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)\r\n File \"/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py\", line 112, in add_txt_record\r\n domain = self._find_domain(record_name)\r\n File \"/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py\", line 186, in _find_domain\r\n if self._query_soa(guess):\r\n File \"/usr/lib/python3.7/site-packages/certbot_dns_rfc2136/dns_rfc2136.py\", line 209, in _query_soa\r\n response = dns.query.udp(request, self.server, port=self.port)\r\n File \"/usr/lib/python3.7/site-packages/dns/query.py\", line 240, in udp\r\n _wait_for_readable(s, expiration)\r\n File \"/usr/lib/python3.7/site-packages/dns/query.py\", line 157, in _wait_for_readable\r\n _wait_for(s, True, False, True, expiration)\r\n File \"/usr/lib/python3.7/site-packages/dns/query.py\", line 131, in _wait_for\r\n if not _polling_backend(fd, readable, writable, error, timeout):\r\n File \"/usr/lib/python3.7/site-packages/dns/query.py\", line 87, in _poll_for\r\n event_list = pollable.poll()\r\nKeyboardInterrupt\r\n```\r\n\r\n**Expected:** \r\nCertbot should set a reasonable query timeout ([this is supported by `dnspython`](https://stackoverflow.com/questions/8989457/dnspython-setting-query-timeout-lifetime)) and alert the user of an issue with the DNS server.\n", "before_files": [{"content": "\"\"\"DNS Authenticator using RFC 2136 Dynamic Updates.\"\"\"\nimport logging\n\nimport dns.flags\nimport dns.message\nimport dns.name\nimport dns.query\nimport dns.rdataclass\nimport dns.rdatatype\nimport dns.tsig\nimport dns.tsigkeyring\nimport dns.update\nimport zope.interface\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.plugins import dns_common\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(dns_common.DNSAuthenticator):\n \"\"\"DNS Authenticator using RFC 2136 Dynamic Updates\n\n This Authenticator uses RFC 2136 Dynamic Updates to fulfull a dns-01 challenge.\n \"\"\"\n\n ALGORITHMS = {\n 'HMAC-MD5': dns.tsig.HMAC_MD5,\n 'HMAC-SHA1': dns.tsig.HMAC_SHA1,\n 'HMAC-SHA224': dns.tsig.HMAC_SHA224,\n 'HMAC-SHA256': dns.tsig.HMAC_SHA256,\n 'HMAC-SHA384': dns.tsig.HMAC_SHA384,\n 'HMAC-SHA512': dns.tsig.HMAC_SHA512\n }\n\n PORT = 53\n\n description = 'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).'\n ttl = 120\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.credentials = None\n\n @classmethod\n def add_parser_arguments(cls, add): # pylint: disable=arguments-differ\n super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60)\n add('credentials', help='RFC 2136 credentials INI file.')\n\n 
def more_info(self): # pylint: disable=missing-function-docstring\n return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \\\n 'RFC 2136 Dynamic Updates.'\n\n def _validate_algorithm(self, credentials):\n algorithm = credentials.conf('algorithm')\n if algorithm:\n if not self.ALGORITHMS.get(algorithm.upper()):\n raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\n\n def _setup_credentials(self):\n self.credentials = self._configure_credentials(\n 'credentials',\n 'RFC 2136 credentials INI file',\n {\n 'name': 'TSIG key name',\n 'secret': 'TSIG key secret',\n 'server': 'The target DNS server'\n },\n self._validate_algorithm\n )\n\n def _perform(self, _domain, validation_name, validation):\n self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)\n\n def _cleanup(self, _domain, validation_name, validation):\n self._get_rfc2136_client().del_txt_record(validation_name, validation)\n\n def _get_rfc2136_client(self):\n return _RFC2136Client(self.credentials.conf('server'),\n int(self.credentials.conf('port') or self.PORT),\n self.credentials.conf('name'),\n self.credentials.conf('secret'),\n self.ALGORITHMS.get(self.credentials.conf('algorithm'),\n dns.tsig.HMAC_MD5))\n\n\nclass _RFC2136Client(object):\n \"\"\"\n Encapsulates all communication with the target DNS server.\n \"\"\"\n def __init__(self, server, port, key_name, key_secret, key_algorithm):\n self.server = server\n self.port = port\n self.keyring = dns.tsigkeyring.from_text({\n key_name: key_secret\n })\n self.algorithm = key_algorithm\n\n def add_txt_record(self, record_name, record_content, record_ttl):\n \"\"\"\n Add a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, port=self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error adding TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully added TXT record %s', record_name)\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def del_txt_record(self, record_name, record_content):\n \"\"\"\n Delete a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n 
update.delete(rel, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, port=self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error deleting TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully deleted TXT record %s', record_name)\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def _find_domain(self, record_name):\n \"\"\"\n Find the closest domain with an SOA record for a given domain name.\n\n :param str record_name: The record name for which to find the closest SOA record.\n :returns: The domain, if found.\n :rtype: str\n :raises certbot.errors.PluginError: if no SOA record can be found.\n \"\"\"\n\n domain_name_guesses = dns_common.base_domain_name_guesses(record_name)\n\n # Loop through until we find an authoritative SOA record\n for guess in domain_name_guesses:\n if self._query_soa(guess):\n return guess\n\n raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'\n .format(record_name, domain_name_guesses))\n\n def _query_soa(self, domain_name):\n \"\"\"\n Query a domain name for an authoritative SOA record.\n\n :param str domain_name: The domain name to query for an SOA record.\n :returns: True if found, False otherwise.\n :rtype: bool\n :raises certbot.errors.PluginError: if no response is received.\n \"\"\"\n\n domain = dns.name.from_text(domain_name)\n\n request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)\n # Turn off Recursion Desired bit in query\n request.flags ^= dns.flags.RD\n\n try:\n try:\n response = dns.query.tcp(request, self.server, port=self.port)\n except OSError as e:\n logger.debug('TCP query failed, fallback to UDP: %s', e)\n response = dns.query.udp(request, self.server, port=self.port)\n rcode = response.rcode()\n\n # Authoritative Answer bit should be set\n if (rcode == dns.rcode.NOERROR and response.get_rrset(response.answer,\n domain, dns.rdataclass.IN, dns.rdatatype.SOA) and response.flags & dns.flags.AA):\n logger.debug('Received authoritative SOA response for %s', domain_name)\n return True\n\n logger.debug('No authoritative SOA record found for %s', domain_name)\n return False\n except Exception as e:\n raise errors.PluginError('Encountered error when making query: {0}'\n .format(e))\n", "path": "certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py"}], "after_files": [{"content": "\"\"\"DNS Authenticator using RFC 2136 Dynamic Updates.\"\"\"\nimport logging\n\nimport dns.flags\nimport dns.message\nimport dns.name\nimport dns.query\nimport dns.rdataclass\nimport dns.rdatatype\nimport dns.tsig\nimport dns.tsigkeyring\nimport dns.update\nimport zope.interface\n\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.plugins import dns_common\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_NETWORK_TIMEOUT = 45\n\[email protected](interfaces.IAuthenticator)\[email protected](interfaces.IPluginFactory)\nclass Authenticator(dns_common.DNSAuthenticator):\n \"\"\"DNS Authenticator using RFC 2136 Dynamic Updates\n\n This Authenticator uses RFC 2136 Dynamic Updates to fulfull a dns-01 challenge.\n \"\"\"\n\n ALGORITHMS = {\n 'HMAC-MD5': dns.tsig.HMAC_MD5,\n 'HMAC-SHA1': dns.tsig.HMAC_SHA1,\n 'HMAC-SHA224': dns.tsig.HMAC_SHA224,\n 'HMAC-SHA256': dns.tsig.HMAC_SHA256,\n 'HMAC-SHA384': dns.tsig.HMAC_SHA384,\n 'HMAC-SHA512': dns.tsig.HMAC_SHA512\n }\n\n PORT = 53\n\n description = 
'Obtain certificates using a DNS TXT record (if you are using BIND for DNS).'\n ttl = 120\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.credentials = None\n\n @classmethod\n def add_parser_arguments(cls, add): # pylint: disable=arguments-differ\n super(Authenticator, cls).add_parser_arguments(add, default_propagation_seconds=60)\n add('credentials', help='RFC 2136 credentials INI file.')\n\n def more_info(self): # pylint: disable=missing-function-docstring\n return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \\\n 'RFC 2136 Dynamic Updates.'\n\n def _validate_algorithm(self, credentials):\n algorithm = credentials.conf('algorithm')\n if algorithm:\n if not self.ALGORITHMS.get(algorithm.upper()):\n raise errors.PluginError(\"Unknown algorithm: {0}.\".format(algorithm))\n\n def _setup_credentials(self):\n self.credentials = self._configure_credentials(\n 'credentials',\n 'RFC 2136 credentials INI file',\n {\n 'name': 'TSIG key name',\n 'secret': 'TSIG key secret',\n 'server': 'The target DNS server'\n },\n self._validate_algorithm\n )\n\n def _perform(self, _domain, validation_name, validation):\n self._get_rfc2136_client().add_txt_record(validation_name, validation, self.ttl)\n\n def _cleanup(self, _domain, validation_name, validation):\n self._get_rfc2136_client().del_txt_record(validation_name, validation)\n\n def _get_rfc2136_client(self):\n return _RFC2136Client(self.credentials.conf('server'),\n int(self.credentials.conf('port') or self.PORT),\n self.credentials.conf('name'),\n self.credentials.conf('secret'),\n self.ALGORITHMS.get(self.credentials.conf('algorithm'),\n dns.tsig.HMAC_MD5))\n\n\nclass _RFC2136Client(object):\n \"\"\"\n Encapsulates all communication with the target DNS server.\n \"\"\"\n def __init__(self, server, port, key_name, key_secret, key_algorithm,\n timeout=DEFAULT_NETWORK_TIMEOUT):\n self.server = server\n self.port = port\n self.keyring = dns.tsigkeyring.from_text({\n key_name: key_secret\n })\n self.algorithm = key_algorithm\n self._default_timeout = timeout\n\n def add_txt_record(self, record_name, record_content, record_ttl):\n \"\"\"\n Add a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with '_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.add(rel, record_ttl, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, self._default_timeout, self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error adding TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully added TXT record %s', record_name)\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def del_txt_record(self, record_name, record_content):\n \"\"\"\n Delete a TXT record using the supplied information.\n\n :param str record_name: The record name (typically beginning with 
'_acme-challenge.').\n :param str record_content: The record content (typically the challenge validation).\n :param int record_ttl: The record TTL (number of seconds that the record may be cached).\n :raises certbot.errors.PluginError: if an error occurs communicating with the DNS server\n \"\"\"\n\n domain = self._find_domain(record_name)\n\n n = dns.name.from_text(record_name)\n o = dns.name.from_text(domain)\n rel = n.relativize(o)\n\n update = dns.update.Update(\n domain,\n keyring=self.keyring,\n keyalgorithm=self.algorithm)\n update.delete(rel, dns.rdatatype.TXT, record_content)\n\n try:\n response = dns.query.tcp(update, self.server, self._default_timeout, self.port)\n except Exception as e:\n raise errors.PluginError('Encountered error deleting TXT record: {0}'\n .format(e))\n rcode = response.rcode()\n\n if rcode == dns.rcode.NOERROR:\n logger.debug('Successfully deleted TXT record %s', record_name)\n else:\n raise errors.PluginError('Received response from server: {0}'\n .format(dns.rcode.to_text(rcode)))\n\n def _find_domain(self, record_name):\n \"\"\"\n Find the closest domain with an SOA record for a given domain name.\n\n :param str record_name: The record name for which to find the closest SOA record.\n :returns: The domain, if found.\n :rtype: str\n :raises certbot.errors.PluginError: if no SOA record can be found.\n \"\"\"\n\n domain_name_guesses = dns_common.base_domain_name_guesses(record_name)\n\n # Loop through until we find an authoritative SOA record\n for guess in domain_name_guesses:\n if self._query_soa(guess):\n return guess\n\n raise errors.PluginError('Unable to determine base domain for {0} using names: {1}.'\n .format(record_name, domain_name_guesses))\n\n def _query_soa(self, domain_name):\n \"\"\"\n Query a domain name for an authoritative SOA record.\n\n :param str domain_name: The domain name to query for an SOA record.\n :returns: True if found, False otherwise.\n :rtype: bool\n :raises certbot.errors.PluginError: if no response is received.\n \"\"\"\n\n domain = dns.name.from_text(domain_name)\n\n request = dns.message.make_query(domain, dns.rdatatype.SOA, dns.rdataclass.IN)\n # Turn off Recursion Desired bit in query\n request.flags ^= dns.flags.RD\n\n try:\n try:\n response = dns.query.tcp(request, self.server, self._default_timeout, self.port)\n except (OSError, dns.exception.Timeout) as e:\n logger.debug('TCP query failed, fallback to UDP: %s', e)\n response = dns.query.udp(request, self.server, self._default_timeout, self.port)\n rcode = response.rcode()\n\n # Authoritative Answer bit should be set\n if (rcode == dns.rcode.NOERROR and response.get_rrset(response.answer,\n domain, dns.rdataclass.IN, dns.rdatatype.SOA) and response.flags & dns.flags.AA):\n logger.debug('Received authoritative SOA response for %s', domain_name)\n return True\n\n logger.debug('No authoritative SOA record found for %s', domain_name)\n return False\n except Exception as e:\n raise errors.PluginError('Encountered error when making query: {0}'\n .format(e))\n", "path": "certbot-dns-rfc2136/certbot_dns_rfc2136/_internal/dns_rfc2136.py"}]}
| 3,478 | 689 |
gh_patches_debug_8732
|
rasdani/github-patches
|
git_diff
|
meltano__meltano-7427
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: `flask` not found when running `meltano-cloud` via `pipx`
### Meltano Version
N/A
### Python Version
3.11
### Bug scope
CLI (options, error messages, logging, etc.)
### Operating System
Python 3.11 Docker container
### Description
```sh
pip install pipx
pipx install 'git+https://github.com/meltano/meltano.git@cloud#subdirectory=src/cloud-cli'`
pipx ensurepath
bash
meltano-cloud login
```
Results in:
```
FileNotFoundError: [Errno 2] No such file or directory: 'flask'
```
This is because `pipx` only exposes the CLI entrypoints of our package, and not those of its dependencies like `flask`. We can resolve this by using the precise path to the `flask` binary in the active environment rather than searching for it on `$PATH`.
### Code
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cloud-cli/meltano/cloud/api/auth/auth.py`
Content:
```
1 """Authentication for Meltano Cloud."""
2
3 from __future__ import annotations
4
5 import asyncio
6 import os
7 import subprocess
8 import sys
9 import typing as t
10 import webbrowser
11 from contextlib import contextmanager
12 from pathlib import Path
13 from urllib.parse import urlencode, urljoin
14
15 import aiohttp
16 import click
17
18 from meltano.cloud.api.config import MeltanoCloudConfig
19
20 if sys.version_info <= (3, 8):
21 from cached_property import cached_property
22 else:
23 from functools import cached_property
24
25 LOGIN_STATUS_CHECK_DELAY_SECONDS = 0.2
26
27
28 class MeltanoCloudAuthError(Exception):
29 """Raised when an API call returns a 403."""
30
31
32 class MeltanoCloudAuth: # noqa: WPS214
33 """Authentication methods for Meltano Cloud."""
34
35 def __init__(self, config: MeltanoCloudConfig | None = None):
36 """Initialize a MeltanoCloudAuth instance.
37
38 Args:
39 config: the MeltanoCloudConfig to use
40 """
41 self.config = config or MeltanoCloudConfig.find()
42 self.base_url = self.config.base_auth_url
43 self.client_id = self.config.app_client_id
44
45 @cached_property
46 def login_url(self) -> str:
47 """Get the oauth2 authorization URL.
48
49 Returns:
50 the oauth2 authorization URL.
51 """
52 query_params = urlencode(
53 {
54 "client_id": self.client_id,
55 "response_type": "token",
56 "scope": "email openid profile",
57 "redirect_uri": f"http://localhost:{self.config.auth_callback_port}",
58 }
59 )
60 return f"{self.base_url}/oauth2/authorize?{query_params}"
61
62 @cached_property
63 def logout_url(self) -> str:
64 """Get the Meltano Cloud logout URL.
65
66 Returns:
67 the Meltano Cloud logout URL.
68 """
69 params = urlencode(
70 {
71 "client_id": self.client_id,
72 "logout_uri": f"http://localhost:{self.config.auth_callback_port}/logout", # noqa: E501)
73 }
74 )
75 return urljoin(self.base_url, f"logout?{params}")
76
77 @contextmanager
78 def callback_server(self) -> t.Iterator[None]:
79 """Context manager to run callback server locally.
80
81 Yields:
82 None
83 """
84 server = None
85 try:
86 server = subprocess.Popen( # noqa: S607
87 ("flask", "run", f"--port={self.config.auth_callback_port}"),
88 env={
89 **os.environ,
90 "FLASK_APP": "callback_server.py",
91 "MELTANO_CLOUD_CONFIG_PATH": str(self.config.config_path),
92 },
93 cwd=Path(__file__).parent,
94 stdout=subprocess.DEVNULL,
95 stderr=subprocess.STDOUT,
96 )
97 yield
98 finally:
99 if server:
100 server.kill()
101
102 async def login(self) -> None:
103 """Take user through login flow and get auth and id tokens."""
104 if await self.logged_in():
105 return
106 with self.callback_server():
107 click.echo("Logging in to Meltano Cloud.")
108 click.echo("You will be directed to a web browser to complete login.")
109 click.echo("If a web browser does not open, open the following link:")
110 click.secho(self.login_url, fg="green")
111 webbrowser.open_new_tab(self.login_url)
112 while not await self.logged_in():
113 self.config.refresh()
114 await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)
115
116 async def logout(self) -> None: # noqa: WPS213
117 """Log out."""
118 if not await self.logged_in():
119 click.secho("Not logged in.", fg="green")
120 return
121 with self.callback_server():
122 click.echo("Logging out of Meltano Cloud.")
123 click.echo("You will be directed to a web browser to complete logout.")
124 click.echo("If a web browser does not open, open the following link:")
125 click.secho(self.logout_url, fg="green")
126 webbrowser.open_new_tab(self.logout_url)
127 while await self.logged_in():
128 self.config.refresh()
129 await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)
130 click.secho("Successfully logged out.", fg="green")
131
132 def get_auth_header(self) -> dict[str, str]:
133 """Get the authorization header.
134
135 Used for authenticating to cloud API endpoints.
136
137 Returns:
138 Authorization header using ID token as bearer token.
139
140 """
141 return {"Authorization": f"Bearer {self.config.id_token}"}
142
143 def get_access_token_header(self) -> dict[str, str]:
144 """Get the access token header.
145
146 Used for authenticating to auth endpoints.
147
148 Returns:
149 Authorization header using access token as bearer token.
150 """
151 return {"Authorization": f"Bearer {self.config.access_token}"}
152
153 async def get_user_info_response(self) -> aiohttp.ClientResponse:
154 """Get user info.
155
156 Returns:
157 User info response
158 """
159 async with aiohttp.ClientSession() as session:
160 async with session.get(
161 urljoin(self.base_url, "oauth2/userInfo"),
162 headers=self.get_access_token_header(),
163 ) as response:
164 return response
165
166 async def get_user_info_json(self) -> dict:
167 """Get user info as dict.
168
169 Returns:
170 User info json
171 """
172 async with aiohttp.ClientSession() as session:
173 async with session.get(
174 urljoin(self.base_url, "oauth2/userInfo"),
175 headers=self.get_access_token_header(),
176 ) as response:
177 return await response.json()
178
179 async def logged_in(self) -> bool:
180 """Check if this instance is currently logged in.
181
182 Returns:
183 True if logged in, else False
184 """
185 user_info_resp = await self.get_user_info_response()
186 return bool(
187 self.config.access_token and self.config.id_token and user_info_resp.ok
188 )
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cloud-cli/meltano/cloud/api/auth/auth.py b/src/cloud-cli/meltano/cloud/api/auth/auth.py
--- a/src/cloud-cli/meltano/cloud/api/auth/auth.py
+++ b/src/cloud-cli/meltano/cloud/api/auth/auth.py
@@ -84,7 +84,11 @@
server = None
try:
server = subprocess.Popen( # noqa: S607
- ("flask", "run", f"--port={self.config.auth_callback_port}"),
+ (
+ str(Path(sys.prefix) / "bin" / "flask"),
+ "run",
+ f"--port={self.config.auth_callback_port}",
+ ),
env={
**os.environ,
"FLASK_APP": "callback_server.py",
|
{"golden_diff": "diff --git a/src/cloud-cli/meltano/cloud/api/auth/auth.py b/src/cloud-cli/meltano/cloud/api/auth/auth.py\n--- a/src/cloud-cli/meltano/cloud/api/auth/auth.py\n+++ b/src/cloud-cli/meltano/cloud/api/auth/auth.py\n@@ -84,7 +84,11 @@\n server = None\n try:\n server = subprocess.Popen( # noqa: S607\n- (\"flask\", \"run\", f\"--port={self.config.auth_callback_port}\"),\n+ (\n+ str(Path(sys.prefix) / \"bin\" / \"flask\"),\n+ \"run\",\n+ f\"--port={self.config.auth_callback_port}\",\n+ ),\n env={\n **os.environ,\n \"FLASK_APP\": \"callback_server.py\",\n", "issue": "bug: `flask` not found when running `meltano-cloud` via `pipx`\n### Meltano Version\n\nN/A\n\n### Python Version\n\n3.11\n\n### Bug scope\n\nCLI (options, error messages, logging, etc.)\n\n### Operating System\n\nPython 3.11 Docker container\n\n### Description\n\n```sh\r\npip install pipx\r\npipx install 'git+https://github.com/meltano/meltano.git@cloud#subdirectory=src/cloud-cli'`\r\npipx ensurepath\r\nbash\r\nmeltano-cloud login\r\n```\r\n\r\nResults in:\r\n```\r\nFileNotFoundError: [Errno 2] No such file or directory: 'flask'\r\n```\r\n\r\nThis is because `pipx` only exposes the CLI entrypoints of our package, and not those of its dependencies like `flask`. We can resolve this by using the precise path to the `flask` binary in the active environment rather than searching for it on `$PATH`.\n\n### Code\n\n_No response_\n", "before_files": [{"content": "\"\"\"Authentication for Meltano Cloud.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport os\nimport subprocess\nimport sys\nimport typing as t\nimport webbrowser\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom urllib.parse import urlencode, urljoin\n\nimport aiohttp\nimport click\n\nfrom meltano.cloud.api.config import MeltanoCloudConfig\n\nif sys.version_info <= (3, 8):\n from cached_property import cached_property\nelse:\n from functools import cached_property\n\nLOGIN_STATUS_CHECK_DELAY_SECONDS = 0.2\n\n\nclass MeltanoCloudAuthError(Exception):\n \"\"\"Raised when an API call returns a 403.\"\"\"\n\n\nclass MeltanoCloudAuth: # noqa: WPS214\n \"\"\"Authentication methods for Meltano Cloud.\"\"\"\n\n def __init__(self, config: MeltanoCloudConfig | None = None):\n \"\"\"Initialize a MeltanoCloudAuth instance.\n\n Args:\n config: the MeltanoCloudConfig to use\n \"\"\"\n self.config = config or MeltanoCloudConfig.find()\n self.base_url = self.config.base_auth_url\n self.client_id = self.config.app_client_id\n\n @cached_property\n def login_url(self) -> str:\n \"\"\"Get the oauth2 authorization URL.\n\n Returns:\n the oauth2 authorization URL.\n \"\"\"\n query_params = urlencode(\n {\n \"client_id\": self.client_id,\n \"response_type\": \"token\",\n \"scope\": \"email openid profile\",\n \"redirect_uri\": f\"http://localhost:{self.config.auth_callback_port}\",\n }\n )\n return f\"{self.base_url}/oauth2/authorize?{query_params}\"\n\n @cached_property\n def logout_url(self) -> str:\n \"\"\"Get the Meltano Cloud logout URL.\n\n Returns:\n the Meltano Cloud logout URL.\n \"\"\"\n params = urlencode(\n {\n \"client_id\": self.client_id,\n \"logout_uri\": f\"http://localhost:{self.config.auth_callback_port}/logout\", # noqa: E501)\n }\n )\n return urljoin(self.base_url, f\"logout?{params}\")\n\n @contextmanager\n def callback_server(self) -> t.Iterator[None]:\n \"\"\"Context manager to run callback server locally.\n\n Yields:\n None\n \"\"\"\n server = None\n try:\n server = subprocess.Popen( # noqa: S607\n (\"flask\", \"run\", 
f\"--port={self.config.auth_callback_port}\"),\n env={\n **os.environ,\n \"FLASK_APP\": \"callback_server.py\",\n \"MELTANO_CLOUD_CONFIG_PATH\": str(self.config.config_path),\n },\n cwd=Path(__file__).parent,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n yield\n finally:\n if server:\n server.kill()\n\n async def login(self) -> None:\n \"\"\"Take user through login flow and get auth and id tokens.\"\"\"\n if await self.logged_in():\n return\n with self.callback_server():\n click.echo(\"Logging in to Meltano Cloud.\")\n click.echo(\"You will be directed to a web browser to complete login.\")\n click.echo(\"If a web browser does not open, open the following link:\")\n click.secho(self.login_url, fg=\"green\")\n webbrowser.open_new_tab(self.login_url)\n while not await self.logged_in():\n self.config.refresh()\n await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)\n\n async def logout(self) -> None: # noqa: WPS213\n \"\"\"Log out.\"\"\"\n if not await self.logged_in():\n click.secho(\"Not logged in.\", fg=\"green\")\n return\n with self.callback_server():\n click.echo(\"Logging out of Meltano Cloud.\")\n click.echo(\"You will be directed to a web browser to complete logout.\")\n click.echo(\"If a web browser does not open, open the following link:\")\n click.secho(self.logout_url, fg=\"green\")\n webbrowser.open_new_tab(self.logout_url)\n while await self.logged_in():\n self.config.refresh()\n await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)\n click.secho(\"Successfully logged out.\", fg=\"green\")\n\n def get_auth_header(self) -> dict[str, str]:\n \"\"\"Get the authorization header.\n\n Used for authenticating to cloud API endpoints.\n\n Returns:\n Authorization header using ID token as bearer token.\n\n \"\"\"\n return {\"Authorization\": f\"Bearer {self.config.id_token}\"}\n\n def get_access_token_header(self) -> dict[str, str]:\n \"\"\"Get the access token header.\n\n Used for authenticating to auth endpoints.\n\n Returns:\n Authorization header using access token as bearer token.\n \"\"\"\n return {\"Authorization\": f\"Bearer {self.config.access_token}\"}\n\n async def get_user_info_response(self) -> aiohttp.ClientResponse:\n \"\"\"Get user info.\n\n Returns:\n User info response\n \"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.get(\n urljoin(self.base_url, \"oauth2/userInfo\"),\n headers=self.get_access_token_header(),\n ) as response:\n return response\n\n async def get_user_info_json(self) -> dict:\n \"\"\"Get user info as dict.\n\n Returns:\n User info json\n \"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.get(\n urljoin(self.base_url, \"oauth2/userInfo\"),\n headers=self.get_access_token_header(),\n ) as response:\n return await response.json()\n\n async def logged_in(self) -> bool:\n \"\"\"Check if this instance is currently logged in.\n\n Returns:\n True if logged in, else False\n \"\"\"\n user_info_resp = await self.get_user_info_response()\n return bool(\n self.config.access_token and self.config.id_token and user_info_resp.ok\n )\n", "path": "src/cloud-cli/meltano/cloud/api/auth/auth.py"}], "after_files": [{"content": "\"\"\"Authentication for Meltano Cloud.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport os\nimport subprocess\nimport sys\nimport typing as t\nimport webbrowser\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom urllib.parse import urlencode, urljoin\n\nimport aiohttp\nimport click\n\nfrom meltano.cloud.api.config import 
MeltanoCloudConfig\n\nif sys.version_info <= (3, 8):\n from cached_property import cached_property\nelse:\n from functools import cached_property\n\nLOGIN_STATUS_CHECK_DELAY_SECONDS = 0.2\n\n\nclass MeltanoCloudAuthError(Exception):\n \"\"\"Raised when an API call returns a 403.\"\"\"\n\n\nclass MeltanoCloudAuth: # noqa: WPS214\n \"\"\"Authentication methods for Meltano Cloud.\"\"\"\n\n def __init__(self, config: MeltanoCloudConfig | None = None):\n \"\"\"Initialize a MeltanoCloudAuth instance.\n\n Args:\n config: the MeltanoCloudConfig to use\n \"\"\"\n self.config = config or MeltanoCloudConfig.find()\n self.base_url = self.config.base_auth_url\n self.client_id = self.config.app_client_id\n\n @cached_property\n def login_url(self) -> str:\n \"\"\"Get the oauth2 authorization URL.\n\n Returns:\n the oauth2 authorization URL.\n \"\"\"\n query_params = urlencode(\n {\n \"client_id\": self.client_id,\n \"response_type\": \"token\",\n \"scope\": \"email openid profile\",\n \"redirect_uri\": f\"http://localhost:{self.config.auth_callback_port}\",\n }\n )\n return f\"{self.base_url}/oauth2/authorize?{query_params}\"\n\n @cached_property\n def logout_url(self) -> str:\n \"\"\"Get the Meltano Cloud logout URL.\n\n Returns:\n the Meltano Cloud logout URL.\n \"\"\"\n params = urlencode(\n {\n \"client_id\": self.client_id,\n \"logout_uri\": f\"http://localhost:{self.config.auth_callback_port}/logout\", # noqa: E501)\n }\n )\n return urljoin(self.base_url, f\"logout?{params}\")\n\n @contextmanager\n def callback_server(self) -> t.Iterator[None]:\n \"\"\"Context manager to run callback server locally.\n\n Yields:\n None\n \"\"\"\n server = None\n try:\n server = subprocess.Popen( # noqa: S607\n (\n str(Path(sys.prefix) / \"bin\" / \"flask\"),\n \"run\",\n f\"--port={self.config.auth_callback_port}\",\n ),\n env={\n **os.environ,\n \"FLASK_APP\": \"callback_server.py\",\n \"MELTANO_CLOUD_CONFIG_PATH\": str(self.config.config_path),\n },\n cwd=Path(__file__).parent,\n stdout=subprocess.DEVNULL,\n stderr=subprocess.STDOUT,\n )\n yield\n finally:\n if server:\n server.kill()\n\n async def login(self) -> None:\n \"\"\"Take user through login flow and get auth and id tokens.\"\"\"\n if await self.logged_in():\n return\n with self.callback_server():\n click.echo(\"Logging in to Meltano Cloud.\")\n click.echo(\"You will be directed to a web browser to complete login.\")\n click.echo(\"If a web browser does not open, open the following link:\")\n click.secho(self.login_url, fg=\"green\")\n webbrowser.open_new_tab(self.login_url)\n while not await self.logged_in():\n self.config.refresh()\n await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)\n\n async def logout(self) -> None: # noqa: WPS213\n \"\"\"Log out.\"\"\"\n if not await self.logged_in():\n click.secho(\"Not logged in.\", fg=\"green\")\n return\n with self.callback_server():\n click.echo(\"Logging out of Meltano Cloud.\")\n click.echo(\"You will be directed to a web browser to complete logout.\")\n click.echo(\"If a web browser does not open, open the following link:\")\n click.secho(self.logout_url, fg=\"green\")\n webbrowser.open_new_tab(self.logout_url)\n while await self.logged_in():\n self.config.refresh()\n await asyncio.sleep(LOGIN_STATUS_CHECK_DELAY_SECONDS)\n click.secho(\"Successfully logged out.\", fg=\"green\")\n\n def get_auth_header(self) -> dict[str, str]:\n \"\"\"Get the authorization header.\n\n Used for authenticating to cloud API endpoints.\n\n Returns:\n Authorization header using ID token as bearer token.\n\n \"\"\"\n return 
{\"Authorization\": f\"Bearer {self.config.id_token}\"}\n\n def get_access_token_header(self) -> dict[str, str]:\n \"\"\"Get the access token header.\n\n Used for authenticating to auth endpoints.\n\n Returns:\n Authorization header using access token as bearer token.\n \"\"\"\n return {\"Authorization\": f\"Bearer {self.config.access_token}\"}\n\n async def get_user_info_response(self) -> aiohttp.ClientResponse:\n \"\"\"Get user info.\n\n Returns:\n User info response\n \"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.get(\n urljoin(self.base_url, \"oauth2/userInfo\"),\n headers=self.get_access_token_header(),\n ) as response:\n return response\n\n async def get_user_info_json(self) -> dict:\n \"\"\"Get user info as dict.\n\n Returns:\n User info json\n \"\"\"\n async with aiohttp.ClientSession() as session:\n async with session.get(\n urljoin(self.base_url, \"oauth2/userInfo\"),\n headers=self.get_access_token_header(),\n ) as response:\n return await response.json()\n\n async def logged_in(self) -> bool:\n \"\"\"Check if this instance is currently logged in.\n\n Returns:\n True if logged in, else False\n \"\"\"\n user_info_resp = await self.get_user_info_response()\n return bool(\n self.config.access_token and self.config.id_token and user_info_resp.ok\n )\n", "path": "src/cloud-cli/meltano/cloud/api/auth/auth.py"}]}
| 2,211 | 170 |
gh_patches_debug_8064
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-37553
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot edit or delete alerts
### Environment
SaaS (https://sentry.io/)
### Version
_No response_
### Steps to Reproduce
1. Have alerts that were set up a while ago
2. Get a bunch of emails from one alert that is too touchy
3. Try to edit alert (fails)
4. Try to delete alert (fails)
### Expected Result
Can edit or delete alerts that I created on an account that I am the only user for
### Actual Result
Cannot edit or delete alerts


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/incidents/endpoints/bases.py`
Content:
```
1 from rest_framework.exceptions import PermissionDenied
2 from rest_framework.request import Request
3
4 from sentry import features
5 from sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint
6 from sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint
7 from sentry.api.exceptions import ResourceDoesNotExist
8 from sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction
9
10
11 class ProjectAlertRuleEndpoint(ProjectEndpoint):
12 permission_classes = (ProjectAlertRulePermission,)
13
14 def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):
15 args, kwargs = super().convert_args(request, *args, **kwargs)
16 project = kwargs["project"]
17
18 if not features.has("organizations:incidents", project.organization, actor=request.user):
19 raise ResourceDoesNotExist
20
21 if not request.access.has_project_access(project):
22 raise PermissionDenied
23
24 try:
25 kwargs["alert_rule"] = AlertRule.objects.get(
26 snuba_query__subscriptions__project=project, id=alert_rule_id
27 )
28 except AlertRule.DoesNotExist:
29 raise ResourceDoesNotExist
30
31 return args, kwargs
32
33
34 class OrganizationAlertRuleEndpoint(OrganizationEndpoint):
35 permission_classes = (OrganizationAlertRulePermission,)
36
37 def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):
38 args, kwargs = super().convert_args(request, *args, **kwargs)
39 organization = kwargs["organization"]
40
41 if not features.has("organizations:incidents", organization, actor=request.user):
42 raise ResourceDoesNotExist
43
44 try:
45 kwargs["alert_rule"] = AlertRule.objects.get(
46 organization=organization, id=alert_rule_id
47 )
48 except AlertRule.DoesNotExist:
49 raise ResourceDoesNotExist
50
51 return args, kwargs
52
53
54 class OrganizationAlertRuleTriggerEndpoint(OrganizationAlertRuleEndpoint):
55 def convert_args(self, request: Request, alert_rule_trigger_id, *args, **kwargs):
56 args, kwargs = super().convert_args(request, *args, **kwargs)
57 organization = kwargs["organization"]
58 alert_rule = kwargs["alert_rule"]
59
60 if not features.has("organizations:incidents", organization, actor=request.user):
61 raise ResourceDoesNotExist
62
63 try:
64 kwargs["alert_rule_trigger"] = AlertRuleTrigger.objects.get(
65 alert_rule=alert_rule, id=alert_rule_trigger_id
66 )
67 except AlertRuleTrigger.DoesNotExist:
68 raise ResourceDoesNotExist
69
70 return args, kwargs
71
72
73 class OrganizationAlertRuleTriggerActionEndpoint(OrganizationAlertRuleTriggerEndpoint):
74 def convert_args(self, request: Request, alert_rule_trigger_action_id, *args, **kwargs):
75 args, kwargs = super().convert_args(request, *args, **kwargs)
76 organization = kwargs["organization"]
77 trigger = kwargs["alert_rule_trigger"]
78
79 if not features.has("organizations:incidents", organization, actor=request.user):
80 raise ResourceDoesNotExist
81
82 try:
83 kwargs["alert_rule_trigger_action"] = AlertRuleTriggerAction.objects.get(
84 alert_rule_trigger=trigger, id=alert_rule_trigger_action_id
85 )
86 except AlertRuleTriggerAction.DoesNotExist:
87 raise ResourceDoesNotExist
88
89 return args, kwargs
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/incidents/endpoints/bases.py b/src/sentry/incidents/endpoints/bases.py
--- a/src/sentry/incidents/endpoints/bases.py
+++ b/src/sentry/incidents/endpoints/bases.py
@@ -38,7 +38,10 @@
args, kwargs = super().convert_args(request, *args, **kwargs)
organization = kwargs["organization"]
- if not features.has("organizations:incidents", organization, actor=request.user):
+ # Allow orgs that have downgraded plans to delete metric alerts
+ if request.method != "DELETE" and not features.has(
+ "organizations:incidents", organization, actor=request.user
+ ):
raise ResourceDoesNotExist
try:
|
{"golden_diff": "diff --git a/src/sentry/incidents/endpoints/bases.py b/src/sentry/incidents/endpoints/bases.py\n--- a/src/sentry/incidents/endpoints/bases.py\n+++ b/src/sentry/incidents/endpoints/bases.py\n@@ -38,7 +38,10 @@\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n \n- if not features.has(\"organizations:incidents\", organization, actor=request.user):\n+ # Allow orgs that have downgraded plans to delete metric alerts\n+ if request.method != \"DELETE\" and not features.has(\n+ \"organizations:incidents\", organization, actor=request.user\n+ ):\n raise ResourceDoesNotExist\n \n try:\n", "issue": "Cannot edit or delete alerts\n### Environment\n\nSaaS (https://sentry.io/)\n\n### Version\n\n_No response_\n\n### Steps to Reproduce\n\n1. Have alerts that were set up a while ago\r\n2. Get a bunch of emails from one alert that is too touchy\r\n3. Try to edit alert (fails)\r\n4. Try to delete alert (fails)\n\n### Expected Result\n\nCan edit or delete alerts that I created on an account that I am the only user for\n\n### Actual Result\n\nCannot edit or delete alerts\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from rest_framework.exceptions import PermissionDenied\nfrom rest_framework.request import Request\n\nfrom sentry import features\nfrom sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint\nfrom sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction\n\n\nclass ProjectAlertRuleEndpoint(ProjectEndpoint):\n permission_classes = (ProjectAlertRulePermission,)\n\n def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n project = kwargs[\"project\"]\n\n if not features.has(\"organizations:incidents\", project.organization, actor=request.user):\n raise ResourceDoesNotExist\n\n if not request.access.has_project_access(project):\n raise PermissionDenied\n\n try:\n kwargs[\"alert_rule\"] = AlertRule.objects.get(\n snuba_query__subscriptions__project=project, id=alert_rule_id\n )\n except AlertRule.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationAlertRulePermission,)\n\n def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule\"] = AlertRule.objects.get(\n organization=organization, id=alert_rule_id\n )\n except AlertRule.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleTriggerEndpoint(OrganizationAlertRuleEndpoint):\n def convert_args(self, request: Request, alert_rule_trigger_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n alert_rule = kwargs[\"alert_rule\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule_trigger\"] = AlertRuleTrigger.objects.get(\n alert_rule=alert_rule, id=alert_rule_trigger_id\n )\n except 
AlertRuleTrigger.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleTriggerActionEndpoint(OrganizationAlertRuleTriggerEndpoint):\n def convert_args(self, request: Request, alert_rule_trigger_action_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n trigger = kwargs[\"alert_rule_trigger\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule_trigger_action\"] = AlertRuleTriggerAction.objects.get(\n alert_rule_trigger=trigger, id=alert_rule_trigger_action_id\n )\n except AlertRuleTriggerAction.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n", "path": "src/sentry/incidents/endpoints/bases.py"}], "after_files": [{"content": "from rest_framework.exceptions import PermissionDenied\nfrom rest_framework.request import Request\n\nfrom sentry import features\nfrom sentry.api.bases.organization import OrganizationAlertRulePermission, OrganizationEndpoint\nfrom sentry.api.bases.project import ProjectAlertRulePermission, ProjectEndpoint\nfrom sentry.api.exceptions import ResourceDoesNotExist\nfrom sentry.incidents.models import AlertRule, AlertRuleTrigger, AlertRuleTriggerAction\n\n\nclass ProjectAlertRuleEndpoint(ProjectEndpoint):\n permission_classes = (ProjectAlertRulePermission,)\n\n def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n project = kwargs[\"project\"]\n\n if not features.has(\"organizations:incidents\", project.organization, actor=request.user):\n raise ResourceDoesNotExist\n\n if not request.access.has_project_access(project):\n raise PermissionDenied\n\n try:\n kwargs[\"alert_rule\"] = AlertRule.objects.get(\n snuba_query__subscriptions__project=project, id=alert_rule_id\n )\n except AlertRule.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleEndpoint(OrganizationEndpoint):\n permission_classes = (OrganizationAlertRulePermission,)\n\n def convert_args(self, request: Request, alert_rule_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n\n # Allow orgs that have downgraded plans to delete metric alerts\n if request.method != \"DELETE\" and not features.has(\n \"organizations:incidents\", organization, actor=request.user\n ):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule\"] = AlertRule.objects.get(\n organization=organization, id=alert_rule_id\n )\n except AlertRule.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleTriggerEndpoint(OrganizationAlertRuleEndpoint):\n def convert_args(self, request: Request, alert_rule_trigger_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n alert_rule = kwargs[\"alert_rule\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule_trigger\"] = AlertRuleTrigger.objects.get(\n alert_rule=alert_rule, id=alert_rule_trigger_id\n )\n except AlertRuleTrigger.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n\n\nclass OrganizationAlertRuleTriggerActionEndpoint(OrganizationAlertRuleTriggerEndpoint):\n def convert_args(self, request: Request, 
alert_rule_trigger_action_id, *args, **kwargs):\n args, kwargs = super().convert_args(request, *args, **kwargs)\n organization = kwargs[\"organization\"]\n trigger = kwargs[\"alert_rule_trigger\"]\n\n if not features.has(\"organizations:incidents\", organization, actor=request.user):\n raise ResourceDoesNotExist\n\n try:\n kwargs[\"alert_rule_trigger_action\"] = AlertRuleTriggerAction.objects.get(\n alert_rule_trigger=trigger, id=alert_rule_trigger_action_id\n )\n except AlertRuleTriggerAction.DoesNotExist:\n raise ResourceDoesNotExist\n\n return args, kwargs\n", "path": "src/sentry/incidents/endpoints/bases.py"}]}
| 1,383 | 164 |
gh_patches_debug_6619
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-2502
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip install web3, errors due to incompatible version of eth-rlp
* Version: 5.28.0
* Python: 3.8
* OS: linux
* `pip freeze` output
```
appdirs==1.4.3
certifi==2019.11.28
chardet==3.0.4
dbus-python==1.2.16
distlib==0.3.0
distro-info===0.23ubuntu1
filelock==3.0.12
idna==2.8
importlib-metadata==1.5.0
more-itertools==4.2.0
netifaces==0.10.4
PyGObject==3.36.0
pymacaroons==0.13.0
PyNaCl==1.3.0
python-apt==2.0.0+ubuntu0.20.4.6
python-debian===0.1.36ubuntu1
PyYAML==5.3.1
requests==2.22.0
requests-unixsocket==0.2.0
six==1.14.0
ubuntu-advantage-tools==27.4
urllib3==1.25.8
virtualenv==20.0.17
zipp==1.0.0
```
### What was wrong?
When trying to install web3.py in a new virtualenv, on a new installation of Ubuntu 20.04, the following error is thrown:
```
virtualenv -p python3.8 venv
source venv/bin/activate
pip install web3
...
ERROR: eth-rlp 0.3.0 has requirement eth-utils<3,>=2.0.0, but you'll have eth-utils 1.10.0 which is incompatible.
...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from setuptools import (
3 find_packages,
4 setup,
5 )
6
7 extras_require = {
8 'tester': [
9 "eth-tester[py-evm]==v0.6.0-beta.6",
10 "py-geth>=3.8.0,<4",
11 ],
12 'linter': [
13 "flake8==3.8.3",
14 "isort>=4.2.15,<4.3.5",
15 "mypy==0.910",
16 "types-setuptools>=57.4.4,<58",
17 "types-requests>=2.26.1,<3",
18 "types-protobuf==3.19.13",
19 ],
20 'docs': [
21 "mock",
22 "sphinx-better-theme>=0.1.4",
23 "click>=5.1",
24 "configparser==3.5.0",
25 "contextlib2>=0.5.4",
26 "py-geth>=3.8.0,<4",
27 "py-solc>=0.4.0",
28 "pytest>=4.4.0,<5.0.0",
29 "sphinx>=3.0,<4",
30 "sphinx_rtd_theme>=0.1.9",
31 "toposort>=1.4",
32 "towncrier==18.5.0",
33 "urllib3",
34 "wheel",
35 "Jinja2<=3.0.3", # Jinja v3.1.0 dropped support for python 3.6
36 ],
37 'dev': [
38 "bumpversion",
39 "flaky>=3.7.0,<4",
40 "hypothesis>=3.31.2,<6",
41 "pytest>=4.4.0,<5.0.0",
42 "pytest-asyncio>=0.10.0,<0.11",
43 "pytest-mock>=1.10,<2",
44 "pytest-pythonpath>=0.3",
45 "pytest-watch>=4.2,<5",
46 "pytest-xdist>=1.29,<2",
47 "setuptools>=38.6.0",
48 "tox>=1.8.0",
49 "tqdm>4.32,<5",
50 "twine>=1.13,<2",
51 "pluggy==0.13.1",
52 "when-changed>=0.3.0,<0.4"
53 ]
54 }
55
56 extras_require['dev'] = (
57 extras_require['tester']
58 + extras_require['linter']
59 + extras_require['docs']
60 + extras_require['dev']
61 )
62
63 with open('./README.md') as readme:
64 long_description = readme.read()
65
66 setup(
67 name='web3',
68 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
69 version='5.29.2',
70 description="""Web3.py""",
71 long_description_content_type='text/markdown',
72 long_description=long_description,
73 author='Piper Merriam',
74 author_email='[email protected]',
75 url='https://github.com/ethereum/web3.py',
76 include_package_data=True,
77 install_requires=[
78 "aiohttp>=3.7.4.post0,<4",
79 "eth-abi>=2.0.0b6,<3.0.0",
80 "eth-account>=0.5.7,<0.6.0",
81 "eth-hash[pycryptodome]>=0.2.0,<1.0.0",
82 "eth-typing>=2.0.0,<3.0.0",
83 "eth-utils>=1.9.5,<2.0.0",
84 "hexbytes>=0.1.0,<1.0.0",
85 "ipfshttpclient==0.8.0a2",
86 "jsonschema>=3.2.0,<5",
87 "lru-dict>=1.1.6,<2.0.0",
88 "protobuf>=3.10.0,<4",
89 "pywin32>=223;platform_system=='Windows'",
90 "requests>=2.16.0,<3.0.0",
91 # remove typing_extensions after python_requires>=3.8, see web3._utils.compat
92 "typing-extensions>=3.7.4.1,<5;python_version<'3.8'",
93 "websockets>=9.1,<10",
94 ],
95 python_requires='>=3.6,<4',
96 extras_require=extras_require,
97 py_modules=['web3', 'ens', 'ethpm'],
98 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]},
99 license="MIT",
100 zip_safe=False,
101 keywords='ethereum',
102 packages=find_packages(exclude=["tests", "tests.*"]),
103 package_data={"web3": ["py.typed"]},
104 classifiers=[
105 'Development Status :: 5 - Production/Stable',
106 'Intended Audience :: Developers',
107 'License :: OSI Approved :: MIT License',
108 'Natural Language :: English',
109 'Programming Language :: Python :: 3',
110 'Programming Language :: Python :: 3.6',
111 'Programming Language :: Python :: 3.7',
112 'Programming Language :: Python :: 3.8',
113 'Programming Language :: Python :: 3.9',
114 ],
115 )
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -79,6 +79,9 @@
"eth-abi>=2.0.0b6,<3.0.0",
"eth-account>=0.5.7,<0.6.0",
"eth-hash[pycryptodome]>=0.2.0,<1.0.0",
+ # eth-account allows too broad of an eth-rlp dependency.
+ # This eth-rlp pin can be removed once it gets tightened up in eth-account
+ "eth-rlp<0.3",
"eth-typing>=2.0.0,<3.0.0",
"eth-utils>=1.9.5,<2.0.0",
"hexbytes>=0.1.0,<1.0.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -79,6 +79,9 @@\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.7,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n+ # eth-account allows too broad of an eth-rlp dependency.\n+ # This eth-rlp pin can be removed once it gets tightened up in eth-account\n+ \"eth-rlp<0.3\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n", "issue": "pip install web3, errors due to incompatible version of eth-rlp\n* Version: 5.28.0\r\n* Python: 3.8\r\n* OS: linux\r\n* `pip freeze` output\r\n\r\n```\r\nappdirs==1.4.3\r\ncertifi==2019.11.28\r\nchardet==3.0.4\r\ndbus-python==1.2.16\r\ndistlib==0.3.0\r\ndistro-info===0.23ubuntu1\r\nfilelock==3.0.12\r\nidna==2.8\r\nimportlib-metadata==1.5.0\r\nmore-itertools==4.2.0\r\nnetifaces==0.10.4\r\nPyGObject==3.36.0\r\npymacaroons==0.13.0\r\nPyNaCl==1.3.0\r\npython-apt==2.0.0+ubuntu0.20.4.6\r\npython-debian===0.1.36ubuntu1\r\nPyYAML==5.3.1\r\nrequests==2.22.0\r\nrequests-unixsocket==0.2.0\r\nsix==1.14.0\r\nubuntu-advantage-tools==27.4\r\nurllib3==1.25.8\r\nvirtualenv==20.0.17\r\nzipp==1.0.0\r\n\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\n\r\nWhen trying to install web3.py in a new virtualenv, on a new installation of Ubuntu 20.04, the following error is thrown:\r\n\r\n```\r\n\r\nvirtualenv -p python3.8 venv\r\nsource venv/bin/activate\r\npip install web3\r\n...\r\nERROR: eth-rlp 0.3.0 has requirement eth-utils<3,>=2.0.0, but you'll have eth-utils 1.10.0 which is incompatible.\r\n...\r\n\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.6\",\n \"py-geth>=3.8.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.910\",\n \"types-setuptools>=57.4.4,<58\",\n \"types-requests>=2.26.1,<3\",\n \"types-protobuf==3.19.13\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.8.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\",\n \"Jinja2<=3.0.3\", # Jinja v3.1.0 dropped support for python 3.6\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.29.2',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.7,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<5\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.6\",\n \"py-geth>=3.8.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.910\",\n \"types-setuptools>=57.4.4,<58\",\n \"types-requests>=2.26.1,<3\",\n \"types-protobuf==3.19.13\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.8.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\",\n \"Jinja2<=3.0.3\", # Jinja v3.1.0 dropped support for python 3.6\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.29.2',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.7,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n # eth-account allows too broad of an eth-rlp dependency.\n # This eth-rlp pin can be removed once it gets tightened up in eth-account\n \"eth-rlp<0.3\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<5\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}]}
| 2,100 | 193 |
gh_patches_debug_15468
|
rasdani/github-patches
|
git_diff
|
codespell-project__codespell-2477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Build] QA warning about codespell_lib.tests being installed as data
While packaging v2.2.0 for Gentoo Linux, I got a QA notice about this:
```
* QA Notice: setuptools warnings detected:
*
* Installing 'codespell_lib.tests' as data is deprecated, please list it in `packages`.
```
The actual setuptools warning is as follows (shown here for Python 3.11, but the same for 3.10):
```
/usr/lib/python3.11/site-packages/setuptools/command/build_py.py:202: SetuptoolsDeprecationWarning: Instal
ling 'codespell_lib.tests' as data is deprecated, please list it in `packages`.
!!
############################
# Package would be ignored #
############################
Python recognizes 'codespell_lib.tests' as an importable package,
but it is not listed in the `packages` configuration of setuptools.
'codespell_lib.tests' has been automatically added to the distribution only
because it may contain data files, but this behavior is likely to change
in future versions of setuptools (and therefore is considered deprecated).
Please make sure that 'codespell_lib.tests' is included as a package by using
the `packages` configuration field or the proper discovery methods
(for example by using `find_namespace_packages(...)`/`find_namespace:`
instead of `find_packages(...)`/`find:`).
You can read more about "package discovery" and "data files" on setuptools
documentation page.
!!
check.warn(importable)
```
Find attached the full build log.
[codespell-2.2.0:20220818-083735.log](https://github.com/codespell-project/codespell/files/9371941/codespell-2.2.0.20220818-083735.log)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 # adapted from mne-python
4
5 import os
6
7 from setuptools import setup
8
9 from codespell_lib import __version__
10
11 DISTNAME = 'codespell'
12 DESCRIPTION = """Codespell"""
13 MAINTAINER = 'Lucas De Marchi'
14 MAINTAINER_EMAIL = '[email protected]'
15 URL = 'https://github.com/codespell-project/codespell/'
16 LICENSE = 'GPL v2'
17 DOWNLOAD_URL = 'https://github.com/codespell-project/codespell/'
18 with open('README.rst', 'r') as f:
19 LONG_DESCRIPTION = f.read()
20
21 if __name__ == "__main__":
22 if os.path.exists('MANIFEST'):
23 os.remove('MANIFEST')
24
25 setup(name=DISTNAME,
26 maintainer=MAINTAINER,
27 include_package_data=True,
28 maintainer_email=MAINTAINER_EMAIL,
29 description=DESCRIPTION,
30 license=LICENSE,
31 url=URL,
32 version=__version__,
33 download_url=DOWNLOAD_URL,
34 long_description=LONG_DESCRIPTION,
35 long_description_content_type='text/x-rst',
36 zip_safe=False,
37 classifiers=['Intended Audience :: Developers',
38 'License :: OSI Approved',
39 'Programming Language :: Python',
40 'Topic :: Software Development',
41 'Operating System :: Microsoft :: Windows',
42 'Operating System :: POSIX',
43 'Operating System :: Unix',
44 'Operating System :: MacOS'],
45 platforms='any',
46 python_requires='>=3.6',
47 packages=[
48 'codespell_lib',
49 'codespell_lib.data',
50 ],
51 package_data={'codespell_lib': [
52 os.path.join('data', 'dictionary*.txt'),
53 os.path.join('data', 'linux-kernel.exclude'),
54 ]},
55 exclude_package_data={'codespell_lib': [
56 os.path.join('tests', '*'),
57 ]},
58 entry_points={
59 'console_scripts': [
60 'codespell = codespell_lib:_script_main'
61 ],
62 },
63 extras_require={
64 "dev": ["check-manifest", "flake8", "pytest", "pytest-cov",
65 "pytest-dependency"],
66 "hard-encoding-detection": ["chardet"],
67 }
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,15 +46,13 @@
python_requires='>=3.6',
packages=[
'codespell_lib',
+ 'codespell_lib.tests',
'codespell_lib.data',
],
package_data={'codespell_lib': [
os.path.join('data', 'dictionary*.txt'),
os.path.join('data', 'linux-kernel.exclude'),
]},
- exclude_package_data={'codespell_lib': [
- os.path.join('tests', '*'),
- ]},
entry_points={
'console_scripts': [
'codespell = codespell_lib:_script_main'
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,15 +46,13 @@\n python_requires='>=3.6',\n packages=[\n 'codespell_lib',\n+ 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n os.path.join('data', 'dictionary*.txt'),\n os.path.join('data', 'linux-kernel.exclude'),\n ]},\n- exclude_package_data={'codespell_lib': [\n- os.path.join('tests', '*'),\n- ]},\n entry_points={\n 'console_scripts': [\n 'codespell = codespell_lib:_script_main'\n", "issue": "[Build] QA warning about codespell_lib.tests being installed as data\nWhile packaging v2.2.0 for Gentoo Linux, I got a QA notice about this:\r\n\r\n```\r\n* QA Notice: setuptools warnings detected:\r\n * \r\n * Installing 'codespell_lib.tests' as data is deprecated, please list it in `packages`.\r\n```\r\n\r\nThe actual setuptools warning is as (here shown for Python 3.11, but same for 3.10)\r\n\r\n```\r\n/usr/lib/python3.11/site-packages/setuptools/command/build_py.py:202: SetuptoolsDeprecationWarning: Instal\r\nling 'codespell_lib.tests' as data is deprecated, please list it in `packages`.\r\n !!\r\n\r\n\r\n ############################\r\n # Package would be ignored #\r\n ############################\r\n Python recognizes 'codespell_lib.tests' as an importable package,\r\n but it is not listed in the `packages` configuration of setuptools.\r\n\r\n 'codespell_lib.tests' has been automatically added to the distribution only\r\n because it may contain data files, but this behavior is likely to change\r\n in future versions of setuptools (and therefore is considered deprecated).\r\n\r\n Please make sure that 'codespell_lib.tests' is included as a package by using\r\n the `packages` configuration field or the proper discovery methods\r\n (for example by using `find_namespace_packages(...)`/`find_namespace:`\r\n instead of `find_packages(...)`/`find:`).\r\n\r\n You can read more about \"package discovery\" and \"data files\" on setuptools\r\n documentation page.\r\n\r\n\r\n!!\r\n\r\n check.warn(importable)\r\n```\r\n\r\nFind attached the full build log.\r\n[codespell-2.2.0:20220818-083735.log](https://github.com/codespell-project/codespell/files/9371941/codespell-2.2.0.20220818-083735.log)\r\n\n", "before_files": [{"content": "#! 
/usr/bin/env python\n\n# adapted from mne-python\n\nimport os\n\nfrom setuptools import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/codespell-project/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/codespell-project/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/x-rst',\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n python_requires='>=3.6',\n packages=[\n 'codespell_lib',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n os.path.join('data', 'dictionary*.txt'),\n os.path.join('data', 'linux-kernel.exclude'),\n ]},\n exclude_package_data={'codespell_lib': [\n os.path.join('tests', '*'),\n ]},\n entry_points={\n 'console_scripts': [\n 'codespell = codespell_lib:_script_main'\n ],\n },\n extras_require={\n \"dev\": [\"check-manifest\", \"flake8\", \"pytest\", \"pytest-cov\",\n \"pytest-dependency\"],\n \"hard-encoding-detection\": [\"chardet\"],\n }\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\n# adapted from mne-python\n\nimport os\n\nfrom setuptools import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/codespell-project/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/codespell-project/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/x-rst',\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n python_requires='>=3.6',\n packages=[\n 'codespell_lib',\n 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n os.path.join('data', 'dictionary*.txt'),\n os.path.join('data', 'linux-kernel.exclude'),\n ]},\n entry_points={\n 'console_scripts': [\n 'codespell = codespell_lib:_script_main'\n ],\n },\n extras_require={\n \"dev\": [\"check-manifest\", \"flake8\", \"pytest\", \"pytest-cov\",\n \"pytest-dependency\"],\n \"hard-encoding-detection\": [\"chardet\"],\n }\n )\n", "path": "setup.py"}]}
| 1,275 | 153 |
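
For reference, a minimal sketch of what the codespell `setup()` call looks like after the patch above — only the arguments touched by the diff are shown (the package names and `package_data` paths come from the diff; every other keyword argument of the original `setup.py` is elided):

```python
# Minimal sketch of setup.py after the codespell patch: 'codespell_lib.tests'
# is listed as a real package and the exclude_package_data entry is gone,
# which is what silences the SetuptoolsDeprecationWarning about installing
# the tests as data. Only the arguments touched by the diff are shown.
import os

from setuptools import setup

setup(
    name='codespell',
    packages=[
        'codespell_lib',
        'codespell_lib.tests',
        'codespell_lib.data',
    ],
    package_data={'codespell_lib': [
        os.path.join('data', 'dictionary*.txt'),
        os.path.join('data', 'linux-kernel.exclude'),
    ]},
)
```

Listing `codespell_lib.tests` in `packages` (instead of excluding its files) is exactly what the setuptools package-discovery warning asks for.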
gh_patches_debug_15763
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-499
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation of oauth2client dependency
According to the readme, google/oauth2client is deprecated. They suggest switching to [google-auth](https://google-auth.readthedocs.io/) or [oauthlib](http://oauthlib.readthedocs.io/).
This probably means that this package and all of the docs should also be refactored to use these new packages?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 7):
25 print('google-api-python-client requires python version >= 2.7.',
26 file=sys.stderr)
27 sys.exit(1)
28 if (3, 1) <= sys.version_info < (3, 3):
29 print('google-api-python-client requires python3 version >= 3.3.',
30 file=sys.stderr)
31 sys.exit(1)
32
33 from setuptools import setup
34 import pkg_resources
35
36 def _DetectBadness():
37 import os
38 if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:
39 return
40 o2c_pkg = None
41 try:
42 o2c_pkg = pkg_resources.get_distribution('oauth2client')
43 except pkg_resources.DistributionNotFound:
44 pass
45 oauth2client = None
46 try:
47 import oauth2client
48 except ImportError:
49 pass
50 if o2c_pkg is None and oauth2client is not None:
51 raise RuntimeError(
52 'Previous version of google-api-python-client detected; due to a '
53 'packaging issue, we cannot perform an in-place upgrade. Please remove '
54 'the old version and re-install this package.'
55 )
56
57 _DetectBadness()
58
59 packages = [
60 'apiclient',
61 'googleapiclient',
62 'googleapiclient/discovery_cache',
63 ]
64
65 install_requires = [
66 'httplib2>=0.9.2,<1dev',
67 'oauth2client>=1.5.0,<5.0.0dev',
68 'six>=1.6.1,<2dev',
69 'uritemplate>=3.0.0,<4dev',
70 ]
71
72 long_desc = """The Google API Client for Python is a client library for
73 accessing the Plus, Moderator, and many other Google APIs."""
74
75 import googleapiclient
76 version = googleapiclient.__version__
77
78 setup(
79 name="google-api-python-client",
80 version=version,
81 description="Google API Client Library for Python",
82 long_description=long_desc,
83 author="Google Inc.",
84 url="http://github.com/google/google-api-python-client/",
85 install_requires=install_requires,
86 packages=packages,
87 package_data={},
88 license="Apache 2.0",
89 keywords="google api client",
90 classifiers=[
91 'Programming Language :: Python :: 2',
92 'Programming Language :: Python :: 2.7',
93 'Programming Language :: Python :: 3',
94 'Programming Language :: Python :: 3.3',
95 'Programming Language :: Python :: 3.4',
96 'Programming Language :: Python :: 3.5',
97 'Programming Language :: Python :: 3.6',
98 'Development Status :: 5 - Production/Stable',
99 'Intended Audience :: Developers',
100 'License :: OSI Approved :: Apache Software License',
101 'Operating System :: OS Independent',
102 'Topic :: Internet :: WWW/HTTP',
103 ],
104 )
105
```
Path: `googleapiclient/discovery_cache/file_cache.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """File based cache for the discovery document.
16
17 The cache is stored in a single file so that multiple processes can
18 share the same cache. It locks the file whenever accesing to the
19 file. When the cache content is corrupted, it will be initialized with
20 an empty cache.
21 """
22
23 from __future__ import division
24
25 import datetime
26 import json
27 import logging
28 import os
29 import tempfile
30 import threading
31
32 try:
33 from oauth2client.contrib.locked_file import LockedFile
34 except ImportError:
35 # oauth2client < 2.0.0
36 try:
37 from oauth2client.locked_file import LockedFile
38 except ImportError:
39 # oauth2client > 4.0.0
40 raise ImportError(
41 'file_cache is unavailable when using oauth2client >= 4.0.0')
42
43 from . import base
44 from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
45
46 LOGGER = logging.getLogger(__name__)
47
48 FILENAME = 'google-api-python-client-discovery-doc.cache'
49 EPOCH = datetime.datetime.utcfromtimestamp(0)
50
51
52 def _to_timestamp(date):
53 try:
54 return (date - EPOCH).total_seconds()
55 except AttributeError:
56 # The following is the equivalent of total_seconds() in Python2.6.
57 # See also: https://docs.python.org/2/library/datetime.html
58 delta = date - EPOCH
59 return ((delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
60 * 10**6) / 10**6)
61
62
63 def _read_or_initialize_cache(f):
64 f.file_handle().seek(0)
65 try:
66 cache = json.load(f.file_handle())
67 except Exception:
68 # This means it opens the file for the first time, or the cache is
69 # corrupted, so initializing the file with an empty dict.
70 cache = {}
71 f.file_handle().truncate(0)
72 f.file_handle().seek(0)
73 json.dump(cache, f.file_handle())
74 return cache
75
76
77 class Cache(base.Cache):
78 """A file based cache for the discovery documents."""
79
80 def __init__(self, max_age):
81 """Constructor.
82
83 Args:
84 max_age: Cache expiration in seconds.
85 """
86 self._max_age = max_age
87 self._file = os.path.join(tempfile.gettempdir(), FILENAME)
88 f = LockedFile(self._file, 'a+', 'r')
89 try:
90 f.open_and_lock()
91 if f.is_locked():
92 _read_or_initialize_cache(f)
93 # If we can not obtain the lock, other process or thread must
94 # have initialized the file.
95 except Exception as e:
96 LOGGER.warning(e, exc_info=True)
97 finally:
98 f.unlock_and_close()
99
100 def get(self, url):
101 f = LockedFile(self._file, 'r+', 'r')
102 try:
103 f.open_and_lock()
104 if f.is_locked():
105 cache = _read_or_initialize_cache(f)
106 if url in cache:
107 content, t = cache.get(url, (None, 0))
108 if _to_timestamp(datetime.datetime.now()) < t + self._max_age:
109 return content
110 return None
111 else:
112 LOGGER.debug('Could not obtain a lock for the cache file.')
113 return None
114 except Exception as e:
115 LOGGER.warning(e, exc_info=True)
116 finally:
117 f.unlock_and_close()
118
119 def set(self, url, content):
120 f = LockedFile(self._file, 'r+', 'r')
121 try:
122 f.open_and_lock()
123 if f.is_locked():
124 cache = _read_or_initialize_cache(f)
125 cache[url] = (content, _to_timestamp(datetime.datetime.now()))
126 # Remove stale cache.
127 for k, (_, timestamp) in list(cache.items()):
128 if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:
129 del cache[k]
130 f.file_handle().truncate(0)
131 f.file_handle().seek(0)
132 json.dump(cache, f.file_handle())
133 else:
134 LOGGER.debug('Could not obtain a lock for the cache file.')
135 except Exception as e:
136 LOGGER.warning(e, exc_info=True)
137 finally:
138 f.unlock_and_close()
139
140
141 cache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/googleapiclient/discovery_cache/file_cache.py b/googleapiclient/discovery_cache/file_cache.py
--- a/googleapiclient/discovery_cache/file_cache.py
+++ b/googleapiclient/discovery_cache/file_cache.py
@@ -36,9 +36,9 @@
try:
from oauth2client.locked_file import LockedFile
except ImportError:
- # oauth2client > 4.0.0
+ # oauth2client > 4.0.0 or google-auth
raise ImportError(
- 'file_cache is unavailable when using oauth2client >= 4.0.0')
+ 'file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth')
from . import base
from ..discovery_cache import DISCOVERY_DOC_MAX_AGE
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -64,7 +64,8 @@
install_requires = [
'httplib2>=0.9.2,<1dev',
- 'oauth2client>=1.5.0,<5.0.0dev',
+ 'google-auth>=1.4.1',
+ 'google-auth-httplib2>=0.0.3',
'six>=1.6.1,<2dev',
'uritemplate>=3.0.0,<4dev',
]
|
{"golden_diff": "diff --git a/googleapiclient/discovery_cache/file_cache.py b/googleapiclient/discovery_cache/file_cache.py\n--- a/googleapiclient/discovery_cache/file_cache.py\n+++ b/googleapiclient/discovery_cache/file_cache.py\n@@ -36,9 +36,9 @@\n try:\n from oauth2client.locked_file import LockedFile\n except ImportError:\n- # oauth2client > 4.0.0\n+ # oauth2client > 4.0.0 or google-auth\n raise ImportError(\n- 'file_cache is unavailable when using oauth2client >= 4.0.0')\n+ 'file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth')\n \n from . import base\n from ..discovery_cache import DISCOVERY_DOC_MAX_AGE\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,7 +64,8 @@\n \n install_requires = [\n 'httplib2>=0.9.2,<1dev',\n- 'oauth2client>=1.5.0,<5.0.0dev',\n+ 'google-auth>=1.4.1',\n+ 'google-auth-httplib2>=0.0.3',\n 'six>=1.6.1,<2dev',\n 'uritemplate>=3.0.0,<4dev',\n ]\n", "issue": "Deprecation of oauth2client dependency\nAccording to the readme google/oauth2client is deprecated. They suggest switching to [google-auth](https://google-auth.readthedocs.io/) or [oauthlib](http://oauthlib.readthedocs.io/). \r\n\r\nThis probably means that this package and all of the docs should also be refactored into using these new packages?\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print('google-api-python-client requires python version >= 2.7.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. 
Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.9.2,<1dev',\n 'oauth2client>=1.5.0,<5.0.0dev',\n 'six>=1.6.1,<2dev',\n 'uritemplate>=3.0.0,<4dev',\n]\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}, {"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"File based cache for the discovery document.\n\nThe cache is stored in a single file so that multiple processes can\nshare the same cache. It locks the file whenever accesing to the\nfile. When the cache content is corrupted, it will be initialized with\nan empty cache.\n\"\"\"\n\nfrom __future__ import division\n\nimport datetime\nimport json\nimport logging\nimport os\nimport tempfile\nimport threading\n\ntry:\n from oauth2client.contrib.locked_file import LockedFile\nexcept ImportError:\n # oauth2client < 2.0.0\n try:\n from oauth2client.locked_file import LockedFile\n except ImportError:\n # oauth2client > 4.0.0\n raise ImportError(\n 'file_cache is unavailable when using oauth2client >= 4.0.0')\n\nfrom . 
import base\nfrom ..discovery_cache import DISCOVERY_DOC_MAX_AGE\n\nLOGGER = logging.getLogger(__name__)\n\nFILENAME = 'google-api-python-client-discovery-doc.cache'\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\n\ndef _to_timestamp(date):\n try:\n return (date - EPOCH).total_seconds()\n except AttributeError:\n # The following is the equivalent of total_seconds() in Python2.6.\n # See also: https://docs.python.org/2/library/datetime.html\n delta = date - EPOCH\n return ((delta.microseconds + (delta.seconds + delta.days * 24 * 3600)\n * 10**6) / 10**6)\n\n\ndef _read_or_initialize_cache(f):\n f.file_handle().seek(0)\n try:\n cache = json.load(f.file_handle())\n except Exception:\n # This means it opens the file for the first time, or the cache is\n # corrupted, so initializing the file with an empty dict.\n cache = {}\n f.file_handle().truncate(0)\n f.file_handle().seek(0)\n json.dump(cache, f.file_handle())\n return cache\n\n\nclass Cache(base.Cache):\n \"\"\"A file based cache for the discovery documents.\"\"\"\n\n def __init__(self, max_age):\n \"\"\"Constructor.\n\n Args:\n max_age: Cache expiration in seconds.\n \"\"\"\n self._max_age = max_age\n self._file = os.path.join(tempfile.gettempdir(), FILENAME)\n f = LockedFile(self._file, 'a+', 'r')\n try:\n f.open_and_lock()\n if f.is_locked():\n _read_or_initialize_cache(f)\n # If we can not obtain the lock, other process or thread must\n # have initialized the file.\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n finally:\n f.unlock_and_close()\n\n def get(self, url):\n f = LockedFile(self._file, 'r+', 'r')\n try:\n f.open_and_lock()\n if f.is_locked():\n cache = _read_or_initialize_cache(f)\n if url in cache:\n content, t = cache.get(url, (None, 0))\n if _to_timestamp(datetime.datetime.now()) < t + self._max_age:\n return content\n return None\n else:\n LOGGER.debug('Could not obtain a lock for the cache file.')\n return None\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n finally:\n f.unlock_and_close()\n\n def set(self, url, content):\n f = LockedFile(self._file, 'r+', 'r')\n try:\n f.open_and_lock()\n if f.is_locked():\n cache = _read_or_initialize_cache(f)\n cache[url] = (content, _to_timestamp(datetime.datetime.now()))\n # Remove stale cache.\n for k, (_, timestamp) in list(cache.items()):\n if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:\n del cache[k]\n f.file_handle().truncate(0)\n f.file_handle().seek(0)\n json.dump(cache, f.file_handle())\n else:\n LOGGER.debug('Could not obtain a lock for the cache file.')\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n finally:\n f.unlock_and_close()\n\n\ncache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)\n", "path": "googleapiclient/discovery_cache/file_cache.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print('google-api-python-client requires python version >= 2.7.',\n file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 3):\n print('google-api-python-client requires python3 version >= 3.3.',\n file=sys.stderr)\n sys.exit(1)\n\nfrom setuptools import setup\nimport pkg_resources\n\ndef _DetectBadness():\n import os\n if 'SKIP_GOOGLEAPICLIENT_COMPAT_CHECK' in os.environ:\n return\n o2c_pkg = None\n try:\n o2c_pkg = pkg_resources.get_distribution('oauth2client')\n except pkg_resources.DistributionNotFound:\n pass\n oauth2client = None\n try:\n import oauth2client\n except ImportError:\n pass\n if o2c_pkg is None and oauth2client is not None:\n raise RuntimeError(\n 'Previous version of google-api-python-client detected; due to a '\n 'packaging issue, we cannot perform an in-place upgrade. Please remove '\n 'the old version and re-install this package.'\n )\n\n_DetectBadness()\n\npackages = [\n 'apiclient',\n 'googleapiclient',\n 'googleapiclient/discovery_cache',\n]\n\ninstall_requires = [\n 'httplib2>=0.9.2,<1dev',\n 'google-auth>=1.4.1',\n 'google-auth-httplib2>=0.0.3',\n 'six>=1.6.1,<2dev',\n 'uritemplate>=3.0.0,<4dev',\n]\n\nlong_desc = \"\"\"The Google API Client for Python is a client library for\naccessing the Plus, Moderator, and many other Google APIs.\"\"\"\n\nimport googleapiclient\nversion = googleapiclient.__version__\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=long_desc,\n author=\"Google Inc.\",\n url=\"http://github.com/google/google-api-python-client/\",\n install_requires=install_requires,\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n ],\n)\n", "path": "setup.py"}, {"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"File based cache for the discovery document.\n\nThe cache is stored in a single file so that multiple processes can\nshare the same cache. It locks the file whenever accesing to the\nfile. When the cache content is corrupted, it will be initialized with\nan empty cache.\n\"\"\"\n\nfrom __future__ import division\n\nimport datetime\nimport json\nimport logging\nimport os\nimport tempfile\nimport threading\n\ntry:\n from oauth2client.contrib.locked_file import LockedFile\nexcept ImportError:\n # oauth2client < 2.0.0\n try:\n from oauth2client.locked_file import LockedFile\n except ImportError:\n # oauth2client > 4.0.0 or google-auth\n raise ImportError(\n 'file_cache is unavailable when using oauth2client >= 4.0.0 or google-auth')\n\nfrom . import base\nfrom ..discovery_cache import DISCOVERY_DOC_MAX_AGE\n\nLOGGER = logging.getLogger(__name__)\n\nFILENAME = 'google-api-python-client-discovery-doc.cache'\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\n\ndef _to_timestamp(date):\n try:\n return (date - EPOCH).total_seconds()\n except AttributeError:\n # The following is the equivalent of total_seconds() in Python2.6.\n # See also: https://docs.python.org/2/library/datetime.html\n delta = date - EPOCH\n return ((delta.microseconds + (delta.seconds + delta.days * 24 * 3600)\n * 10**6) / 10**6)\n\n\ndef _read_or_initialize_cache(f):\n f.file_handle().seek(0)\n try:\n cache = json.load(f.file_handle())\n except Exception:\n # This means it opens the file for the first time, or the cache is\n # corrupted, so initializing the file with an empty dict.\n cache = {}\n f.file_handle().truncate(0)\n f.file_handle().seek(0)\n json.dump(cache, f.file_handle())\n return cache\n\n\nclass Cache(base.Cache):\n \"\"\"A file based cache for the discovery documents.\"\"\"\n\n def __init__(self, max_age):\n \"\"\"Constructor.\n\n Args:\n max_age: Cache expiration in seconds.\n \"\"\"\n self._max_age = max_age\n self._file = os.path.join(tempfile.gettempdir(), FILENAME)\n f = LockedFile(self._file, 'a+', 'r')\n try:\n f.open_and_lock()\n if f.is_locked():\n _read_or_initialize_cache(f)\n # If we can not obtain the lock, other process or thread must\n # have initialized the file.\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n finally:\n f.unlock_and_close()\n\n def get(self, url):\n f = LockedFile(self._file, 'r+', 'r')\n try:\n f.open_and_lock()\n if f.is_locked():\n cache = _read_or_initialize_cache(f)\n if url in cache:\n content, t = cache.get(url, (None, 0))\n if _to_timestamp(datetime.datetime.now()) < t + self._max_age:\n return content\n return None\n else:\n LOGGER.debug('Could not obtain a lock for the cache file.')\n return None\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n finally:\n f.unlock_and_close()\n\n def set(self, url, content):\n f = LockedFile(self._file, 'r+', 'r')\n try:\n f.open_and_lock()\n if f.is_locked():\n cache = _read_or_initialize_cache(f)\n cache[url] = (content, 
_to_timestamp(datetime.datetime.now()))\n # Remove stale cache.\n for k, (_, timestamp) in list(cache.items()):\n if _to_timestamp(datetime.datetime.now()) >= timestamp + self._max_age:\n del cache[k]\n f.file_handle().truncate(0)\n f.file_handle().seek(0)\n json.dump(cache, f.file_handle())\n else:\n LOGGER.debug('Could not obtain a lock for the cache file.')\n except Exception as e:\n LOGGER.warning(e, exc_info=True)\n finally:\n f.unlock_and_close()\n\n\ncache = Cache(max_age=DISCOVERY_DOC_MAX_AGE)\n", "path": "googleapiclient/discovery_cache/file_cache.py"}]}
| 2,726 | 309 |
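
As background for the dependency swap above (oauth2client replaced by google-auth plus google-auth-httplib2), a rough sketch of how a client gets built with the replacement libraries. This is illustrative only: the service name, API version, scope, and key-file path are placeholder assumptions, not taken from the issue.

```python
# Sketch: constructing an API client with google-auth instead of oauth2client.
# 'drive', 'v3', the scope, and 'service-account.json' are placeholders.
from google.oauth2 import service_account
from googleapiclient.discovery import build

credentials = service_account.Credentials.from_service_account_file(
    'service-account.json',
    scopes=['https://www.googleapis.com/auth/drive.readonly'],
)

# build() accepts google-auth credentials and authorizes requests through
# google-auth-httplib2 under the hood, so no oauth2client import is needed.
service = build('drive', 'v3', credentials=credentials)
response = service.files().list(pageSize=10).execute()
print(response.get('files', []))
```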
gh_patches_debug_31815
|
rasdani/github-patches
|
git_diff
|
pytorch__audio-1385
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linting doc config
Currently, `docs/source/conf.py` is not checked with `flake8` in our CI, and it contains errors.
```
$ flake8 docs/source/conf.py
docs/source/conf.py:23:1: F401 'torch' imported but unused
docs/source/conf.py:24:1: F401 'torchaudio' imported but unused
docs/source/conf.py:138:1: E302 expected 2 blank lines, found 1
docs/source/conf.py:229:5: F821 undefined name 'List'
docs/source/conf.py:229:5: F821 undefined name 'unicode'
docs/source/conf.py:229:5: F821 undefined name 'Tuple'
```
## Steps
1. Fix the above error.
2. Add `docs/source/conf.py` [here](https://github.com/pytorch/audio/blob/ea857940de9e3738166989ad3bf1726741a13f04/.circleci/unittest/linux/scripts/run_style_checks.sh#L32)
## Build and testing
For setting up dev env, please refer to [CONTRIBUTING.md](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md).
--- END ISSUE ---
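
One possible way to reproduce the errors above from Python rather than the shell is flake8's documented legacy API, which can also double as a CI-style check. The file path comes from the issue; using this instead of the repo's `run_style_checks.sh` (step 2) is an assumption made here for illustration only.

```python
# Sketch: reproduce the lint errors programmatically via flake8's legacy API.
# 'docs/source/conf.py' is the path named in the issue; run from the repo root.
from flake8.api import legacy as flake8


def main():
    style_guide = flake8.get_style_guide()
    report = style_guide.check_files(['docs/source/conf.py'])
    # Before the fix this lists the F401/F821 and E302 violations shown above;
    # after the fix both lists should be empty.
    print(report.get_statistics('F'))
    print(report.get_statistics('E'))


if __name__ == '__main__':
    main()
```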
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # PyTorch documentation build configuration file, created by
5 # sphinx-quickstart on Fri Dec 23 13:31:47 2016.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23 import torch
24 import torchaudio
25 import pytorch_sphinx_theme
26
27 # -- General configuration ------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #
31 needs_sphinx = '1.6'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 'sphinx.ext.autodoc',
38 'sphinx.ext.autosummary',
39 'sphinx.ext.doctest',
40 'sphinx.ext.intersphinx',
41 'sphinx.ext.todo',
42 'sphinx.ext.coverage',
43 'sphinx.ext.napoleon',
44 'sphinx.ext.viewcode',
45 'sphinxcontrib.katex',
46 ]
47
48 # katex options
49 #
50 #
51
52 katex_options = r'''
53 delimiters : [
54 {left: "$$", right: "$$", display: true},
55 {left: "\\(", right: "\\)", display: false},
56 {left: "\\[", right: "\\]", display: true}
57 ]
58 '''
59
60 napoleon_use_ivar = True
61 napoleon_numpy_docstring = False
62 napoleon_google_docstring = True
63
64 # Add any paths that contain templates here, relative to this directory.
65 templates_path = ['_templates']
66
67 # The suffix(es) of source filenames.
68 # You can specify multiple suffix as a list of string:
69 #
70 # source_suffix = ['.rst', '.md']
71 source_suffix = '.rst'
72
73 # The master toctree document.
74 master_doc = 'index'
75
76 # General information about the project.
77 project = 'Torchaudio'
78 copyright = '2018, Torchaudio Contributors'
79 author = 'Torchaudio Contributors'
80
81 # The version info for the project you're documenting, acts as replacement for
82 # |version| and |release|, also used in various other places throughout the
83 # built documents.
84 #
85 # The short X.Y version.
86 # TODO: change to [:2] at v1.0
87 version = 'master '
88 # The full version, including alpha/beta/rc tags.
89 # TODO: verify this works as expected
90 release = 'master'
91
92 # The language for content autogenerated by Sphinx. Refer to documentation
93 # for a list of supported languages.
94 #
95 # This is also used if you do content translation via gettext catalogs.
96 # Usually you set "language" from the command line for these cases.
97 language = None
98
99 # List of patterns, relative to source directory, that match files and
100 # directories to ignore when looking for source files.
101 # This patterns also effect to html_static_path and html_extra_path
102 exclude_patterns = []
103
104 # The name of the Pygments (syntax highlighting) style to use.
105 pygments_style = 'sphinx'
106
107 # If true, `todo` and `todoList` produce output, else they produce nothing.
108 todo_include_todos = True
109
110
111 # -- Options for HTML output ----------------------------------------------
112
113 # The theme to use for HTML and HTML Help pages. See the documentation for
114 # a list of builtin themes.
115 #
116 html_theme = 'pytorch_sphinx_theme'
117 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
118
119 # Theme options are theme-specific and customize the look and feel of a theme
120 # further. For a list of options available for each theme, see the
121 # documentation.
122 #
123 html_theme_options = {
124 'pytorch_project': 'audio',
125 'collapse_navigation': False,
126 'display_version': True,
127 'logo_only': True,
128 'navigation_with_keys': True
129 }
130
131 html_logo = '_static/img/pytorch-logo-dark.svg'
132
133 # Add any paths that contain custom static files (such as style sheets) here,
134 # relative to this directory. They are copied after the builtin static files,
135 # so a file named "default.css" will overwrite the builtin "default.css".
136 html_static_path = ['_static']
137
138 def setup(app):
139 # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
140 # and can be moved outside of this function (and the setup(app) function
141 # can be deleted).
142 html_css_files = [
143 'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'
144 ]
145
146 # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
147 # `add_stylesheet` (deprecated in 1.8).
148 add_css = getattr(app, 'add_css_file', getattr(app, 'add_stylesheet'))
149 for css_file in html_css_files:
150 add_css(css_file)
151
152
153 # -- Options for HTMLHelp output ------------------------------------------
154
155 # Output file base name for HTML help builder.
156 htmlhelp_basename = 'TorchAudiodoc'
157
158
159 # -- Options for LaTeX output ---------------------------------------------
160
161 latex_elements = {
162 # The paper size ('letterpaper' or 'a4paper').
163 #
164 # 'papersize': 'letterpaper',
165
166 # The font size ('10pt', '11pt' or '12pt').
167 #
168 # 'pointsize': '10pt',
169
170 # Additional stuff for the LaTeX preamble.
171 #
172 # 'preamble': '',
173
174 # Latex figure (float) alignment
175 #
176 # 'figure_align': 'htbp',
177 }
178
179 # Grouping the document tree into LaTeX files. List of tuples
180 # (source start file, target name, title,
181 # author, documentclass [howto, manual, or own class]).
182 latex_documents = [
183 (master_doc, 'pytorch.tex', 'Torchaudio Documentation',
184 'Torch Contributors', 'manual'),
185 ]
186
187
188 # -- Options for manual page output ---------------------------------------
189
190 # One entry per manual page. List of tuples
191 # (source start file, name, description, authors, manual section).
192 man_pages = [
193 (master_doc, 'Torchaudio', 'Torchaudio Documentation',
194 [author], 1)
195 ]
196
197
198 # -- Options for Texinfo output -------------------------------------------
199
200 # Grouping the document tree into Texinfo files. List of tuples
201 # (source start file, target name, title, author,
202 # dir menu entry, description, category)
203 texinfo_documents = [
204 (master_doc, 'Torchaudio', 'Torchaudio Documentation',
205 author, 'Torchaudio', 'Load audio files into pytorch tensors.',
206 'Miscellaneous'),
207 ]
208
209
210 # Example configuration for intersphinx: refer to the Python standard library.
211 intersphinx_mapping = {
212 'python': ('https://docs.python.org/', None),
213 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
214 'torch': ('https://pytorch.org/docs/stable/', None),
215 }
216
217 # -- A patch that prevents Sphinx from cross-referencing ivar tags -------
218 # See http://stackoverflow.com/a/41184353/3343043
219
220 from docutils import nodes
221 from sphinx.util.docfields import TypedField
222 from sphinx import addnodes
223
224
225 def patched_make_field(self, types, domain, items, **kw):
226 # `kw` catches `env=None` needed for newer sphinx while maintaining
227 # backwards compatibility when passed along further down!
228
229 # type: (List, unicode, Tuple) -> nodes.field
230 def handle_item(fieldarg, content):
231 par = nodes.paragraph()
232 par += addnodes.literal_strong('', fieldarg) # Patch: this line added
233 # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
234 # addnodes.literal_strong))
235 if fieldarg in types:
236 par += nodes.Text(' (')
237 # NOTE: using .pop() here to prevent a single type node to be
238 # inserted twice into the doctree, which leads to
239 # inconsistencies later when references are resolved
240 fieldtype = types.pop(fieldarg)
241 if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
242 typename = u''.join(n.astext() for n in fieldtype)
243 typename = typename.replace('int', 'python:int')
244 typename = typename.replace('long', 'python:long')
245 typename = typename.replace('float', 'python:float')
246 typename = typename.replace('type', 'python:type')
247 par.extend(self.make_xrefs(self.typerolename, domain, typename,
248 addnodes.literal_emphasis, **kw))
249 else:
250 par += fieldtype
251 par += nodes.Text(')')
252 par += nodes.Text(' -- ')
253 par += content
254 return par
255
256 fieldname = nodes.field_name('', self.label)
257 if len(items) == 1 and self.can_collapse:
258 fieldarg, content = items[0]
259 bodynode = handle_item(fieldarg, content)
260 else:
261 bodynode = self.list_type()
262 for fieldarg, content in items:
263 bodynode += nodes.list_item('', handle_item(fieldarg, content))
264 fieldbody = nodes.field_body('', bodynode)
265 return nodes.field('', fieldname, fieldbody)
266
267 TypedField.make_field = patched_make_field
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -20,8 +20,6 @@
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
-import torch
-import torchaudio
import pytorch_sphinx_theme
# -- General configuration ------------------------------------------------
@@ -135,6 +133,7 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
+
def setup(app):
# NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value
# and can be moved outside of this function (and the setup(app) function
@@ -145,7 +144,7 @@
# In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is
# `add_stylesheet` (deprecated in 1.8).
- add_css = getattr(app, 'add_css_file', getattr(app, 'add_stylesheet'))
+ add_css = getattr(app, 'add_css_file', app.add_stylesheet)
for css_file in html_css_files:
add_css(css_file)
@@ -226,7 +225,7 @@
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
- # type: (List, unicode, Tuple) -> nodes.field
+ # type: (list, str, tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -20,8 +20,6 @@\n # import os\n # import sys\n # sys.path.insert(0, os.path.abspath('.'))\n-import torch\n-import torchaudio\n import pytorch_sphinx_theme\n \n # -- General configuration ------------------------------------------------\n@@ -135,6 +133,7 @@\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = ['_static']\n \n+\n def setup(app):\n # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value\n # and can be moved outside of this function (and the setup(app) function\n@@ -145,7 +144,7 @@\n \n # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is\n # `add_stylesheet` (deprecated in 1.8).\n- add_css = getattr(app, 'add_css_file', getattr(app, 'add_stylesheet'))\n+ add_css = getattr(app, 'add_css_file', app.add_stylesheet)\n for css_file in html_css_files:\n add_css(css_file)\n \n@@ -226,7 +225,7 @@\n # `kw` catches `env=None` needed for newer sphinx while maintaining\n # backwards compatibility when passed along further down!\n \n- # type: (List, unicode, Tuple) -> nodes.field\n+ # type: (list, str, tuple) -> nodes.field\n def handle_item(fieldarg, content):\n par = nodes.paragraph()\n par += addnodes.literal_strong('', fieldarg) # Patch: this line added\n", "issue": "Linting doc config\nCurrently, `doc/source/conf.py` is not checked with `flake8` in our CI and it contains errors. We need to fix this and make CI check it.\r\n\r\n```\r\n$ flake8 docs/source/conf.py\r\ndocs/source/conf.py:23:1: F401 'torch' imported but unused\r\ndocs/source/conf.py:24:1: F401 'torchaudio' imported but unused\r\ndocs/source/conf.py:138:1: E302 expected 2 blank lines, found 1\r\ndocs/source/conf.py:229:5: F821 undefined name 'List'\r\ndocs/source/conf.py:229:5: F821 undefined name 'unicode'\r\ndocs/source/conf.py:229:5: F821 undefined name 'Tuple'\r\n```\r\n\r\n## Steps\r\n1. Fix the above error.\r\n2. Add `docs/source/conf.py` [here](https://github.com/pytorch/audio/blob/ea857940de9e3738166989ad3bf1726741a13f04/.circleci/unittest/linux/scripts/run_style_checks.sh#L32)\r\n\r\n## Build and testing\r\n\r\nFor setting up dev env, please refer to [CONTRIBUTING.md](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md).\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# PyTorch documentation build configuration file, created by\n# sphinx-quickstart on Fri Dec 23 13:31:47 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport torch\nimport torchaudio\nimport pytorch_sphinx_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '1.6'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.katex',\n]\n\n# katex options\n#\n#\n\nkatex_options = r'''\ndelimiters : [\n {left: \"$$\", right: \"$$\", display: true},\n {left: \"\\\\(\", right: \"\\\\)\", display: false},\n {left: \"\\\\[\", right: \"\\\\]\", display: true}\n]\n'''\n\nnapoleon_use_ivar = True\nnapoleon_numpy_docstring = False\nnapoleon_google_docstring = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Torchaudio'\ncopyright = '2018, Torchaudio Contributors'\nauthor = 'Torchaudio Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# TODO: change to [:2] at v1.0\nversion = 'master '\n# The full version, including alpha/beta/rc tags.\n# TODO: verify this works as expected\nrelease = 'master'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pytorch_sphinx_theme'\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'pytorch_project': 'audio',\n 'collapse_navigation': False,\n 'display_version': True,\n 'logo_only': True,\n 'navigation_with_keys': True\n}\n\nhtml_logo = '_static/img/pytorch-logo-dark.svg'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\ndef setup(app):\n # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value\n # and can be moved outside of this function (and the setup(app) function\n # can be deleted).\n html_css_files = [\n 'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'\n ]\n\n # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is\n # `add_stylesheet` (deprecated in 1.8).\n add_css = getattr(app, 'add_css_file', getattr(app, 'add_stylesheet'))\n for css_file in html_css_files:\n add_css(css_file)\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'TorchAudiodoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'pytorch.tex', 'Torchaudio Documentation',\n 'Torch Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'Torchaudio', 'Torchaudio Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Torchaudio', 'Torchaudio Documentation',\n author, 'Torchaudio', 'Load audio files into pytorch tensors.',\n 'Miscellaneous'),\n]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'torch': ('https://pytorch.org/docs/stable/', None),\n}\n\n# -- A patch that prevents Sphinx from cross-referencing ivar tags -------\n# See http://stackoverflow.com/a/41184353/3343043\n\nfrom docutils import nodes\nfrom sphinx.util.docfields import TypedField\nfrom sphinx import addnodes\n\n\ndef patched_make_field(self, types, domain, items, **kw):\n # `kw` catches `env=None` needed for newer sphinx while maintaining\n # backwards compatibility when passed along further down!\n\n # type: (List, unicode, Tuple) -> nodes.field\n def handle_item(fieldarg, content):\n par = nodes.paragraph()\n par += addnodes.literal_strong('', fieldarg) # Patch: this line added\n # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,\n # addnodes.literal_strong))\n if fieldarg in types:\n par += nodes.Text(' (')\n # NOTE: using .pop() here to prevent a single type node to be\n # inserted twice into the doctree, which leads to\n # inconsistencies later when references are resolved\n fieldtype = types.pop(fieldarg)\n if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):\n typename = u''.join(n.astext() for n in fieldtype)\n typename = typename.replace('int', 'python:int')\n typename = typename.replace('long', 'python:long')\n typename = typename.replace('float', 'python:float')\n typename = typename.replace('type', 'python:type')\n par.extend(self.make_xrefs(self.typerolename, domain, typename,\n addnodes.literal_emphasis, **kw))\n else:\n par += fieldtype\n par += nodes.Text(')')\n par += nodes.Text(' -- ')\n par += content\n return par\n\n fieldname = nodes.field_name('', self.label)\n if len(items) == 1 and self.can_collapse:\n fieldarg, content = items[0]\n bodynode = handle_item(fieldarg, content)\n else:\n bodynode = self.list_type()\n for fieldarg, content in items:\n bodynode += nodes.list_item('', handle_item(fieldarg, content))\n fieldbody = nodes.field_body('', bodynode)\n return nodes.field('', fieldname, fieldbody)\n\nTypedField.make_field = patched_make_field\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# PyTorch documentation build configuration file, created by\n# sphinx-quickstart on Fri Dec 23 13:31:47 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport pytorch_sphinx_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\nneeds_sphinx = '1.6'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.katex',\n]\n\n# katex options\n#\n#\n\nkatex_options = r'''\ndelimiters : [\n {left: \"$$\", right: \"$$\", display: true},\n {left: \"\\\\(\", right: \"\\\\)\", display: false},\n {left: \"\\\\[\", right: \"\\\\]\", display: true}\n]\n'''\n\nnapoleon_use_ivar = True\nnapoleon_numpy_docstring = False\nnapoleon_google_docstring = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Torchaudio'\ncopyright = '2018, Torchaudio Contributors'\nauthor = 'Torchaudio Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\n# TODO: change to [:2] at v1.0\nversion = 'master '\n# The full version, including alpha/beta/rc tags.\n# TODO: verify this works as expected\nrelease = 'master'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pytorch_sphinx_theme'\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'pytorch_project': 'audio',\n 'collapse_navigation': False,\n 'display_version': True,\n 'logo_only': True,\n 'navigation_with_keys': True\n}\n\nhtml_logo = '_static/img/pytorch-logo-dark.svg'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\ndef setup(app):\n # NOTE: in Sphinx 1.8+ `html_css_files` is an official configuration value\n # and can be moved outside of this function (and the setup(app) function\n # can be deleted).\n html_css_files = [\n 'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'\n ]\n\n # In Sphinx 1.8 it was renamed to `add_css_file`, 1.7 and prior it is\n # `add_stylesheet` (deprecated in 1.8).\n add_css = getattr(app, 'add_css_file', app.add_stylesheet)\n for css_file in html_css_files:\n add_css(css_file)\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'TorchAudiodoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'pytorch.tex', 'Torchaudio Documentation',\n 'Torch Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'Torchaudio', 'Torchaudio Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Torchaudio', 'Torchaudio Documentation',\n author, 'Torchaudio', 'Load audio files into pytorch tensors.',\n 'Miscellaneous'),\n]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'torch': ('https://pytorch.org/docs/stable/', None),\n}\n\n# -- A patch that prevents Sphinx from cross-referencing ivar tags -------\n# See http://stackoverflow.com/a/41184353/3343043\n\nfrom docutils import nodes\nfrom sphinx.util.docfields import TypedField\nfrom sphinx import addnodes\n\n\ndef patched_make_field(self, types, domain, items, **kw):\n # `kw` catches `env=None` needed for newer sphinx while maintaining\n # backwards compatibility when passed along further down!\n\n # type: (list, str, tuple) -> nodes.field\n def handle_item(fieldarg, content):\n par = nodes.paragraph()\n par += addnodes.literal_strong('', fieldarg) # Patch: this line added\n # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,\n # addnodes.literal_strong))\n if fieldarg in types:\n par += nodes.Text(' (')\n # NOTE: using .pop() here to prevent a single type node to be\n # inserted twice into the doctree, which leads to\n # inconsistencies later when references are resolved\n fieldtype = types.pop(fieldarg)\n if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):\n typename = u''.join(n.astext() for n in fieldtype)\n typename = typename.replace('int', 'python:int')\n typename = typename.replace('long', 'python:long')\n typename = typename.replace('float', 'python:float')\n typename = typename.replace('type', 'python:type')\n par.extend(self.make_xrefs(self.typerolename, domain, typename,\n addnodes.literal_emphasis, **kw))\n else:\n par += fieldtype\n par += nodes.Text(')')\n par += nodes.Text(' -- ')\n par += content\n return par\n\n fieldname = nodes.field_name('', self.label)\n if len(items) == 1 and self.can_collapse:\n fieldarg, content = items[0]\n bodynode = handle_item(fieldarg, content)\n else:\n bodynode = self.list_type()\n for fieldarg, content in items:\n bodynode += nodes.list_item('', handle_item(fieldarg, content))\n fieldbody = nodes.field_body('', bodynode)\n return nodes.field('', fieldname, fieldbody)\n\nTypedField.make_field = patched_make_field\n", "path": "docs/source/conf.py"}]}
| 3,402 | 386 |
gh_patches_debug_2906
|
rasdani/github-patches
|
git_diff
|
AlexsLemonade__refinebio-471
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose transformation option to API
### Context
https://github.com/AlexsLemonade/refinebio-frontend/issues/208
### Problem or idea
We want a dropdown to change the transformation option, but the API currently doesn't support changing that value.
### Solution or next step
I think transformation just needs to be added to the DataSetSerializer
### New Issue Checklist
- [x] The title is short and descriptive.
- [x] You have explained the context that led you to write this issue.
- [x] You have reported a problem or idea.
- [x] You have proposed a solution or next step.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/data_refinery_api/serializers.py`
Content:
```
1 from rest_framework import serializers
2 from rest_framework_hstore.fields import HStoreField
3 from data_refinery_common.models import ProcessorJob, DownloaderJob, SurveyJob
4 from data_refinery_common.models import (
5 Experiment,
6 ExperimentAnnotation,
7 Sample,
8 SampleAnnotation,
9 Organism,
10 OrganismIndex,
11 OriginalFile,
12 Processor,
13 ComputationalResult,
14 ComputationalResultAnnotation,
15 ComputedFile,
16 Dataset,
17 APIToken
18 )
19
20 ##
21 # Organism
22 ##
23
24 class OrganismSerializer(serializers.ModelSerializer):
25 class Meta:
26 model = Organism
27 fields = (
28 'name',
29 'taxonomy_id',
30 )
31
32
33 ##
34 # Processor
35 ##
36
37 class ProcessorSerializer(serializers.ModelSerializer):
38 class Meta:
39 model = Processor
40 fields = (
41 'name',
42 'docker_image',
43 'environment'
44 )
45
46
47 ##
48 # Transcriptome Index
49 ##
50
51 class OrganismIndexSerializer(serializers.ModelSerializer):
52 class Meta:
53 model = OrganismIndex
54 fields = (
55 's3_url',
56 'source_version',
57 'salmon_version',
58 'last_modified',
59 )
60
61 ##
62 # Results
63 ##
64
65 class ComputationalResultAnnotationSerializer(serializers.ModelSerializer):
66 data = HStoreField()
67
68 class Meta:
69 model = ComputationalResultAnnotation
70 fields = (
71 'id',
72 'data',
73 'is_ccdl',
74 'created_at',
75 'last_modified'
76 )
77
78 class ComputedFileSerializer(serializers.ModelSerializer):
79 class Meta:
80 model = ComputedFile
81 fields = (
82 'id',
83 'filename',
84 'size_in_bytes',
85 'sha1',
86 's3_bucket',
87 's3_key',
88 'created_at',
89 'last_modified'
90 )
91
92 class ComputationalResultSerializer(serializers.ModelSerializer):
93 annotations = ComputationalResultAnnotationSerializer(many=True, source='computationalresultannotation_set')
94 files = ComputedFileSerializer(many=True, source='computedfile_set')
95
96 class Meta:
97 model = ComputationalResult
98 fields = (
99 'id',
100 'commands',
101 'processor',
102 'is_ccdl',
103 'annotations',
104 'files',
105 'time_start',
106 'time_end',
107 'created_at',
108 'last_modified'
109 )
110
111
112 ##
113 # Samples
114 ##
115
116 class SampleSerializer(serializers.ModelSerializer):
117 organism = OrganismSerializer(many=False)
118
119 class Meta:
120 model = Sample
121 fields = (
122 'id',
123 'title',
124 'accession_code',
125 'source_database',
126 'organism',
127 'platform_accession_code',
128 'platform_name',
129 'pretty_platform',
130 'technology',
131 'manufacturer',
132 'is_downloaded',
133 'is_processed',
134 'created_at',
135 'last_modified',
136 )
137
138 class SampleAnnotationSerializer(serializers.ModelSerializer):
139 data = HStoreField()
140
141 class Meta:
142 model = SampleAnnotation
143 fields = (
144 'data',
145 'is_ccdl',
146 'created_at',
147 'last_modified',
148 )
149
150 class DetailedSampleSerializer(serializers.ModelSerializer):
151 annotations = SampleAnnotationSerializer(many=True, source='sampleannotation_set')
152 organism = OrganismSerializer(many=False)
153 results = ComputationalResultSerializer(many=True)
154
155 class Meta:
156 model = Sample
157 fields = (
158 'id',
159 'title',
160 'accession_code',
161 'source_database',
162 'organism',
163 'platform_accession_code',
164 'platform_name',
165 'pretty_platform',
166 'technology',
167 'manufacturer',
168 'annotations',
169 'results',
170 'pipelines',
171 'source_archive_url',
172 'has_raw',
173 'sex',
174 'age',
175 'specimen_part',
176 'genotype',
177 'disease',
178 'disease_stage',
179 'cell_line',
180 'treatment',
181 'race',
182 'subject',
183 'compound',
184 'time',
185 'is_downloaded',
186 'is_processed',
187 'created_at',
188 'last_modified',
189 )
190
191 ##
192 # Experiments
193 ##
194
195 class ExperimentSerializer(serializers.ModelSerializer):
196 organisms = serializers.StringRelatedField(many=True)
197 platforms = serializers.ReadOnlyField()
198 samples = serializers.StringRelatedField(many=True)
199 pretty_platforms = serializers.ReadOnlyField()
200
201 class Meta:
202 model = Experiment
203 fields = (
204 'id',
205 'title',
206 'description',
207 'accession_code',
208 'source_database',
209 'source_url',
210 'platforms',
211 'pretty_platforms',
212 'has_publication',
213 'publication_title',
214 'publication_doi',
215 'publication_authors',
216 'pubmed_id',
217 'samples',
218 'organisms',
219 'submitter_institution',
220 'created_at',
221 'last_modified'
222 )
223
224 class ExperimentAnnotationSerializer(serializers.ModelSerializer):
225 data = HStoreField()
226
227 class Meta:
228 model = ExperimentAnnotation
229 fields = (
230 'data',
231 'is_ccdl',
232 'created_at',
233 'last_modified',
234 )
235
236 class DetailedExperimentSerializer(serializers.ModelSerializer):
237 annotations = ExperimentAnnotationSerializer(many=True, source='experimentannotation_set')
238 samples = SampleSerializer(many=True)
239 organisms = OrganismSerializer(many=True)
240
241 class Meta:
242 model = Experiment
243 fields = (
244 'id',
245 'title',
246 'description',
247 'annotations',
248 'samples',
249 'protocol_description',
250 'accession_code',
251 'source_database',
252 'source_url',
253 'has_publication',
254 'publication_title',
255 'publication_doi',
256 'publication_authors',
257 'pubmed_id',
258 'source_first_published',
259 'source_last_modified',
260 'submitter_institution',
261 'last_modified',
262 'created_at',
263 'organisms',
264 )
265
266 class PlatformSerializer(serializers.ModelSerializer):
267
268 class Meta:
269 model = Sample
270 fields = (
271 'platform_accession_code',
272 'platform_name',
273 )
274
275 class InstitutionSerializer(serializers.ModelSerializer):
276
277 class Meta:
278 model = Experiment
279 fields = (
280 'submitter_institution',
281 )
282
283 ##
284 # Files
285 ##
286
287 class OriginalFileSerializer(serializers.ModelSerializer):
288
289 class Meta:
290 model = OriginalFile
291 fields = (
292 'id',
293 'filename',
294 'size_in_bytes',
295 'sha1',
296 'source_url',
297 'source_filename',
298 'is_downloaded',
299 'is_archive',
300 'has_raw',
301 'is_downloaded',
302 'created_at',
303 'last_modified'
304 )
305
306 ##
307 # Jobs
308 ##
309
310 class SurveyJobSerializer(serializers.ModelSerializer):
311
312 class Meta:
313 model = SurveyJob
314 fields = (
315 'id',
316 'source_type',
317 'success',
318 'start_time',
319 'end_time',
320 'created_at',
321 'last_modified'
322 )
323
324 class DownloaderJobSerializer(serializers.ModelSerializer):
325 original_files = OriginalFileSerializer(many=True)
326
327 class Meta:
328 model = DownloaderJob
329 fields = (
330 'id',
331 'downloader_task',
332 'num_retries',
333 'retried',
334 'worker_id',
335 'worker_version',
336 'failure_reason',
337 'success',
338 'original_files',
339 'start_time',
340 'end_time',
341 'created_at',
342 'last_modified'
343 )
344
345 class ProcessorJobSerializer(serializers.ModelSerializer):
346 original_files = OriginalFileSerializer(many=True)
347
348 class Meta:
349 model = ProcessorJob
350 fields = (
351 'id',
352 'pipeline_applied',
353 'num_retries',
354 'retried',
355 'worker_id',
356 'worker_version',
357 'failure_reason',
358 'success',
359 'original_files',
360 'start_time',
361 'end_time',
362 'created_at',
363 'last_modified'
364 )
365
366 ##
367 # Datasets
368 ##
369
370 def validate_dataset(data):
371 """ Basic dataset validation. Currently only checks formatting, not values. """
372 if data['data'] != None:
373 if type(data['data']) != dict:
374 raise serializers.ValidationError("`data` must be a dict of lists.")
375
376 for key, value in data['data'].items():
377 if type(value) != list:
378 raise serializers.ValidationError("`data` must be a dict of lists. Problem with `" + str(key) + "`")
379
380 try:
381 if len(value) != len(set(value)):
382 raise serializers.ValidationError("Duplicate values detected in " + str(value))
383 except Exception as e:
384 raise serializers.ValidationError("Received bad dataset data: " + str(e))
385
386 else:
387 raise serializers.ValidationError("`data` must be a dict of lists.")
388
389 class CreateDatasetSerializer(serializers.ModelSerializer):
390
391 class Meta:
392 model = Dataset
393 fields = (
394 'id',
395 'data',
396 'email_address'
397 )
398
399 def validate(self, data):
400 """
401 Ensure this is something we want in our dataset.
402 """
403 try:
404 validate_dataset(data)
405 except Exception:
406 raise
407 return data
408
409 class DatasetSerializer(serializers.ModelSerializer):
410
411 start = serializers.NullBooleanField(required=False)
412
413 class Meta:
414 model = Dataset
415 fields = (
416 'id',
417 'data',
418 'aggregate_by',
419 'is_processing',
420 'is_processed',
421 'is_available',
422 'email_address',
423 'expires_on',
424 's3_bucket',
425 's3_key',
426 'created_at',
427 'last_modified',
428 'start'
429 )
430 extra_kwargs = {
431 'id': {
432 'read_only': True,
433 },
434 'is_processing': {
435 'read_only': True,
436 },
437 'is_processed': {
438 'read_only': True,
439 },
440 'is_available': {
441 'read_only': True,
442 },
443 'expires_on': {
444 'read_only': True,
445 },
446 's3_bucket': {
447 'read_only': True,
448 },
449 's3_key': {
450 'read_only': True,
451 },
452 'created_at': {
453 'read_only': True,
454 },
455 'last_modified': {
456 'read_only': True,
457 }
458 }
459
460 def validate(self, data):
461 """
462 Ensure this is something we want in our dataset.
463 """
464 try:
465 validate_dataset(data)
466 except Exception:
467 raise
468 return data
469
470 class APITokenSerializer(serializers.ModelSerializer):
471
472 class Meta:
473 model = APIToken
474 fields = (
475 'id',
476 'is_activated',
477 'terms_and_conditions'
478 )
479 extra_kwargs = {
480 'id': {
481 'read_only': True
482 },
483 'is_activated': {
484 'read_only': False
485 },
486 'terms_and_conditions': {
487 'read_only': True
488 }
489 }
490
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/data_refinery_api/serializers.py b/api/data_refinery_api/serializers.py
--- a/api/data_refinery_api/serializers.py
+++ b/api/data_refinery_api/serializers.py
@@ -416,6 +416,7 @@
'id',
'data',
'aggregate_by',
+ 'scale_by',
'is_processing',
'is_processed',
'is_available',
|
{"golden_diff": "diff --git a/api/data_refinery_api/serializers.py b/api/data_refinery_api/serializers.py\n--- a/api/data_refinery_api/serializers.py\n+++ b/api/data_refinery_api/serializers.py\n@@ -416,6 +416,7 @@\n 'id',\n 'data',\n 'aggregate_by',\n+ 'scale_by',\n 'is_processing',\n 'is_processed',\n 'is_available',\n", "issue": "Expose transformation option to API\n### Context\r\n\r\nhttps://github.com/AlexsLemonade/refinebio-frontend/issues/208\r\n\r\n### Problem or idea\r\n\r\nWe want a dropdown to change the transformation option, but the API currently doesn't support changing that value.\r\n\r\n### Solution or next step\r\n\r\nI think transformation just needs to be added to the DataSetSerializer\r\n\r\n### New Issue Checklist\r\n\r\n- [x] The title is short and descriptive.\r\n- [x] You have explained the context that led you to write this issue.\r\n- [x] You have reported a problem or idea.\r\n- [x] You have proposed a solution or next step.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_framework_hstore.fields import HStoreField\nfrom data_refinery_common.models import ProcessorJob, DownloaderJob, SurveyJob\nfrom data_refinery_common.models import (\n Experiment,\n ExperimentAnnotation,\n Sample,\n SampleAnnotation,\n Organism,\n OrganismIndex,\n OriginalFile,\n Processor,\n ComputationalResult,\n ComputationalResultAnnotation,\n ComputedFile,\n Dataset,\n APIToken\n)\n\n##\n# Organism\n##\n\nclass OrganismSerializer(serializers.ModelSerializer):\n class Meta:\n model = Organism\n fields = (\n 'name',\n 'taxonomy_id',\n )\n\n\n##\n# Processor\n##\n\nclass ProcessorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Processor\n fields = (\n 'name',\n 'docker_image',\n 'environment'\n )\n\n\n##\n# Transcriptome Index\n##\n\nclass OrganismIndexSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrganismIndex\n fields = (\n 's3_url',\n 'source_version',\n 'salmon_version',\n 'last_modified',\n )\n\n##\n# Results\n##\n\nclass ComputationalResultAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ComputationalResultAnnotation\n fields = (\n 'id',\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputedFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = ComputedFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputationalResultSerializer(serializers.ModelSerializer):\n annotations = ComputationalResultAnnotationSerializer(many=True, source='computationalresultannotation_set')\n files = ComputedFileSerializer(many=True, source='computedfile_set')\n\n class Meta:\n model = ComputationalResult\n fields = (\n 'id',\n 'commands',\n 'processor',\n 'is_ccdl',\n 'annotations',\n 'files',\n 'time_start',\n 'time_end',\n 'created_at',\n 'last_modified'\n )\n\n\n##\n# Samples\n##\n\nclass SampleSerializer(serializers.ModelSerializer):\n organism = OrganismSerializer(many=False)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\nclass SampleAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = SampleAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 
'created_at',\n 'last_modified',\n )\n\nclass DetailedSampleSerializer(serializers.ModelSerializer):\n annotations = SampleAnnotationSerializer(many=True, source='sampleannotation_set')\n organism = OrganismSerializer(many=False)\n results = ComputationalResultSerializer(many=True)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'annotations',\n 'results',\n 'pipelines',\n 'source_archive_url',\n 'has_raw',\n 'sex',\n 'age',\n 'specimen_part',\n 'genotype',\n 'disease',\n 'disease_stage',\n 'cell_line',\n 'treatment',\n 'race',\n 'subject',\n 'compound',\n 'time',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\n##\n# Experiments\n##\n\nclass ExperimentSerializer(serializers.ModelSerializer):\n organisms = serializers.StringRelatedField(many=True)\n platforms = serializers.ReadOnlyField()\n samples = serializers.StringRelatedField(many=True)\n pretty_platforms = serializers.ReadOnlyField()\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'platforms',\n 'pretty_platforms',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'samples',\n 'organisms',\n 'submitter_institution',\n 'created_at',\n 'last_modified'\n )\n\nclass ExperimentAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ExperimentAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedExperimentSerializer(serializers.ModelSerializer):\n annotations = ExperimentAnnotationSerializer(many=True, source='experimentannotation_set')\n samples = SampleSerializer(many=True)\n organisms = OrganismSerializer(many=True)\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'annotations',\n 'samples',\n 'protocol_description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'source_first_published',\n 'source_last_modified',\n 'submitter_institution',\n 'last_modified',\n 'created_at',\n 'organisms',\n )\n\nclass PlatformSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Sample\n fields = (\n 'platform_accession_code',\n 'platform_name',\n )\n\nclass InstitutionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Experiment\n fields = (\n 'submitter_institution',\n )\n\n##\n# Files\n##\n\nclass OriginalFileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = OriginalFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 'source_url',\n 'source_filename',\n 'is_downloaded',\n 'is_archive',\n 'has_raw',\n 'is_downloaded',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Jobs\n##\n\nclass SurveyJobSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = SurveyJob\n fields = (\n 'id',\n 'source_type',\n 'success',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass DownloaderJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = DownloaderJob\n fields = (\n 'id',\n 'downloader_task',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 
'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass ProcessorJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = ProcessorJob\n fields = (\n 'id',\n 'pipeline_applied',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Datasets\n##\n\ndef validate_dataset(data):\n \"\"\" Basic dataset validation. Currently only checks formatting, not values. \"\"\"\n if data['data'] != None:\n if type(data['data']) != dict:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\n for key, value in data['data'].items():\n if type(value) != list:\n raise serializers.ValidationError(\"`data` must be a dict of lists. Problem with `\" + str(key) + \"`\")\n\n try:\n if len(value) != len(set(value)):\n raise serializers.ValidationError(\"Duplicate values detected in \" + str(value))\n except Exception as e:\n raise serializers.ValidationError(\"Received bad dataset data: \" + str(e))\n\n else:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\nclass CreateDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'email_address'\n )\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass DatasetSerializer(serializers.ModelSerializer):\n\n start = serializers.NullBooleanField(required=False)\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'aggregate_by',\n 'is_processing',\n 'is_processed',\n 'is_available',\n 'email_address',\n 'expires_on',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified',\n 'start'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True,\n },\n 'is_processing': {\n 'read_only': True,\n },\n 'is_processed': {\n 'read_only': True,\n },\n 'is_available': {\n 'read_only': True,\n },\n 'expires_on': {\n 'read_only': True,\n },\n 's3_bucket': {\n 'read_only': True,\n },\n 's3_key': {\n 'read_only': True,\n },\n 'created_at': {\n 'read_only': True,\n },\n 'last_modified': {\n 'read_only': True,\n }\n }\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass APITokenSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = APIToken\n fields = (\n 'id',\n 'is_activated',\n 'terms_and_conditions'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True\n },\n 'is_activated': {\n 'read_only': False\n },\n 'terms_and_conditions': {\n 'read_only': True\n }\n }\n", "path": "api/data_refinery_api/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\nfrom rest_framework_hstore.fields import HStoreField\nfrom data_refinery_common.models import ProcessorJob, DownloaderJob, SurveyJob\nfrom data_refinery_common.models import (\n Experiment,\n ExperimentAnnotation,\n Sample,\n SampleAnnotation,\n Organism,\n OrganismIndex,\n OriginalFile,\n Processor,\n ComputationalResult,\n ComputationalResultAnnotation,\n ComputedFile,\n Dataset,\n APIToken\n)\n\n##\n# Organism\n##\n\nclass OrganismSerializer(serializers.ModelSerializer):\n class Meta:\n model = Organism\n fields = (\n 'name',\n 'taxonomy_id',\n )\n\n\n##\n# Processor\n##\n\nclass 
ProcessorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Processor\n fields = (\n 'name',\n 'docker_image',\n 'environment'\n )\n\n\n##\n# Transcriptome Index\n##\n\nclass OrganismIndexSerializer(serializers.ModelSerializer):\n class Meta:\n model = OrganismIndex\n fields = (\n 's3_url',\n 'source_version',\n 'salmon_version',\n 'last_modified',\n )\n\n##\n# Results\n##\n\nclass ComputationalResultAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ComputationalResultAnnotation\n fields = (\n 'id',\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputedFileSerializer(serializers.ModelSerializer):\n class Meta:\n model = ComputedFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified'\n )\n\nclass ComputationalResultSerializer(serializers.ModelSerializer):\n annotations = ComputationalResultAnnotationSerializer(many=True, source='computationalresultannotation_set')\n files = ComputedFileSerializer(many=True, source='computedfile_set')\n\n class Meta:\n model = ComputationalResult\n fields = (\n 'id',\n 'commands',\n 'processor',\n 'is_ccdl',\n 'annotations',\n 'files',\n 'time_start',\n 'time_end',\n 'created_at',\n 'last_modified'\n )\n\n\n##\n# Samples\n##\n\nclass SampleSerializer(serializers.ModelSerializer):\n organism = OrganismSerializer(many=False)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\nclass SampleAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = SampleAnnotation\n fields = (\n 'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedSampleSerializer(serializers.ModelSerializer):\n annotations = SampleAnnotationSerializer(many=True, source='sampleannotation_set')\n organism = OrganismSerializer(many=False)\n results = ComputationalResultSerializer(many=True)\n\n class Meta:\n model = Sample\n fields = (\n 'id',\n 'title',\n 'accession_code',\n 'source_database',\n 'organism',\n 'platform_accession_code',\n 'platform_name',\n 'pretty_platform',\n 'technology',\n 'manufacturer',\n 'annotations',\n 'results',\n 'pipelines',\n 'source_archive_url',\n 'has_raw',\n 'sex',\n 'age',\n 'specimen_part',\n 'genotype',\n 'disease',\n 'disease_stage',\n 'cell_line',\n 'treatment',\n 'race',\n 'subject',\n 'compound',\n 'time',\n 'is_downloaded',\n 'is_processed',\n 'created_at',\n 'last_modified',\n )\n\n##\n# Experiments\n##\n\nclass ExperimentSerializer(serializers.ModelSerializer):\n organisms = serializers.StringRelatedField(many=True)\n platforms = serializers.ReadOnlyField()\n samples = serializers.StringRelatedField(many=True)\n pretty_platforms = serializers.ReadOnlyField()\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'platforms',\n 'pretty_platforms',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'samples',\n 'organisms',\n 'submitter_institution',\n 'created_at',\n 'last_modified'\n )\n\nclass ExperimentAnnotationSerializer(serializers.ModelSerializer):\n data = HStoreField()\n\n class Meta:\n model = ExperimentAnnotation\n fields = (\n 
'data',\n 'is_ccdl',\n 'created_at',\n 'last_modified',\n )\n\nclass DetailedExperimentSerializer(serializers.ModelSerializer):\n annotations = ExperimentAnnotationSerializer(many=True, source='experimentannotation_set')\n samples = SampleSerializer(many=True)\n organisms = OrganismSerializer(many=True)\n\n class Meta:\n model = Experiment\n fields = (\n 'id',\n 'title',\n 'description',\n 'annotations',\n 'samples',\n 'protocol_description',\n 'accession_code',\n 'source_database',\n 'source_url',\n 'has_publication',\n 'publication_title',\n 'publication_doi',\n 'publication_authors',\n 'pubmed_id',\n 'source_first_published',\n 'source_last_modified',\n 'submitter_institution',\n 'last_modified',\n 'created_at',\n 'organisms',\n )\n\nclass PlatformSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Sample\n fields = (\n 'platform_accession_code',\n 'platform_name',\n )\n\nclass InstitutionSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Experiment\n fields = (\n 'submitter_institution',\n )\n\n##\n# Files\n##\n\nclass OriginalFileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = OriginalFile\n fields = (\n 'id',\n 'filename',\n 'size_in_bytes',\n 'sha1',\n 'source_url',\n 'source_filename',\n 'is_downloaded',\n 'is_archive',\n 'has_raw',\n 'is_downloaded',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Jobs\n##\n\nclass SurveyJobSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = SurveyJob\n fields = (\n 'id',\n 'source_type',\n 'success',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass DownloaderJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = DownloaderJob\n fields = (\n 'id',\n 'downloader_task',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\nclass ProcessorJobSerializer(serializers.ModelSerializer):\n original_files = OriginalFileSerializer(many=True)\n\n class Meta:\n model = ProcessorJob\n fields = (\n 'id',\n 'pipeline_applied',\n 'num_retries',\n 'retried',\n 'worker_id',\n 'worker_version',\n 'failure_reason',\n 'success',\n 'original_files',\n 'start_time',\n 'end_time',\n 'created_at',\n 'last_modified'\n )\n\n##\n# Datasets\n##\n\ndef validate_dataset(data):\n \"\"\" Basic dataset validation. Currently only checks formatting, not values. \"\"\"\n if data['data'] != None:\n if type(data['data']) != dict:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\n for key, value in data['data'].items():\n if type(value) != list:\n raise serializers.ValidationError(\"`data` must be a dict of lists. 
Problem with `\" + str(key) + \"`\")\n\n try:\n if len(value) != len(set(value)):\n raise serializers.ValidationError(\"Duplicate values detected in \" + str(value))\n except Exception as e:\n raise serializers.ValidationError(\"Received bad dataset data: \" + str(e))\n\n else:\n raise serializers.ValidationError(\"`data` must be a dict of lists.\")\n\nclass CreateDatasetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'email_address'\n )\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass DatasetSerializer(serializers.ModelSerializer):\n\n start = serializers.NullBooleanField(required=False)\n\n class Meta:\n model = Dataset\n fields = (\n 'id',\n 'data',\n 'aggregate_by',\n 'scale_by',\n 'is_processing',\n 'is_processed',\n 'is_available',\n 'email_address',\n 'expires_on',\n 's3_bucket',\n 's3_key',\n 'created_at',\n 'last_modified',\n 'start'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True,\n },\n 'is_processing': {\n 'read_only': True,\n },\n 'is_processed': {\n 'read_only': True,\n },\n 'is_available': {\n 'read_only': True,\n },\n 'expires_on': {\n 'read_only': True,\n },\n 's3_bucket': {\n 'read_only': True,\n },\n 's3_key': {\n 'read_only': True,\n },\n 'created_at': {\n 'read_only': True,\n },\n 'last_modified': {\n 'read_only': True,\n }\n }\n\n def validate(self, data):\n \"\"\"\n Ensure this is something we want in our dataset.\n \"\"\"\n try:\n validate_dataset(data)\n except Exception:\n raise\n return data\n\nclass APITokenSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = APIToken\n fields = (\n 'id',\n 'is_activated',\n 'terms_and_conditions'\n )\n extra_kwargs = {\n 'id': {\n 'read_only': True\n },\n 'is_activated': {\n 'read_only': False\n },\n 'terms_and_conditions': {\n 'read_only': True\n }\n }\n", "path": "api/data_refinery_api/serializers.py"}]}
| 4,037 | 96 |
gh_patches_debug_9172
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-201
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pwnlib loads very slowly
On my system it takes two thirds of a second to load pwnlib:
```
~> time python -c "import pwn"
real 0m0.641s
user 0m0.576s
sys 0m0.044s
```
I've tracked down the culprit: `pwnlib.util.web` imports the `requests` module which takes forever (https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L3).
I suggest we load `requests` lazily in `pwnlib.util.web.wget()`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/util/web.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import os, tempfile, logging
3 from requests import *
4 from .misc import size
5 log = logging.getLogger(__name__)
6
7 def wget(url, save=None, timeout=5, **kwargs):
8 """wget(url, save=None, timeout=5) -> str
9
10 Downloads a file via HTTP/HTTPS.
11
12 Args:
13 url (str): URL to download
14 save (str or bool): Name to save as. Any truthy value
15 will auto-generate a name based on the URL.
16 timeout (int): Timeout, in seconds
17
18 Example:
19
20 >>> url = 'http://httpbin.org/robots.txt'
21 >>> with context.local(log_level='ERROR'): result = wget(url)
22 >>> result
23 'User-agent: *\nDisallow: /deny\n'
24 >>> with context.local(log_level='ERROR'): wget(url, True)
25 >>> result == file('robots.txt').read()
26 True
27 """
28 with log.progress("Downloading '%s'" % url) as w:
29 w.status("Making request...")
30
31 response = get(url, stream=True, **kwargs)
32
33 if not response.ok:
34 w.failure("Got code %s" % response.status_code)
35 return
36
37 total_size = int(response.headers.get('content-length',0))
38
39 w.status('0 / %s' % size(total_size))
40
41 # Find out the next largest size we can represent as
42 chunk_size = 1
43 while chunk_size < (total_size/10):
44 chunk_size *= 1000
45
46 # Count chunks as they're received
47 total_data = ''
48
49 # Loop until we have all of the data
50 for chunk in response.iter_content(chunk_size = 2**10):
51 total_data += chunk
52 if total_size:
53 w.status('%s / %s' % (size(total_data), size(total_size)))
54 else:
55 w.status('%s' % size(total_data))
56
57 # Save to the target file if provided
58 if save:
59 if not isinstance(save, (str, unicode)):
60 save = os.path.basename(url)
61 save = save or NamedTemporaryFile(dir='.', delete=False).name
62 with file(save,'wb+') as f:
63 f.write(total_data)
64 w.success('Saved %r (%s)' % (f.name, size(total_data)))
65 else:
66 w.success('%s' % size(total_data))
67
68 return total_data
69
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py
--- a/pwnlib/util/web.py
+++ b/pwnlib/util/web.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
import os, tempfile, logging
-from requests import *
from .misc import size
log = logging.getLogger(__name__)
@@ -25,6 +24,8 @@
>>> result == file('robots.txt').read()
True
"""
+ from requests import *
+
with log.progress("Downloading '%s'" % url) as w:
w.status("Making request...")
|
{"golden_diff": "diff --git a/pwnlib/util/web.py b/pwnlib/util/web.py\n--- a/pwnlib/util/web.py\n+++ b/pwnlib/util/web.py\n@@ -1,6 +1,5 @@\n # -*- coding: utf-8 -*-\n import os, tempfile, logging\n-from requests import *\n from .misc import size\n log = logging.getLogger(__name__)\n \n@@ -25,6 +24,8 @@\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n+ from requests import *\n+\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n", "issue": "Pwnlib loads very slowly\nOn my system it takes two thirds of a second to load pwnlib:\n\n```\n~> time python -c \"import pwn\"\n\nreal 0m0.641s\nuser 0m0.576s\nsys 0m0.044s\n```\n\nI've tracked down the culprit: `pwnlib.util.web` imports the `requests` module which takes forever (https://github.com/Gallopsled/pwntools/blob/master/pwnlib/util/web.py#L3).\n\nI suggest we load `requests` lazily in `pwnlib.util.web.wget()`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom requests import *\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'): result = wget(url)\n >>> result\n 'User-agent: *\\nDisallow: /deny\\n'\n >>> with context.local(log_level='ERROR'): wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n\n", "path": "pwnlib/util/web.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, tempfile, logging\nfrom .misc import size\nlog = logging.getLogger(__name__)\n\ndef wget(url, save=None, timeout=5, **kwargs):\n \"\"\"wget(url, save=None, timeout=5) -> str\n\n Downloads a file via HTTP/HTTPS.\n\n Args:\n url (str): URL to download\n save (str or bool): Name to save as. 
Any truthy value\n will auto-generate a name based on the URL.\n timeout (int): Timeout, in seconds\n\n Example:\n\n >>> url = 'http://httpbin.org/robots.txt'\n >>> with context.local(log_level='ERROR'): result = wget(url)\n >>> result\n 'User-agent: *\\nDisallow: /deny\\n'\n >>> with context.local(log_level='ERROR'): wget(url, True)\n >>> result == file('robots.txt').read()\n True\n \"\"\"\n from requests import *\n\n with log.progress(\"Downloading '%s'\" % url) as w:\n w.status(\"Making request...\")\n\n response = get(url, stream=True, **kwargs)\n\n if not response.ok:\n w.failure(\"Got code %s\" % response.status_code)\n return\n\n total_size = int(response.headers.get('content-length',0))\n\n w.status('0 / %s' % size(total_size))\n\n # Find out the next largest size we can represent as\n chunk_size = 1\n while chunk_size < (total_size/10):\n chunk_size *= 1000\n\n # Count chunks as they're received\n total_data = ''\n\n # Loop until we have all of the data\n for chunk in response.iter_content(chunk_size = 2**10):\n total_data += chunk\n if total_size:\n w.status('%s / %s' % (size(total_data), size(total_size)))\n else:\n w.status('%s' % size(total_data))\n\n # Save to the target file if provided\n if save:\n if not isinstance(save, (str, unicode)):\n save = os.path.basename(url)\n save = save or NamedTemporaryFile(dir='.', delete=False).name\n with file(save,'wb+') as f:\n f.write(total_data)\n w.success('Saved %r (%s)' % (f.name, size(total_data)))\n else:\n w.success('%s' % size(total_data))\n\n return total_data\n\n", "path": "pwnlib/util/web.py"}]}
| 1,068 | 136 |
gh_patches_debug_28246
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-1002
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'dict' object has no attribute 'append' in job.to_chunked_dataframe()
## Expected Behavior
Return a generator of a chunked dataframe
## Current Behavior
Giving error :
```
/home/dev/feast-venv/lib/python3.7/site-packages/feast/job.py in to_chunked_dataframe(self, max_chunk_size, timeout_sec)
187 records = []
188 for result in self.result(timeout_sec=timeout_sec):
--> 189 result.append(records)
190 if len(records) == max_chunk_size:
191 df = pd.DataFrame.from_records(records)
AttributeError: 'dict' object has no attribute 'append'
```
## Steps to reproduce
```
test = job.to_chunked_dataframe(10)
next(test)
```
### Specifications
- Version: 0.5.0
- Platform: Python 3.7
- Subsystem:
## Possible Solution
In line 189, it should be `records.append(result)` instead of `result.append(records)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/job.py`
Content:
```
1 from typing import List
2 from urllib.parse import urlparse
3
4 import fastavro
5 import grpc
6 import pandas as pd
7
8 from feast.constants import CONFIG_TIMEOUT_KEY
9 from feast.constants import FEAST_DEFAULT_OPTIONS as defaults
10 from feast.serving.ServingService_pb2 import (
11 DATA_FORMAT_AVRO,
12 JOB_STATUS_DONE,
13 GetJobRequest,
14 )
15 from feast.serving.ServingService_pb2 import Job as JobProto
16 from feast.serving.ServingService_pb2_grpc import ServingServiceStub
17 from feast.staging.storage_client import get_staging_client
18 from feast.wait import wait_retry_backoff
19 from tensorflow_metadata.proto.v0 import statistics_pb2
20
21 # Maximum no of seconds to wait until the retrieval jobs status is DONE in Feast
22 # Currently set to the maximum query execution time limit in BigQuery
23 DEFAULT_TIMEOUT_SEC: int = 21600
24
25 # Maximum no of seconds to wait before reloading the job status in Feast
26 MAX_WAIT_INTERVAL_SEC: int = 60
27
28
29 class RetrievalJob:
30 """
31 A class representing a job for feature retrieval in Feast.
32 """
33
34 def __init__(
35 self,
36 job_proto: JobProto,
37 serving_stub: ServingServiceStub,
38 auth_metadata_plugin: grpc.AuthMetadataPlugin = None,
39 ):
40 """
41 Args:
42 job_proto: Job proto object (wrapped by this job object)
43 serving_stub: Stub for Feast serving service
44 auth_metadata_plugin: plugin to fetch auth metadata
45 """
46 self.job_proto = job_proto
47 self.serving_stub = serving_stub
48 self.auth_metadata = auth_metadata_plugin
49
50 @property
51 def id(self):
52 """
53 Getter for the Job Id
54 """
55 return self.job_proto.id
56
57 @property
58 def status(self):
59 """
60 Getter for the Job status from Feast Core
61 """
62 return self.job_proto.status
63
64 def reload(self):
65 """
66 Reload the latest job status
67 Returns: None
68 """
69 self.job_proto = self.serving_stub.GetJob(
70 GetJobRequest(job=self.job_proto),
71 metadata=self.auth_metadata.get_signed_meta() if self.auth_metadata else (),
72 ).job
73
74 def get_avro_files(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):
75 """
76 Wait until job is done to get the file uri to Avro result files on
77 Google Cloud Storage.
78
79 Args:
80 timeout_sec (int):
81 Max no of seconds to wait until job is done. If "timeout_sec"
82 is exceeded, an exception will be raised.
83
84 Returns:
85 str: Google Cloud Storage file uris of the returned Avro files.
86 """
87
88 def try_retrieve():
89 self.reload()
90 return None, self.status == JOB_STATUS_DONE
91
92 wait_retry_backoff(
93 retry_fn=try_retrieve,
94 timeout_secs=timeout_sec,
95 timeout_msg="Timeout exceeded while waiting for result. Please retry "
96 "this method or use a longer timeout value.",
97 )
98
99 if self.job_proto.error:
100 raise Exception(self.job_proto.error)
101
102 if self.job_proto.data_format != DATA_FORMAT_AVRO:
103 raise Exception(
104 "Feast only supports Avro data format for now. Please check "
105 "your Feast Serving deployment."
106 )
107
108 return [urlparse(uri) for uri in self.job_proto.file_uris]
109
110 def result(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):
111 """
112 Wait until job is done to get an iterable rows of result. The row can
113 only represent an Avro row in Feast 0.3.
114
115 Args:
116 timeout_sec (int):
117 Max no of seconds to wait until job is done. If "timeout_sec"
118 is exceeded, an exception will be raised.
119
120 Returns:
121 Iterable of Avro rows.
122 """
123 uris = self.get_avro_files(timeout_sec)
124 for file_uri in uris:
125 file_obj = get_staging_client(file_uri.scheme).download_file(file_uri)
126 file_obj.seek(0)
127 avro_reader = fastavro.reader(file_obj)
128
129 for record in avro_reader:
130 yield record
131
132 def to_dataframe(
133 self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])
134 ) -> pd.DataFrame:
135 """
136 Wait until a job is done to get an iterable rows of result. This method
137 will split the response into chunked DataFrame of a specified size to
138 to be yielded to the instance calling it.
139
140 Args:
141 max_chunk_size (int):
142 Maximum number of rows that the DataFrame should contain.
143
144 timeout_sec (int):
145 Max no of seconds to wait until job is done. If "timeout_sec"
146 is exceeded, an exception will be raised.
147
148 Returns:
149 pd.DataFrame:
150 Pandas DataFrame of the feature values.
151 """
152 records = [r for r in self.result(timeout_sec=timeout_sec)]
153 return pd.DataFrame.from_records(records)
154
155 def to_chunked_dataframe(
156 self,
157 max_chunk_size: int = -1,
158 timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY]),
159 ) -> pd.DataFrame:
160 """
161 Wait until a job is done to get an iterable rows of result. This method
162 will split the response into chunked DataFrame of a specified size to
163 to be yielded to the instance calling it.
164
165 Args:
166 max_chunk_size (int):
167 Maximum number of rows that the DataFrame should contain.
168
169 timeout_sec (int):
170 Max no of seconds to wait until job is done. If "timeout_sec"
171 is exceeded, an exception will be raised.
172
173 Returns:
174 pd.DataFrame:
175 Pandas DataFrame of the feature values.
176 """
177
178 # Object is Avro row type object, refer to self.result function for this type
179 records: List[dict] = []
180
181 # Max chunk size defined by user
182 for result in self.result(timeout_sec=timeout_sec):
183 result.append(records)
184 if len(records) == max_chunk_size:
185 df = pd.DataFrame.from_records(records)
186 records.clear() # Empty records array
187 yield df
188
189 # Handle for last chunk that is < max_chunk_size
190 if not records:
191 yield pd.DataFrame.from_records(records)
192
193 def __iter__(self):
194 return iter(self.result())
195
196 def statistics(
197 self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])
198 ) -> statistics_pb2.DatasetFeatureStatisticsList:
199 """
200 Get statistics computed over the retrieved data set. Statistics will only be computed for
201 columns that are part of Feast, and not the columns that were provided.
202
203 Args:
204 timeout_sec (int):
205 Max no of seconds to wait until job is done. If "timeout_sec"
206 is exceeded, an exception will be raised.
207
208 Returns:
209 DatasetFeatureStatisticsList containing statistics of Feast features over the retrieved dataset.
210 """
211 self.get_avro_files(timeout_sec) # wait for job completion
212 if self.job_proto.error:
213 raise Exception(self.job_proto.error)
214 return self.job_proto.dataset_feature_statistics_list
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/feast/job.py b/sdk/python/feast/job.py
--- a/sdk/python/feast/job.py
+++ b/sdk/python/feast/job.py
@@ -134,13 +134,9 @@
) -> pd.DataFrame:
"""
Wait until a job is done to get an iterable rows of result. This method
- will split the response into chunked DataFrame of a specified size to
- to be yielded to the instance calling it.
+ will return the response as a DataFrame.
Args:
- max_chunk_size (int):
- Maximum number of rows that the DataFrame should contain.
-
timeout_sec (int):
Max no of seconds to wait until job is done. If "timeout_sec"
is exceeded, an exception will be raised.
@@ -180,14 +176,14 @@
# Max chunk size defined by user
for result in self.result(timeout_sec=timeout_sec):
- result.append(records)
+ records.append(result)
if len(records) == max_chunk_size:
df = pd.DataFrame.from_records(records)
records.clear() # Empty records array
yield df
# Handle for last chunk that is < max_chunk_size
- if not records:
+ if records:
yield pd.DataFrame.from_records(records)
def __iter__(self):
|
{"golden_diff": "diff --git a/sdk/python/feast/job.py b/sdk/python/feast/job.py\n--- a/sdk/python/feast/job.py\n+++ b/sdk/python/feast/job.py\n@@ -134,13 +134,9 @@\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n- will split the response into chunked DataFrame of a specified size to\n- to be yielded to the instance calling it.\n+ will return the response as a DataFrame.\n \n Args:\n- max_chunk_size (int):\n- Maximum number of rows that the DataFrame should contain.\n-\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n@@ -180,14 +176,14 @@\n \n # Max chunk size defined by user\n for result in self.result(timeout_sec=timeout_sec):\n- result.append(records)\n+ records.append(result)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n \n # Handle for last chunk that is < max_chunk_size\n- if not records:\n+ if records:\n yield pd.DataFrame.from_records(records)\n \n def __iter__(self):\n", "issue": "AttributeError: 'dict' object has no attribute 'append' in job.to_chunked_dataframe()\n## Expected Behavior \r\nReturn a generator of a chunked dataframe\r\n## Current Behavior\r\nGiving error :\r\n```\r\n/home/dev/feast-venv/lib/python3.7/site-packages/feast/job.py in to_chunked_dataframe(self, max_chunk_size, timeout_sec)\r\n 187 records = []\r\n 188 for result in self.result(timeout_sec=timeout_sec):\r\n--> 189 result.append(records)\r\n 190 if len(records) == max_chunk_size:\r\n 191 df = pd.DataFrame.from_records(records)\r\nAttributeError: 'dict' object has no attribute 'append'\r\n```\r\n## Steps to reproduce\r\n```\r\ntest = job.to_chunked_dataframe(10)\r\nnext(test)\r\n```\r\n### Specifications\r\n\r\n- Version: 0.5.0\r\n- Platform: Python 3.7\r\n- Subsystem: \r\n\r\n## Possible Solution\r\nIn line 189, it should be `records.append(result)` instead of `result.append(records)`\n", "before_files": [{"content": "from typing import List\nfrom urllib.parse import urlparse\n\nimport fastavro\nimport grpc\nimport pandas as pd\n\nfrom feast.constants import CONFIG_TIMEOUT_KEY\nfrom feast.constants import FEAST_DEFAULT_OPTIONS as defaults\nfrom feast.serving.ServingService_pb2 import (\n DATA_FORMAT_AVRO,\n JOB_STATUS_DONE,\n GetJobRequest,\n)\nfrom feast.serving.ServingService_pb2 import Job as JobProto\nfrom feast.serving.ServingService_pb2_grpc import ServingServiceStub\nfrom feast.staging.storage_client import get_staging_client\nfrom feast.wait import wait_retry_backoff\nfrom tensorflow_metadata.proto.v0 import statistics_pb2\n\n# Maximum no of seconds to wait until the retrieval jobs status is DONE in Feast\n# Currently set to the maximum query execution time limit in BigQuery\nDEFAULT_TIMEOUT_SEC: int = 21600\n\n# Maximum no of seconds to wait before reloading the job status in Feast\nMAX_WAIT_INTERVAL_SEC: int = 60\n\n\nclass RetrievalJob:\n \"\"\"\n A class representing a job for feature retrieval in Feast.\n \"\"\"\n\n def __init__(\n self,\n job_proto: JobProto,\n serving_stub: ServingServiceStub,\n auth_metadata_plugin: grpc.AuthMetadataPlugin = None,\n ):\n \"\"\"\n Args:\n job_proto: Job proto object (wrapped by this job object)\n serving_stub: Stub for Feast serving service\n auth_metadata_plugin: plugin to fetch auth metadata\n \"\"\"\n self.job_proto = job_proto\n self.serving_stub = serving_stub\n self.auth_metadata = auth_metadata_plugin\n\n @property\n def 
id(self):\n \"\"\"\n Getter for the Job Id\n \"\"\"\n return self.job_proto.id\n\n @property\n def status(self):\n \"\"\"\n Getter for the Job status from Feast Core\n \"\"\"\n return self.job_proto.status\n\n def reload(self):\n \"\"\"\n Reload the latest job status\n Returns: None\n \"\"\"\n self.job_proto = self.serving_stub.GetJob(\n GetJobRequest(job=self.job_proto),\n metadata=self.auth_metadata.get_signed_meta() if self.auth_metadata else (),\n ).job\n\n def get_avro_files(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):\n \"\"\"\n Wait until job is done to get the file uri to Avro result files on\n Google Cloud Storage.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n str: Google Cloud Storage file uris of the returned Avro files.\n \"\"\"\n\n def try_retrieve():\n self.reload()\n return None, self.status == JOB_STATUS_DONE\n\n wait_retry_backoff(\n retry_fn=try_retrieve,\n timeout_secs=timeout_sec,\n timeout_msg=\"Timeout exceeded while waiting for result. Please retry \"\n \"this method or use a longer timeout value.\",\n )\n\n if self.job_proto.error:\n raise Exception(self.job_proto.error)\n\n if self.job_proto.data_format != DATA_FORMAT_AVRO:\n raise Exception(\n \"Feast only supports Avro data format for now. Please check \"\n \"your Feast Serving deployment.\"\n )\n\n return [urlparse(uri) for uri in self.job_proto.file_uris]\n\n def result(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):\n \"\"\"\n Wait until job is done to get an iterable rows of result. The row can\n only represent an Avro row in Feast 0.3.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n Iterable of Avro rows.\n \"\"\"\n uris = self.get_avro_files(timeout_sec)\n for file_uri in uris:\n file_obj = get_staging_client(file_uri.scheme).download_file(file_uri)\n file_obj.seek(0)\n avro_reader = fastavro.reader(file_obj)\n\n for record in avro_reader:\n yield record\n\n def to_dataframe(\n self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n will split the response into chunked DataFrame of a specified size to\n to be yielded to the instance calling it.\n\n Args:\n max_chunk_size (int):\n Maximum number of rows that the DataFrame should contain.\n\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n pd.DataFrame:\n Pandas DataFrame of the feature values.\n \"\"\"\n records = [r for r in self.result(timeout_sec=timeout_sec)]\n return pd.DataFrame.from_records(records)\n\n def to_chunked_dataframe(\n self,\n max_chunk_size: int = -1,\n timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY]),\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n will split the response into chunked DataFrame of a specified size to\n to be yielded to the instance calling it.\n\n Args:\n max_chunk_size (int):\n Maximum number of rows that the DataFrame should contain.\n\n timeout_sec (int):\n Max no of seconds to wait until job is done. 
If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n pd.DataFrame:\n Pandas DataFrame of the feature values.\n \"\"\"\n\n # Object is Avro row type object, refer to self.result function for this type\n records: List[dict] = []\n\n # Max chunk size defined by user\n for result in self.result(timeout_sec=timeout_sec):\n result.append(records)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n\n # Handle for last chunk that is < max_chunk_size\n if not records:\n yield pd.DataFrame.from_records(records)\n\n def __iter__(self):\n return iter(self.result())\n\n def statistics(\n self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])\n ) -> statistics_pb2.DatasetFeatureStatisticsList:\n \"\"\"\n Get statistics computed over the retrieved data set. Statistics will only be computed for\n columns that are part of Feast, and not the columns that were provided.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n DatasetFeatureStatisticsList containing statistics of Feast features over the retrieved dataset.\n \"\"\"\n self.get_avro_files(timeout_sec) # wait for job completion\n if self.job_proto.error:\n raise Exception(self.job_proto.error)\n return self.job_proto.dataset_feature_statistics_list\n", "path": "sdk/python/feast/job.py"}], "after_files": [{"content": "from typing import List\nfrom urllib.parse import urlparse\n\nimport fastavro\nimport grpc\nimport pandas as pd\n\nfrom feast.constants import CONFIG_TIMEOUT_KEY\nfrom feast.constants import FEAST_DEFAULT_OPTIONS as defaults\nfrom feast.serving.ServingService_pb2 import (\n DATA_FORMAT_AVRO,\n JOB_STATUS_DONE,\n GetJobRequest,\n)\nfrom feast.serving.ServingService_pb2 import Job as JobProto\nfrom feast.serving.ServingService_pb2_grpc import ServingServiceStub\nfrom feast.staging.storage_client import get_staging_client\nfrom feast.wait import wait_retry_backoff\nfrom tensorflow_metadata.proto.v0 import statistics_pb2\n\n# Maximum no of seconds to wait until the retrieval jobs status is DONE in Feast\n# Currently set to the maximum query execution time limit in BigQuery\nDEFAULT_TIMEOUT_SEC: int = 21600\n\n# Maximum no of seconds to wait before reloading the job status in Feast\nMAX_WAIT_INTERVAL_SEC: int = 60\n\n\nclass RetrievalJob:\n \"\"\"\n A class representing a job for feature retrieval in Feast.\n \"\"\"\n\n def __init__(\n self,\n job_proto: JobProto,\n serving_stub: ServingServiceStub,\n auth_metadata_plugin: grpc.AuthMetadataPlugin = None,\n ):\n \"\"\"\n Args:\n job_proto: Job proto object (wrapped by this job object)\n serving_stub: Stub for Feast serving service\n auth_metadata_plugin: plugin to fetch auth metadata\n \"\"\"\n self.job_proto = job_proto\n self.serving_stub = serving_stub\n self.auth_metadata = auth_metadata_plugin\n\n @property\n def id(self):\n \"\"\"\n Getter for the Job Id\n \"\"\"\n return self.job_proto.id\n\n @property\n def status(self):\n \"\"\"\n Getter for the Job status from Feast Core\n \"\"\"\n return self.job_proto.status\n\n def reload(self):\n \"\"\"\n Reload the latest job status\n Returns: None\n \"\"\"\n self.job_proto = self.serving_stub.GetJob(\n GetJobRequest(job=self.job_proto),\n metadata=self.auth_metadata.get_signed_meta() if self.auth_metadata else (),\n ).job\n\n def get_avro_files(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):\n \"\"\"\n Wait until job is 
done to get the file uri to Avro result files on\n Google Cloud Storage.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n str: Google Cloud Storage file uris of the returned Avro files.\n \"\"\"\n\n def try_retrieve():\n self.reload()\n return None, self.status == JOB_STATUS_DONE\n\n wait_retry_backoff(\n retry_fn=try_retrieve,\n timeout_secs=timeout_sec,\n timeout_msg=\"Timeout exceeded while waiting for result. Please retry \"\n \"this method or use a longer timeout value.\",\n )\n\n if self.job_proto.error:\n raise Exception(self.job_proto.error)\n\n if self.job_proto.data_format != DATA_FORMAT_AVRO:\n raise Exception(\n \"Feast only supports Avro data format for now. Please check \"\n \"your Feast Serving deployment.\"\n )\n\n return [urlparse(uri) for uri in self.job_proto.file_uris]\n\n def result(self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])):\n \"\"\"\n Wait until job is done to get an iterable rows of result. The row can\n only represent an Avro row in Feast 0.3.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n Iterable of Avro rows.\n \"\"\"\n uris = self.get_avro_files(timeout_sec)\n for file_uri in uris:\n file_obj = get_staging_client(file_uri.scheme).download_file(file_uri)\n file_obj.seek(0)\n avro_reader = fastavro.reader(file_obj)\n\n for record in avro_reader:\n yield record\n\n def to_dataframe(\n self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n will return the response as a DataFrame.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n pd.DataFrame:\n Pandas DataFrame of the feature values.\n \"\"\"\n records = [r for r in self.result(timeout_sec=timeout_sec)]\n return pd.DataFrame.from_records(records)\n\n def to_chunked_dataframe(\n self,\n max_chunk_size: int = -1,\n timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY]),\n ) -> pd.DataFrame:\n \"\"\"\n Wait until a job is done to get an iterable rows of result. This method\n will split the response into chunked DataFrame of a specified size to\n to be yielded to the instance calling it.\n\n Args:\n max_chunk_size (int):\n Maximum number of rows that the DataFrame should contain.\n\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n pd.DataFrame:\n Pandas DataFrame of the feature values.\n \"\"\"\n\n # Object is Avro row type object, refer to self.result function for this type\n records: List[dict] = []\n\n # Max chunk size defined by user\n for result in self.result(timeout_sec=timeout_sec):\n records.append(result)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n\n # Handle for last chunk that is < max_chunk_size\n if records:\n yield pd.DataFrame.from_records(records)\n\n def __iter__(self):\n return iter(self.result())\n\n def statistics(\n self, timeout_sec: int = int(defaults[CONFIG_TIMEOUT_KEY])\n ) -> statistics_pb2.DatasetFeatureStatisticsList:\n \"\"\"\n Get statistics computed over the retrieved data set. 
Statistics will only be computed for\n columns that are part of Feast, and not the columns that were provided.\n\n Args:\n timeout_sec (int):\n Max no of seconds to wait until job is done. If \"timeout_sec\"\n is exceeded, an exception will be raised.\n\n Returns:\n DatasetFeatureStatisticsList containing statistics of Feast features over the retrieved dataset.\n \"\"\"\n self.get_avro_files(timeout_sec) # wait for job completion\n if self.job_proto.error:\n raise Exception(self.job_proto.error)\n return self.job_proto.dataset_feature_statistics_list\n", "path": "sdk/python/feast/job.py"}]}
| 2,545 | 299 |
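The record above centres on buffering rows into fixed-size DataFrame chunks. As a standalone illustration of that pattern, here is a minimal sketch that is independent of the Feast codebase: `to_chunked_dataframe`, `rows`, and `max_chunk_size` are illustrative names chosen for the example, and only pandas is assumed to be installed.

```python
# Minimal sketch (not Feast code): yield DataFrame chunks from an iterable of dict rows.
from typing import Dict, Iterable, Iterator, List

import pandas as pd


def to_chunked_dataframe(rows: Iterable[Dict], max_chunk_size: int) -> Iterator[pd.DataFrame]:
    records: List[Dict] = []
    for row in rows:
        records.append(row)  # buffer the incoming row
        if len(records) == max_chunk_size:
            yield pd.DataFrame.from_records(records)
            records = []  # start a fresh buffer for the next chunk
    if records:  # emit the final, possibly smaller, chunk only if anything is left
        yield pd.DataFrame.from_records(records)


# Example with made-up rows: five rows and a chunk size of two give chunks of 2, 2 and 1.
chunks = list(to_chunked_dataframe(({"x": i} for i in range(5)), max_chunk_size=2))
assert [len(c) for c in chunks] == [2, 2, 1]
```

The two details that matter are appending each incoming row to the buffer and emitting the trailing chunk only when the buffer is non-empty.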
gh_patches_debug_34055
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-1552
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add readthedocs documentation for kafka python instrumentation
Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Instrument `kafka-python` to report instrumentation-kafka produced and consumed messages
17
18 Usage
19 -----
20
21 ..code:: python
22
23 from opentelemetry.instrumentation.kafka import KafkaInstrumentor
24 from kafka import KafkaProducer, KafkaConsumer
25
26 # Instrument kafka
27 KafkaInstrumentor().instrument()
28
29 # report a span of type producer with the default settings
30 producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
31 producer.send('my-topic', b'raw_bytes')
32
33
34 # report a span of type consumer with the default settings
35 consumer = KafkaConsumer('my-topic',
36 group_id='my-group',
37 bootstrap_servers=['localhost:9092'])
38 for message in consumer:
39 # process message
40
41 The `_instrument` method accepts the following keyword args:
42 tracer_provider (TracerProvider) - an optional tracer provider
43 produce_hook (Callable) - a function with extra user-defined logic to be performed before sending the message
44 this function signature is:
45 def produce_hook(span: Span, args, kwargs)
46 consume_hook (Callable) - a function with extra user-defined logic to be performed after consuming a message
47 this function signature is:
48 def consume
49 _hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)
50 for example:
51 .. code: python
52 from opentelemetry.instrumentation.kafka import KafkaInstrumentor
53 from kafka import KafkaProducer, KafkaConsumer
54
55 def produce_hook(span, args, kwargs):
56 if span and span.is_recording():
57 span.set_attribute("custom_user_attribute_from_produce_hook", "some-value")
58 def consume_hook(span, record, args, kwargs):
59 if span and span.is_recording():
60 span.set_attribute("custom_user_attribute_from_consume_hook", "some-value")
61
62 # instrument kafka with produce and consume hooks
63 KafkaInstrumentor().instrument(produce_hook=produce_hook, consume_hook=consume_hook)
64
65 # Using kafka as normal now will automatically generate spans,
66 # including user custom attributes added from the hooks
67 producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
68 producer.send('my-topic', b'raw_bytes')
69
70 API
71 ___
72 """
73 from typing import Collection
74
75 import kafka
76 from wrapt import wrap_function_wrapper
77
78 from opentelemetry import trace
79 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
80 from opentelemetry.instrumentation.kafka.package import _instruments
81 from opentelemetry.instrumentation.kafka.utils import _wrap_next, _wrap_send
82 from opentelemetry.instrumentation.kafka.version import __version__
83 from opentelemetry.instrumentation.utils import unwrap
84
85
86 class KafkaInstrumentor(BaseInstrumentor):
87 """An instrumentor for kafka module
88 See `BaseInstrumentor`
89 """
90
91 def instrumentation_dependencies(self) -> Collection[str]:
92 return _instruments
93
94 def _instrument(self, **kwargs):
95 """Instruments the kafka module
96
97 Args:
98 **kwargs: Optional arguments
99 ``tracer_provider``: a TracerProvider, defaults to global.
100 ``produce_hook``: a callable to be executed just before producing a message
101 ``consume_hook``: a callable to be executed just after consuming a message
102 """
103 tracer_provider = kwargs.get("tracer_provider")
104 produce_hook = kwargs.get("produce_hook")
105 consume_hook = kwargs.get("consume_hook")
106
107 tracer = trace.get_tracer(
108 __name__, __version__, tracer_provider=tracer_provider
109 )
110
111 wrap_function_wrapper(
112 kafka.KafkaProducer, "send", _wrap_send(tracer, produce_hook)
113 )
114 wrap_function_wrapper(
115 kafka.KafkaConsumer,
116 "__next__",
117 _wrap_next(tracer, consume_hook),
118 )
119
120 def _uninstrument(self, **kwargs):
121 unwrap(kafka.KafkaProducer, "send")
122 unwrap(kafka.KafkaConsumer, "__next__")
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py
@@ -13,7 +13,7 @@
# limitations under the License.
"""
-Instrument `kafka-python` to report instrumentation-kafka produced and consumed messages
+Instrument kafka-python to report instrumentation-kafka produced and consumed messages
Usage
-----
@@ -30,24 +30,21 @@
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])
producer.send('my-topic', b'raw_bytes')
-
# report a span of type consumer with the default settings
- consumer = KafkaConsumer('my-topic',
- group_id='my-group',
- bootstrap_servers=['localhost:9092'])
+ consumer = KafkaConsumer('my-topic', group_id='my-group', bootstrap_servers=['localhost:9092'])
for message in consumer:
- # process message
+ # process message
-The `_instrument` method accepts the following keyword args:
+The _instrument() method accepts the following keyword args:
tracer_provider (TracerProvider) - an optional tracer provider
produce_hook (Callable) - a function with extra user-defined logic to be performed before sending the message
- this function signature is:
- def produce_hook(span: Span, args, kwargs)
+this function signature is:
+def produce_hook(span: Span, args, kwargs)
consume_hook (Callable) - a function with extra user-defined logic to be performed after consuming a message
- this function signature is:
- def consume
- _hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)
+this function signature is:
+def consume_hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)
for example:
+
.. code: python
from opentelemetry.instrumentation.kafka import KafkaInstrumentor
from kafka import KafkaProducer, KafkaConsumer
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py b/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n \"\"\"\n-Instrument `kafka-python` to report instrumentation-kafka produced and consumed messages\n+Instrument kafka-python to report instrumentation-kafka produced and consumed messages\n \n Usage\n -----\n@@ -30,24 +30,21 @@\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'])\n producer.send('my-topic', b'raw_bytes')\n \n-\n # report a span of type consumer with the default settings\n- consumer = KafkaConsumer('my-topic',\n- group_id='my-group',\n- bootstrap_servers=['localhost:9092'])\n+ consumer = KafkaConsumer('my-topic', group_id='my-group', bootstrap_servers=['localhost:9092'])\n for message in consumer:\n- # process message\n+ # process message\n \n-The `_instrument` method accepts the following keyword args:\n+The _instrument() method accepts the following keyword args:\n tracer_provider (TracerProvider) - an optional tracer provider\n produce_hook (Callable) - a function with extra user-defined logic to be performed before sending the message\n- this function signature is:\n- def produce_hook(span: Span, args, kwargs)\n+this function signature is:\n+def produce_hook(span: Span, args, kwargs)\n consume_hook (Callable) - a function with extra user-defined logic to be performed after consuming a message\n- this function signature is:\n- def consume\n- _hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)\n+this function signature is:\n+def consume_hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)\n for example:\n+\n .. 
code: python\n from opentelemetry.instrumentation.kafka import KafkaInstrumentor\n from kafka import KafkaProducer, KafkaConsumer\n", "issue": "Add readthedocs documentation for kafka python instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInstrument `kafka-python` to report instrumentation-kafka produced and consumed messages\n\nUsage\n-----\n\n..code:: python\n\n from opentelemetry.instrumentation.kafka import KafkaInstrumentor\n from kafka import KafkaProducer, KafkaConsumer\n\n # Instrument kafka\n KafkaInstrumentor().instrument()\n\n # report a span of type producer with the default settings\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'])\n producer.send('my-topic', b'raw_bytes')\n\n\n # report a span of type consumer with the default settings\n consumer = KafkaConsumer('my-topic',\n group_id='my-group',\n bootstrap_servers=['localhost:9092'])\n for message in consumer:\n # process message\n\nThe `_instrument` method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\nproduce_hook (Callable) - a function with extra user-defined logic to be performed before sending the message\n this function signature is:\n def produce_hook(span: Span, args, kwargs)\nconsume_hook (Callable) - a function with extra user-defined logic to be performed after consuming a message\n this function signature is:\n def consume\n _hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)\nfor example:\n.. 
code: python\n from opentelemetry.instrumentation.kafka import KafkaInstrumentor\n from kafka import KafkaProducer, KafkaConsumer\n\n def produce_hook(span, args, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_produce_hook\", \"some-value\")\n def consume_hook(span, record, args, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_consume_hook\", \"some-value\")\n\n # instrument kafka with produce and consume hooks\n KafkaInstrumentor().instrument(produce_hook=produce_hook, consume_hook=consume_hook)\n\n # Using kafka as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'])\n producer.send('my-topic', b'raw_bytes')\n\nAPI\n___\n\"\"\"\nfrom typing import Collection\n\nimport kafka\nfrom wrapt import wrap_function_wrapper\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.kafka.package import _instruments\nfrom opentelemetry.instrumentation.kafka.utils import _wrap_next, _wrap_send\nfrom opentelemetry.instrumentation.kafka.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\n\n\nclass KafkaInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for kafka module\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Instruments the kafka module\n\n Args:\n **kwargs: Optional arguments\n ``tracer_provider``: a TracerProvider, defaults to global.\n ``produce_hook``: a callable to be executed just before producing a message\n ``consume_hook``: a callable to be executed just after consuming a message\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n produce_hook = kwargs.get(\"produce_hook\")\n consume_hook = kwargs.get(\"consume_hook\")\n\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n wrap_function_wrapper(\n kafka.KafkaProducer, \"send\", _wrap_send(tracer, produce_hook)\n )\n wrap_function_wrapper(\n kafka.KafkaConsumer,\n \"__next__\",\n _wrap_next(tracer, consume_hook),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(kafka.KafkaProducer, \"send\")\n unwrap(kafka.KafkaConsumer, \"__next__\")\n", "path": "instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInstrument kafka-python to report instrumentation-kafka produced and consumed messages\n\nUsage\n-----\n\n..code:: python\n\n from opentelemetry.instrumentation.kafka import KafkaInstrumentor\n from kafka import KafkaProducer, KafkaConsumer\n\n # Instrument kafka\n KafkaInstrumentor().instrument()\n\n # report a span of type producer with the default settings\n 
producer = KafkaProducer(bootstrap_servers=['localhost:9092'])\n producer.send('my-topic', b'raw_bytes')\n\n # report a span of type consumer with the default settings\n consumer = KafkaConsumer('my-topic', group_id='my-group', bootstrap_servers=['localhost:9092'])\n for message in consumer:\n # process message\n\nThe _instrument() method accepts the following keyword args:\ntracer_provider (TracerProvider) - an optional tracer provider\nproduce_hook (Callable) - a function with extra user-defined logic to be performed before sending the message\nthis function signature is:\ndef produce_hook(span: Span, args, kwargs)\nconsume_hook (Callable) - a function with extra user-defined logic to be performed after consuming a message\nthis function signature is:\ndef consume_hook(span: Span, record: kafka.record.ABCRecord, args, kwargs)\nfor example:\n\n.. code: python\n from opentelemetry.instrumentation.kafka import KafkaInstrumentor\n from kafka import KafkaProducer, KafkaConsumer\n\n def produce_hook(span, args, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_produce_hook\", \"some-value\")\n def consume_hook(span, record, args, kwargs):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_consume_hook\", \"some-value\")\n\n # instrument kafka with produce and consume hooks\n KafkaInstrumentor().instrument(produce_hook=produce_hook, consume_hook=consume_hook)\n\n # Using kafka as normal now will automatically generate spans,\n # including user custom attributes added from the hooks\n producer = KafkaProducer(bootstrap_servers=['localhost:9092'])\n producer.send('my-topic', b'raw_bytes')\n\nAPI\n___\n\"\"\"\nfrom typing import Collection\n\nimport kafka\nfrom wrapt import wrap_function_wrapper\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.kafka.package import _instruments\nfrom opentelemetry.instrumentation.kafka.utils import _wrap_next, _wrap_send\nfrom opentelemetry.instrumentation.kafka.version import __version__\nfrom opentelemetry.instrumentation.utils import unwrap\n\n\nclass KafkaInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for kafka module\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Instruments the kafka module\n\n Args:\n **kwargs: Optional arguments\n ``tracer_provider``: a TracerProvider, defaults to global.\n ``produce_hook``: a callable to be executed just before producing a message\n ``consume_hook``: a callable to be executed just after consuming a message\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n produce_hook = kwargs.get(\"produce_hook\")\n consume_hook = kwargs.get(\"consume_hook\")\n\n tracer = trace.get_tracer(\n __name__, __version__, tracer_provider=tracer_provider\n )\n\n wrap_function_wrapper(\n kafka.KafkaProducer, \"send\", _wrap_send(tracer, produce_hook)\n )\n wrap_function_wrapper(\n kafka.KafkaConsumer,\n \"__next__\",\n _wrap_next(tracer, consume_hook),\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(kafka.KafkaProducer, \"send\")\n unwrap(kafka.KafkaConsumer, \"__next__\")\n", "path": "instrumentation/opentelemetry-instrumentation-kafka-python/src/opentelemetry/instrumentation/kafka/__init__.py"}]}
| 1,529 | 503 |
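The record above is about making an instrumentation docstring render cleanly on readthedocs. As a hedged, generic sketch of the reStructuredText layout Sphinx expects (a directive, a blank line, then an indented block), here is a self-contained placeholder module; `example_instrumentation` and `ExampleInstrumentor` are invented names with no relation to the opentelemetry packages.

```python
"""Example module docstring laid out so Sphinx/readthedocs renders the snippet.

Usage
-----

.. code-block:: python

    from example_instrumentation import ExampleInstrumentor

    # instrument the client library once, before producers or consumers are created
    ExampleInstrumentor().instrument()

The ``instrument()`` call accepts optional keyword arguments such as ``tracer_provider``.
"""


class ExampleInstrumentor:
    """Placeholder instrumentor that only exists to make the sketch importable."""

    def instrument(self, **kwargs):
        # a real instrumentor would wrap the library entry points here
        return kwargs
```

Keeping the snippet indented under the directive and separated from it by a blank line is what lets the documentation build pick it up as highlighted code rather than plain text.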
gh_patches_debug_37763
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-1853
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Loftr does not work with some image size (not a memory issue)
### Describe the bug
LoFTR incorrectly does something with positional embeddings
```
RuntimeError Traceback (most recent call last)
[<ipython-input-1-54d246337ab1>](https://9t3p2yszpxn-496ff2e9c6d22116-0-colab.googleusercontent.com/outputframe.html?vrz=colab-20220613-060046-RC00_454553376#) in <module>()
10 "image1": torch.rand(1,1, 1704, 2272).cuda()}
11 with torch.no_grad():
---> 12 correspondences = matcher(input_dict)
3 frames
[/usr/local/lib/python3.7/dist-packages/kornia/feature/loftr/utils/position_encoding.py](https://9t3p2yszpxn-496ff2e9c6d22116-0-colab.googleusercontent.com/outputframe.html?vrz=colab-20220613-060046-RC00_454553376#) in forward(self, x)
39 x: [N, C, H, W]
40 """
---> 41 return x + self.pe[:, :, :x.size(2), :x.size(3)]
RuntimeError: The size of tensor a (284) must match the size of tensor b (256) at non-singleton dimension 3
```
### Reproduction steps
```bash
import kornia as K
import kornia.feature as KF
import numpy as np
import torch
matcher = KF.LoFTR(pretrained='outdoor').cuda()
input_dict = {"image0": torch.rand(1,1, 1704, 2272),
"image1": torch.rand(1,1, 1704, 2272)}
with torch.no_grad():
correspondences = matcher(input_dict)
```
### Expected behavior
Not an error
### Environment
```shell
not relevant
```
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/feature/loftr/utils/position_encoding.py`
Content:
```
1 import math
2
3 import torch
4 from torch import nn
5
6
7 class PositionEncodingSine(nn.Module):
8 """This is a sinusoidal position encoding that generalized to 2-dimensional images."""
9
10 def __init__(self, d_model, max_shape=(256, 256), temp_bug_fix=True):
11 """
12 Args:
13 max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels
14 temp_bug_fix (bool): As noted in this [issue](https://github.com/zju3dv/LoFTR/issues/41),
15 the original implementation of LoFTR includes a bug in the pos-enc impl, which has little impact
16 on the final performance. For now, we keep both impls for backward compatibility.
17 We will remove the buggy impl after re-training all variants of our released models.
18 """
19 super().__init__()
20
21 pe = torch.zeros((d_model, *max_shape))
22 y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)
23 x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)
24 if temp_bug_fix:
25 div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / (d_model // 2)))
26 else: # a buggy implementation (for backward compatibility only)
27 div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / d_model // 2))
28 div_term = div_term[:, None, None] # [C//4, 1, 1]
29 pe[0::4, :, :] = torch.sin(x_position * div_term)
30 pe[1::4, :, :] = torch.cos(x_position * div_term)
31 pe[2::4, :, :] = torch.sin(y_position * div_term)
32 pe[3::4, :, :] = torch.cos(y_position * div_term)
33
34 self.register_buffer('pe', pe.unsqueeze(0), persistent=False) # [1, C, H, W]
35
36 def forward(self, x):
37 """
38 Args:
39 x: [N, C, H, W]
40 """
41 return x + self.pe[:, :, : x.size(2), : x.size(3)]
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kornia/feature/loftr/utils/position_encoding.py b/kornia/feature/loftr/utils/position_encoding.py
--- a/kornia/feature/loftr/utils/position_encoding.py
+++ b/kornia/feature/loftr/utils/position_encoding.py
@@ -17,25 +17,51 @@
We will remove the buggy impl after re-training all variants of our released models.
"""
super().__init__()
+ self.d_model = d_model
+ self.temp_bug_fix = temp_bug_fix
- pe = torch.zeros((d_model, *max_shape))
+ pe = self._create_position_encoding(max_shape)
+ self.register_buffer('pe', pe, persistent=False) # [1, C, H, W]
+
+ def _create_position_encoding(self, max_shape):
+ """Creates a position encoding from scratch.
+
+ For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape
+ should be (H//8, W//8).
+ """
+ pe = torch.zeros((self.d_model, *max_shape))
y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)
x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)
- if temp_bug_fix:
- div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / (d_model // 2)))
+ if self.temp_bug_fix:
+ div_term = torch.exp(
+ torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / (self.d_model // 2))
+ )
else: # a buggy implementation (for backward compatibility only)
- div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / d_model // 2))
+ div_term = torch.exp(
+ torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / self.d_model // 2)
+ )
div_term = div_term[:, None, None] # [C//4, 1, 1]
pe[0::4, :, :] = torch.sin(x_position * div_term)
pe[1::4, :, :] = torch.cos(x_position * div_term)
pe[2::4, :, :] = torch.sin(y_position * div_term)
pe[3::4, :, :] = torch.cos(y_position * div_term)
+ return pe.unsqueeze(0)
- self.register_buffer('pe', pe.unsqueeze(0), persistent=False) # [1, C, H, W]
+ def update_position_encoding_size(self, max_shape):
+ """Updates position encoding to new max_shape.
+
+ For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape
+ should be (H//8, W//8).
+ """
+ self.pe = self._create_position_encoding(max_shape).to(self.pe.device)
def forward(self, x):
"""
Args:
x: [N, C, H, W]
"""
+ if x.size(2) > self.pe.size(2) or x.size(3) > self.pe.size(3):
+ max_shape = (max(x.size(2), self.pe.size(2)), max(x.size(3), self.pe.size(3)))
+ self.update_position_encoding_size(max_shape)
+
return x + self.pe[:, :, : x.size(2), : x.size(3)]
|
{"golden_diff": "diff --git a/kornia/feature/loftr/utils/position_encoding.py b/kornia/feature/loftr/utils/position_encoding.py\n--- a/kornia/feature/loftr/utils/position_encoding.py\n+++ b/kornia/feature/loftr/utils/position_encoding.py\n@@ -17,25 +17,51 @@\n We will remove the buggy impl after re-training all variants of our released models.\n \"\"\"\n super().__init__()\n+ self.d_model = d_model\n+ self.temp_bug_fix = temp_bug_fix\n \n- pe = torch.zeros((d_model, *max_shape))\n+ pe = self._create_position_encoding(max_shape)\n+ self.register_buffer('pe', pe, persistent=False) # [1, C, H, W]\n+\n+ def _create_position_encoding(self, max_shape):\n+ \"\"\"Creates a position encoding from scratch.\n+\n+ For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape\n+ should be (H//8, W//8).\n+ \"\"\"\n+ pe = torch.zeros((self.d_model, *max_shape))\n y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)\n x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)\n- if temp_bug_fix:\n- div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / (d_model // 2)))\n+ if self.temp_bug_fix:\n+ div_term = torch.exp(\n+ torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / (self.d_model // 2))\n+ )\n else: # a buggy implementation (for backward compatibility only)\n- div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / d_model // 2))\n+ div_term = torch.exp(\n+ torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / self.d_model // 2)\n+ )\n div_term = div_term[:, None, None] # [C//4, 1, 1]\n pe[0::4, :, :] = torch.sin(x_position * div_term)\n pe[1::4, :, :] = torch.cos(x_position * div_term)\n pe[2::4, :, :] = torch.sin(y_position * div_term)\n pe[3::4, :, :] = torch.cos(y_position * div_term)\n+ return pe.unsqueeze(0)\n \n- self.register_buffer('pe', pe.unsqueeze(0), persistent=False) # [1, C, H, W]\n+ def update_position_encoding_size(self, max_shape):\n+ \"\"\"Updates position encoding to new max_shape.\n+\n+ For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape\n+ should be (H//8, W//8).\n+ \"\"\"\n+ self.pe = self._create_position_encoding(max_shape).to(self.pe.device)\n \n def forward(self, x):\n \"\"\"\n Args:\n x: [N, C, H, W]\n \"\"\"\n+ if x.size(2) > self.pe.size(2) or x.size(3) > self.pe.size(3):\n+ max_shape = (max(x.size(2), self.pe.size(2)), max(x.size(3), self.pe.size(3)))\n+ self.update_position_encoding_size(max_shape)\n+\n return x + self.pe[:, :, : x.size(2), : x.size(3)]\n", "issue": "Loftr does not work with some image size (not a memory issue)\n### Describe the bug\n\nLoFTR incorrectly does something with positional embeddings\r\n```\r\nRuntimeError Traceback (most recent call last)\r\n[<ipython-input-1-54d246337ab1>](https://9t3p2yszpxn-496ff2e9c6d22116-0-colab.googleusercontent.com/outputframe.html?vrz=colab-20220613-060046-RC00_454553376#) in <module>()\r\n 10 \"image1\": torch.rand(1,1, 1704, 2272).cuda()}\r\n 11 with torch.no_grad():\r\n---> 12 correspondences = matcher(input_dict)\r\n\r\n3 frames\r\n[/usr/local/lib/python3.7/dist-packages/kornia/feature/loftr/utils/position_encoding.py](https://9t3p2yszpxn-496ff2e9c6d22116-0-colab.googleusercontent.com/outputframe.html?vrz=colab-20220613-060046-RC00_454553376#) in forward(self, x)\r\n 39 x: [N, C, H, W]\r\n 40 \"\"\"\r\n---> 41 return x + self.pe[:, :, :x.size(2), :x.size(3)]\r\n\r\nRuntimeError: The size of 
tensor a (284) must match the size of tensor b (256) at non-singleton dimension 3\r\n```\n\n### Reproduction steps\n\n```bash\nimport kornia as K\r\nimport kornia.feature as KF\r\nimport numpy as np\r\nimport torch\r\n\r\nmatcher = KF.LoFTR(pretrained='outdoor').cuda()\r\n\r\ninput_dict = {\"image0\": torch.rand(1,1, 1704, 2272),\r\n \"image1\": torch.rand(1,1, 1704, 2272)}\r\nwith torch.no_grad():\r\n correspondences = matcher(input_dict)\n```\n\n\n### Expected behavior\n\nNot an error \n\n### Environment\n\n```shell\nnot relevant\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import math\n\nimport torch\nfrom torch import nn\n\n\nclass PositionEncodingSine(nn.Module):\n \"\"\"This is a sinusoidal position encoding that generalized to 2-dimensional images.\"\"\"\n\n def __init__(self, d_model, max_shape=(256, 256), temp_bug_fix=True):\n \"\"\"\n Args:\n max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels\n temp_bug_fix (bool): As noted in this [issue](https://github.com/zju3dv/LoFTR/issues/41),\n the original implementation of LoFTR includes a bug in the pos-enc impl, which has little impact\n on the final performance. For now, we keep both impls for backward compatibility.\n We will remove the buggy impl after re-training all variants of our released models.\n \"\"\"\n super().__init__()\n\n pe = torch.zeros((d_model, *max_shape))\n y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)\n x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)\n if temp_bug_fix:\n div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / (d_model // 2)))\n else: # a buggy implementation (for backward compatibility only)\n div_term = torch.exp(torch.arange(0, d_model // 2, 2).float() * (-math.log(10000.0) / d_model // 2))\n div_term = div_term[:, None, None] # [C//4, 1, 1]\n pe[0::4, :, :] = torch.sin(x_position * div_term)\n pe[1::4, :, :] = torch.cos(x_position * div_term)\n pe[2::4, :, :] = torch.sin(y_position * div_term)\n pe[3::4, :, :] = torch.cos(y_position * div_term)\n\n self.register_buffer('pe', pe.unsqueeze(0), persistent=False) # [1, C, H, W]\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [N, C, H, W]\n \"\"\"\n return x + self.pe[:, :, : x.size(2), : x.size(3)]\n", "path": "kornia/feature/loftr/utils/position_encoding.py"}], "after_files": [{"content": "import math\n\nimport torch\nfrom torch import nn\n\n\nclass PositionEncodingSine(nn.Module):\n \"\"\"This is a sinusoidal position encoding that generalized to 2-dimensional images.\"\"\"\n\n def __init__(self, d_model, max_shape=(256, 256), temp_bug_fix=True):\n \"\"\"\n Args:\n max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels\n temp_bug_fix (bool): As noted in this [issue](https://github.com/zju3dv/LoFTR/issues/41),\n the original implementation of LoFTR includes a bug in the pos-enc impl, which has little impact\n on the final performance. 
For now, we keep both impls for backward compatibility.\n We will remove the buggy impl after re-training all variants of our released models.\n \"\"\"\n super().__init__()\n self.d_model = d_model\n self.temp_bug_fix = temp_bug_fix\n\n pe = self._create_position_encoding(max_shape)\n self.register_buffer('pe', pe, persistent=False) # [1, C, H, W]\n\n def _create_position_encoding(self, max_shape):\n \"\"\"Creates a position encoding from scratch.\n\n For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape\n should be (H//8, W//8).\n \"\"\"\n pe = torch.zeros((self.d_model, *max_shape))\n y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)\n x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)\n if self.temp_bug_fix:\n div_term = torch.exp(\n torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / (self.d_model // 2))\n )\n else: # a buggy implementation (for backward compatibility only)\n div_term = torch.exp(\n torch.arange(0, self.d_model // 2, 2).float() * (-math.log(10000.0) / self.d_model // 2)\n )\n div_term = div_term[:, None, None] # [C//4, 1, 1]\n pe[0::4, :, :] = torch.sin(x_position * div_term)\n pe[1::4, :, :] = torch.cos(x_position * div_term)\n pe[2::4, :, :] = torch.sin(y_position * div_term)\n pe[3::4, :, :] = torch.cos(y_position * div_term)\n return pe.unsqueeze(0)\n\n def update_position_encoding_size(self, max_shape):\n \"\"\"Updates position encoding to new max_shape.\n\n For 1/8 feature map (which is standard): If the input image size is H, W (both divisible by 8), the max_shape\n should be (H//8, W//8).\n \"\"\"\n self.pe = self._create_position_encoding(max_shape).to(self.pe.device)\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [N, C, H, W]\n \"\"\"\n if x.size(2) > self.pe.size(2) or x.size(3) > self.pe.size(3):\n max_shape = (max(x.size(2), self.pe.size(2)), max(x.size(3), self.pe.size(3)))\n self.update_position_encoding_size(max_shape)\n\n return x + self.pe[:, :, : x.size(2), : x.size(3)]\n", "path": "kornia/feature/loftr/utils/position_encoding.py"}]}
| 1,397 | 864 |
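The record above deals with an additive positional encoding whose precomputed buffer is smaller than the incoming feature map. The sketch below shows only the general caching idea, with a deliberately simplified encoding rather than the sinusoidal one used by LoFTR; the class name, the linear ramp signal, and the default `max_shape` are assumptions made for the example.

```python
# Minimal sketch (not the LoFTR implementation): a cached 2-D additive encoding that
# is rebuilt whenever the input feature map is larger than the cached buffer.
import torch
from torch import nn


class LazyAdditiveEncoding2d(nn.Module):
    def __init__(self, channels: int, max_shape=(256, 256)):
        super().__init__()
        self.channels = channels
        self.register_buffer("pe", self._build(max_shape), persistent=False)

    def _build(self, shape):
        h, w = shape
        ys = torch.linspace(0.0, 1.0, h).view(1, 1, h, 1).expand(1, self.channels, h, w)
        xs = torch.linspace(0.0, 1.0, w).view(1, 1, 1, w).expand(1, self.channels, h, w)
        return ys + xs  # [1, C, H, W]; any deterministic positional signal works for the sketch

    def forward(self, x):
        # Rebuild the cache if the incoming feature map exceeds it in either dimension.
        if x.size(2) > self.pe.size(2) or x.size(3) > self.pe.size(3):
            new_shape = (max(x.size(2), self.pe.size(2)), max(x.size(3), self.pe.size(3)))
            self.pe = self._build(new_shape).to(device=x.device, dtype=x.dtype)
        return x + self.pe[:, :, : x.size(2), : x.size(3)]


# Example: a 300x280 input no longer trips the 256x256 default cache.
enc = LazyAdditiveEncoding2d(channels=8)
out = enc(torch.rand(1, 8, 300, 280))
assert out.shape == (1, 8, 300, 280)
```

Rebuilding on demand costs a one-off recomputation but removes the hard dependency on the precomputed maximum shape.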
gh_patches_debug_8026
|
rasdani/github-patches
|
git_diff
|
dmlc__dgl-3696
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] FileExistsError when sometimes importing dgl from multiprocess training
## 🐛 Bug
Sometimes, when I launch my Pytorch distributed trainer (which spawns multiple trainer processes, eg once for each GPU for multi-gpu model training), my training job fails with the following error:
```
# pardon the possibly out-of-order stack trace, multiple processes are interleaving the stdout
import dgl
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dgl/__init__.py", line 13, in <module>
from .backend import load_backend, backend_name
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
File "trainer/utils/cli.py", line 137, in <module>
locals()["run_" + args.which](args, extra)
File "/usr/local/lib/python3.7/site-packages/dgl/backend/__init__.py", line 107, in <module>
load_backend(get_preferred_backend())
File "trainer/utils/cli.py", line 27, in run_local
trainer_class = locate(args.trainer)
FileExistsError: [Errno 17] File exists: '/root/.dgl'
File "/usr/local/lib/python3.7/site-packages/dgl/backend/__init__.py", line 103, in get_preferred_backend
set_default_backend(default_dir, 'pytorch')
FileExistsError: [Errno 17] File exists: '/root/.dgl'
```
I see this occur fairly often, say ~10-20% of the time. Usually, retrying the train command fixes things.
For what it's worth: I am running this within a Docker container, using a DGL nightly build from `2021-10-18`
## To Reproduce
Steps to reproduce the behavior:
I don't have a repro script. But, hopefully this stack trace can point out a diagnosis + fix.
## Expected behavior
Importing dgl shouldn't cause an error.
## Environment
- DGL Version (e.g., 1.0): >0.7 (Nightly build from 2021-10-18).
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3):
- OS (e.g., Linux): Linux
- How you installed DGL (`conda`, `pip`, source): From nightly
- Build command you used (if compiling from source):
- Python version: 3.7
- CUDA/cuDNN version (if applicable):
- GPU models and configuration (e.g. V100):
- Any other relevant information:
## Additional context
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/dgl/backend/set_default_backend.py`
Content:
```
1 import argparse
2 import os
3 import json
4
5 def set_default_backend(default_dir, backend_name):
6 if not os.path.exists(default_dir):
7 os.makedirs(default_dir)
8 config_path = os.path.join(default_dir, 'config.json')
9 with open(config_path, "w") as config_file:
10 json.dump({'backend': backend_name.lower()}, config_file)
11 print('Setting the default backend to "{}". You can change it in the '
12 '~/.dgl/config.json file or export the DGLBACKEND environment variable. '
13 'Valid options are: pytorch, mxnet, tensorflow (all lowercase)'.format(
14 backend_name))
15
16 if __name__ == "__main__":
17 parser = argparse.ArgumentParser()
18 parser.add_argument("default_dir", type=str, default=os.path.join(os.path.expanduser('~'), '.dgl'))
19 parser.add_argument("backend", nargs=1, type=str, choices=[
20 'pytorch', 'tensorflow', 'mxnet'], help="Set default backend")
21 args = parser.parse_args()
22 set_default_backend(args.default_dir, args.backend[0])
23
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/dgl/backend/set_default_backend.py b/python/dgl/backend/set_default_backend.py
--- a/python/dgl/backend/set_default_backend.py
+++ b/python/dgl/backend/set_default_backend.py
@@ -3,8 +3,8 @@
import json
def set_default_backend(default_dir, backend_name):
- if not os.path.exists(default_dir):
- os.makedirs(default_dir)
+ # the exists_ok requires python >= 3.2
+ os.makedirs(default_dir, exists_ok=True)
config_path = os.path.join(default_dir, 'config.json')
with open(config_path, "w") as config_file:
json.dump({'backend': backend_name.lower()}, config_file)
|
{"golden_diff": "diff --git a/python/dgl/backend/set_default_backend.py b/python/dgl/backend/set_default_backend.py\n--- a/python/dgl/backend/set_default_backend.py\n+++ b/python/dgl/backend/set_default_backend.py\n@@ -3,8 +3,8 @@\n import json\n \n def set_default_backend(default_dir, backend_name):\n- if not os.path.exists(default_dir):\n- os.makedirs(default_dir)\n+ # the exists_ok requires python >= 3.2\n+ os.makedirs(default_dir, exists_ok=True)\n config_path = os.path.join(default_dir, 'config.json')\n with open(config_path, \"w\") as config_file: \n json.dump({'backend': backend_name.lower()}, config_file)\n", "issue": "[Bug] FileExistsError when sometimes importing dgl from multiprocess training\n## \ud83d\udc1b Bug\r\nSometimes, when I launch my Pytorch distributed trainer (which spawns multiple trainer processes, eg once for each GPU for multi-gpu model training), my training job fails with the following error:\r\n\r\n```\r\n# pardon the possibly out-of-order stack trace, multiple processes are interleaving the stdout\r\n import dgl\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/dgl/__init__.py\", line 13, in <module>\r\n from .backend import load_backend, backend_name\r\n File \"/usr/local/lib/python3.7/os.py\", line 221, in makedirs\r\n mkdir(name, mode)\r\n File \"trainer/utils/cli.py\", line 137, in <module>\r\n locals()[\"run_\" + args.which](args, extra)\r\n File \"/usr/local/lib/python3.7/site-packages/dgl/backend/__init__.py\", line 107, in <module>\r\n load_backend(get_preferred_backend())\r\n File \"trainer/utils/cli.py\", line 27, in run_local\r\n trainer_class = locate(args.trainer)\r\nFileExistsError: [Errno 17] File exists: '/root/.dgl'\r\n File \"/usr/local/lib/python3.7/site-packages/dgl/backend/__init__.py\", line 103, in get_preferred_backend\r\n set_default_backend(default_dir, 'pytorch')\r\nFileExistsError: [Errno 17] File exists: '/root/.dgl'\r\n```\r\n\r\nI see this occur fairly often, say ~10-20% of the time. Usually, retrying the train command fixes things.\r\n\r\nFor what it's worth: I am running this within a Docker container, using a DGL nightly build from `2021-10-18`\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nI don't have a repro script. But, hopefully this stack trace can point out a diagnosis + fix.\r\n\r\n## Expected behavior\r\n\r\nImporting dgl shouldn't cause an error.\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): >0.7 (Nightly build from 2021-10-18).\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3):\r\n - OS (e.g., Linux): Linux\r\n - How you installed DGL (`conda`, `pip`, source): From nightly\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version (if applicable):\r\n - GPU models and configuration (e.g. V100):\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\n", "before_files": [{"content": "import argparse\nimport os\nimport json\n\ndef set_default_backend(default_dir, backend_name):\n if not os.path.exists(default_dir):\n os.makedirs(default_dir)\n config_path = os.path.join(default_dir, 'config.json')\n with open(config_path, \"w\") as config_file: \n json.dump({'backend': backend_name.lower()}, config_file)\n print('Setting the default backend to \"{}\". You can change it in the '\n '~/.dgl/config.json file or export the DGLBACKEND environment variable. 
'\n 'Valid options are: pytorch, mxnet, tensorflow (all lowercase)'.format(\n backend_name))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"default_dir\", type=str, default=os.path.join(os.path.expanduser('~'), '.dgl'))\n parser.add_argument(\"backend\", nargs=1, type=str, choices=[\n 'pytorch', 'tensorflow', 'mxnet'], help=\"Set default backend\")\n args = parser.parse_args()\n set_default_backend(args.default_dir, args.backend[0])\n", "path": "python/dgl/backend/set_default_backend.py"}], "after_files": [{"content": "import argparse\nimport os\nimport json\n\ndef set_default_backend(default_dir, backend_name):\n # the exists_ok requires python >= 3.2\n os.makedirs(default_dir, exists_ok=True)\n config_path = os.path.join(default_dir, 'config.json')\n with open(config_path, \"w\") as config_file: \n json.dump({'backend': backend_name.lower()}, config_file)\n print('Setting the default backend to \"{}\". You can change it in the '\n '~/.dgl/config.json file or export the DGLBACKEND environment variable. '\n 'Valid options are: pytorch, mxnet, tensorflow (all lowercase)'.format(\n backend_name))\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"default_dir\", type=str, default=os.path.join(os.path.expanduser('~'), '.dgl'))\n parser.add_argument(\"backend\", nargs=1, type=str, choices=[\n 'pytorch', 'tensorflow', 'mxnet'], help=\"Set default backend\")\n args = parser.parse_args()\n set_default_backend(args.default_dir, args.backend[0])\n", "path": "python/dgl/backend/set_default_backend.py"}]}
| 1,130 | 150 |
gh_patches_debug_36088
|
rasdani/github-patches
|
git_diff
|
quantumlib__Cirq-4246
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Push to PyPi failing
```
error in cirq setup command: 'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.
```
See https://github.com/quantumlib/Cirq/runs/2851981344
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17 from setuptools import setup
18
19 # This reads the __version__ variable from cirq/_version.py
20 __version__ = ''
21
22 from dev_tools import modules
23 from dev_tools.requirements import explode
24
25 exec(open('cirq-core/cirq/_version.py').read())
26
27 name = 'cirq'
28
29 description = (
30 'A framework for creating, editing, and invoking '
31 'Noisy Intermediate Scale Quantum (NISQ) circuits.'
32 )
33
34 # README file as long_description.
35 long_description = io.open('README.rst', encoding='utf-8').read()
36
37 # If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.
38 # It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence
39 # it will be a pre-release version on PyPi. See
40 # https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning
41 # for more details.
42 if 'CIRQ_PRE_RELEASE_VERSION' in os.environ:
43 __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']
44 long_description = (
45 "**This is a development version of Cirq and may be "
46 "unstable.**\n\n**For the latest stable release of Cirq "
47 "see**\n`here <https://pypi.org/project/cirq>`__.\n\n" + long_description
48 )
49
50 # Sanity check
51 assert __version__, 'Version string cannot be empty'
52
53 # This is a pure metapackage that installs all our packages
54 requirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]
55
56 dev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')
57 dev_requirements = [r.strip() for r in dev_requirements]
58
59 setup(
60 name=name,
61 version=__version__,
62 url='http://github.com/quantumlib/cirq',
63 author='The Cirq Developers',
64 author_email='[email protected]',
65 python_requires=('>=3.6.0'),
66 install_requires=requirements,
67 extras_require={
68 'dev_env': dev_requirements,
69 },
70 license='Apache 2',
71 description=description,
72 long_description=long_description,
73 )
74
```
Path: `dev_tools/modules.py`
Content:
```
1 # Copyright 2021 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 """Utility tool for cirq modules.
17
18 It can be used as a python library for python scripts as well as a CLI tool for
19 bash scripts and interactive use.
20
21 Features:
22
23 listing modules:
24 - Python: see list_modules
25 - CLI: python3 dev_tools/modules.py list
26
27 optional arguments:
28 -h, --help show this help message and exit
29 --mode {folder,package-path}
30 'folder' to list root folder for module, 'package-path' for top level
31 python package path
32 --include-parent whether to include the parent package or not
33 """
34
35 import argparse
36 import dataclasses
37 import os
38 import sys
39 from pathlib import Path
40 from typing import List, Dict, Any
41
42 _FOLDER = 'folder'
43 _PACKAGE_PATH = 'package-path'
44
45
46 @dataclasses.dataclass
47 class Module:
48 root: Path
49 raw_setup: Dict[str, Any]
50
51 name: str = dataclasses.field(init=False)
52 version: str = dataclasses.field(init=False)
53 top_level_packages: List[str] = dataclasses.field(init=False)
54 top_level_package_paths: List[Path] = dataclasses.field(init=False)
55
56 def __post_init__(self) -> None:
57 self.name = self.raw_setup['name']
58 if 'packages' in self.raw_setup:
59 self.top_level_packages = [p for p in self.raw_setup['packages'] if '.' not in p]
60 else:
61 self.top_level_packages = []
62 self.top_level_package_paths = [self.root / p for p in self.top_level_packages]
63 self.version = self.raw_setup['version']
64
65
66 def list_modules(
67 search_dir: Path = Path(__file__).parents[1], include_parent: bool = False
68 ) -> List[Module]:
69 """Returns a list of python modules based defined by setup.py files.
70
71 Args:
72 include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding
73 module will be included.
74 search_dir: the search directory for modules, by default the repo root.
75 Returns:
76 a list of `Module`s that were found, where each module `m` is initialized with `m.root`
77 relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the
78 keyword args passed to the `setuptools.setup` method in setup.py
79 """
80
81 relative_folders = sorted(
82 f.relative_to(search_dir)
83 for f in search_dir.glob("*")
84 if f.is_dir() and (f / "setup.py").is_file()
85 )
86 if include_parent:
87 parent_setup_py = search_dir / "setup.py"
88 assert parent_setup_py.exists(), (
89 f"include_parent=True, but {parent_setup_py} " f"does not exist."
90 )
91 relative_folders.append(Path("."))
92
93 result = [
94 Module(root=folder, raw_setup=_parse_module(search_dir / folder))
95 for folder in relative_folders
96 ]
97
98 return result
99
100
101 def _parse_module(folder: Path) -> Dict[str, Any]:
102 setup_args = {}
103 import setuptools
104
105 orig_setup = setuptools.setup
106 cwd = os.getcwd()
107
108 def setup(**kwargs):
109 setup_args.update(kwargs)
110
111 try:
112 setuptools.setup = setup
113 os.chdir(str(folder))
114 setup_py = open("setup.py").read()
115 exec(setup_py, globals(), {})
116 assert setup_args, f"Invalid setup.py - setup() was not called in {folder}/setup.py!"
117 return setup_args
118 except BaseException:
119 print(f"Failed to run {folder}/setup.py:")
120 raise
121 finally:
122 setuptools.setup = orig_setup
123 os.chdir(cwd)
124
125
126 def _print_list_modules(mode: str, include_parent: bool = False):
127 """Prints certain properties of cirq modules on separate lines.
128
129 Module root folder and top level package paths are supported. The search dir is the current
130 directory.
131
132 Args:
133 mode: 'folder' lists the root folder for each module, 'package-path' lists the path to
134 the top level package(s).
135 include_cirq: when true the cirq metapackage is included in the list
136 Returns:
137 a list of strings
138 """
139 for m in list_modules(Path("."), include_parent):
140 if mode == _FOLDER:
141 print(m.root, end=" ")
142 elif mode == _PACKAGE_PATH:
143 for p in m.top_level_package_paths:
144 print(p, end=" ")
145
146
147 def main(argv: List[str]):
148 args = parse(argv)
149 # args.func is where we store the function to be called for a given subparser
150 # e.g. it is list_modules for the `list` subcommand
151 f = args.func
152 # however the func is not going to be needed for the function itself, so
153 # we remove it here
154 del args.func
155 f(**vars(args))
156
157
158 def parse(args):
159 parser = argparse.ArgumentParser('A utility for modules.')
160 subparsers = parser.add_subparsers(
161 title='subcommands', description='valid subcommands', help='additional help'
162 )
163 _add_list_modules_cmd(subparsers)
164 return parser.parse_args(args)
165
166
167 def _add_list_modules_cmd(subparsers):
168 list_modules_cmd = subparsers.add_parser("list", help="lists all the modules")
169 list_modules_cmd.add_argument(
170 "--mode",
171 default=_FOLDER,
172 choices=[_FOLDER, _PACKAGE_PATH],
173 type=str,
174 help="'folder' to list root folder for module,\n"
175 "'package-path' for top level python package path",
176 )
177 list_modules_cmd.add_argument(
178 "--include-parent",
179 help="whether to include the parent package or not",
180 default=False,
181 action="store_true",
182 )
183 list_modules_cmd.set_defaults(func=_print_list_modules)
184
185
186 if __name__ == '__main__':
187 main(sys.argv[1:]) # coverage: ignore
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dev_tools/modules.py b/dev_tools/modules.py
--- a/dev_tools/modules.py
+++ b/dev_tools/modules.py
@@ -27,8 +27,10 @@
optional arguments:
-h, --help show this help message and exit
--mode {folder,package-path}
- 'folder' to list root folder for module, 'package-path' for top level
- python package path
+ 'folder' to list root folder for module (e.g. cirq-google),
+ 'package-path' for top level python package path
+ (e.g. cirq-google/cirq_google),
+ 'package' for top level python package (e.g cirq_google),
--include-parent whether to include the parent package or not
"""
@@ -41,6 +43,7 @@
_FOLDER = 'folder'
_PACKAGE_PATH = 'package-path'
+_PACKAGE = 'package'
@dataclasses.dataclass
@@ -142,6 +145,9 @@
elif mode == _PACKAGE_PATH:
for p in m.top_level_package_paths:
print(p, end=" ")
+ elif mode == _PACKAGE:
+ for package in m.top_level_packages:
+ print(package, end=" ")
def main(argv: List[str]):
@@ -169,10 +175,11 @@
list_modules_cmd.add_argument(
"--mode",
default=_FOLDER,
- choices=[_FOLDER, _PACKAGE_PATH],
+ choices=[_FOLDER, _PACKAGE_PATH, _PACKAGE],
type=str,
- help="'folder' to list root folder for module,\n"
- "'package-path' for top level python package path",
+ help="'folder' to list root folder for module (e.g. cirq-google),\n"
+ "'package-path' for top level python package path (e.g. cirq-google/cirq_google),\n"
+ "'package' for top level python package (e.g cirq_google),\n",
)
list_modules_cmd.add_argument(
"--include-parent",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,7 +62,7 @@
url='http://github.com/quantumlib/cirq',
author='The Cirq Developers',
author_email='[email protected]',
- python_requires=('>=3.6.0'),
+ python_requires='>=3.6.0',
install_requires=requirements,
extras_require={
'dev_env': dev_requirements,
|
{"golden_diff": "diff --git a/dev_tools/modules.py b/dev_tools/modules.py\n--- a/dev_tools/modules.py\n+++ b/dev_tools/modules.py\n@@ -27,8 +27,10 @@\n optional arguments:\n -h, --help show this help message and exit\n --mode {folder,package-path}\n- 'folder' to list root folder for module, 'package-path' for top level\n- python package path\n+ 'folder' to list root folder for module (e.g. cirq-google),\n+ 'package-path' for top level python package path\n+ (e.g. cirq-google/cirq_google),\n+ 'package' for top level python package (e.g cirq_google),\n --include-parent whether to include the parent package or not\n \"\"\"\n \n@@ -41,6 +43,7 @@\n \n _FOLDER = 'folder'\n _PACKAGE_PATH = 'package-path'\n+_PACKAGE = 'package'\n \n \n @dataclasses.dataclass\n@@ -142,6 +145,9 @@\n elif mode == _PACKAGE_PATH:\n for p in m.top_level_package_paths:\n print(p, end=\" \")\n+ elif mode == _PACKAGE:\n+ for package in m.top_level_packages:\n+ print(package, end=\" \")\n \n \n def main(argv: List[str]):\n@@ -169,10 +175,11 @@\n list_modules_cmd.add_argument(\n \"--mode\",\n default=_FOLDER,\n- choices=[_FOLDER, _PACKAGE_PATH],\n+ choices=[_FOLDER, _PACKAGE_PATH, _PACKAGE],\n type=str,\n- help=\"'folder' to list root folder for module,\\n\"\n- \"'package-path' for top level python package path\",\n+ help=\"'folder' to list root folder for module (e.g. cirq-google),\\n\"\n+ \"'package-path' for top level python package path (e.g. cirq-google/cirq_google),\\n\"\n+ \"'package' for top level python package (e.g cirq_google),\\n\",\n )\n list_modules_cmd.add_argument(\n \"--include-parent\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,7 +62,7 @@\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n- python_requires=('>=3.6.0'),\n+ python_requires='>=3.6.0',\n install_requires=requirements,\n extras_require={\n 'dev_env': dev_requirements,\n", "issue": "Push to PyPi failing\n```\r\nerror in cirq setup command: 'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.\r\n```\r\n\r\nSee https://github.com/quantumlib/Cirq/runs/2851981344\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\n\nfrom dev_tools import modules\nfrom dev_tools.requirements import explode\n\nexec(open('cirq-core/cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a 
pre-release version on PyPi. See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\n# This is a pure metapackage that installs all our packages\nrequirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]\n\ndev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')\ndev_requirements = [r.strip() for r in dev_requirements]\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires=('>=3.6.0'),\n install_requires=requirements,\n extras_require={\n 'dev_env': dev_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n)\n", "path": "setup.py"}, {"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Utility tool for cirq modules.\n\nIt can be used as a python library for python scripts as well as a CLI tool for\nbash scripts and interactive use.\n\nFeatures:\n\nlisting modules:\n - Python: see list_modules\n - CLI: python3 dev_tools/modules.py list\n\noptional arguments:\n -h, --help show this help message and exit\n --mode {folder,package-path}\n 'folder' to list root folder for module, 'package-path' for top level\n python package path\n --include-parent whether to include the parent package or not\n\"\"\"\n\nimport argparse\nimport dataclasses\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\n_FOLDER = 'folder'\n_PACKAGE_PATH = 'package-path'\n\n\[email protected]\nclass Module:\n root: Path\n raw_setup: Dict[str, Any]\n\n name: str = dataclasses.field(init=False)\n version: str = dataclasses.field(init=False)\n top_level_packages: List[str] = dataclasses.field(init=False)\n top_level_package_paths: List[Path] = dataclasses.field(init=False)\n\n def __post_init__(self) -> None:\n self.name = self.raw_setup['name']\n if 'packages' in self.raw_setup:\n self.top_level_packages = [p for p in self.raw_setup['packages'] if '.' 
not in p]\n else:\n self.top_level_packages = []\n self.top_level_package_paths = [self.root / p for p in self.top_level_packages]\n self.version = self.raw_setup['version']\n\n\ndef list_modules(\n search_dir: Path = Path(__file__).parents[1], include_parent: bool = False\n) -> List[Module]:\n \"\"\"Returns a list of python modules based defined by setup.py files.\n\n Args:\n include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding\n module will be included.\n search_dir: the search directory for modules, by default the repo root.\n Returns:\n a list of `Module`s that were found, where each module `m` is initialized with `m.root`\n relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the\n keyword args passed to the `setuptools.setup` method in setup.py\n \"\"\"\n\n relative_folders = sorted(\n f.relative_to(search_dir)\n for f in search_dir.glob(\"*\")\n if f.is_dir() and (f / \"setup.py\").is_file()\n )\n if include_parent:\n parent_setup_py = search_dir / \"setup.py\"\n assert parent_setup_py.exists(), (\n f\"include_parent=True, but {parent_setup_py} \" f\"does not exist.\"\n )\n relative_folders.append(Path(\".\"))\n\n result = [\n Module(root=folder, raw_setup=_parse_module(search_dir / folder))\n for folder in relative_folders\n ]\n\n return result\n\n\ndef _parse_module(folder: Path) -> Dict[str, Any]:\n setup_args = {}\n import setuptools\n\n orig_setup = setuptools.setup\n cwd = os.getcwd()\n\n def setup(**kwargs):\n setup_args.update(kwargs)\n\n try:\n setuptools.setup = setup\n os.chdir(str(folder))\n setup_py = open(\"setup.py\").read()\n exec(setup_py, globals(), {})\n assert setup_args, f\"Invalid setup.py - setup() was not called in {folder}/setup.py!\"\n return setup_args\n except BaseException:\n print(f\"Failed to run {folder}/setup.py:\")\n raise\n finally:\n setuptools.setup = orig_setup\n os.chdir(cwd)\n\n\ndef _print_list_modules(mode: str, include_parent: bool = False):\n \"\"\"Prints certain properties of cirq modules on separate lines.\n\n Module root folder and top level package paths are supported. The search dir is the current\n directory.\n\n Args:\n mode: 'folder' lists the root folder for each module, 'package-path' lists the path to\n the top level package(s).\n include_cirq: when true the cirq metapackage is included in the list\n Returns:\n a list of strings\n \"\"\"\n for m in list_modules(Path(\".\"), include_parent):\n if mode == _FOLDER:\n print(m.root, end=\" \")\n elif mode == _PACKAGE_PATH:\n for p in m.top_level_package_paths:\n print(p, end=\" \")\n\n\ndef main(argv: List[str]):\n args = parse(argv)\n # args.func is where we store the function to be called for a given subparser\n # e.g. 
it is list_modules for the `list` subcommand\n f = args.func\n # however the func is not going to be needed for the function itself, so\n # we remove it here\n del args.func\n f(**vars(args))\n\n\ndef parse(args):\n parser = argparse.ArgumentParser('A utility for modules.')\n subparsers = parser.add_subparsers(\n title='subcommands', description='valid subcommands', help='additional help'\n )\n _add_list_modules_cmd(subparsers)\n return parser.parse_args(args)\n\n\ndef _add_list_modules_cmd(subparsers):\n list_modules_cmd = subparsers.add_parser(\"list\", help=\"lists all the modules\")\n list_modules_cmd.add_argument(\n \"--mode\",\n default=_FOLDER,\n choices=[_FOLDER, _PACKAGE_PATH],\n type=str,\n help=\"'folder' to list root folder for module,\\n\"\n \"'package-path' for top level python package path\",\n )\n list_modules_cmd.add_argument(\n \"--include-parent\",\n help=\"whether to include the parent package or not\",\n default=False,\n action=\"store_true\",\n )\n list_modules_cmd.set_defaults(func=_print_list_modules)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:]) # coverage: ignore\n", "path": "dev_tools/modules.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\nfrom setuptools import setup\n\n# This reads the __version__ variable from cirq/_version.py\n__version__ = ''\n\nfrom dev_tools import modules\nfrom dev_tools.requirements import explode\n\nexec(open('cirq-core/cirq/_version.py').read())\n\nname = 'cirq'\n\ndescription = (\n 'A framework for creating, editing, and invoking '\n 'Noisy Intermediate Scale Quantum (NISQ) circuits.'\n)\n\n# README file as long_description.\nlong_description = io.open('README.rst', encoding='utf-8').read()\n\n# If CIRQ_PRE_RELEASE_VERSION is set then we update the version to this value.\n# It is assumed that it ends with one of `.devN`, `.aN`, `.bN`, `.rcN` and hence\n# it will be a pre-release version on PyPi. 
See\n# https://packaging.python.org/guides/distributing-packages-using-setuptools/#pre-release-versioning\n# for more details.\nif 'CIRQ_PRE_RELEASE_VERSION' in os.environ:\n __version__ = os.environ['CIRQ_PRE_RELEASE_VERSION']\n long_description = (\n \"**This is a development version of Cirq and may be \"\n \"unstable.**\\n\\n**For the latest stable release of Cirq \"\n \"see**\\n`here <https://pypi.org/project/cirq>`__.\\n\\n\" + long_description\n )\n\n# Sanity check\nassert __version__, 'Version string cannot be empty'\n\n# This is a pure metapackage that installs all our packages\nrequirements = [f'{p.name}=={p.version}' for p in modules.list_modules()]\n\ndev_requirements = explode('dev_tools/requirements/deps/dev-tools.txt')\ndev_requirements = [r.strip() for r in dev_requirements]\n\nsetup(\n name=name,\n version=__version__,\n url='http://github.com/quantumlib/cirq',\n author='The Cirq Developers',\n author_email='[email protected]',\n python_requires='>=3.6.0',\n install_requires=requirements,\n extras_require={\n 'dev_env': dev_requirements,\n },\n license='Apache 2',\n description=description,\n long_description=long_description,\n)\n", "path": "setup.py"}, {"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Utility tool for cirq modules.\n\nIt can be used as a python library for python scripts as well as a CLI tool for\nbash scripts and interactive use.\n\nFeatures:\n\nlisting modules:\n - Python: see list_modules\n - CLI: python3 dev_tools/modules.py list\n\noptional arguments:\n -h, --help show this help message and exit\n --mode {folder,package-path}\n 'folder' to list root folder for module (e.g. cirq-google),\n 'package-path' for top level python package path\n (e.g. cirq-google/cirq_google),\n 'package' for top level python package (e.g cirq_google),\n --include-parent whether to include the parent package or not\n\"\"\"\n\nimport argparse\nimport dataclasses\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import List, Dict, Any\n\n_FOLDER = 'folder'\n_PACKAGE_PATH = 'package-path'\n_PACKAGE = 'package'\n\n\[email protected]\nclass Module:\n root: Path\n raw_setup: Dict[str, Any]\n\n name: str = dataclasses.field(init=False)\n version: str = dataclasses.field(init=False)\n top_level_packages: List[str] = dataclasses.field(init=False)\n top_level_package_paths: List[Path] = dataclasses.field(init=False)\n\n def __post_init__(self) -> None:\n self.name = self.raw_setup['name']\n if 'packages' in self.raw_setup:\n self.top_level_packages = [p for p in self.raw_setup['packages'] if '.' 
not in p]\n else:\n self.top_level_packages = []\n self.top_level_package_paths = [self.root / p for p in self.top_level_packages]\n self.version = self.raw_setup['version']\n\n\ndef list_modules(\n search_dir: Path = Path(__file__).parents[1], include_parent: bool = False\n) -> List[Module]:\n \"\"\"Returns a list of python modules based defined by setup.py files.\n\n Args:\n include_parent: if true, a setup.py is expected in `search_dir`, and the corresponding\n module will be included.\n search_dir: the search directory for modules, by default the repo root.\n Returns:\n a list of `Module`s that were found, where each module `m` is initialized with `m.root`\n relative to `search_dir`, `m.raw_setup` contains the dictionary equivalent to the\n keyword args passed to the `setuptools.setup` method in setup.py\n \"\"\"\n\n relative_folders = sorted(\n f.relative_to(search_dir)\n for f in search_dir.glob(\"*\")\n if f.is_dir() and (f / \"setup.py\").is_file()\n )\n if include_parent:\n parent_setup_py = search_dir / \"setup.py\"\n assert parent_setup_py.exists(), (\n f\"include_parent=True, but {parent_setup_py} \" f\"does not exist.\"\n )\n relative_folders.append(Path(\".\"))\n\n result = [\n Module(root=folder, raw_setup=_parse_module(search_dir / folder))\n for folder in relative_folders\n ]\n\n return result\n\n\ndef _parse_module(folder: Path) -> Dict[str, Any]:\n setup_args = {}\n import setuptools\n\n orig_setup = setuptools.setup\n cwd = os.getcwd()\n\n def setup(**kwargs):\n setup_args.update(kwargs)\n\n try:\n setuptools.setup = setup\n os.chdir(str(folder))\n setup_py = open(\"setup.py\").read()\n exec(setup_py, globals(), {})\n assert setup_args, f\"Invalid setup.py - setup() was not called in {folder}/setup.py!\"\n return setup_args\n except BaseException:\n print(f\"Failed to run {folder}/setup.py:\")\n raise\n finally:\n setuptools.setup = orig_setup\n os.chdir(cwd)\n\n\ndef _print_list_modules(mode: str, include_parent: bool = False):\n \"\"\"Prints certain properties of cirq modules on separate lines.\n\n Module root folder and top level package paths are supported. The search dir is the current\n directory.\n\n Args:\n mode: 'folder' lists the root folder for each module, 'package-path' lists the path to\n the top level package(s).\n include_cirq: when true the cirq metapackage is included in the list\n Returns:\n a list of strings\n \"\"\"\n for m in list_modules(Path(\".\"), include_parent):\n if mode == _FOLDER:\n print(m.root, end=\" \")\n elif mode == _PACKAGE_PATH:\n for p in m.top_level_package_paths:\n print(p, end=\" \")\n elif mode == _PACKAGE:\n for package in m.top_level_packages:\n print(package, end=\" \")\n\n\ndef main(argv: List[str]):\n args = parse(argv)\n # args.func is where we store the function to be called for a given subparser\n # e.g. 
it is list_modules for the `list` subcommand\n f = args.func\n # however the func is not going to be needed for the function itself, so\n # we remove it here\n del args.func\n f(**vars(args))\n\n\ndef parse(args):\n parser = argparse.ArgumentParser('A utility for modules.')\n subparsers = parser.add_subparsers(\n title='subcommands', description='valid subcommands', help='additional help'\n )\n _add_list_modules_cmd(subparsers)\n return parser.parse_args(args)\n\n\ndef _add_list_modules_cmd(subparsers):\n list_modules_cmd = subparsers.add_parser(\"list\", help=\"lists all the modules\")\n list_modules_cmd.add_argument(\n \"--mode\",\n default=_FOLDER,\n choices=[_FOLDER, _PACKAGE_PATH, _PACKAGE],\n type=str,\n help=\"'folder' to list root folder for module (e.g. cirq-google),\\n\"\n \"'package-path' for top level python package path (e.g. cirq-google/cirq_google),\\n\"\n \"'package' for top level python package (e.g cirq_google),\\n\",\n )\n list_modules_cmd.add_argument(\n \"--include-parent\",\n help=\"whether to include the parent package or not\",\n default=False,\n action=\"store_true\",\n )\n list_modules_cmd.set_defaults(func=_print_list_modules)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:]) # coverage: ignore\n", "path": "dev_tools/modules.py"}]}
| 2,962 | 569 |
gh_patches_debug_43122
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-1627
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False alarm from new W4002
*cfn-lint version: 0.34.0*
[Here](https://gist.github.com/schmiddy/44a779032a930995d22ee2722a18f163) is an example template which causes a false alarm like this:
```
$ cfn-lint /tmp/example.yml
W4002 As the resource "metadata" section contains reference to a "NoEcho" parameter DBUser, CloudFormation will display the parameter value in plaintext
/tmp/example.yml:21:7
W4002 As the resource "metadata" section contains reference to a "NoEcho" parameter DBPass, CloudFormation will display the parameter value in plaintext
/tmp/example.yml:21:7
```
The problem seems to be that the rule is looking for any mention of the parameter name, even as a text description that is not actually referencing the parameter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/NoEcho.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.helpers import bool_compare
6 from cfnlint.rules import CloudFormationLintRule
7 from cfnlint.rules import RuleMatch
8
9
10 class NoEcho(CloudFormationLintRule):
11 id = 'W4002'
12 shortdesc = 'Check for NoEcho References'
13 description = 'Check if there is a NoEcho enabled parameter referenced within a resources Metadata section'
14 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#parameters-section-structure-properties'
15 tags = ['resources', 'NoEcho']
16
17 def match(self, cfn):
18 matches = []
19 no_echo_params = []
20 parameters = cfn.get_parameters()
21 for parameter_name, parameter_value in parameters.items():
22 noecho = parameter_value.get('NoEcho', default=False)
23 if bool_compare(noecho, True):
24 no_echo_params.append(parameter_name)
25
26 if not no_echo_params:
27 return no_echo_params
28
29 resource_properties = cfn.get_resources()
30 resource_dict = {key: resource_properties[key] for key in resource_properties if
31 isinstance(resource_properties[key], dict)}
32 for resource_name, resource_values in resource_dict.items():
33 resource_values = {key: resource_values[key] for key in resource_values if
34 isinstance(resource_values[key], dict)}
35 metadata = resource_values.get('Metadata', {})
36 if metadata is not None:
37 for prop_name, properties in metadata.items():
38 if isinstance(properties, dict):
39 for property_value in properties.values():
40 for param in no_echo_params and no_echo_params:
41 if str(property_value).find(str(param)) > -1:
42 path = ['Resources', resource_name, 'Metadata', prop_name]
43 matches.append(RuleMatch(path, 'As the resource "metadata" section contains '
44 'reference to a "NoEcho" parameter ' + str(param)
45 + ', CloudFormation will display the parameter value in '
46 'plaintext'))
47 return matches
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cfnlint/rules/resources/NoEcho.py b/src/cfnlint/rules/resources/NoEcho.py
--- a/src/cfnlint/rules/resources/NoEcho.py
+++ b/src/cfnlint/rules/resources/NoEcho.py
@@ -2,6 +2,7 @@
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
+import six
from cfnlint.helpers import bool_compare
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
@@ -14,34 +15,58 @@
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#parameters-section-structure-properties'
tags = ['resources', 'NoEcho']
- def match(self, cfn):
- matches = []
+ def _get_no_echo_params(self, cfn):
+ """ Get no Echo Params"""
no_echo_params = []
- parameters = cfn.get_parameters()
- for parameter_name, parameter_value in parameters.items():
+ for parameter_name, parameter_value in cfn.get_parameters().items():
noecho = parameter_value.get('NoEcho', default=False)
if bool_compare(noecho, True):
no_echo_params.append(parameter_name)
+ return no_echo_params
+
+ def _check_ref(self, cfn, no_echo_params):
+ """ Check Refs """
+ matches = []
+ refs = cfn.search_deep_keys('Ref')
+ for ref in refs:
+ if ref[-1] in no_echo_params:
+ if len(ref) > 3:
+ if ref[0] == 'Resources' and ref[2] == 'Metadata':
+ matches.append(RuleMatch(ref, 'As the resource "metadata" section contains ' +
+ 'reference to a "NoEcho" parameter ' +
+ str(ref[-1]) +
+ ', CloudFormation will display the parameter value in ' +
+ 'plaintext'))
+
+ return matches
+
+ def _check_sub(self, cfn, no_echo_params):
+ """ Check Subs """
+ matches = []
+ subs = cfn.search_deep_keys('Fn::Sub')
+ for sub in subs:
+ if isinstance(sub[-1], six.string_types):
+ params = cfn.get_sub_parameters(sub[-1])
+ for param in params:
+ if param in no_echo_params:
+ if len(sub) > 2:
+ if sub[0] == 'Resources' and sub[2] == 'Metadata':
+
+ matches.append(RuleMatch(sub[:-1], 'As the resource "metadata" section contains ' +
+ 'reference to a "NoEcho" parameter ' +
+ str(param) +
+ ', CloudFormation will display the parameter value in ' +
+ 'plaintext'))
+
+ return matches
+
+ def match(self, cfn):
+ matches = []
+ no_echo_params = self._get_no_echo_params(cfn)
if not no_echo_params:
- return no_echo_params
-
- resource_properties = cfn.get_resources()
- resource_dict = {key: resource_properties[key] for key in resource_properties if
- isinstance(resource_properties[key], dict)}
- for resource_name, resource_values in resource_dict.items():
- resource_values = {key: resource_values[key] for key in resource_values if
- isinstance(resource_values[key], dict)}
- metadata = resource_values.get('Metadata', {})
- if metadata is not None:
- for prop_name, properties in metadata.items():
- if isinstance(properties, dict):
- for property_value in properties.values():
- for param in no_echo_params and no_echo_params:
- if str(property_value).find(str(param)) > -1:
- path = ['Resources', resource_name, 'Metadata', prop_name]
- matches.append(RuleMatch(path, 'As the resource "metadata" section contains '
- 'reference to a "NoEcho" parameter ' + str(param)
- + ', CloudFormation will display the parameter value in '
- 'plaintext'))
+ return matches
+ matches.extend(self._check_ref(cfn, no_echo_params))
+ matches.extend(self._check_sub(cfn, no_echo_params))
+
return matches
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/NoEcho.py b/src/cfnlint/rules/resources/NoEcho.py\n--- a/src/cfnlint/rules/resources/NoEcho.py\n+++ b/src/cfnlint/rules/resources/NoEcho.py\n@@ -2,6 +2,7 @@\n Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n SPDX-License-Identifier: MIT-0\n \"\"\"\n+import six\n from cfnlint.helpers import bool_compare\n from cfnlint.rules import CloudFormationLintRule\n from cfnlint.rules import RuleMatch\n@@ -14,34 +15,58 @@\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#parameters-section-structure-properties'\n tags = ['resources', 'NoEcho']\n \n- def match(self, cfn):\n- matches = []\n+ def _get_no_echo_params(self, cfn):\n+ \"\"\" Get no Echo Params\"\"\"\n no_echo_params = []\n- parameters = cfn.get_parameters()\n- for parameter_name, parameter_value in parameters.items():\n+ for parameter_name, parameter_value in cfn.get_parameters().items():\n noecho = parameter_value.get('NoEcho', default=False)\n if bool_compare(noecho, True):\n no_echo_params.append(parameter_name)\n \n+ return no_echo_params\n+\n+ def _check_ref(self, cfn, no_echo_params):\n+ \"\"\" Check Refs \"\"\"\n+ matches = []\n+ refs = cfn.search_deep_keys('Ref')\n+ for ref in refs:\n+ if ref[-1] in no_echo_params:\n+ if len(ref) > 3:\n+ if ref[0] == 'Resources' and ref[2] == 'Metadata':\n+ matches.append(RuleMatch(ref, 'As the resource \"metadata\" section contains ' +\n+ 'reference to a \"NoEcho\" parameter ' +\n+ str(ref[-1]) +\n+ ', CloudFormation will display the parameter value in ' +\n+ 'plaintext'))\n+\n+ return matches\n+\n+ def _check_sub(self, cfn, no_echo_params):\n+ \"\"\" Check Subs \"\"\"\n+ matches = []\n+ subs = cfn.search_deep_keys('Fn::Sub')\n+ for sub in subs:\n+ if isinstance(sub[-1], six.string_types):\n+ params = cfn.get_sub_parameters(sub[-1])\n+ for param in params:\n+ if param in no_echo_params:\n+ if len(sub) > 2:\n+ if sub[0] == 'Resources' and sub[2] == 'Metadata':\n+\n+ matches.append(RuleMatch(sub[:-1], 'As the resource \"metadata\" section contains ' +\n+ 'reference to a \"NoEcho\" parameter ' +\n+ str(param) +\n+ ', CloudFormation will display the parameter value in ' +\n+ 'plaintext'))\n+\n+ return matches\n+\n+ def match(self, cfn):\n+ matches = []\n+ no_echo_params = self._get_no_echo_params(cfn)\n if not no_echo_params:\n- return no_echo_params\n-\n- resource_properties = cfn.get_resources()\n- resource_dict = {key: resource_properties[key] for key in resource_properties if\n- isinstance(resource_properties[key], dict)}\n- for resource_name, resource_values in resource_dict.items():\n- resource_values = {key: resource_values[key] for key in resource_values if\n- isinstance(resource_values[key], dict)}\n- metadata = resource_values.get('Metadata', {})\n- if metadata is not None:\n- for prop_name, properties in metadata.items():\n- if isinstance(properties, dict):\n- for property_value in properties.values():\n- for param in no_echo_params and no_echo_params:\n- if str(property_value).find(str(param)) > -1:\n- path = ['Resources', resource_name, 'Metadata', prop_name]\n- matches.append(RuleMatch(path, 'As the resource \"metadata\" section contains '\n- 'reference to a \"NoEcho\" parameter ' + str(param)\n- + ', CloudFormation will display the parameter value in '\n- 'plaintext'))\n+ return matches\n+ matches.extend(self._check_ref(cfn, no_echo_params))\n+ matches.extend(self._check_sub(cfn, no_echo_params))\n+\n return matches\n", "issue": "False alarm from new 
W4002\n*cfn-lint version: 0.34.0*\r\n\r\n[Here](https://gist.github.com/schmiddy/44a779032a930995d22ee2722a18f163) is an example template which causes a false alarm like this:\r\n\r\n```\r\n$ cfn-lint /tmp/example.yml \r\nW4002 As the resource \"metadata\" section contains reference to a \"NoEcho\" parameter DBUser, CloudFormation will display the parameter value in plaintext\r\n/tmp/example.yml:21:7\r\n\r\nW4002 As the resource \"metadata\" section contains reference to a \"NoEcho\" parameter DBPass, CloudFormation will display the parameter value in plaintext\r\n/tmp/example.yml:21:7\r\n```\r\n\r\nThe problem seems to be that the rule is looking for any mention of the parameter name, even as a text description that is not actually referencing the parameter.\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.helpers import bool_compare\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass NoEcho(CloudFormationLintRule):\n id = 'W4002'\n shortdesc = 'Check for NoEcho References'\n description = 'Check if there is a NoEcho enabled parameter referenced within a resources Metadata section'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#parameters-section-structure-properties'\n tags = ['resources', 'NoEcho']\n\n def match(self, cfn):\n matches = []\n no_echo_params = []\n parameters = cfn.get_parameters()\n for parameter_name, parameter_value in parameters.items():\n noecho = parameter_value.get('NoEcho', default=False)\n if bool_compare(noecho, True):\n no_echo_params.append(parameter_name)\n\n if not no_echo_params:\n return no_echo_params\n\n resource_properties = cfn.get_resources()\n resource_dict = {key: resource_properties[key] for key in resource_properties if\n isinstance(resource_properties[key], dict)}\n for resource_name, resource_values in resource_dict.items():\n resource_values = {key: resource_values[key] for key in resource_values if\n isinstance(resource_values[key], dict)}\n metadata = resource_values.get('Metadata', {})\n if metadata is not None:\n for prop_name, properties in metadata.items():\n if isinstance(properties, dict):\n for property_value in properties.values():\n for param in no_echo_params and no_echo_params:\n if str(property_value).find(str(param)) > -1:\n path = ['Resources', resource_name, 'Metadata', prop_name]\n matches.append(RuleMatch(path, 'As the resource \"metadata\" section contains '\n 'reference to a \"NoEcho\" parameter ' + str(param)\n + ', CloudFormation will display the parameter value in '\n 'plaintext'))\n return matches\n", "path": "src/cfnlint/rules/resources/NoEcho.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.helpers import bool_compare\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass NoEcho(CloudFormationLintRule):\n id = 'W4002'\n shortdesc = 'Check for NoEcho References'\n description = 'Check if there is a NoEcho enabled parameter referenced within a resources Metadata section'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html#parameters-section-structure-properties'\n tags = ['resources', 'NoEcho']\n\n def _get_no_echo_params(self, cfn):\n \"\"\" Get no Echo Params\"\"\"\n no_echo_params = []\n for parameter_name, parameter_value in cfn.get_parameters().items():\n noecho = parameter_value.get('NoEcho', default=False)\n if bool_compare(noecho, True):\n no_echo_params.append(parameter_name)\n\n return no_echo_params\n\n def _check_ref(self, cfn, no_echo_params):\n \"\"\" Check Refs \"\"\"\n matches = []\n refs = cfn.search_deep_keys('Ref')\n for ref in refs:\n if ref[-1] in no_echo_params:\n if len(ref) > 3:\n if ref[0] == 'Resources' and ref[2] == 'Metadata':\n matches.append(RuleMatch(ref, 'As the resource \"metadata\" section contains ' +\n 'reference to a \"NoEcho\" parameter ' +\n str(ref[-1]) +\n ', CloudFormation will display the parameter value in ' +\n 'plaintext'))\n\n return matches\n\n def _check_sub(self, cfn, no_echo_params):\n \"\"\" Check Subs \"\"\"\n matches = []\n subs = cfn.search_deep_keys('Fn::Sub')\n for sub in subs:\n if isinstance(sub[-1], six.string_types):\n params = cfn.get_sub_parameters(sub[-1])\n for param in params:\n if param in no_echo_params:\n if len(sub) > 2:\n if sub[0] == 'Resources' and sub[2] == 'Metadata':\n\n matches.append(RuleMatch(sub[:-1], 'As the resource \"metadata\" section contains ' +\n 'reference to a \"NoEcho\" parameter ' +\n str(param) +\n ', CloudFormation will display the parameter value in ' +\n 'plaintext'))\n\n return matches\n\n def match(self, cfn):\n matches = []\n no_echo_params = self._get_no_echo_params(cfn)\n if not no_echo_params:\n return matches\n matches.extend(self._check_ref(cfn, no_echo_params))\n matches.extend(self._check_sub(cfn, no_echo_params))\n\n return matches\n", "path": "src/cfnlint/rules/resources/NoEcho.py"}]}
| 1,005 | 939 |
gh_patches_debug_39857
|
rasdani/github-patches
|
git_diff
|
flairNLP__flair-2134
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release Flair 0.8
Time finally for another release! This issue tracks the progress of releasing 0.8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/datasets/__init__.py`
Content:
```
1 # Expose base classses
2 from .base import DataLoader
3 from .base import SentenceDataset
4 from .base import StringDataset
5 from .base import MongoDataset
6
7 # Expose all sequence labeling datasets
8 from .sequence_labeling import ColumnCorpus
9 from .sequence_labeling import ColumnDataset
10 from .sequence_labeling import ANER_CORP
11 from .sequence_labeling import BIOFID
12 from .sequence_labeling import BIOSCOPE
13 from .sequence_labeling import CONLL_03
14 from .sequence_labeling import CONLL_03_GERMAN
15 from .sequence_labeling import CONLL_03_DUTCH
16 from .sequence_labeling import CONLL_03_SPANISH
17 from .sequence_labeling import CONLL_2000
18 from .sequence_labeling import DANE
19 from .sequence_labeling import EUROPARL_NER_GERMAN
20 from .sequence_labeling import GERMEVAL_14
21 from .sequence_labeling import INSPEC
22 from .sequence_labeling import LER_GERMAN
23 from .sequence_labeling import MIT_MOVIE_NER_SIMPLE
24 from .sequence_labeling import MIT_MOVIE_NER_COMPLEX
25 from .sequence_labeling import MIT_RESTAURANT_NER
26 from .sequence_labeling import NER_BASQUE
27 from .sequence_labeling import NER_FINNISH
28 from .sequence_labeling import NER_SWEDISH
29 from .sequence_labeling import STACKOVERFLOW_NER
30 from .sequence_labeling import SEMEVAL2010
31 from .sequence_labeling import SEMEVAL2017
32 from .sequence_labeling import TURKU_NER
33 from .sequence_labeling import TWITTER_NER
34 from .sequence_labeling import UP_CHINESE
35 from .sequence_labeling import UP_ENGLISH
36 from .sequence_labeling import UP_FINNISH
37 from .sequence_labeling import UP_FRENCH
38 from .sequence_labeling import UP_GERMAN
39 from .sequence_labeling import UP_ITALIAN
40 from .sequence_labeling import UP_SPANISH
41 from .sequence_labeling import UP_SPANISH_ANCORA
42 from .sequence_labeling import WEIBO_NER
43 from .sequence_labeling import WIKIANN
44 from .sequence_labeling import WIKIGOLD_NER
45 from .sequence_labeling import WIKINER_ENGLISH
46 from .sequence_labeling import WIKINER_GERMAN
47 from .sequence_labeling import WIKINER_DUTCH
48 from .sequence_labeling import WIKINER_FRENCH
49 from .sequence_labeling import WIKINER_ITALIAN
50 from .sequence_labeling import WIKINER_SPANISH
51 from .sequence_labeling import WIKINER_PORTUGUESE
52 from .sequence_labeling import WIKINER_POLISH
53 from .sequence_labeling import WIKINER_RUSSIAN
54 from .sequence_labeling import WNUT_17
55 from .sequence_labeling import WSD_UFSAC
56 from .sequence_labeling import WNUT_2020_NER
57 from .sequence_labeling import XTREME
58 from .sequence_labeling import BUSINESS_HUN
59
60 # Expose all document classification datasets
61 from .document_classification import ClassificationCorpus
62 from .document_classification import ClassificationDataset
63 from .document_classification import CSVClassificationCorpus
64 from .document_classification import CSVClassificationDataset
65 from .document_classification import AMAZON_REVIEWS
66 from .document_classification import IMDB
67 from .document_classification import NEWSGROUPS
68 from .document_classification import SENTIMENT_140
69 from .document_classification import SENTEVAL_CR
70 from .document_classification import SENTEVAL_MR
71 from .document_classification import SENTEVAL_MPQA
72 from .document_classification import SENTEVAL_SUBJ
73 from .document_classification import SENTEVAL_SST_BINARY
74 from .document_classification import SENTEVAL_SST_GRANULAR
75 from .document_classification import TREC_50
76 from .document_classification import TREC_6
77 from .document_classification import COMMUNICATIVE_FUNCTIONS
78 from .document_classification import WASSA_ANGER
79 from .document_classification import WASSA_FEAR
80 from .document_classification import WASSA_JOY
81 from .document_classification import WASSA_SADNESS
82 from .document_classification import GO_EMOTIONS
83 from .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE
84
85 # Expose all treebanks
86 from .treebanks import UniversalDependenciesCorpus
87 from .treebanks import UniversalDependenciesDataset
88 from .treebanks import UD_ENGLISH
89 from .treebanks import UD_GERMAN
90 from .treebanks import UD_GERMAN_HDT
91 from .treebanks import UD_DUTCH
92 from .treebanks import UD_FRENCH
93 from .treebanks import UD_ITALIAN
94 from .treebanks import UD_SPANISH
95 from .treebanks import UD_PORTUGUESE
96 from .treebanks import UD_ROMANIAN
97 from .treebanks import UD_CATALAN
98 from .treebanks import UD_POLISH
99 from .treebanks import UD_CZECH
100 from .treebanks import UD_SLOVAK
101 from .treebanks import UD_SWEDISH
102 from .treebanks import UD_DANISH
103 from .treebanks import UD_NORWEGIAN
104 from .treebanks import UD_FINNISH
105 from .treebanks import UD_SLOVENIAN
106 from .treebanks import UD_CROATIAN
107 from .treebanks import UD_SERBIAN
108 from .treebanks import UD_BULGARIAN
109 from .treebanks import UD_ARABIC
110 from .treebanks import UD_HEBREW
111 from .treebanks import UD_TURKISH
112 from .treebanks import UD_PERSIAN
113 from .treebanks import UD_RUSSIAN
114 from .treebanks import UD_HINDI
115 from .treebanks import UD_INDONESIAN
116 from .treebanks import UD_JAPANESE
117 from .treebanks import UD_CHINESE
118 from .treebanks import UD_KOREAN
119 from .treebanks import UD_BASQUE
120 from .treebanks import UD_GREEK
121 from .treebanks import UD_LIVVI
122 from .treebanks import UD_NORTH_SAMI
123 from .treebanks import UD_MARATHI
124 from .treebanks import UD_MALTESE
125 from .treebanks import UD_AFRIKAANS
126 from .treebanks import UD_OLD_FRENCH
127 from .treebanks import UD_GOTHIC
128 from .treebanks import UD_WOLOF
129
130 # Expose all text-text datasets
131 from .text_text import ParallelTextCorpus
132 from .text_text import ParallelTextDataset
133 from .text_text import OpusParallelCorpus
134 from .text_text import DataPairDataset
135 from .text_text import DataPairCorpus
136 from .text_text import GLUE_RTE
137 from .text_text import SUPERGLUE_RTE
138
139 # Expose all text-image datasets
140 from .text_image import FeideggerCorpus
141 from .text_image import FeideggerDataset
142
143 # Expose all biomedical data sets
144 from .biomedical import ANAT_EM
145 from .biomedical import AZDZ
146 from .biomedical import BIONLP2013_PC
147 from .biomedical import BIONLP2013_CG
148 from .biomedical import BIO_INFER
149 from .biomedical import BIOSEMANTICS
150 from .biomedical import BC2GM
151 from .biomedical import CELL_FINDER
152 from .biomedical import CEMP
153 from .biomedical import CDR
154 from .biomedical import CHEMDNER
155 from .biomedical import CRAFT
156 from .biomedical import CRAFT_V4
157 from .biomedical import CLL
158 from .biomedical import DECA
159 from .biomedical import FSU
160 from .biomedical import GELLUS
161 from .biomedical import GPRO
162 from .biomedical import IEPA
163 from .biomedical import JNLPBA
164 from .biomedical import LOCTEXT
165 from .biomedical import LINNEAUS
166 from .biomedical import NCBI_DISEASE
167 from .biomedical import MIRNA
168 from .biomedical import OSIRIS
169 from .biomedical import PDR
170 from .biomedical import S800
171 from .biomedical import SCAI_CHEMICALS
172 from .biomedical import SCAI_DISEASE
173 from .biomedical import VARIOME
174
175 # Expose all biomedical data sets using the HUNER splits
176 from .biomedical import HUNER_CHEMICAL
177 from .biomedical import HUNER_CHEMICAL_CHEBI
178 from .biomedical import HUNER_CHEMICAL_CHEMDNER
179 from .biomedical import HUNER_CHEMICAL_CDR
180 from .biomedical import HUNER_CHEMICAL_CEMP
181 from .biomedical import HUNER_CHEMICAL_SCAI
182 from .biomedical import HUNER_CHEMICAL_CRAFT_V4
183 # -
184 from .biomedical import HUNER_CELL_LINE
185 from .biomedical import HUNER_CELL_LINE_CLL
186 from .biomedical import HUNER_CELL_LINE_CELL_FINDER
187 from .biomedical import HUNER_CELL_LINE_GELLUS
188 from .biomedical import HUNER_CELL_LINE_JNLPBA
189 # -
190 from .biomedical import HUNER_DISEASE
191 from .biomedical import HUNER_DISEASE_CDR
192 from .biomedical import HUNER_DISEASE_MIRNA
193 from .biomedical import HUNER_DISEASE_NCBI
194 from .biomedical import HUNER_DISEASE_SCAI
195 from .biomedical import HUNER_DISEASE_VARIOME
196 from .biomedical import HUNER_DISEASE_PDR
197 # -
198 from .biomedical import HUNER_GENE
199 from .biomedical import HUNER_GENE_BIO_INFER
200 from .biomedical import HUNER_GENE_BC2GM
201 from .biomedical import HUNER_GENE_CHEBI
202 from .biomedical import HUNER_GENE_CRAFT_V4
203 from .biomedical import HUNER_GENE_CELL_FINDER
204 from .biomedical import HUNER_GENE_DECA
205 from .biomedical import HUNER_GENE_FSU
206 from .biomedical import HUNER_GENE_GPRO
207 from .biomedical import HUNER_GENE_IEPA
208 from .biomedical import HUNER_GENE_JNLPBA
209 from .biomedical import HUNER_GENE_LOCTEXT
210 from .biomedical import HUNER_GENE_MIRNA
211 from .biomedical import HUNER_GENE_OSIRIS
212 from .biomedical import HUNER_GENE_VARIOME
213 # -
214 from .biomedical import HUNER_SPECIES
215 from .biomedical import HUNER_SPECIES_CELL_FINDER
216 from .biomedical import HUNER_SPECIES_CHEBI
217 from .biomedical import HUNER_SPECIES_CRAFT_V4
218 from .biomedical import HUNER_SPECIES_LOCTEXT
219 from .biomedical import HUNER_SPECIES_LINNEAUS
220 from .biomedical import HUNER_SPECIES_MIRNA
221 from .biomedical import HUNER_SPECIES_S800
222 from .biomedical import HUNER_SPECIES_VARIOME
223
224 # Expose all biomedical data sets used for the evaluation of BioBERT
225 from .biomedical import BIOBERT_CHEMICAL_BC4CHEMD
226 from .biomedical import BIOBERT_CHEMICAL_BC5CDR
227 from .biomedical import BIOBERT_DISEASE_NCBI
228 from .biomedical import BIOBERT_DISEASE_BC5CDR
229 from .biomedical import BIOBERT_SPECIES_LINNAEUS
230 from .biomedical import BIOBERT_SPECIES_S800
231 from .biomedical import BIOBERT_GENE_BC2GM
232 from .biomedical import BIOBERT_GENE_JNLPBA
233
```
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open("requirements.txt") as f:
4 required = f.read().splitlines()
5
6 setup(
7 name="flair",
8 version="0.7",
9 description="A very simple framework for state-of-the-art NLP",
10 long_description=open("README.md", encoding="utf-8").read(),
11 long_description_content_type="text/markdown",
12 author="Alan Akbik",
13 author_email="[email protected]",
14 url="https://github.com/flairNLP/flair",
15 packages=find_packages(exclude="tests"), # same as name
16 license="MIT",
17 install_requires=required,
18 include_package_data=True,
19 python_requires=">=3.6",
20 )
21
```
Path: `flair/__init__.py`
Content:
```
1 import os
2 import torch
3 from pathlib import Path
4 from transformers import set_seed as hf_set_seed
5
6 # global variable: cache_root
7 cache_root = os.getenv('FLAIR_CACHE_ROOT', Path(Path.home(), ".flair"))
8
9 # global variable: device
10 device = None
11 if torch.cuda.is_available():
12 device = torch.device("cuda:0")
13 else:
14 device = torch.device("cpu")
15
16 # global variable: embedding_storage_mode
17 embedding_storage_mode = "default"
18
19 from . import data
20 from . import models
21 from . import visual
22 from . import trainers
23 from . import nn
24 from .training_utils import AnnealOnPlateau
25
26 import logging.config
27
28 __version__ = "0.7"
29
30 logging.config.dictConfig(
31 {
32 "version": 1,
33 "disable_existing_loggers": False,
34 "formatters": {"standard": {"format": "%(asctime)-15s %(message)s"}},
35 "handlers": {
36 "console": {
37 "level": "INFO",
38 "class": "logging.StreamHandler",
39 "formatter": "standard",
40 "stream": "ext://sys.stdout",
41 }
42 },
43 "loggers": {
44 "flair": {"handlers": ["console"], "level": "INFO", "propagate": False}
45 },
46 }
47 )
48
49 logger = logging.getLogger("flair")
50
51 def set_seed(seed: int):
52 hf_set_seed(seed)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flair/__init__.py b/flair/__init__.py
--- a/flair/__init__.py
+++ b/flair/__init__.py
@@ -25,7 +25,7 @@
import logging.config
-__version__ = "0.7"
+__version__ = "0.8"
logging.config.dictConfig(
{
diff --git a/flair/datasets/__init__.py b/flair/datasets/__init__.py
--- a/flair/datasets/__init__.py
+++ b/flair/datasets/__init__.py
@@ -10,6 +10,7 @@
from .sequence_labeling import ANER_CORP
from .sequence_labeling import BIOFID
from .sequence_labeling import BIOSCOPE
+from .sequence_labeling import BUSINESS_HUN
from .sequence_labeling import CONLL_03
from .sequence_labeling import CONLL_03_GERMAN
from .sequence_labeling import CONLL_03_DUTCH
@@ -55,7 +56,6 @@
from .sequence_labeling import WSD_UFSAC
from .sequence_labeling import WNUT_2020_NER
from .sequence_labeling import XTREME
-from .sequence_labeling import BUSINESS_HUN
# Expose all document classification datasets
from .document_classification import ClassificationCorpus
@@ -63,6 +63,9 @@
from .document_classification import CSVClassificationCorpus
from .document_classification import CSVClassificationDataset
from .document_classification import AMAZON_REVIEWS
+from .document_classification import COMMUNICATIVE_FUNCTIONS
+from .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE
+from .document_classification import GO_EMOTIONS
from .document_classification import IMDB
from .document_classification import NEWSGROUPS
from .document_classification import SENTIMENT_140
@@ -74,13 +77,10 @@
from .document_classification import SENTEVAL_SST_GRANULAR
from .document_classification import TREC_50
from .document_classification import TREC_6
-from .document_classification import COMMUNICATIVE_FUNCTIONS
from .document_classification import WASSA_ANGER
from .document_classification import WASSA_FEAR
from .document_classification import WASSA_JOY
from .document_classification import WASSA_SADNESS
-from .document_classification import GO_EMOTIONS
-from .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE
# Expose all treebanks
from .treebanks import UniversalDependenciesCorpus
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@
setup(
name="flair",
- version="0.7",
+ version="0.8",
description="A very simple framework for state-of-the-art NLP",
long_description=open("README.md", encoding="utf-8").read(),
long_description_content_type="text/markdown",
|
{"golden_diff": "diff --git a/flair/__init__.py b/flair/__init__.py\n--- a/flair/__init__.py\n+++ b/flair/__init__.py\n@@ -25,7 +25,7 @@\n \n import logging.config\n \n-__version__ = \"0.7\"\n+__version__ = \"0.8\"\n \n logging.config.dictConfig(\n {\ndiff --git a/flair/datasets/__init__.py b/flair/datasets/__init__.py\n--- a/flair/datasets/__init__.py\n+++ b/flair/datasets/__init__.py\n@@ -10,6 +10,7 @@\n from .sequence_labeling import ANER_CORP\n from .sequence_labeling import BIOFID\n from .sequence_labeling import BIOSCOPE\n+from .sequence_labeling import BUSINESS_HUN\n from .sequence_labeling import CONLL_03\n from .sequence_labeling import CONLL_03_GERMAN\n from .sequence_labeling import CONLL_03_DUTCH\n@@ -55,7 +56,6 @@\n from .sequence_labeling import WSD_UFSAC\n from .sequence_labeling import WNUT_2020_NER\n from .sequence_labeling import XTREME\n-from .sequence_labeling import BUSINESS_HUN\n \n # Expose all document classification datasets\n from .document_classification import ClassificationCorpus\n@@ -63,6 +63,9 @@\n from .document_classification import CSVClassificationCorpus\n from .document_classification import CSVClassificationDataset\n from .document_classification import AMAZON_REVIEWS\n+from .document_classification import COMMUNICATIVE_FUNCTIONS\n+from .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE\n+from .document_classification import GO_EMOTIONS\n from .document_classification import IMDB\n from .document_classification import NEWSGROUPS\n from .document_classification import SENTIMENT_140\n@@ -74,13 +77,10 @@\n from .document_classification import SENTEVAL_SST_GRANULAR\n from .document_classification import TREC_50\n from .document_classification import TREC_6\n-from .document_classification import COMMUNICATIVE_FUNCTIONS\n from .document_classification import WASSA_ANGER\n from .document_classification import WASSA_FEAR\n from .document_classification import WASSA_JOY\n from .document_classification import WASSA_SADNESS\n-from .document_classification import GO_EMOTIONS\n-from .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE\n \n # Expose all treebanks\n from .treebanks import UniversalDependenciesCorpus\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,7 @@\n \n setup(\n name=\"flair\",\n- version=\"0.7\",\n+ version=\"0.8\",\n description=\"A very simple framework for state-of-the-art NLP\",\n long_description=open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n", "issue": "Release Flair 0.8\nTime finally for another release! 
This issue tracks the progress of releasing 0.8.\n", "before_files": [{"content": "# Expose base classses\nfrom .base import DataLoader\nfrom .base import SentenceDataset\nfrom .base import StringDataset\nfrom .base import MongoDataset\n\n# Expose all sequence labeling datasets\nfrom .sequence_labeling import ColumnCorpus\nfrom .sequence_labeling import ColumnDataset\nfrom .sequence_labeling import ANER_CORP\nfrom .sequence_labeling import BIOFID\nfrom .sequence_labeling import BIOSCOPE\nfrom .sequence_labeling import CONLL_03\nfrom .sequence_labeling import CONLL_03_GERMAN\nfrom .sequence_labeling import CONLL_03_DUTCH\nfrom .sequence_labeling import CONLL_03_SPANISH\nfrom .sequence_labeling import CONLL_2000\nfrom .sequence_labeling import DANE\nfrom .sequence_labeling import EUROPARL_NER_GERMAN\nfrom .sequence_labeling import GERMEVAL_14\nfrom .sequence_labeling import INSPEC\nfrom .sequence_labeling import LER_GERMAN\nfrom .sequence_labeling import MIT_MOVIE_NER_SIMPLE\nfrom .sequence_labeling import MIT_MOVIE_NER_COMPLEX\nfrom .sequence_labeling import MIT_RESTAURANT_NER\nfrom .sequence_labeling import NER_BASQUE\nfrom .sequence_labeling import NER_FINNISH\nfrom .sequence_labeling import NER_SWEDISH\nfrom .sequence_labeling import STACKOVERFLOW_NER\nfrom .sequence_labeling import SEMEVAL2010\nfrom .sequence_labeling import SEMEVAL2017\nfrom .sequence_labeling import TURKU_NER\nfrom .sequence_labeling import TWITTER_NER\nfrom .sequence_labeling import UP_CHINESE\nfrom .sequence_labeling import UP_ENGLISH\nfrom .sequence_labeling import UP_FINNISH\nfrom .sequence_labeling import UP_FRENCH\nfrom .sequence_labeling import UP_GERMAN\nfrom .sequence_labeling import UP_ITALIAN\nfrom .sequence_labeling import UP_SPANISH\nfrom .sequence_labeling import UP_SPANISH_ANCORA\nfrom .sequence_labeling import WEIBO_NER\nfrom .sequence_labeling import WIKIANN\nfrom .sequence_labeling import WIKIGOLD_NER\nfrom .sequence_labeling import WIKINER_ENGLISH\nfrom .sequence_labeling import WIKINER_GERMAN\nfrom .sequence_labeling import WIKINER_DUTCH\nfrom .sequence_labeling import WIKINER_FRENCH\nfrom .sequence_labeling import WIKINER_ITALIAN\nfrom .sequence_labeling import WIKINER_SPANISH\nfrom .sequence_labeling import WIKINER_PORTUGUESE\nfrom .sequence_labeling import WIKINER_POLISH\nfrom .sequence_labeling import WIKINER_RUSSIAN\nfrom .sequence_labeling import WNUT_17\nfrom .sequence_labeling import WSD_UFSAC\nfrom .sequence_labeling import WNUT_2020_NER\nfrom .sequence_labeling import XTREME\nfrom .sequence_labeling import BUSINESS_HUN\n\n# Expose all document classification datasets\nfrom .document_classification import ClassificationCorpus\nfrom .document_classification import ClassificationDataset\nfrom .document_classification import CSVClassificationCorpus\nfrom .document_classification import CSVClassificationDataset\nfrom .document_classification import AMAZON_REVIEWS\nfrom .document_classification import IMDB\nfrom .document_classification import NEWSGROUPS\nfrom .document_classification import SENTIMENT_140\nfrom .document_classification import SENTEVAL_CR\nfrom .document_classification import SENTEVAL_MR\nfrom .document_classification import SENTEVAL_MPQA\nfrom .document_classification import SENTEVAL_SUBJ\nfrom .document_classification import SENTEVAL_SST_BINARY\nfrom .document_classification import SENTEVAL_SST_GRANULAR\nfrom .document_classification import TREC_50\nfrom .document_classification import TREC_6\nfrom .document_classification import COMMUNICATIVE_FUNCTIONS\nfrom 
.document_classification import WASSA_ANGER\nfrom .document_classification import WASSA_FEAR\nfrom .document_classification import WASSA_JOY\nfrom .document_classification import WASSA_SADNESS\nfrom .document_classification import GO_EMOTIONS\nfrom .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE\n\n# Expose all treebanks\nfrom .treebanks import UniversalDependenciesCorpus\nfrom .treebanks import UniversalDependenciesDataset\nfrom .treebanks import UD_ENGLISH\nfrom .treebanks import UD_GERMAN\nfrom .treebanks import UD_GERMAN_HDT\nfrom .treebanks import UD_DUTCH\nfrom .treebanks import UD_FRENCH\nfrom .treebanks import UD_ITALIAN\nfrom .treebanks import UD_SPANISH\nfrom .treebanks import UD_PORTUGUESE\nfrom .treebanks import UD_ROMANIAN\nfrom .treebanks import UD_CATALAN\nfrom .treebanks import UD_POLISH\nfrom .treebanks import UD_CZECH\nfrom .treebanks import UD_SLOVAK\nfrom .treebanks import UD_SWEDISH\nfrom .treebanks import UD_DANISH\nfrom .treebanks import UD_NORWEGIAN\nfrom .treebanks import UD_FINNISH\nfrom .treebanks import UD_SLOVENIAN\nfrom .treebanks import UD_CROATIAN\nfrom .treebanks import UD_SERBIAN\nfrom .treebanks import UD_BULGARIAN\nfrom .treebanks import UD_ARABIC\nfrom .treebanks import UD_HEBREW\nfrom .treebanks import UD_TURKISH\nfrom .treebanks import UD_PERSIAN\nfrom .treebanks import UD_RUSSIAN\nfrom .treebanks import UD_HINDI\nfrom .treebanks import UD_INDONESIAN\nfrom .treebanks import UD_JAPANESE\nfrom .treebanks import UD_CHINESE\nfrom .treebanks import UD_KOREAN\nfrom .treebanks import UD_BASQUE\nfrom .treebanks import UD_GREEK\nfrom .treebanks import UD_LIVVI\nfrom .treebanks import UD_NORTH_SAMI\nfrom .treebanks import UD_MARATHI\nfrom .treebanks import UD_MALTESE\nfrom .treebanks import UD_AFRIKAANS\nfrom .treebanks import UD_OLD_FRENCH\nfrom .treebanks import UD_GOTHIC\nfrom .treebanks import UD_WOLOF\n\n# Expose all text-text datasets\nfrom .text_text import ParallelTextCorpus\nfrom .text_text import ParallelTextDataset\nfrom .text_text import OpusParallelCorpus\nfrom .text_text import DataPairDataset\nfrom .text_text import DataPairCorpus\nfrom .text_text import GLUE_RTE\nfrom .text_text import SUPERGLUE_RTE\n\n# Expose all text-image datasets\nfrom .text_image import FeideggerCorpus\nfrom .text_image import FeideggerDataset\n\n# Expose all biomedical data sets\nfrom .biomedical import ANAT_EM\nfrom .biomedical import AZDZ\nfrom .biomedical import BIONLP2013_PC\nfrom .biomedical import BIONLP2013_CG\nfrom .biomedical import BIO_INFER\nfrom .biomedical import BIOSEMANTICS\nfrom .biomedical import BC2GM\nfrom .biomedical import CELL_FINDER\nfrom .biomedical import CEMP\nfrom .biomedical import CDR\nfrom .biomedical import CHEMDNER\nfrom .biomedical import CRAFT\nfrom .biomedical import CRAFT_V4\nfrom .biomedical import CLL\nfrom .biomedical import DECA\nfrom .biomedical import FSU\nfrom .biomedical import GELLUS\nfrom .biomedical import GPRO\nfrom .biomedical import IEPA\nfrom .biomedical import JNLPBA\nfrom .biomedical import LOCTEXT\nfrom .biomedical import LINNEAUS\nfrom .biomedical import NCBI_DISEASE\nfrom .biomedical import MIRNA\nfrom .biomedical import OSIRIS\nfrom .biomedical import PDR\nfrom .biomedical import S800\nfrom .biomedical import SCAI_CHEMICALS\nfrom .biomedical import SCAI_DISEASE\nfrom .biomedical import VARIOME\n\n# Expose all biomedical data sets using the HUNER splits\nfrom .biomedical import HUNER_CHEMICAL\nfrom .biomedical import HUNER_CHEMICAL_CHEBI\nfrom .biomedical import HUNER_CHEMICAL_CHEMDNER\nfrom 
.biomedical import HUNER_CHEMICAL_CDR\nfrom .biomedical import HUNER_CHEMICAL_CEMP\nfrom .biomedical import HUNER_CHEMICAL_SCAI\nfrom .biomedical import HUNER_CHEMICAL_CRAFT_V4\n# -\nfrom .biomedical import HUNER_CELL_LINE\nfrom .biomedical import HUNER_CELL_LINE_CLL\nfrom .biomedical import HUNER_CELL_LINE_CELL_FINDER\nfrom .biomedical import HUNER_CELL_LINE_GELLUS\nfrom .biomedical import HUNER_CELL_LINE_JNLPBA\n# -\nfrom .biomedical import HUNER_DISEASE\nfrom .biomedical import HUNER_DISEASE_CDR\nfrom .biomedical import HUNER_DISEASE_MIRNA\nfrom .biomedical import HUNER_DISEASE_NCBI\nfrom .biomedical import HUNER_DISEASE_SCAI\nfrom .biomedical import HUNER_DISEASE_VARIOME\nfrom .biomedical import HUNER_DISEASE_PDR\n# -\nfrom .biomedical import HUNER_GENE\nfrom .biomedical import HUNER_GENE_BIO_INFER\nfrom .biomedical import HUNER_GENE_BC2GM\nfrom .biomedical import HUNER_GENE_CHEBI\nfrom .biomedical import HUNER_GENE_CRAFT_V4\nfrom .biomedical import HUNER_GENE_CELL_FINDER\nfrom .biomedical import HUNER_GENE_DECA\nfrom .biomedical import HUNER_GENE_FSU\nfrom .biomedical import HUNER_GENE_GPRO\nfrom .biomedical import HUNER_GENE_IEPA\nfrom .biomedical import HUNER_GENE_JNLPBA\nfrom .biomedical import HUNER_GENE_LOCTEXT\nfrom .biomedical import HUNER_GENE_MIRNA\nfrom .biomedical import HUNER_GENE_OSIRIS\nfrom .biomedical import HUNER_GENE_VARIOME\n# -\nfrom .biomedical import HUNER_SPECIES\nfrom .biomedical import HUNER_SPECIES_CELL_FINDER\nfrom .biomedical import HUNER_SPECIES_CHEBI\nfrom .biomedical import HUNER_SPECIES_CRAFT_V4\nfrom .biomedical import HUNER_SPECIES_LOCTEXT\nfrom .biomedical import HUNER_SPECIES_LINNEAUS\nfrom .biomedical import HUNER_SPECIES_MIRNA\nfrom .biomedical import HUNER_SPECIES_S800\nfrom .biomedical import HUNER_SPECIES_VARIOME\n\n# Expose all biomedical data sets used for the evaluation of BioBERT\nfrom .biomedical import BIOBERT_CHEMICAL_BC4CHEMD\nfrom .biomedical import BIOBERT_CHEMICAL_BC5CDR\nfrom .biomedical import BIOBERT_DISEASE_NCBI\nfrom .biomedical import BIOBERT_DISEASE_BC5CDR\nfrom .biomedical import BIOBERT_SPECIES_LINNAEUS\nfrom .biomedical import BIOBERT_SPECIES_S800\nfrom .biomedical import BIOBERT_GENE_BC2GM\nfrom .biomedical import BIOBERT_GENE_JNLPBA\n", "path": "flair/datasets/__init__.py"}, {"content": "from setuptools import setup, find_packages\n\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\nsetup(\n name=\"flair\",\n version=\"0.7\",\n description=\"A very simple framework for state-of-the-art NLP\",\n long_description=open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n author=\"Alan Akbik\",\n author_email=\"[email protected]\",\n url=\"https://github.com/flairNLP/flair\",\n packages=find_packages(exclude=\"tests\"), # same as name\n license=\"MIT\",\n install_requires=required,\n include_package_data=True,\n python_requires=\">=3.6\",\n)\n", "path": "setup.py"}, {"content": "import os\nimport torch\nfrom pathlib import Path\nfrom transformers import set_seed as hf_set_seed\n\n# global variable: cache_root\ncache_root = os.getenv('FLAIR_CACHE_ROOT', Path(Path.home(), \".flair\"))\n\n# global variable: device\ndevice = None\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\nelse:\n device = torch.device(\"cpu\")\n\n# global variable: embedding_storage_mode\nembedding_storage_mode = \"default\"\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\nfrom . 
import nn\nfrom .training_utils import AnnealOnPlateau\n\nimport logging.config\n\n__version__ = \"0.7\"\n\nlogging.config.dictConfig(\n {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"standard\": {\"format\": \"%(asctime)-15s %(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"standard\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n \"flair\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": False}\n },\n }\n)\n\nlogger = logging.getLogger(\"flair\")\n\ndef set_seed(seed: int):\n hf_set_seed(seed)\n", "path": "flair/__init__.py"}], "after_files": [{"content": "# Expose base classses\nfrom .base import DataLoader\nfrom .base import SentenceDataset\nfrom .base import StringDataset\nfrom .base import MongoDataset\n\n# Expose all sequence labeling datasets\nfrom .sequence_labeling import ColumnCorpus\nfrom .sequence_labeling import ColumnDataset\nfrom .sequence_labeling import ANER_CORP\nfrom .sequence_labeling import BIOFID\nfrom .sequence_labeling import BIOSCOPE\nfrom .sequence_labeling import BUSINESS_HUN\nfrom .sequence_labeling import CONLL_03\nfrom .sequence_labeling import CONLL_03_GERMAN\nfrom .sequence_labeling import CONLL_03_DUTCH\nfrom .sequence_labeling import CONLL_03_SPANISH\nfrom .sequence_labeling import CONLL_2000\nfrom .sequence_labeling import DANE\nfrom .sequence_labeling import EUROPARL_NER_GERMAN\nfrom .sequence_labeling import GERMEVAL_14\nfrom .sequence_labeling import INSPEC\nfrom .sequence_labeling import LER_GERMAN\nfrom .sequence_labeling import MIT_MOVIE_NER_SIMPLE\nfrom .sequence_labeling import MIT_MOVIE_NER_COMPLEX\nfrom .sequence_labeling import MIT_RESTAURANT_NER\nfrom .sequence_labeling import NER_BASQUE\nfrom .sequence_labeling import NER_FINNISH\nfrom .sequence_labeling import NER_SWEDISH\nfrom .sequence_labeling import STACKOVERFLOW_NER\nfrom .sequence_labeling import SEMEVAL2010\nfrom .sequence_labeling import SEMEVAL2017\nfrom .sequence_labeling import TURKU_NER\nfrom .sequence_labeling import TWITTER_NER\nfrom .sequence_labeling import UP_CHINESE\nfrom .sequence_labeling import UP_ENGLISH\nfrom .sequence_labeling import UP_FINNISH\nfrom .sequence_labeling import UP_FRENCH\nfrom .sequence_labeling import UP_GERMAN\nfrom .sequence_labeling import UP_ITALIAN\nfrom .sequence_labeling import UP_SPANISH\nfrom .sequence_labeling import UP_SPANISH_ANCORA\nfrom .sequence_labeling import WEIBO_NER\nfrom .sequence_labeling import WIKIANN\nfrom .sequence_labeling import WIKIGOLD_NER\nfrom .sequence_labeling import WIKINER_ENGLISH\nfrom .sequence_labeling import WIKINER_GERMAN\nfrom .sequence_labeling import WIKINER_DUTCH\nfrom .sequence_labeling import WIKINER_FRENCH\nfrom .sequence_labeling import WIKINER_ITALIAN\nfrom .sequence_labeling import WIKINER_SPANISH\nfrom .sequence_labeling import WIKINER_PORTUGUESE\nfrom .sequence_labeling import WIKINER_POLISH\nfrom .sequence_labeling import WIKINER_RUSSIAN\nfrom .sequence_labeling import WNUT_17\nfrom .sequence_labeling import WSD_UFSAC\nfrom .sequence_labeling import WNUT_2020_NER\nfrom .sequence_labeling import XTREME\n\n# Expose all document classification datasets\nfrom .document_classification import ClassificationCorpus\nfrom .document_classification import ClassificationDataset\nfrom .document_classification import CSVClassificationCorpus\nfrom .document_classification import CSVClassificationDataset\nfrom .document_classification import AMAZON_REVIEWS\nfrom 
.document_classification import COMMUNICATIVE_FUNCTIONS\nfrom .document_classification import GERMEVAL_2018_OFFENSIVE_LANGUAGE\nfrom .document_classification import GO_EMOTIONS\nfrom .document_classification import IMDB\nfrom .document_classification import NEWSGROUPS\nfrom .document_classification import SENTIMENT_140\nfrom .document_classification import SENTEVAL_CR\nfrom .document_classification import SENTEVAL_MR\nfrom .document_classification import SENTEVAL_MPQA\nfrom .document_classification import SENTEVAL_SUBJ\nfrom .document_classification import SENTEVAL_SST_BINARY\nfrom .document_classification import SENTEVAL_SST_GRANULAR\nfrom .document_classification import TREC_50\nfrom .document_classification import TREC_6\nfrom .document_classification import WASSA_ANGER\nfrom .document_classification import WASSA_FEAR\nfrom .document_classification import WASSA_JOY\nfrom .document_classification import WASSA_SADNESS\n\n# Expose all treebanks\nfrom .treebanks import UniversalDependenciesCorpus\nfrom .treebanks import UniversalDependenciesDataset\nfrom .treebanks import UD_ENGLISH\nfrom .treebanks import UD_GERMAN\nfrom .treebanks import UD_GERMAN_HDT\nfrom .treebanks import UD_DUTCH\nfrom .treebanks import UD_FRENCH\nfrom .treebanks import UD_ITALIAN\nfrom .treebanks import UD_SPANISH\nfrom .treebanks import UD_PORTUGUESE\nfrom .treebanks import UD_ROMANIAN\nfrom .treebanks import UD_CATALAN\nfrom .treebanks import UD_POLISH\nfrom .treebanks import UD_CZECH\nfrom .treebanks import UD_SLOVAK\nfrom .treebanks import UD_SWEDISH\nfrom .treebanks import UD_DANISH\nfrom .treebanks import UD_NORWEGIAN\nfrom .treebanks import UD_FINNISH\nfrom .treebanks import UD_SLOVENIAN\nfrom .treebanks import UD_CROATIAN\nfrom .treebanks import UD_SERBIAN\nfrom .treebanks import UD_BULGARIAN\nfrom .treebanks import UD_ARABIC\nfrom .treebanks import UD_HEBREW\nfrom .treebanks import UD_TURKISH\nfrom .treebanks import UD_PERSIAN\nfrom .treebanks import UD_RUSSIAN\nfrom .treebanks import UD_HINDI\nfrom .treebanks import UD_INDONESIAN\nfrom .treebanks import UD_JAPANESE\nfrom .treebanks import UD_CHINESE\nfrom .treebanks import UD_KOREAN\nfrom .treebanks import UD_BASQUE\nfrom .treebanks import UD_GREEK\nfrom .treebanks import UD_LIVVI\nfrom .treebanks import UD_NORTH_SAMI\nfrom .treebanks import UD_MARATHI\nfrom .treebanks import UD_MALTESE\nfrom .treebanks import UD_AFRIKAANS\nfrom .treebanks import UD_OLD_FRENCH\nfrom .treebanks import UD_GOTHIC\nfrom .treebanks import UD_WOLOF\n\n# Expose all text-text datasets\nfrom .text_text import ParallelTextCorpus\nfrom .text_text import ParallelTextDataset\nfrom .text_text import OpusParallelCorpus\nfrom .text_text import DataPairDataset\nfrom .text_text import DataPairCorpus\nfrom .text_text import GLUE_RTE\nfrom .text_text import SUPERGLUE_RTE\n\n# Expose all text-image datasets\nfrom .text_image import FeideggerCorpus\nfrom .text_image import FeideggerDataset\n\n# Expose all biomedical data sets\nfrom .biomedical import ANAT_EM\nfrom .biomedical import AZDZ\nfrom .biomedical import BIONLP2013_PC\nfrom .biomedical import BIONLP2013_CG\nfrom .biomedical import BIO_INFER\nfrom .biomedical import BIOSEMANTICS\nfrom .biomedical import BC2GM\nfrom .biomedical import CELL_FINDER\nfrom .biomedical import CEMP\nfrom .biomedical import CDR\nfrom .biomedical import CHEMDNER\nfrom .biomedical import CRAFT\nfrom .biomedical import CRAFT_V4\nfrom .biomedical import CLL\nfrom .biomedical import DECA\nfrom .biomedical import FSU\nfrom .biomedical import GELLUS\nfrom .biomedical 
import GPRO\nfrom .biomedical import IEPA\nfrom .biomedical import JNLPBA\nfrom .biomedical import LOCTEXT\nfrom .biomedical import LINNEAUS\nfrom .biomedical import NCBI_DISEASE\nfrom .biomedical import MIRNA\nfrom .biomedical import OSIRIS\nfrom .biomedical import PDR\nfrom .biomedical import S800\nfrom .biomedical import SCAI_CHEMICALS\nfrom .biomedical import SCAI_DISEASE\nfrom .biomedical import VARIOME\n\n# Expose all biomedical data sets using the HUNER splits\nfrom .biomedical import HUNER_CHEMICAL\nfrom .biomedical import HUNER_CHEMICAL_CHEBI\nfrom .biomedical import HUNER_CHEMICAL_CHEMDNER\nfrom .biomedical import HUNER_CHEMICAL_CDR\nfrom .biomedical import HUNER_CHEMICAL_CEMP\nfrom .biomedical import HUNER_CHEMICAL_SCAI\nfrom .biomedical import HUNER_CHEMICAL_CRAFT_V4\n# -\nfrom .biomedical import HUNER_CELL_LINE\nfrom .biomedical import HUNER_CELL_LINE_CLL\nfrom .biomedical import HUNER_CELL_LINE_CELL_FINDER\nfrom .biomedical import HUNER_CELL_LINE_GELLUS\nfrom .biomedical import HUNER_CELL_LINE_JNLPBA\n# -\nfrom .biomedical import HUNER_DISEASE\nfrom .biomedical import HUNER_DISEASE_CDR\nfrom .biomedical import HUNER_DISEASE_MIRNA\nfrom .biomedical import HUNER_DISEASE_NCBI\nfrom .biomedical import HUNER_DISEASE_SCAI\nfrom .biomedical import HUNER_DISEASE_VARIOME\nfrom .biomedical import HUNER_DISEASE_PDR\n# -\nfrom .biomedical import HUNER_GENE\nfrom .biomedical import HUNER_GENE_BIO_INFER\nfrom .biomedical import HUNER_GENE_BC2GM\nfrom .biomedical import HUNER_GENE_CHEBI\nfrom .biomedical import HUNER_GENE_CRAFT_V4\nfrom .biomedical import HUNER_GENE_CELL_FINDER\nfrom .biomedical import HUNER_GENE_DECA\nfrom .biomedical import HUNER_GENE_FSU\nfrom .biomedical import HUNER_GENE_GPRO\nfrom .biomedical import HUNER_GENE_IEPA\nfrom .biomedical import HUNER_GENE_JNLPBA\nfrom .biomedical import HUNER_GENE_LOCTEXT\nfrom .biomedical import HUNER_GENE_MIRNA\nfrom .biomedical import HUNER_GENE_OSIRIS\nfrom .biomedical import HUNER_GENE_VARIOME\n# -\nfrom .biomedical import HUNER_SPECIES\nfrom .biomedical import HUNER_SPECIES_CELL_FINDER\nfrom .biomedical import HUNER_SPECIES_CHEBI\nfrom .biomedical import HUNER_SPECIES_CRAFT_V4\nfrom .biomedical import HUNER_SPECIES_LOCTEXT\nfrom .biomedical import HUNER_SPECIES_LINNEAUS\nfrom .biomedical import HUNER_SPECIES_MIRNA\nfrom .biomedical import HUNER_SPECIES_S800\nfrom .biomedical import HUNER_SPECIES_VARIOME\n\n# Expose all biomedical data sets used for the evaluation of BioBERT\nfrom .biomedical import BIOBERT_CHEMICAL_BC4CHEMD\nfrom .biomedical import BIOBERT_CHEMICAL_BC5CDR\nfrom .biomedical import BIOBERT_DISEASE_NCBI\nfrom .biomedical import BIOBERT_DISEASE_BC5CDR\nfrom .biomedical import BIOBERT_SPECIES_LINNAEUS\nfrom .biomedical import BIOBERT_SPECIES_S800\nfrom .biomedical import BIOBERT_GENE_BC2GM\nfrom .biomedical import BIOBERT_GENE_JNLPBA\n", "path": "flair/datasets/__init__.py"}, {"content": "from setuptools import setup, find_packages\n\nwith open(\"requirements.txt\") as f:\n required = f.read().splitlines()\n\nsetup(\n name=\"flair\",\n version=\"0.8\",\n description=\"A very simple framework for state-of-the-art NLP\",\n long_description=open(\"README.md\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n author=\"Alan Akbik\",\n author_email=\"[email protected]\",\n url=\"https://github.com/flairNLP/flair\",\n packages=find_packages(exclude=\"tests\"), # same as name\n license=\"MIT\",\n install_requires=required,\n include_package_data=True,\n python_requires=\">=3.6\",\n)\n", "path": 
"setup.py"}, {"content": "import os\nimport torch\nfrom pathlib import Path\nfrom transformers import set_seed as hf_set_seed\n\n# global variable: cache_root\ncache_root = os.getenv('FLAIR_CACHE_ROOT', Path(Path.home(), \".flair\"))\n\n# global variable: device\ndevice = None\nif torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\nelse:\n device = torch.device(\"cpu\")\n\n# global variable: embedding_storage_mode\nembedding_storage_mode = \"default\"\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\nfrom . import nn\nfrom .training_utils import AnnealOnPlateau\n\nimport logging.config\n\n__version__ = \"0.8\"\n\nlogging.config.dictConfig(\n {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"standard\": {\"format\": \"%(asctime)-15s %(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"standard\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n \"flair\": {\"handlers\": [\"console\"], \"level\": \"INFO\", \"propagate\": False}\n },\n }\n)\n\nlogger = logging.getLogger(\"flair\")\n\ndef set_seed(seed: int):\n hf_set_seed(seed)\n", "path": "flair/__init__.py"}]}
| 4,052 | 655 |
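Editorial aside, not part of the dataset record above: a minimal post-patch sanity check for the flair record, assuming its golden diff has been applied to the files shown in the prompt. The imported names come from the `flair/datasets/__init__.py` listing above; the check itself is a hypothetical usage sketch, not part of the repository.

```python
# Hypothetical sanity check after applying the flair golden diff:
# the version bump to "0.8" and the re-grouped dataset imports
# (BUSINESS_HUN moved up with the sequence-labeling block;
#  GO_EMOTIONS, COMMUNICATIVE_FUNCTIONS and GERMEVAL_2018_OFFENSIVE_LANGUAGE
#  grouped with the other document-classification imports) should both hold.
import flair
from flair.datasets import (
    BUSINESS_HUN,
    COMMUNICATIVE_FUNCTIONS,
    GERMEVAL_2018_OFFENSIVE_LANGUAGE,
    GO_EMOTIONS,
)

assert flair.__version__ == "0.8"
```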
gh_patches_debug_20651
|
rasdani/github-patches
|
git_diff
|
crytic__slither-618
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AssertionError when comparing two functions with Binary operation
The following contract causes an assertion error:
```
contract FunctionComparisonTest {
function f() public returns (bool) {
return f == f;
}
}
```
Output:
```
ERROR:root:Error in .\function_comparison.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 103, in __init__
self._visit_expression(self.expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 95, in _visit_expression
self._post_visit(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 268, in _post_visit
self._post_binary_operation(expression)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 192, in _post_binary_operation
operation = Binary(val, left, right, _binary_to_binary[expression.type])
File "c:\users\x\documents\github\slither\slither\slithir\operations\binary.py", line 133, in __init__
assert is_valid_rvalue(left_variable)
AssertionError
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/slithir/operations/binary.py`
Content:
```
1 import logging
2 from enum import Enum
3
4 from slither.core.solidity_types import ElementaryType
5 from slither.slithir.exceptions import SlithIRError
6 from slither.slithir.operations.lvalue import OperationWithLValue
7 from slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue
8 from slither.slithir.variables import ReferenceVariable
9
10 logger = logging.getLogger("BinaryOperationIR")
11
12
13 class BinaryType(Enum):
14 POWER = 0 # **
15 MULTIPLICATION = 1 # *
16 DIVISION = 2 # /
17 MODULO = 3 # %
18 ADDITION = 4 # +
19 SUBTRACTION = 5 # -
20 LEFT_SHIFT = 6 # <<
21 RIGHT_SHIFT = 7 # >>
22 AND = 8 # &
23 CARET = 9 # ^
24 OR = 10 # |
25 LESS = 11 # <
26 GREATER = 12 # >
27 LESS_EQUAL = 13 # <=
28 GREATER_EQUAL = 14 # >=
29 EQUAL = 15 # ==
30 NOT_EQUAL = 16 # !=
31 ANDAND = 17 # &&
32 OROR = 18 # ||
33
34 @staticmethod
35 def return_bool(operation_type):
36 return operation_type in [
37 BinaryType.OROR,
38 BinaryType.ANDAND,
39 BinaryType.LESS,
40 BinaryType.GREATER,
41 BinaryType.LESS_EQUAL,
42 BinaryType.GREATER_EQUAL,
43 BinaryType.EQUAL,
44 BinaryType.NOT_EQUAL,
45 ]
46
47 @staticmethod
48 def get_type(operation_type): # pylint: disable=too-many-branches
49 if operation_type == "**":
50 return BinaryType.POWER
51 if operation_type == "*":
52 return BinaryType.MULTIPLICATION
53 if operation_type == "/":
54 return BinaryType.DIVISION
55 if operation_type == "%":
56 return BinaryType.MODULO
57 if operation_type == "+":
58 return BinaryType.ADDITION
59 if operation_type == "-":
60 return BinaryType.SUBTRACTION
61 if operation_type == "<<":
62 return BinaryType.LEFT_SHIFT
63 if operation_type == ">>":
64 return BinaryType.RIGHT_SHIFT
65 if operation_type == "&":
66 return BinaryType.AND
67 if operation_type == "^":
68 return BinaryType.CARET
69 if operation_type == "|":
70 return BinaryType.OR
71 if operation_type == "<":
72 return BinaryType.LESS
73 if operation_type == ">":
74 return BinaryType.GREATER
75 if operation_type == "<=":
76 return BinaryType.LESS_EQUAL
77 if operation_type == ">=":
78 return BinaryType.GREATER_EQUAL
79 if operation_type == "==":
80 return BinaryType.EQUAL
81 if operation_type == "!=":
82 return BinaryType.NOT_EQUAL
83 if operation_type == "&&":
84 return BinaryType.ANDAND
85 if operation_type == "||":
86 return BinaryType.OROR
87
88 raise SlithIRError("get_type: Unknown operation type {})".format(operation_type))
89
90 def __str__(self): # pylint: disable=too-many-branches
91 if self == BinaryType.POWER:
92 return "**"
93 if self == BinaryType.MULTIPLICATION:
94 return "*"
95 if self == BinaryType.DIVISION:
96 return "/"
97 if self == BinaryType.MODULO:
98 return "%"
99 if self == BinaryType.ADDITION:
100 return "+"
101 if self == BinaryType.SUBTRACTION:
102 return "-"
103 if self == BinaryType.LEFT_SHIFT:
104 return "<<"
105 if self == BinaryType.RIGHT_SHIFT:
106 return ">>"
107 if self == BinaryType.AND:
108 return "&"
109 if self == BinaryType.CARET:
110 return "^"
111 if self == BinaryType.OR:
112 return "|"
113 if self == BinaryType.LESS:
114 return "<"
115 if self == BinaryType.GREATER:
116 return ">"
117 if self == BinaryType.LESS_EQUAL:
118 return "<="
119 if self == BinaryType.GREATER_EQUAL:
120 return ">="
121 if self == BinaryType.EQUAL:
122 return "=="
123 if self == BinaryType.NOT_EQUAL:
124 return "!="
125 if self == BinaryType.ANDAND:
126 return "&&"
127 if self == BinaryType.OROR:
128 return "||"
129 raise SlithIRError("str: Unknown operation type {} {})".format(self, type(self)))
130
131
132 class Binary(OperationWithLValue):
133 def __init__(self, result, left_variable, right_variable, operation_type):
134 assert is_valid_rvalue(left_variable)
135 assert is_valid_rvalue(right_variable)
136 assert is_valid_lvalue(result)
137 assert isinstance(operation_type, BinaryType)
138 super().__init__()
139 self._variables = [left_variable, right_variable]
140 self._type = operation_type
141 self._lvalue = result
142 if BinaryType.return_bool(operation_type):
143 result.set_type(ElementaryType("bool"))
144 else:
145 result.set_type(left_variable.type)
146
147 @property
148 def read(self):
149 return [self.variable_left, self.variable_right]
150
151 @property
152 def get_variable(self):
153 return self._variables
154
155 @property
156 def variable_left(self):
157 return self._variables[0]
158
159 @property
160 def variable_right(self):
161 return self._variables[1]
162
163 @property
164 def type(self):
165 return self._type
166
167 @property
168 def type_str(self):
169 return str(self._type)
170
171 def __str__(self):
172 if isinstance(self.lvalue, ReferenceVariable):
173 points = self.lvalue.points_to
174 while isinstance(points, ReferenceVariable):
175 points = points.points_to
176 return "{}(-> {}) = {} {} {}".format(
177 str(self.lvalue),
178 points,
179 self.variable_left,
180 self.type_str,
181 self.variable_right,
182 )
183 return "{}({}) = {} {} {}".format(
184 str(self.lvalue),
185 self.lvalue.type,
186 self.variable_left,
187 self.type_str,
188 self.variable_right,
189 )
190
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/slithir/operations/binary.py b/slither/slithir/operations/binary.py
--- a/slither/slithir/operations/binary.py
+++ b/slither/slithir/operations/binary.py
@@ -1,6 +1,7 @@
import logging
from enum import Enum
+from slither.core.declarations import Function
from slither.core.solidity_types import ElementaryType
from slither.slithir.exceptions import SlithIRError
from slither.slithir.operations.lvalue import OperationWithLValue
@@ -131,8 +132,8 @@
class Binary(OperationWithLValue):
def __init__(self, result, left_variable, right_variable, operation_type):
- assert is_valid_rvalue(left_variable)
- assert is_valid_rvalue(right_variable)
+ assert is_valid_rvalue(left_variable) or isinstance(left_variable, Function)
+ assert is_valid_rvalue(right_variable) or isinstance(right_variable, Function)
assert is_valid_lvalue(result)
assert isinstance(operation_type, BinaryType)
super().__init__()
|
{"golden_diff": "diff --git a/slither/slithir/operations/binary.py b/slither/slithir/operations/binary.py\n--- a/slither/slithir/operations/binary.py\n+++ b/slither/slithir/operations/binary.py\n@@ -1,6 +1,7 @@\n import logging\n from enum import Enum\n \n+from slither.core.declarations import Function\n from slither.core.solidity_types import ElementaryType\n from slither.slithir.exceptions import SlithIRError\n from slither.slithir.operations.lvalue import OperationWithLValue\n@@ -131,8 +132,8 @@\n \n class Binary(OperationWithLValue):\n def __init__(self, result, left_variable, right_variable, operation_type):\n- assert is_valid_rvalue(left_variable)\n- assert is_valid_rvalue(right_variable)\n+ assert is_valid_rvalue(left_variable) or isinstance(left_variable, Function)\n+ assert is_valid_rvalue(right_variable) or isinstance(right_variable, Function)\n assert is_valid_lvalue(result)\n assert isinstance(operation_type, BinaryType)\n super().__init__()\n", "issue": "AssertionError when comparing two functions with Binary operation\nThe following contract causes an assertion error:\r\n```\r\ncontract FunctionComparisonTest {\r\n function f() public returns (bool) {\r\n return f == f;\r\n }\r\n}\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:root:Error in .\\function_comparison.sol\r\nERROR:root:Traceback (most recent call last):\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 610, in main_impl\r\n (slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 67, in process_all\r\n (slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\__main__.py\", line 53, in process_single\r\n slither = Slither(target,\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slither.py\", line 86, in __init__\r\n self._parser.analyze_contracts()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 345, in analyze_contracts\r\n self._convert_to_slithir()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\solc_parsing\\slitherSolc.py\", line 489, in _convert_to_slithir\r\n func.generate_slithir_and_analyze()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\declarations\\function.py\", line 1652, in generate_slithir_and_analyze\r\n node.slithir_generation()\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\core\\cfg\\node.py\", line 702, in slithir_generation\r\n self._irs = convert_expression(expression, self)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\convert.py\", line 64, in convert_expression\r\n visitor = ExpressionToSlithIR(expression, node)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\slithir\\expression_to_slithir.py\", line 103, in __init__\r\n self._visit_expression(self.expression)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\expression\\expression.py\", line 95, in _visit_expression\r\n self._post_visit(expression)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\expression\\expression.py\", line 268, in _post_visit\r\n self._post_binary_operation(expression)\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\visitors\\slithir\\expression_to_slithir.py\", line 192, in _post_binary_operation\r\n operation = Binary(val, left, right, 
_binary_to_binary[expression.type])\r\n File \"c:\\users\\x\\documents\\github\\slither\\slither\\slithir\\operations\\binary.py\", line 133, in __init__\r\n assert is_valid_rvalue(left_variable)\r\nAssertionError\r\n```\n", "before_files": [{"content": "import logging\nfrom enum import Enum\n\nfrom slither.core.solidity_types import ElementaryType\nfrom slither.slithir.exceptions import SlithIRError\nfrom slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\nfrom slither.slithir.variables import ReferenceVariable\n\nlogger = logging.getLogger(\"BinaryOperationIR\")\n\n\nclass BinaryType(Enum):\n POWER = 0 # **\n MULTIPLICATION = 1 # *\n DIVISION = 2 # /\n MODULO = 3 # %\n ADDITION = 4 # +\n SUBTRACTION = 5 # -\n LEFT_SHIFT = 6 # <<\n RIGHT_SHIFT = 7 # >>\n AND = 8 # &\n CARET = 9 # ^\n OR = 10 # |\n LESS = 11 # <\n GREATER = 12 # >\n LESS_EQUAL = 13 # <=\n GREATER_EQUAL = 14 # >=\n EQUAL = 15 # ==\n NOT_EQUAL = 16 # !=\n ANDAND = 17 # &&\n OROR = 18 # ||\n\n @staticmethod\n def return_bool(operation_type):\n return operation_type in [\n BinaryType.OROR,\n BinaryType.ANDAND,\n BinaryType.LESS,\n BinaryType.GREATER,\n BinaryType.LESS_EQUAL,\n BinaryType.GREATER_EQUAL,\n BinaryType.EQUAL,\n BinaryType.NOT_EQUAL,\n ]\n\n @staticmethod\n def get_type(operation_type): # pylint: disable=too-many-branches\n if operation_type == \"**\":\n return BinaryType.POWER\n if operation_type == \"*\":\n return BinaryType.MULTIPLICATION\n if operation_type == \"/\":\n return BinaryType.DIVISION\n if operation_type == \"%\":\n return BinaryType.MODULO\n if operation_type == \"+\":\n return BinaryType.ADDITION\n if operation_type == \"-\":\n return BinaryType.SUBTRACTION\n if operation_type == \"<<\":\n return BinaryType.LEFT_SHIFT\n if operation_type == \">>\":\n return BinaryType.RIGHT_SHIFT\n if operation_type == \"&\":\n return BinaryType.AND\n if operation_type == \"^\":\n return BinaryType.CARET\n if operation_type == \"|\":\n return BinaryType.OR\n if operation_type == \"<\":\n return BinaryType.LESS\n if operation_type == \">\":\n return BinaryType.GREATER\n if operation_type == \"<=\":\n return BinaryType.LESS_EQUAL\n if operation_type == \">=\":\n return BinaryType.GREATER_EQUAL\n if operation_type == \"==\":\n return BinaryType.EQUAL\n if operation_type == \"!=\":\n return BinaryType.NOT_EQUAL\n if operation_type == \"&&\":\n return BinaryType.ANDAND\n if operation_type == \"||\":\n return BinaryType.OROR\n\n raise SlithIRError(\"get_type: Unknown operation type {})\".format(operation_type))\n\n def __str__(self): # pylint: disable=too-many-branches\n if self == BinaryType.POWER:\n return \"**\"\n if self == BinaryType.MULTIPLICATION:\n return \"*\"\n if self == BinaryType.DIVISION:\n return \"/\"\n if self == BinaryType.MODULO:\n return \"%\"\n if self == BinaryType.ADDITION:\n return \"+\"\n if self == BinaryType.SUBTRACTION:\n return \"-\"\n if self == BinaryType.LEFT_SHIFT:\n return \"<<\"\n if self == BinaryType.RIGHT_SHIFT:\n return \">>\"\n if self == BinaryType.AND:\n return \"&\"\n if self == BinaryType.CARET:\n return \"^\"\n if self == BinaryType.OR:\n return \"|\"\n if self == BinaryType.LESS:\n return \"<\"\n if self == BinaryType.GREATER:\n return \">\"\n if self == BinaryType.LESS_EQUAL:\n return \"<=\"\n if self == BinaryType.GREATER_EQUAL:\n return \">=\"\n if self == BinaryType.EQUAL:\n return \"==\"\n if self == BinaryType.NOT_EQUAL:\n return \"!=\"\n if self == BinaryType.ANDAND:\n return \"&&\"\n 
if self == BinaryType.OROR:\n return \"||\"\n raise SlithIRError(\"str: Unknown operation type {} {})\".format(self, type(self)))\n\n\nclass Binary(OperationWithLValue):\n def __init__(self, result, left_variable, right_variable, operation_type):\n assert is_valid_rvalue(left_variable)\n assert is_valid_rvalue(right_variable)\n assert is_valid_lvalue(result)\n assert isinstance(operation_type, BinaryType)\n super().__init__()\n self._variables = [left_variable, right_variable]\n self._type = operation_type\n self._lvalue = result\n if BinaryType.return_bool(operation_type):\n result.set_type(ElementaryType(\"bool\"))\n else:\n result.set_type(left_variable.type)\n\n @property\n def read(self):\n return [self.variable_left, self.variable_right]\n\n @property\n def get_variable(self):\n return self._variables\n\n @property\n def variable_left(self):\n return self._variables[0]\n\n @property\n def variable_right(self):\n return self._variables[1]\n\n @property\n def type(self):\n return self._type\n\n @property\n def type_str(self):\n return str(self._type)\n\n def __str__(self):\n if isinstance(self.lvalue, ReferenceVariable):\n points = self.lvalue.points_to\n while isinstance(points, ReferenceVariable):\n points = points.points_to\n return \"{}(-> {}) = {} {} {}\".format(\n str(self.lvalue),\n points,\n self.variable_left,\n self.type_str,\n self.variable_right,\n )\n return \"{}({}) = {} {} {}\".format(\n str(self.lvalue),\n self.lvalue.type,\n self.variable_left,\n self.type_str,\n self.variable_right,\n )\n", "path": "slither/slithir/operations/binary.py"}], "after_files": [{"content": "import logging\nfrom enum import Enum\n\nfrom slither.core.declarations import Function\nfrom slither.core.solidity_types import ElementaryType\nfrom slither.slithir.exceptions import SlithIRError\nfrom slither.slithir.operations.lvalue import OperationWithLValue\nfrom slither.slithir.utils.utils import is_valid_lvalue, is_valid_rvalue\nfrom slither.slithir.variables import ReferenceVariable\n\nlogger = logging.getLogger(\"BinaryOperationIR\")\n\n\nclass BinaryType(Enum):\n POWER = 0 # **\n MULTIPLICATION = 1 # *\n DIVISION = 2 # /\n MODULO = 3 # %\n ADDITION = 4 # +\n SUBTRACTION = 5 # -\n LEFT_SHIFT = 6 # <<\n RIGHT_SHIFT = 7 # >>\n AND = 8 # &\n CARET = 9 # ^\n OR = 10 # |\n LESS = 11 # <\n GREATER = 12 # >\n LESS_EQUAL = 13 # <=\n GREATER_EQUAL = 14 # >=\n EQUAL = 15 # ==\n NOT_EQUAL = 16 # !=\n ANDAND = 17 # &&\n OROR = 18 # ||\n\n @staticmethod\n def return_bool(operation_type):\n return operation_type in [\n BinaryType.OROR,\n BinaryType.ANDAND,\n BinaryType.LESS,\n BinaryType.GREATER,\n BinaryType.LESS_EQUAL,\n BinaryType.GREATER_EQUAL,\n BinaryType.EQUAL,\n BinaryType.NOT_EQUAL,\n ]\n\n @staticmethod\n def get_type(operation_type): # pylint: disable=too-many-branches\n if operation_type == \"**\":\n return BinaryType.POWER\n if operation_type == \"*\":\n return BinaryType.MULTIPLICATION\n if operation_type == \"/\":\n return BinaryType.DIVISION\n if operation_type == \"%\":\n return BinaryType.MODULO\n if operation_type == \"+\":\n return BinaryType.ADDITION\n if operation_type == \"-\":\n return BinaryType.SUBTRACTION\n if operation_type == \"<<\":\n return BinaryType.LEFT_SHIFT\n if operation_type == \">>\":\n return BinaryType.RIGHT_SHIFT\n if operation_type == \"&\":\n return BinaryType.AND\n if operation_type == \"^\":\n return BinaryType.CARET\n if operation_type == \"|\":\n return BinaryType.OR\n if operation_type == \"<\":\n return BinaryType.LESS\n if operation_type == \">\":\n return 
BinaryType.GREATER\n if operation_type == \"<=\":\n return BinaryType.LESS_EQUAL\n if operation_type == \">=\":\n return BinaryType.GREATER_EQUAL\n if operation_type == \"==\":\n return BinaryType.EQUAL\n if operation_type == \"!=\":\n return BinaryType.NOT_EQUAL\n if operation_type == \"&&\":\n return BinaryType.ANDAND\n if operation_type == \"||\":\n return BinaryType.OROR\n\n raise SlithIRError(\"get_type: Unknown operation type {})\".format(operation_type))\n\n def __str__(self): # pylint: disable=too-many-branches\n if self == BinaryType.POWER:\n return \"**\"\n if self == BinaryType.MULTIPLICATION:\n return \"*\"\n if self == BinaryType.DIVISION:\n return \"/\"\n if self == BinaryType.MODULO:\n return \"%\"\n if self == BinaryType.ADDITION:\n return \"+\"\n if self == BinaryType.SUBTRACTION:\n return \"-\"\n if self == BinaryType.LEFT_SHIFT:\n return \"<<\"\n if self == BinaryType.RIGHT_SHIFT:\n return \">>\"\n if self == BinaryType.AND:\n return \"&\"\n if self == BinaryType.CARET:\n return \"^\"\n if self == BinaryType.OR:\n return \"|\"\n if self == BinaryType.LESS:\n return \"<\"\n if self == BinaryType.GREATER:\n return \">\"\n if self == BinaryType.LESS_EQUAL:\n return \"<=\"\n if self == BinaryType.GREATER_EQUAL:\n return \">=\"\n if self == BinaryType.EQUAL:\n return \"==\"\n if self == BinaryType.NOT_EQUAL:\n return \"!=\"\n if self == BinaryType.ANDAND:\n return \"&&\"\n if self == BinaryType.OROR:\n return \"||\"\n raise SlithIRError(\"str: Unknown operation type {} {})\".format(self, type(self)))\n\n\nclass Binary(OperationWithLValue):\n def __init__(self, result, left_variable, right_variable, operation_type):\n assert is_valid_rvalue(left_variable) or isinstance(left_variable, Function)\n assert is_valid_rvalue(right_variable) or isinstance(right_variable, Function)\n assert is_valid_lvalue(result)\n assert isinstance(operation_type, BinaryType)\n super().__init__()\n self._variables = [left_variable, right_variable]\n self._type = operation_type\n self._lvalue = result\n if BinaryType.return_bool(operation_type):\n result.set_type(ElementaryType(\"bool\"))\n else:\n result.set_type(left_variable.type)\n\n @property\n def read(self):\n return [self.variable_left, self.variable_right]\n\n @property\n def get_variable(self):\n return self._variables\n\n @property\n def variable_left(self):\n return self._variables[0]\n\n @property\n def variable_right(self):\n return self._variables[1]\n\n @property\n def type(self):\n return self._type\n\n @property\n def type_str(self):\n return str(self._type)\n\n def __str__(self):\n if isinstance(self.lvalue, ReferenceVariable):\n points = self.lvalue.points_to\n while isinstance(points, ReferenceVariable):\n points = points.points_to\n return \"{}(-> {}) = {} {} {}\".format(\n str(self.lvalue),\n points,\n self.variable_left,\n self.type_str,\n self.variable_right,\n )\n return \"{}({}) = {} {} {}\".format(\n str(self.lvalue),\n self.lvalue.type,\n self.variable_left,\n self.type_str,\n self.variable_right,\n )\n", "path": "slither/slithir/operations/binary.py"}]}
| 2,812 | 236 |
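Editorial aside, not part of the dataset record above: a minimal sketch of the operand rule that the slither golden diff introduces. It reuses only names visible in `slither/slithir/operations/binary.py` and in the patch itself; the standalone helper below is hypothetical and simply restates the relaxed assertion.

```python
# Hypothetical helper mirroring the patched checks in Binary.__init__:
# after the fix, a bare function reference (the `f` in `return f == f;`)
# is accepted as a Binary operand instead of tripping the AssertionError.
from slither.core.declarations import Function           # import added by the patch
from slither.slithir.utils.utils import is_valid_rvalue  # already imported in binary.py


def operand_is_acceptable(variable) -> bool:
    return is_valid_rvalue(variable) or isinstance(variable, Function)
```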
gh_patches_debug_43932
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-text-249
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concordance Settings
##### Expected behavior
Concordance widget uses settings.
##### Actual behavior
It does not yet.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/widgets/owconcordance.py`
Content:
```
1 from typing import Optional
2
3 from itertools import chain
4 from AnyQt.QtCore import Qt, QAbstractTableModel, QSize, QItemSelectionModel, \
5 QItemSelection
6 from AnyQt.QtWidgets import QSizePolicy, QApplication, QTableView, \
7 QStyledItemDelegate
8 from AnyQt.QtGui import QColor
9
10 from Orange.widgets import gui
11 from Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler
12 from Orange.widgets.widget import OWWidget, Msg, Input, Output
13 from nltk import ConcordanceIndex
14 from orangecontrib.text.corpus import Corpus
15 from orangecontrib.text.topics import Topic
16 from orangecontrib.text.preprocess import WordPunctTokenizer
17
18
19 class HorizontalGridDelegate(QStyledItemDelegate):
20 """Class for setting elide."""
21
22 def paint(self, painter, option, index):
23 if index.column() == 0:
24 option.textElideMode = Qt.ElideLeft
25 elif index.column() == 2:
26 option.textElideMode = Qt.ElideRight
27 QStyledItemDelegate.paint(self, painter, option, index)
28
29
30 class DocumentSelectionModel(QItemSelectionModel):
31 """Sets selection for QTableView. Creates a set of selected documents."""
32
33 def select(self, selection, flags):
34 # which rows have been selected
35 indexes = selection.indexes() if isinstance(selection, QItemSelection) \
36 else [selection]
37 # prevent crashing when deleting the connection
38 if not indexes:
39 super().select(selection, flags)
40 return
41 # indexes[0].row() == -1 indicates clicking outside of the table
42 if len(indexes) == 1 and indexes[0].row() == -1:
43 self.clear()
44 return
45 word_index = self.model().word_index
46 selected_docs = {word_index[index.row()][0] for index in indexes}
47 selected_rows = [
48 row_index for row_index, (doc_index, _) in enumerate(word_index)
49 if doc_index in selected_docs]
50 selection = QItemSelection()
51 # select all rows belonging to the selected document
52 for row in selected_rows:
53 index = self.model().index(row, 0)
54 selection.select(index, index)
55 super().select(selection, flags)
56
57
58 class ConcordanceModel(QAbstractTableModel):
59 """A model for constructing concordances from text."""
60
61 def __init__(self):
62 QAbstractTableModel.__init__(self)
63 self.word = None
64 self.corpus = None
65 self.tokens = None
66 self.n_tokens = None
67 self.n_types = None
68 self.indices = None
69 self.word_index = None
70 self.width = 8
71 self.colored_rows = None
72
73 def set_word(self, word):
74 self.modelAboutToBeReset.emit()
75 self.word = word
76 self._compute_word_index()
77 self.modelReset.emit()
78
79 def set_corpus(self, corpus):
80 self.modelAboutToBeReset.emit()
81 self.corpus = corpus
82 self.set_tokens()
83 self._compute_indices()
84 self._compute_word_index()
85 self.modelReset.emit()
86
87 def set_tokens(self):
88 if self.corpus is None:
89 self.tokens = None
90 return
91 tokenizer = WordPunctTokenizer()
92 self.tokens = tokenizer(self.corpus.documents)
93 self.n_tokens = sum(map(len, self.tokens))
94 self.n_types = len(set(chain.from_iterable(self.tokens)))
95
96 def set_width(self, width):
97 self.modelAboutToBeReset.emit()
98 self.width = width
99 self.modelReset.emit()
100
101 def flags(self, _):
102 return Qt.ItemIsEnabled | Qt.ItemIsSelectable
103
104 def rowCount(self, parent=None, *args, **kwargs):
105 return 0 if parent is None or parent.isValid() or \
106 self.word_index is None \
107 else len(self.word_index)
108
109 def columnCount(self, parent=None, *args, **kwargs):
110 return 3
111
112 def data(self, index, role=Qt.DisplayRole):
113 row, col = index.row(), index.column()
114 doc, index = self.word_index[row]
115
116 if role == Qt.DisplayRole:
117 tokens = self.tokens
118 if col == 0:
119 return ' '.join(tokens[doc][max(index - self.width, 0):index])
120 if col == 1:
121 return tokens[doc][index]
122 if col == 2:
123 return ' '.join(tokens[doc][index + 1:index + self.width + 1])
124
125 elif role == Qt.TextAlignmentRole:
126 return [Qt.AlignRight | Qt.AlignVCenter,
127 Qt.AlignCenter,
128 Qt.AlignLeft | Qt.AlignVCenter][col]
129
130 elif role == Qt.BackgroundRole:
131 const = self.word_index[row][0] in self.colored_rows
132 return QColor(236 + 19 * const, 243 + 12 * const, 255)
133
134 def _compute_indices(self): # type: () -> Optional[None, list]
135 if self.corpus is None:
136 self.indices = None
137 return
138 self.indices = [ConcordanceIndex(doc, key=lambda x: x.lower())
139 for doc in self.tokens]
140
141 def _compute_word_index(self):
142 if self.indices is None or self.word is None:
143 self.word_index = self.colored_rows = None
144 else:
145 self.word_index = [
146 (doc_idx, offset) for doc_idx, doc in enumerate(self.indices)
147 for offset in doc.offsets(self.word)]
148 self.colored_rows = set(sorted({d[0] for d in self.word_index})[::2])
149
150 def matching_docs(self):
151 if self.indices and self.word:
152 return sum(bool(doc.offsets(self.word)) for doc in self.indices)
153 else:
154 return 0
155
156
157 class OWConcordance(OWWidget):
158 name = "Concordance"
159 description = "Display the context of the word."
160 icon = "icons/Concordance.svg"
161 priority = 520
162
163 class Inputs:
164 corpus = Input("Corpus", Corpus)
165 query_word = Input("Query Word", Topic)
166
167 class Outputs:
168 selected_documents = Output("Selected Documents", Corpus)
169
170 settingsHandler = PerfectDomainContextHandler(
171 match_values = PerfectDomainContextHandler.MATCH_VALUES_ALL
172 )
173 autocommit = Setting(True)
174 context_width = Setting(5)
175 word = ContextSetting("", exclude_metas=False)
176 # TODO Set selection settings (DataHashContextHandler)
177
178 class Warning(OWWidget.Warning):
179 multiple_words_on_input = Msg("Multiple query words on input. "
180 "Only the first one is considered!")
181
182 def __init__(self):
183 super().__init__()
184
185 self.corpus = None # Corpus
186 self.n_matching = '' # Info on docs matching the word
187 self.n_tokens = '' # Info on tokens
188 self.n_types = '' # Info on types (unique tokens)
189 self.is_word_on_input = False
190
191 # Info attributes
192 info_box = gui.widgetBox(self.controlArea, 'Info')
193 gui.label(info_box, self, 'Tokens: %(n_tokens)s')
194 gui.label(info_box, self, 'Types: %(n_types)s')
195 gui.label(info_box, self, 'Matching: %(n_matching)s')
196
197 # Width parameter
198 gui.spin(self.controlArea, self, 'context_width', 3, 10, box=True,
199 label="Number of words:", callback=self.set_width)
200
201 gui.rubber(self.controlArea)
202
203 # Search
204 c_box = gui.widgetBox(self.mainArea, orientation="vertical")
205 self.input = gui.lineEdit(
206 c_box, self, 'word', orientation=Qt.Horizontal,
207 sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,
208 QSizePolicy.Fixed),
209 label='Query:', callback=self.set_word, callbackOnType=True)
210 self.input.setFocus()
211
212 # Concordances view
213 self.conc_view = QTableView()
214 self.model = ConcordanceModel()
215 self.conc_view.setModel(self.model)
216 self.conc_view.setWordWrap(False)
217 self.conc_view.setSelectionBehavior(QTableView.SelectRows)
218 self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))
219 self.conc_view.setItemDelegate(HorizontalGridDelegate())
220 # connect selectionChanged to self.commit(), which will be
221 # updated by gui.auto_commit()
222 self.conc_view.selectionModel().selectionChanged.connect(lambda:
223 self.commit())
224 self.conc_view.horizontalHeader().hide()
225 self.conc_view.setShowGrid(False)
226 self.mainArea.layout().addWidget(self.conc_view)
227 self.set_width()
228
229 # Auto-commit box
230 gui.auto_commit(self.controlArea, self, 'autocommit', 'Commit',
231 'Auto commit is on')
232
233 def sizeHint(self): # pragma: no cover
234 return QSize(600, 400)
235
236 def set_width(self):
237 sel = self.conc_view.selectionModel().selection()
238 self.model.set_width(self.context_width)
239 if sel:
240 self.conc_view.selectionModel().select(sel,
241 QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
242
243 @Inputs.corpus
244 def set_corpus(self, data=None):
245 self.closeContext()
246 self.corpus = data
247 if data is not None and not isinstance(data, Corpus):
248 self.corpus = Corpus.from_table(data.domain, data)
249 self.model.set_corpus(self.corpus)
250 if not self.is_word_on_input:
251 self.word = ""
252 self.openContext(self.corpus)
253 self.set_word()
254
255 @Inputs.query_word
256 def set_word_from_input(self, topic):
257 self.Warning.multiple_words_on_input.clear()
258 self.is_word_on_input = topic is not None and len(topic) > 0
259 self.input.setEnabled(not self.is_word_on_input)
260 if self.is_word_on_input:
261 if len(topic) > 1:
262 self.Warning.multiple_words_on_input()
263 self.word = topic.metas[0, 0]
264 self.set_word()
265
266 def set_word(self):
267 self.model.set_word(self.word)
268 self.update_widget()
269 self.commit()
270
271 def resize_columns(self):
272 col_width = (self.conc_view.width() -
273 self.conc_view.columnWidth(1)) / 2 - 12
274 self.conc_view.setColumnWidth(0, col_width)
275 self.conc_view.setColumnWidth(2, col_width)
276
277 def resizeEvent(self, event): # pragma: no cover
278 super().resizeEvent(event)
279 self.resize_columns()
280
281 def update_widget(self):
282 self.conc_view.resizeColumnToContents(1)
283 self.resize_columns()
284 self.conc_view.resizeRowsToContents()
285
286 if self.corpus is not None:
287 self.n_matching = '{}/{}'.format(
288 self.model.matching_docs() if self.word else 0,
289 len(self.corpus))
290 self.n_tokens = self.model.n_tokens
291 self.n_types = self.model.n_types
292 else:
293 self.n_matching = ''
294 self.n_tokens = ''
295 self.n_types = ''
296
297 def commit(self):
298 rows = [sel_range.top() for sel_range
299 in self.conc_view.selectionModel().selection()]
300 selected_docs = sorted(set(self.model.word_index[row][0]
301 for row in rows))
302 if selected_docs:
303 selected = self.corpus[selected_docs]
304 self.Outputs.selected_documents.send(selected)
305 else:
306 self.Outputs.selected_documents.send(None)
307
308
309 if __name__ == '__main__': # pragma: no cover
310 app = QApplication([])
311 widget = OWConcordance()
312 corpus = Corpus.from_file('book-excerpts')
313 corpus = corpus[:3]
314 widget.set_corpus(corpus)
315 widget.show()
316 app.exec()
317
318
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/orangecontrib/text/widgets/owconcordance.py b/orangecontrib/text/widgets/owconcordance.py
--- a/orangecontrib/text/widgets/owconcordance.py
+++ b/orangecontrib/text/widgets/owconcordance.py
@@ -173,7 +173,7 @@
autocommit = Setting(True)
context_width = Setting(5)
word = ContextSetting("", exclude_metas=False)
- # TODO Set selection settings (DataHashContextHandler)
+ selected_rows = Setting([], schema_only=True)
class Warning(OWWidget.Warning):
multiple_words_on_input = Msg("Multiple query words on input. "
@@ -217,10 +217,7 @@
self.conc_view.setSelectionBehavior(QTableView.SelectRows)
self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))
self.conc_view.setItemDelegate(HorizontalGridDelegate())
- # connect selectionChanged to self.commit(), which will be
- # updated by gui.auto_commit()
- self.conc_view.selectionModel().selectionChanged.connect(lambda:
- self.commit())
+ self.conc_view.selectionModel().selectionChanged.connect(self.selection_changed)
self.conc_view.horizontalHeader().hide()
self.conc_view.setShowGrid(False)
self.mainArea.layout().addWidget(self.conc_view)
@@ -240,21 +237,39 @@
self.conc_view.selectionModel().select(sel,
QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
+ def selection_changed(self):
+ selection = self.conc_view.selectionModel().selection()
+ self.selected_rows = sorted(set(cell.row() for cell in selection.indexes()))
+ self.commit()
+
+ def set_selection(self, selection):
+ if selection:
+ sel = QItemSelection()
+ for row in selection:
+ index = self.conc_view.model().index(row, 0)
+ sel.select(index, index)
+ self.conc_view.selectionModel().select(sel,
+ QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
+
@Inputs.corpus
def set_corpus(self, data=None):
self.closeContext()
self.corpus = data
- if data is not None and not isinstance(data, Corpus):
- self.corpus = Corpus.from_table(data.domain, data)
- self.model.set_corpus(self.corpus)
+ if data is None: # data removed, clear selection
+ self.selected_rows = []
+
if not self.is_word_on_input:
self.word = ""
self.openContext(self.corpus)
+
+ self.model.set_corpus(self.corpus)
self.set_word()
@Inputs.query_word
def set_word_from_input(self, topic):
self.Warning.multiple_words_on_input.clear()
+ if self.is_word_on_input: # word changed, clear selection
+ self.selected_rows = []
self.is_word_on_input = topic is not None and len(topic) > 0
self.input.setEnabled(not self.is_word_on_input)
if self.is_word_on_input:
@@ -268,6 +283,9 @@
self.update_widget()
self.commit()
+ def handleNewSignals(self):
+ self.set_selection(self.selected_rows)
+
def resize_columns(self):
col_width = (self.conc_view.width() -
self.conc_view.columnWidth(1)) / 2 - 12
@@ -295,10 +313,8 @@
self.n_types = ''
def commit(self):
- rows = [sel_range.top() for sel_range
- in self.conc_view.selectionModel().selection()]
selected_docs = sorted(set(self.model.word_index[row][0]
- for row in rows))
+ for row in self.selected_rows))
if selected_docs:
selected = self.corpus[selected_docs]
self.Outputs.selected_documents.send(selected)
|
{"golden_diff": "diff --git a/orangecontrib/text/widgets/owconcordance.py b/orangecontrib/text/widgets/owconcordance.py\n--- a/orangecontrib/text/widgets/owconcordance.py\n+++ b/orangecontrib/text/widgets/owconcordance.py\n@@ -173,7 +173,7 @@\n autocommit = Setting(True)\n context_width = Setting(5)\n word = ContextSetting(\"\", exclude_metas=False)\n- # TODO Set selection settings (DataHashContextHandler)\n+ selected_rows = Setting([], schema_only=True)\n \n class Warning(OWWidget.Warning):\n multiple_words_on_input = Msg(\"Multiple query words on input. \"\n@@ -217,10 +217,7 @@\n self.conc_view.setSelectionBehavior(QTableView.SelectRows)\n self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))\n self.conc_view.setItemDelegate(HorizontalGridDelegate())\n- # connect selectionChanged to self.commit(), which will be\n- # updated by gui.auto_commit()\n- self.conc_view.selectionModel().selectionChanged.connect(lambda:\n- self.commit())\n+ self.conc_view.selectionModel().selectionChanged.connect(self.selection_changed)\n self.conc_view.horizontalHeader().hide()\n self.conc_view.setShowGrid(False)\n self.mainArea.layout().addWidget(self.conc_view)\n@@ -240,21 +237,39 @@\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n \n+ def selection_changed(self):\n+ selection = self.conc_view.selectionModel().selection()\n+ self.selected_rows = sorted(set(cell.row() for cell in selection.indexes()))\n+ self.commit()\n+\n+ def set_selection(self, selection):\n+ if selection:\n+ sel = QItemSelection()\n+ for row in selection:\n+ index = self.conc_view.model().index(row, 0)\n+ sel.select(index, index)\n+ self.conc_view.selectionModel().select(sel,\n+ QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n+\n @Inputs.corpus\n def set_corpus(self, data=None):\n self.closeContext()\n self.corpus = data\n- if data is not None and not isinstance(data, Corpus):\n- self.corpus = Corpus.from_table(data.domain, data)\n- self.model.set_corpus(self.corpus)\n+ if data is None: # data removed, clear selection\n+ self.selected_rows = []\n+\n if not self.is_word_on_input:\n self.word = \"\"\n self.openContext(self.corpus)\n+\n+ self.model.set_corpus(self.corpus)\n self.set_word()\n \n @Inputs.query_word\n def set_word_from_input(self, topic):\n self.Warning.multiple_words_on_input.clear()\n+ if self.is_word_on_input: # word changed, clear selection\n+ self.selected_rows = []\n self.is_word_on_input = topic is not None and len(topic) > 0\n self.input.setEnabled(not self.is_word_on_input)\n if self.is_word_on_input:\n@@ -268,6 +283,9 @@\n self.update_widget()\n self.commit()\n \n+ def handleNewSignals(self):\n+ self.set_selection(self.selected_rows)\n+\n def resize_columns(self):\n col_width = (self.conc_view.width() -\n self.conc_view.columnWidth(1)) / 2 - 12\n@@ -295,10 +313,8 @@\n self.n_types = ''\n \n def commit(self):\n- rows = [sel_range.top() for sel_range\n- in self.conc_view.selectionModel().selection()]\n selected_docs = sorted(set(self.model.word_index[row][0]\n- for row in rows))\n+ for row in self.selected_rows))\n if selected_docs:\n selected = self.corpus[selected_docs]\n self.Outputs.selected_documents.send(selected)\n", "issue": "Concordanes Settings\n##### Expected behavior\r\nConcordanes widget uses settings. 
\r\n\r\n\r\n##### Actual behavior\r\nIt does not yet.\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nfrom itertools import chain\nfrom AnyQt.QtCore import Qt, QAbstractTableModel, QSize, QItemSelectionModel, \\\n QItemSelection\nfrom AnyQt.QtWidgets import QSizePolicy, QApplication, QTableView, \\\n QStyledItemDelegate\nfrom AnyQt.QtGui import QColor\n\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler\nfrom Orange.widgets.widget import OWWidget, Msg, Input, Output\nfrom nltk import ConcordanceIndex\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.topics import Topic\nfrom orangecontrib.text.preprocess import WordPunctTokenizer\n\n\nclass HorizontalGridDelegate(QStyledItemDelegate):\n \"\"\"Class for setting elide.\"\"\"\n\n def paint(self, painter, option, index):\n if index.column() == 0:\n option.textElideMode = Qt.ElideLeft\n elif index.column() == 2:\n option.textElideMode = Qt.ElideRight\n QStyledItemDelegate.paint(self, painter, option, index)\n\n\nclass DocumentSelectionModel(QItemSelectionModel):\n \"\"\"Sets selection for QTableView. Creates a set of selected documents.\"\"\"\n\n def select(self, selection, flags):\n # which rows have been selected\n indexes = selection.indexes() if isinstance(selection, QItemSelection) \\\n else [selection]\n # prevent crashing when deleting the connection\n if not indexes:\n super().select(selection, flags)\n return\n # indexes[0].row() == -1 indicates clicking outside of the table\n if len(indexes) == 1 and indexes[0].row() == -1:\n self.clear()\n return\n word_index = self.model().word_index\n selected_docs = {word_index[index.row()][0] for index in indexes}\n selected_rows = [\n row_index for row_index, (doc_index, _) in enumerate(word_index)\n if doc_index in selected_docs]\n selection = QItemSelection()\n # select all rows belonging to the selected document\n for row in selected_rows:\n index = self.model().index(row, 0)\n selection.select(index, index)\n super().select(selection, flags)\n\n\nclass ConcordanceModel(QAbstractTableModel):\n \"\"\"A model for constructing concordances from text.\"\"\"\n\n def __init__(self):\n QAbstractTableModel.__init__(self)\n self.word = None\n self.corpus = None\n self.tokens = None\n self.n_tokens = None\n self.n_types = None\n self.indices = None\n self.word_index = None\n self.width = 8\n self.colored_rows = None\n\n def set_word(self, word):\n self.modelAboutToBeReset.emit()\n self.word = word\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_corpus(self, corpus):\n self.modelAboutToBeReset.emit()\n self.corpus = corpus\n self.set_tokens()\n self._compute_indices()\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_tokens(self):\n if self.corpus is None:\n self.tokens = None\n return\n tokenizer = WordPunctTokenizer()\n self.tokens = tokenizer(self.corpus.documents)\n self.n_tokens = sum(map(len, self.tokens))\n self.n_types = len(set(chain.from_iterable(self.tokens)))\n\n def set_width(self, width):\n self.modelAboutToBeReset.emit()\n self.width = width\n self.modelReset.emit()\n\n def flags(self, _):\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable\n\n def rowCount(self, parent=None, *args, **kwargs):\n return 0 if parent is None or parent.isValid() or \\\n self.word_index is None \\\n else len(self.word_index)\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 3\n\n def data(self, index, role=Qt.DisplayRole):\n row, col = index.row(), 
index.column()\n doc, index = self.word_index[row]\n\n if role == Qt.DisplayRole:\n tokens = self.tokens\n if col == 0:\n return ' '.join(tokens[doc][max(index - self.width, 0):index])\n if col == 1:\n return tokens[doc][index]\n if col == 2:\n return ' '.join(tokens[doc][index + 1:index + self.width + 1])\n\n elif role == Qt.TextAlignmentRole:\n return [Qt.AlignRight | Qt.AlignVCenter,\n Qt.AlignCenter,\n Qt.AlignLeft | Qt.AlignVCenter][col]\n\n elif role == Qt.BackgroundRole:\n const = self.word_index[row][0] in self.colored_rows\n return QColor(236 + 19 * const, 243 + 12 * const, 255)\n\n def _compute_indices(self): # type: () -> Optional[None, list]\n if self.corpus is None:\n self.indices = None\n return\n self.indices = [ConcordanceIndex(doc, key=lambda x: x.lower())\n for doc in self.tokens]\n\n def _compute_word_index(self):\n if self.indices is None or self.word is None:\n self.word_index = self.colored_rows = None\n else:\n self.word_index = [\n (doc_idx, offset) for doc_idx, doc in enumerate(self.indices)\n for offset in doc.offsets(self.word)]\n self.colored_rows = set(sorted({d[0] for d in self.word_index})[::2])\n\n def matching_docs(self):\n if self.indices and self.word:\n return sum(bool(doc.offsets(self.word)) for doc in self.indices)\n else:\n return 0\n\n\nclass OWConcordance(OWWidget):\n name = \"Concordance\"\n description = \"Display the context of the word.\"\n icon = \"icons/Concordance.svg\"\n priority = 520\n\n class Inputs:\n corpus = Input(\"Corpus\", Corpus)\n query_word = Input(\"Query Word\", Topic)\n\n class Outputs:\n selected_documents = Output(\"Selected Documents\", Corpus)\n\n settingsHandler = PerfectDomainContextHandler(\n match_values = PerfectDomainContextHandler.MATCH_VALUES_ALL\n )\n autocommit = Setting(True)\n context_width = Setting(5)\n word = ContextSetting(\"\", exclude_metas=False)\n # TODO Set selection settings (DataHashContextHandler)\n\n class Warning(OWWidget.Warning):\n multiple_words_on_input = Msg(\"Multiple query words on input. 
\"\n \"Only the first one is considered!\")\n\n def __init__(self):\n super().__init__()\n\n self.corpus = None # Corpus\n self.n_matching = '' # Info on docs matching the word\n self.n_tokens = '' # Info on tokens\n self.n_types = '' # Info on types (unique tokens)\n self.is_word_on_input = False\n\n # Info attributes\n info_box = gui.widgetBox(self.controlArea, 'Info')\n gui.label(info_box, self, 'Tokens: %(n_tokens)s')\n gui.label(info_box, self, 'Types: %(n_types)s')\n gui.label(info_box, self, 'Matching: %(n_matching)s')\n\n # Width parameter\n gui.spin(self.controlArea, self, 'context_width', 3, 10, box=True,\n label=\"Number of words:\", callback=self.set_width)\n\n gui.rubber(self.controlArea)\n\n # Search\n c_box = gui.widgetBox(self.mainArea, orientation=\"vertical\")\n self.input = gui.lineEdit(\n c_box, self, 'word', orientation=Qt.Horizontal,\n sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,\n QSizePolicy.Fixed),\n label='Query:', callback=self.set_word, callbackOnType=True)\n self.input.setFocus()\n\n # Concordances view\n self.conc_view = QTableView()\n self.model = ConcordanceModel()\n self.conc_view.setModel(self.model)\n self.conc_view.setWordWrap(False)\n self.conc_view.setSelectionBehavior(QTableView.SelectRows)\n self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))\n self.conc_view.setItemDelegate(HorizontalGridDelegate())\n # connect selectionChanged to self.commit(), which will be\n # updated by gui.auto_commit()\n self.conc_view.selectionModel().selectionChanged.connect(lambda:\n self.commit())\n self.conc_view.horizontalHeader().hide()\n self.conc_view.setShowGrid(False)\n self.mainArea.layout().addWidget(self.conc_view)\n self.set_width()\n\n # Auto-commit box\n gui.auto_commit(self.controlArea, self, 'autocommit', 'Commit',\n 'Auto commit is on')\n\n def sizeHint(self): # pragma: no cover\n return QSize(600, 400)\n\n def set_width(self):\n sel = self.conc_view.selectionModel().selection()\n self.model.set_width(self.context_width)\n if sel:\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n @Inputs.corpus\n def set_corpus(self, data=None):\n self.closeContext()\n self.corpus = data\n if data is not None and not isinstance(data, Corpus):\n self.corpus = Corpus.from_table(data.domain, data)\n self.model.set_corpus(self.corpus)\n if not self.is_word_on_input:\n self.word = \"\"\n self.openContext(self.corpus)\n self.set_word()\n\n @Inputs.query_word\n def set_word_from_input(self, topic):\n self.Warning.multiple_words_on_input.clear()\n self.is_word_on_input = topic is not None and len(topic) > 0\n self.input.setEnabled(not self.is_word_on_input)\n if self.is_word_on_input:\n if len(topic) > 1:\n self.Warning.multiple_words_on_input()\n self.word = topic.metas[0, 0]\n self.set_word()\n\n def set_word(self):\n self.model.set_word(self.word)\n self.update_widget()\n self.commit()\n\n def resize_columns(self):\n col_width = (self.conc_view.width() -\n self.conc_view.columnWidth(1)) / 2 - 12\n self.conc_view.setColumnWidth(0, col_width)\n self.conc_view.setColumnWidth(2, col_width)\n\n def resizeEvent(self, event): # pragma: no cover\n super().resizeEvent(event)\n self.resize_columns()\n\n def update_widget(self):\n self.conc_view.resizeColumnToContents(1)\n self.resize_columns()\n self.conc_view.resizeRowsToContents()\n\n if self.corpus is not None:\n self.n_matching = '{}/{}'.format(\n self.model.matching_docs() if self.word else 0,\n len(self.corpus))\n self.n_tokens = 
self.model.n_tokens\n self.n_types = self.model.n_types\n else:\n self.n_matching = ''\n self.n_tokens = ''\n self.n_types = ''\n\n def commit(self):\n rows = [sel_range.top() for sel_range\n in self.conc_view.selectionModel().selection()]\n selected_docs = sorted(set(self.model.word_index[row][0]\n for row in rows))\n if selected_docs:\n selected = self.corpus[selected_docs]\n self.Outputs.selected_documents.send(selected)\n else:\n self.Outputs.selected_documents.send(None)\n\n\nif __name__ == '__main__': # pragma: no cover\n app = QApplication([])\n widget = OWConcordance()\n corpus = Corpus.from_file('book-excerpts')\n corpus = corpus[:3]\n widget.set_corpus(corpus)\n widget.show()\n app.exec()\n\n", "path": "orangecontrib/text/widgets/owconcordance.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom itertools import chain\nfrom AnyQt.QtCore import Qt, QAbstractTableModel, QSize, QItemSelectionModel, \\\n QItemSelection\nfrom AnyQt.QtWidgets import QSizePolicy, QApplication, QTableView, \\\n QStyledItemDelegate\nfrom AnyQt.QtGui import QColor\n\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler\nfrom Orange.widgets.widget import OWWidget, Msg, Input, Output\nfrom nltk import ConcordanceIndex\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.topics import Topic\nfrom orangecontrib.text.preprocess import WordPunctTokenizer\n\n\nclass HorizontalGridDelegate(QStyledItemDelegate):\n \"\"\"Class for setting elide.\"\"\"\n\n def paint(self, painter, option, index):\n if index.column() == 0:\n option.textElideMode = Qt.ElideLeft\n elif index.column() == 2:\n option.textElideMode = Qt.ElideRight\n QStyledItemDelegate.paint(self, painter, option, index)\n\n\nclass DocumentSelectionModel(QItemSelectionModel):\n \"\"\"Sets selection for QTableView. 
Creates a set of selected documents.\"\"\"\n\n def select(self, selection, flags):\n # which rows have been selected\n indexes = selection.indexes() if isinstance(selection, QItemSelection) \\\n else [selection]\n # prevent crashing when deleting the connection\n if not indexes:\n super().select(selection, flags)\n return\n # indexes[0].row() == -1 indicates clicking outside of the table\n if len(indexes) == 1 and indexes[0].row() == -1:\n self.clear()\n return\n word_index = self.model().word_index\n selected_docs = {word_index[index.row()][0] for index in indexes}\n selected_rows = [\n row_index for row_index, (doc_index, _) in enumerate(word_index)\n if doc_index in selected_docs]\n selection = QItemSelection()\n # select all rows belonging to the selected document\n for row in selected_rows:\n index = self.model().index(row, 0)\n selection.select(index, index)\n super().select(selection, flags)\n\n\nclass ConcordanceModel(QAbstractTableModel):\n \"\"\"A model for constructing concordances from text.\"\"\"\n\n def __init__(self):\n QAbstractTableModel.__init__(self)\n self.word = None\n self.corpus = None\n self.tokens = None\n self.n_tokens = None\n self.n_types = None\n self.indices = None\n self.word_index = None\n self.width = 8\n self.colored_rows = None\n\n def set_word(self, word):\n self.modelAboutToBeReset.emit()\n self.word = word\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_corpus(self, corpus):\n self.modelAboutToBeReset.emit()\n self.corpus = corpus\n self.set_tokens()\n self._compute_indices()\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_tokens(self):\n if self.corpus is None:\n self.tokens = None\n return\n tokenizer = WordPunctTokenizer()\n self.tokens = tokenizer(self.corpus.documents)\n self.n_tokens = sum(map(len, self.tokens))\n self.n_types = len(set(chain.from_iterable(self.tokens)))\n\n def set_width(self, width):\n self.modelAboutToBeReset.emit()\n self.width = width\n self.modelReset.emit()\n\n def flags(self, _):\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable\n\n def rowCount(self, parent=None, *args, **kwargs):\n return 0 if parent is None or parent.isValid() or \\\n self.word_index is None \\\n else len(self.word_index)\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 3\n\n def data(self, index, role=Qt.DisplayRole):\n row, col = index.row(), index.column()\n doc, index = self.word_index[row]\n\n if role == Qt.DisplayRole:\n tokens = self.tokens\n if col == 0:\n return ' '.join(tokens[doc][max(index - self.width, 0):index])\n if col == 1:\n return tokens[doc][index]\n if col == 2:\n return ' '.join(tokens[doc][index + 1:index + self.width + 1])\n\n elif role == Qt.TextAlignmentRole:\n return [Qt.AlignRight | Qt.AlignVCenter,\n Qt.AlignCenter,\n Qt.AlignLeft | Qt.AlignVCenter][col]\n\n elif role == Qt.BackgroundRole:\n const = self.word_index[row][0] in self.colored_rows\n return QColor(236 + 19 * const, 243 + 12 * const, 255)\n\n def _compute_indices(self): # type: () -> Optional[None, list]\n if self.corpus is None:\n self.indices = None\n return\n self.indices = [ConcordanceIndex(doc, key=lambda x: x.lower())\n for doc in self.tokens]\n\n def _compute_word_index(self):\n if self.indices is None or self.word is None:\n self.word_index = self.colored_rows = None\n else:\n self.word_index = [\n (doc_idx, offset) for doc_idx, doc in enumerate(self.indices)\n for offset in doc.offsets(self.word)]\n self.colored_rows = set(sorted({d[0] for d in self.word_index})[::2])\n\n def matching_docs(self):\n if 
self.indices and self.word:\n return sum(bool(doc.offsets(self.word)) for doc in self.indices)\n else:\n return 0\n\n\nclass OWConcordance(OWWidget):\n name = \"Concordance\"\n description = \"Display the context of the word.\"\n icon = \"icons/Concordance.svg\"\n priority = 520\n\n class Inputs:\n corpus = Input(\"Corpus\", Corpus)\n query_word = Input(\"Query Word\", Topic)\n\n class Outputs:\n selected_documents = Output(\"Selected Documents\", Corpus)\n\n settingsHandler = PerfectDomainContextHandler(\n match_values = PerfectDomainContextHandler.MATCH_VALUES_ALL\n )\n autocommit = Setting(True)\n context_width = Setting(5)\n word = ContextSetting(\"\", exclude_metas=False)\n selected_rows = Setting([], schema_only=True)\n\n class Warning(OWWidget.Warning):\n multiple_words_on_input = Msg(\"Multiple query words on input. \"\n \"Only the first one is considered!\")\n\n def __init__(self):\n super().__init__()\n\n self.corpus = None # Corpus\n self.n_matching = '' # Info on docs matching the word\n self.n_tokens = '' # Info on tokens\n self.n_types = '' # Info on types (unique tokens)\n self.is_word_on_input = False\n\n # Info attributes\n info_box = gui.widgetBox(self.controlArea, 'Info')\n gui.label(info_box, self, 'Tokens: %(n_tokens)s')\n gui.label(info_box, self, 'Types: %(n_types)s')\n gui.label(info_box, self, 'Matching: %(n_matching)s')\n\n # Width parameter\n gui.spin(self.controlArea, self, 'context_width', 3, 10, box=True,\n label=\"Number of words:\", callback=self.set_width)\n\n gui.rubber(self.controlArea)\n\n # Search\n c_box = gui.widgetBox(self.mainArea, orientation=\"vertical\")\n self.input = gui.lineEdit(\n c_box, self, 'word', orientation=Qt.Horizontal,\n sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,\n QSizePolicy.Fixed),\n label='Query:', callback=self.set_word, callbackOnType=True)\n self.input.setFocus()\n\n # Concordances view\n self.conc_view = QTableView()\n self.model = ConcordanceModel()\n self.conc_view.setModel(self.model)\n self.conc_view.setWordWrap(False)\n self.conc_view.setSelectionBehavior(QTableView.SelectRows)\n self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))\n self.conc_view.setItemDelegate(HorizontalGridDelegate())\n self.conc_view.selectionModel().selectionChanged.connect(self.selection_changed)\n self.conc_view.horizontalHeader().hide()\n self.conc_view.setShowGrid(False)\n self.mainArea.layout().addWidget(self.conc_view)\n self.set_width()\n\n # Auto-commit box\n gui.auto_commit(self.controlArea, self, 'autocommit', 'Commit',\n 'Auto commit is on')\n\n def sizeHint(self): # pragma: no cover\n return QSize(600, 400)\n\n def set_width(self):\n sel = self.conc_view.selectionModel().selection()\n self.model.set_width(self.context_width)\n if sel:\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n def selection_changed(self):\n selection = self.conc_view.selectionModel().selection()\n self.selected_rows = sorted(set(cell.row() for cell in selection.indexes()))\n self.commit()\n\n def set_selection(self, selection):\n if selection:\n sel = QItemSelection()\n for row in selection:\n index = self.conc_view.model().index(row, 0)\n sel.select(index, index)\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n @Inputs.corpus\n def set_corpus(self, data=None):\n self.closeContext()\n self.corpus = data\n if data is None: # data removed, clear selection\n self.selected_rows = []\n\n if not 
self.is_word_on_input:\n self.word = \"\"\n self.openContext(self.corpus)\n\n self.model.set_corpus(self.corpus)\n self.set_word()\n\n @Inputs.query_word\n def set_word_from_input(self, topic):\n self.Warning.multiple_words_on_input.clear()\n if self.is_word_on_input: # word changed, clear selection\n self.selected_rows = []\n self.is_word_on_input = topic is not None and len(topic) > 0\n self.input.setEnabled(not self.is_word_on_input)\n if self.is_word_on_input:\n if len(topic) > 1:\n self.Warning.multiple_words_on_input()\n self.word = topic.metas[0, 0]\n self.set_word()\n\n def set_word(self):\n self.model.set_word(self.word)\n self.update_widget()\n self.commit()\n\n def handleNewSignals(self):\n self.set_selection(self.selected_rows)\n\n def resize_columns(self):\n col_width = (self.conc_view.width() -\n self.conc_view.columnWidth(1)) / 2 - 12\n self.conc_view.setColumnWidth(0, col_width)\n self.conc_view.setColumnWidth(2, col_width)\n\n def resizeEvent(self, event): # pragma: no cover\n super().resizeEvent(event)\n self.resize_columns()\n\n def update_widget(self):\n self.conc_view.resizeColumnToContents(1)\n self.resize_columns()\n self.conc_view.resizeRowsToContents()\n\n if self.corpus is not None:\n self.n_matching = '{}/{}'.format(\n self.model.matching_docs() if self.word else 0,\n len(self.corpus))\n self.n_tokens = self.model.n_tokens\n self.n_types = self.model.n_types\n else:\n self.n_matching = ''\n self.n_tokens = ''\n self.n_types = ''\n\n def commit(self):\n selected_docs = sorted(set(self.model.word_index[row][0]\n for row in self.selected_rows))\n if selected_docs:\n selected = self.corpus[selected_docs]\n self.Outputs.selected_documents.send(selected)\n else:\n self.Outputs.selected_documents.send(None)\n\n\nif __name__ == '__main__': # pragma: no cover\n app = QApplication([])\n widget = OWConcordance()\n corpus = Corpus.from_file('book-excerpts')\n corpus = corpus[:3]\n widget.set_corpus(corpus)\n widget.show()\n app.exec()\n\n", "path": "orangecontrib/text/widgets/owconcordance.py"}]}
| 3,682 | 852 |
gh_patches_debug_21891
|
rasdani/github-patches
|
git_diff
|
sherlock-project__sherlock-2099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feat: add json schema validation
Adds a JSON Schema that validates `data.json` and `removed_sites.json`. I've already validated the existing data with it and can confirm both files pass. 👍🏾
Prepends the following property to both files:
```json
{
"$schema": "{path/to/json-schema}"
}
```
Removes the `rank` property from `removed_sites.json` and documentation as this is no longer relevant.
Removes the "noPeriod" property from Jimdo as this appears to serve no purpose.
---
Sorry, also a question. What is the purpose of `responseUrl`? I just realized, this property is not referenced whatsoever in the actual application. 🤔 Can this property be removed in the dataset and schema?
Later I do want to make it stricter, i.e. prevent someone from using one `errorType` with the wrong additional field, i.e.
```json
{
"errorMsg": "Not Found",
"errorType": "status_code"
}
```
I figured I'd leave it like this for now and find out what `responseUrl` is about. 🤔
### Related
* Closes https://github.com/sherlock-project/sherlock/issues/1336
The goal of this is to improve the experience for developers and leave less room for human-error. It also enforces that we document and provide examples of each field, unlike the current Wiki which tends to get outdated.



--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sherlock/sites.py`
Content:
```
1 """Sherlock Sites Information Module
2
3 This module supports storing information about websites.
4 This is the raw data that will be used to search for usernames.
5 """
6 import json
7 import requests
8 import secrets
9
10 class SiteInformation:
11 def __init__(self, name, url_home, url_username_format, username_claimed,
12 information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
13 """Create Site Information Object.
14
15 Contains information about a specific website.
16
17 Keyword Arguments:
18 self -- This object.
19 name -- String which identifies site.
20 url_home -- String containing URL for home of site.
21 url_username_format -- String containing URL for Username format
22 on site.
23 NOTE: The string should contain the
24 token "{}" where the username should
25 be substituted. For example, a string
26 of "https://somesite.com/users/{}"
27 indicates that the individual
28 usernames would show up under the
29 "https://somesite.com/users/" area of
30 the website.
31 username_claimed -- String containing username which is known
32 to be claimed on website.
33 username_unclaimed -- String containing username which is known
34 to be unclaimed on website.
35 information -- Dictionary containing all known information
36 about website.
37 NOTE: Custom information about how to
38 actually detect the existence of the
39 username will be included in this
40 dictionary. This information will
41 be needed by the detection method,
42 but it is only recorded in this
43 object for future use.
44 is_nsfw -- Boolean indicating if site is Not Safe For Work.
45
46 Return Value:
47 Nothing.
48 """
49
50 self.name = name
51 self.url_home = url_home
52 self.url_username_format = url_username_format
53
54 self.username_claimed = username_claimed
55 self.username_unclaimed = secrets.token_urlsafe(32)
56 self.information = information
57 self.is_nsfw = is_nsfw
58
59 return
60
61 def __str__(self):
62 """Convert Object To String.
63
64 Keyword Arguments:
65 self -- This object.
66
67 Return Value:
68 Nicely formatted string to get information about this object.
69 """
70
71 return f"{self.name} ({self.url_home})"
72
73
74 class SitesInformation:
75 def __init__(self, data_file_path=None):
76 """Create Sites Information Object.
77
78 Contains information about all supported websites.
79
80 Keyword Arguments:
81 self -- This object.
82 data_file_path -- String which indicates path to data file.
83 The file name must end in ".json".
84
85 There are 3 possible formats:
86 * Absolute File Format
87 For example, "c:/stuff/data.json".
88 * Relative File Format
89 The current working directory is used
90 as the context.
91 For example, "data.json".
92 * URL Format
93 For example,
94 "https://example.com/data.json", or
95 "http://example.com/data.json".
96
97 An exception will be thrown if the path
98 to the data file is not in the expected
99 format, or if there was any problem loading
100 the file.
101
102 If this option is not specified, then a
103 default site list will be used.
104
105 Return Value:
106 Nothing.
107 """
108
109 if not data_file_path:
110 # The default data file is the live data.json which is in the GitHub repo. The reason why we are using
111 # this instead of the local one is so that the user has the most up-to-date data. This prevents
112 # users from creating issue about false positives which has already been fixed or having outdated data
113 data_file_path = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json"
114
115 # Ensure that specified data file has correct extension.
116 if not data_file_path.lower().endswith(".json"):
117 raise FileNotFoundError(f"Incorrect JSON file extension for data file '{data_file_path}'.")
118
119 # if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower():
120 if data_file_path.lower().startswith("http"):
121 # Reference is to a URL.
122 try:
123 response = requests.get(url=data_file_path)
124 except Exception as error:
125 raise FileNotFoundError(
126 f"Problem while attempting to access data file URL '{data_file_path}': {error}"
127 )
128
129 if response.status_code != 200:
130 raise FileNotFoundError(f"Bad response while accessing "
131 f"data file URL '{data_file_path}'."
132 )
133 try:
134 site_data = response.json()
135 except Exception as error:
136 raise ValueError(
137 f"Problem parsing json contents at '{data_file_path}': {error}."
138 )
139
140 else:
141 # Reference is to a file.
142 try:
143 with open(data_file_path, "r", encoding="utf-8") as file:
144 try:
145 site_data = json.load(file)
146 except Exception as error:
147 raise ValueError(
148 f"Problem parsing json contents at '{data_file_path}': {error}."
149 )
150
151 except FileNotFoundError:
152 raise FileNotFoundError(f"Problem while attempting to access "
153 f"data file '{data_file_path}'."
154 )
155
156 self.sites = {}
157
158 # Add all site information from the json file to internal site list.
159 for site_name in site_data:
160 try:
161
162 self.sites[site_name] = \
163 SiteInformation(site_name,
164 site_data[site_name]["urlMain"],
165 site_data[site_name]["url"],
166 site_data[site_name]["username_claimed"],
167 site_data[site_name],
168 site_data[site_name].get("isNSFW",False)
169
170 )
171 except KeyError as error:
172 raise ValueError(
173 f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
174 )
175
176 return
177
178 def remove_nsfw_sites(self):
179 """
180 Remove NSFW sites from the sites, if isNSFW flag is true for site
181
182 Keyword Arguments:
183 self -- This object.
184
185 Return Value:
186 None
187 """
188 sites = {}
189 for site in self.sites:
190 if self.sites[site].is_nsfw:
191 continue
192 sites[site] = self.sites[site]
193 self.sites = sites
194
195 def site_name_list(self):
196 """Get Site Name List.
197
198 Keyword Arguments:
199 self -- This object.
200
201 Return Value:
202 List of strings containing names of sites.
203 """
204
205 return sorted([site.name for site in self], key=str.lower)
206
207 def __iter__(self):
208 """Iterator For Object.
209
210 Keyword Arguments:
211 self -- This object.
212
213 Return Value:
214 Iterator for sites object.
215 """
216
217 for site_name in self.sites:
218 yield self.sites[site_name]
219
220 def __len__(self):
221 """Length For Object.
222
223 Keyword Arguments:
224 self -- This object.
225
226 Return Value:
227 Length of sites object.
228 """
229 return len(self.sites)
230
```
Path: `site_list.py`
Content:
```
1 #!/usr/bin/env python
2 # This module generates the listing of supported sites which can be found in
3 # sites.md. It also organizes all the sites in alphanumeric order
4 import json
5
6 # Read the data.json file
7 with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file:
8 data = json.load(data_file)
9
10 # Sort the social networks in alphanumeric order
11 social_networks = sorted(data.items())
12
13 # Write the list of supported sites to sites.md
14 with open("sites.md", "w") as site_file:
15 site_file.write(f"## List Of Supported Sites ({len(social_networks)} Sites In Total!)\n")
16 for social_network, info in social_networks:
17 url_main = info["urlMain"]
18 is_nsfw = "**(NSFW)**" if info.get("isNSFW") else ""
19 site_file.write(f"1.  [{social_network}]({url_main}) {is_nsfw}\n")
20
21 # Overwrite the data.json file with sorted data
22 with open("sherlock/resources/data.json", "w") as data_file:
23 sorted_data = json.dumps(data, indent=2, sort_keys=True)
24 data_file.write(sorted_data)
25 data_file.write("\n")
26
27 print("Finished updating supported site listing!")
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sherlock/sites.py b/sherlock/sites.py
--- a/sherlock/sites.py
+++ b/sherlock/sites.py
@@ -152,6 +152,11 @@
raise FileNotFoundError(f"Problem while attempting to access "
f"data file '{data_file_path}'."
)
+
+ try:
+ site_data.pop('$schema')
+ except:
+ pass
self.sites = {}
diff --git a/site_list.py b/site_list.py
--- a/site_list.py
+++ b/site_list.py
@@ -5,10 +5,14 @@
# Read the data.json file
with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file:
- data = json.load(data_file)
+ data: dict = json.load(data_file)
+
+# Removes schema-specific keywords for proper processing
+social_networks: dict = dict(data)
+social_networks.pop('$schema')
# Sort the social networks in alphanumeric order
-social_networks = sorted(data.items())
+social_networks: list = sorted(social_networks.items())
# Write the list of supported sites to sites.md
with open("sites.md", "w") as site_file:
|
{"golden_diff": "diff --git a/sherlock/sites.py b/sherlock/sites.py\n--- a/sherlock/sites.py\n+++ b/sherlock/sites.py\n@@ -152,6 +152,11 @@\n raise FileNotFoundError(f\"Problem while attempting to access \"\n f\"data file '{data_file_path}'.\"\n )\n+ \n+ try:\n+ site_data.pop('$schema')\n+ except:\n+ pass\n \n self.sites = {}\n \ndiff --git a/site_list.py b/site_list.py\n--- a/site_list.py\n+++ b/site_list.py\n@@ -5,10 +5,14 @@\n \n # Read the data.json file\n with open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n- data = json.load(data_file)\n+ data: dict = json.load(data_file)\n+\n+# Removes schema-specific keywords for proper processing\n+social_networks: dict = dict(data)\n+social_networks.pop('$schema')\n \n # Sort the social networks in alphanumeric order\n-social_networks = sorted(data.items())\n+social_networks: list = sorted(social_networks.items())\n \n # Write the list of supported sites to sites.md\n with open(\"sites.md\", \"w\") as site_file:\n", "issue": "feat: add json schema validation\nAdds a JSON Schema that validates `data.json` and `removed_sites.json`. I've already validated the existing data with it and can confirm both files pass. \ud83d\udc4d\ud83c\udffe \r\n\r\nPrepends the following property to both files:\r\n```json\r\n{\r\n \"$schema\": \"{path/to/json-schema}\"\r\n}\r\n```\r\n\r\nRemoves the `rank` property from `removed_sites.json` and documentation as this is no longer relevant.\r\n\r\nRemoves the \"noPeriod\" property from Jimdo as this appears to serve on purpose.\r\n\r\n---\r\n\r\nSorry, also a question. What is the purpose of `responseUrl`? I just realized, this property is not referenced whatsoever in the actual application. \ud83e\udd14 Can this property be removed in the dataset and schema?\r\n\r\nLater I do want to make it stricter, i.e. prevent someone from using one `errorType` with the wrong additional field, i.e.\r\n```json\r\n{ \r\n \"errorMsg\": \"Not Found\",\r\n \"errorType\": \"status_code\"\r\n} \r\n```\r\n\r\nI figured I'd leave it like this for now and find out what `responseUrl` is about. \ud83e\udd14 \r\n\r\n### Related\r\n* Closes https://github.com/sherlock-project/sherlock/issues/1336\r\n\r\nThe goal of this is to improve the experience for developers and leave less room for human-error. It also enforces that we document and provide examples of each field, unlike the current Wiki which tends to get outdated.\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Sherlock Sites Information Module\n\nThis module supports storing information about websites.\nThis is the raw data that will be used to search for usernames.\n\"\"\"\nimport json\nimport requests\nimport secrets\n\nclass SiteInformation:\n def __init__(self, name, url_home, url_username_format, username_claimed,\n information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):\n \"\"\"Create Site Information Object.\n\n Contains information about a specific website.\n\n Keyword Arguments:\n self -- This object.\n name -- String which identifies site.\n url_home -- String containing URL for home of site.\n url_username_format -- String containing URL for Username format\n on site.\n NOTE: The string should contain the\n token \"{}\" where the username should\n be substituted. 
For example, a string\n of \"https://somesite.com/users/{}\"\n indicates that the individual\n usernames would show up under the\n \"https://somesite.com/users/\" area of\n the website.\n username_claimed -- String containing username which is known\n to be claimed on website.\n username_unclaimed -- String containing username which is known\n to be unclaimed on website.\n information -- Dictionary containing all known information\n about website.\n NOTE: Custom information about how to\n actually detect the existence of the\n username will be included in this\n dictionary. This information will\n be needed by the detection method,\n but it is only recorded in this\n object for future use.\n is_nsfw -- Boolean indicating if site is Not Safe For Work.\n\n Return Value:\n Nothing.\n \"\"\"\n\n self.name = name\n self.url_home = url_home\n self.url_username_format = url_username_format\n\n self.username_claimed = username_claimed\n self.username_unclaimed = secrets.token_urlsafe(32)\n self.information = information\n self.is_nsfw = is_nsfw\n\n return\n\n def __str__(self):\n \"\"\"Convert Object To String.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Nicely formatted string to get information about this object.\n \"\"\"\n \n return f\"{self.name} ({self.url_home})\"\n\n\nclass SitesInformation:\n def __init__(self, data_file_path=None):\n \"\"\"Create Sites Information Object.\n\n Contains information about all supported websites.\n\n Keyword Arguments:\n self -- This object.\n data_file_path -- String which indicates path to data file.\n The file name must end in \".json\".\n\n There are 3 possible formats:\n * Absolute File Format\n For example, \"c:/stuff/data.json\".\n * Relative File Format\n The current working directory is used\n as the context.\n For example, \"data.json\".\n * URL Format\n For example,\n \"https://example.com/data.json\", or\n \"http://example.com/data.json\".\n\n An exception will be thrown if the path\n to the data file is not in the expected\n format, or if there was any problem loading\n the file.\n\n If this option is not specified, then a\n default site list will be used.\n\n Return Value:\n Nothing.\n \"\"\"\n\n if not data_file_path:\n # The default data file is the live data.json which is in the GitHub repo. The reason why we are using\n # this instead of the local one is so that the user has the most up-to-date data. 
This prevents\n # users from creating issue about false positives which has already been fixed or having outdated data\n data_file_path = \"https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json\"\n\n # Ensure that specified data file has correct extension.\n if not data_file_path.lower().endswith(\".json\"):\n raise FileNotFoundError(f\"Incorrect JSON file extension for data file '{data_file_path}'.\")\n\n # if \"http://\" == data_file_path[:7].lower() or \"https://\" == data_file_path[:8].lower():\n if data_file_path.lower().startswith(\"http\"):\n # Reference is to a URL.\n try:\n response = requests.get(url=data_file_path)\n except Exception as error:\n raise FileNotFoundError(\n f\"Problem while attempting to access data file URL '{data_file_path}': {error}\"\n )\n\n if response.status_code != 200:\n raise FileNotFoundError(f\"Bad response while accessing \"\n f\"data file URL '{data_file_path}'.\"\n )\n try:\n site_data = response.json()\n except Exception as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': {error}.\"\n )\n\n else:\n # Reference is to a file.\n try:\n with open(data_file_path, \"r\", encoding=\"utf-8\") as file:\n try:\n site_data = json.load(file)\n except Exception as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': {error}.\"\n )\n\n except FileNotFoundError:\n raise FileNotFoundError(f\"Problem while attempting to access \"\n f\"data file '{data_file_path}'.\"\n )\n\n self.sites = {}\n\n # Add all site information from the json file to internal site list.\n for site_name in site_data:\n try:\n\n self.sites[site_name] = \\\n SiteInformation(site_name,\n site_data[site_name][\"urlMain\"],\n site_data[site_name][\"url\"],\n site_data[site_name][\"username_claimed\"],\n site_data[site_name],\n site_data[site_name].get(\"isNSFW\",False)\n\n )\n except KeyError as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': Missing attribute {error}.\"\n )\n\n return\n\n def remove_nsfw_sites(self):\n \"\"\"\n Remove NSFW sites from the sites, if isNSFW flag is true for site\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n None\n \"\"\"\n sites = {}\n for site in self.sites:\n if self.sites[site].is_nsfw:\n continue\n sites[site] = self.sites[site] \n self.sites = sites\n\n def site_name_list(self):\n \"\"\"Get Site Name List.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n List of strings containing names of sites.\n \"\"\"\n\n return sorted([site.name for site in self], key=str.lower)\n\n def __iter__(self):\n \"\"\"Iterator For Object.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Iterator for sites object.\n \"\"\"\n\n for site_name in self.sites:\n yield self.sites[site_name]\n\n def __len__(self):\n \"\"\"Length For Object.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Length of sites object.\n \"\"\"\n return len(self.sites)\n", "path": "sherlock/sites.py"}, {"content": "#!/usr/bin/env python\n# This module generates the listing of supported sites which can be found in\n# sites.md. 
It also organizes all the sites in alphanumeric order\nimport json\n\n# Read the data.json file\nwith open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data = json.load(data_file)\n\n# Sort the social networks in alphanumeric order\nsocial_networks = sorted(data.items())\n\n# Write the list of supported sites to sites.md\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f\"## List Of Supported Sites ({len(social_networks)} Sites In Total!)\\n\")\n for social_network, info in social_networks:\n url_main = info[\"urlMain\"]\n is_nsfw = \"**(NSFW)**\" if info.get(\"isNSFW\") else \"\"\n site_file.write(f\"1.  [{social_network}]({url_main}) {is_nsfw}\\n\")\n\n# Overwrite the data.json file with sorted data\nwith open(\"sherlock/resources/data.json\", \"w\") as data_file:\n sorted_data = json.dumps(data, indent=2, sort_keys=True)\n data_file.write(sorted_data)\n data_file.write(\"\\n\")\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}], "after_files": [{"content": "\"\"\"Sherlock Sites Information Module\n\nThis module supports storing information about websites.\nThis is the raw data that will be used to search for usernames.\n\"\"\"\nimport json\nimport requests\nimport secrets\n\nclass SiteInformation:\n def __init__(self, name, url_home, url_username_format, username_claimed,\n information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):\n \"\"\"Create Site Information Object.\n\n Contains information about a specific website.\n\n Keyword Arguments:\n self -- This object.\n name -- String which identifies site.\n url_home -- String containing URL for home of site.\n url_username_format -- String containing URL for Username format\n on site.\n NOTE: The string should contain the\n token \"{}\" where the username should\n be substituted. For example, a string\n of \"https://somesite.com/users/{}\"\n indicates that the individual\n usernames would show up under the\n \"https://somesite.com/users/\" area of\n the website.\n username_claimed -- String containing username which is known\n to be claimed on website.\n username_unclaimed -- String containing username which is known\n to be unclaimed on website.\n information -- Dictionary containing all known information\n about website.\n NOTE: Custom information about how to\n actually detect the existence of the\n username will be included in this\n dictionary. 
This information will\n be needed by the detection method,\n but it is only recorded in this\n object for future use.\n is_nsfw -- Boolean indicating if site is Not Safe For Work.\n\n Return Value:\n Nothing.\n \"\"\"\n\n self.name = name\n self.url_home = url_home\n self.url_username_format = url_username_format\n\n self.username_claimed = username_claimed\n self.username_unclaimed = secrets.token_urlsafe(32)\n self.information = information\n self.is_nsfw = is_nsfw\n\n return\n\n def __str__(self):\n \"\"\"Convert Object To String.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Nicely formatted string to get information about this object.\n \"\"\"\n \n return f\"{self.name} ({self.url_home})\"\n\n\nclass SitesInformation:\n def __init__(self, data_file_path=None):\n \"\"\"Create Sites Information Object.\n\n Contains information about all supported websites.\n\n Keyword Arguments:\n self -- This object.\n data_file_path -- String which indicates path to data file.\n The file name must end in \".json\".\n\n There are 3 possible formats:\n * Absolute File Format\n For example, \"c:/stuff/data.json\".\n * Relative File Format\n The current working directory is used\n as the context.\n For example, \"data.json\".\n * URL Format\n For example,\n \"https://example.com/data.json\", or\n \"http://example.com/data.json\".\n\n An exception will be thrown if the path\n to the data file is not in the expected\n format, or if there was any problem loading\n the file.\n\n If this option is not specified, then a\n default site list will be used.\n\n Return Value:\n Nothing.\n \"\"\"\n\n if not data_file_path:\n # The default data file is the live data.json which is in the GitHub repo. The reason why we are using\n # this instead of the local one is so that the user has the most up-to-date data. 
This prevents\n # users from creating issue about false positives which has already been fixed or having outdated data\n data_file_path = \"https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json\"\n\n # Ensure that specified data file has correct extension.\n if not data_file_path.lower().endswith(\".json\"):\n raise FileNotFoundError(f\"Incorrect JSON file extension for data file '{data_file_path}'.\")\n\n # if \"http://\" == data_file_path[:7].lower() or \"https://\" == data_file_path[:8].lower():\n if data_file_path.lower().startswith(\"http\"):\n # Reference is to a URL.\n try:\n response = requests.get(url=data_file_path)\n except Exception as error:\n raise FileNotFoundError(\n f\"Problem while attempting to access data file URL '{data_file_path}': {error}\"\n )\n\n if response.status_code != 200:\n raise FileNotFoundError(f\"Bad response while accessing \"\n f\"data file URL '{data_file_path}'.\"\n )\n try:\n site_data = response.json()\n except Exception as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': {error}.\"\n )\n\n else:\n # Reference is to a file.\n try:\n with open(data_file_path, \"r\", encoding=\"utf-8\") as file:\n try:\n site_data = json.load(file)\n except Exception as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': {error}.\"\n )\n\n except FileNotFoundError:\n raise FileNotFoundError(f\"Problem while attempting to access \"\n f\"data file '{data_file_path}'.\"\n )\n \n try:\n site_data.pop('$schema')\n except:\n pass\n\n self.sites = {}\n\n # Add all site information from the json file to internal site list.\n for site_name in site_data:\n try:\n\n self.sites[site_name] = \\\n SiteInformation(site_name,\n site_data[site_name][\"urlMain\"],\n site_data[site_name][\"url\"],\n site_data[site_name][\"username_claimed\"],\n site_data[site_name],\n site_data[site_name].get(\"isNSFW\",False)\n\n )\n except KeyError as error:\n raise ValueError(\n f\"Problem parsing json contents at '{data_file_path}': Missing attribute {error}.\"\n )\n\n return\n\n def remove_nsfw_sites(self):\n \"\"\"\n Remove NSFW sites from the sites, if isNSFW flag is true for site\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n None\n \"\"\"\n sites = {}\n for site in self.sites:\n if self.sites[site].is_nsfw:\n continue\n sites[site] = self.sites[site] \n self.sites = sites\n\n def site_name_list(self):\n \"\"\"Get Site Name List.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n List of strings containing names of sites.\n \"\"\"\n\n return sorted([site.name for site in self], key=str.lower)\n\n def __iter__(self):\n \"\"\"Iterator For Object.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Iterator for sites object.\n \"\"\"\n\n for site_name in self.sites:\n yield self.sites[site_name]\n\n def __len__(self):\n \"\"\"Length For Object.\n\n Keyword Arguments:\n self -- This object.\n\n Return Value:\n Length of sites object.\n \"\"\"\n return len(self.sites)\n", "path": "sherlock/sites.py"}, {"content": "#!/usr/bin/env python\n# This module generates the listing of supported sites which can be found in\n# sites.md. 
It also organizes all the sites in alphanumeric order\nimport json\n\n# Read the data.json file\nwith open(\"sherlock/resources/data.json\", \"r\", encoding=\"utf-8\") as data_file:\n data: dict = json.load(data_file)\n\n# Removes schema-specific keywords for proper processing\nsocial_networks: dict = dict(data)\nsocial_networks.pop('$schema')\n\n# Sort the social networks in alphanumeric order\nsocial_networks: list = sorted(social_networks.items())\n\n# Write the list of supported sites to sites.md\nwith open(\"sites.md\", \"w\") as site_file:\n site_file.write(f\"## List Of Supported Sites ({len(social_networks)} Sites In Total!)\\n\")\n for social_network, info in social_networks:\n url_main = info[\"urlMain\"]\n is_nsfw = \"**(NSFW)**\" if info.get(\"isNSFW\") else \"\"\n site_file.write(f\"1.  [{social_network}]({url_main}) {is_nsfw}\\n\")\n\n# Overwrite the data.json file with sorted data\nwith open(\"sherlock/resources/data.json\", \"w\") as data_file:\n sorted_data = json.dumps(data, indent=2, sort_keys=True)\n data_file.write(sorted_data)\n data_file.write(\"\\n\")\n\nprint(\"Finished updating supported site listing!\")\n", "path": "site_list.py"}]}
| 3,245 | 270 |
gh_patches_debug_9061 | rasdani/github-patches | git_diff | modin-project__modin-506 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Numpy 1.16 support for future read_hdf
Due to this issue (https://github.com/PyTables/PyTables/issues/717) it seems that, at least on my host machine, the latest version of numpy is needed to store and play with large datasets using hdf5. Naturally, I would love to use modin (ray) for these purposes and but realized that modin runs with numpy<=1.15.
I downloaded the source of Ray from github to test to see if numpy 1.15+ was supported and it seems that tests were failing for numpy 1.16.1. I was curious if modin planned to support higher versions of numpy in the near term as would be required to interplay with py tables.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 from setuptools import setup, find_packages
6
7 with open("README.md", "r", encoding="utf8") as fh:
8 long_description = fh.read()
9
10 setup(
11 name="modin",
12 version="0.4.0",
13 description="Modin: Make your pandas code run faster by changing one line of code.",
14 packages=find_packages(),
15 url="https://github.com/modin-project/modin",
16 long_description=long_description,
17 long_description_content_type="text/markdown",
18 install_requires=["pandas==0.24.1", "ray==0.6.2", "numpy<=1.15.0", "typing"],
19 extras_require={
20 # can be installed by pip install modin[dask]
21 "dask": ["dask==1.0.0", "distributed==1.25.0"],
22 },
23 )
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,9 +15,9 @@
url="https://github.com/modin-project/modin",
long_description=long_description,
long_description_content_type="text/markdown",
- install_requires=["pandas==0.24.1", "ray==0.6.2", "numpy<=1.15.0", "typing"],
+ install_requires=["pandas==0.24.1", "ray==0.6.2", "typing"],
extras_require={
# can be installed by pip install modin[dask]
- "dask": ["dask==1.0.0", "distributed==1.25.0"],
+ "dask": ["dask==1.1.0", "distributed==1.25.0"],
},
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,9 +15,9 @@\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n- install_requires=[\"pandas==0.24.1\", \"ray==0.6.2\", \"numpy<=1.15.0\", \"typing\"],\n+ install_requires=[\"pandas==0.24.1\", \"ray==0.6.2\", \"typing\"],\n extras_require={\n # can be installed by pip install modin[dask]\n- \"dask\": [\"dask==1.0.0\", \"distributed==1.25.0\"],\n+ \"dask\": [\"dask==1.1.0\", \"distributed==1.25.0\"],\n },\n )\n", "issue": "Numpy 1.16 support for future read_hdf\nDue to this issue (https://github.com/PyTables/PyTables/issues/717) it seems that, at least on my host machine, the latest version of numpy is needed to store and play with large datasets using hdf5. Naturally, I would love to use modin (ray) for these purposes and but realized that modin runs with numpy<=1.15.\r\n\r\nI downloaded the source of Ray from github to test to see if numpy 1.15+ was supported and it seems that tests were failing for numpy 1.16.1. I was curious if modin planned to support higher versions of numpy in the near term as would be required to interplay with py tables.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"modin\",\n version=\"0.4.0\",\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==0.24.1\", \"ray==0.6.2\", \"numpy<=1.15.0\", \"typing\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": [\"dask==1.0.0\", \"distributed==1.25.0\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\", encoding=\"utf8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"modin\",\n version=\"0.4.0\",\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(),\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\"pandas==0.24.1\", \"ray==0.6.2\", \"typing\"],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": [\"dask==1.1.0\", \"distributed==1.25.0\"],\n },\n)\n", "path": "setup.py"}]}
| 666 | 198 |
gh_patches_debug_55117 | rasdani/github-patches | git_diff | netbox-community__netbox-15725 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PROTECTION_RULES: Custom Validator does not show error message on object deletion
### Deployment Type
Self-hosted
### NetBox Version
v4.0-beta1 (commit c7f6c206cf5068f890b89da9ca04d4d3583f5107)
### Python Version
3.11
### Steps to Reproduce
1. Create a custom validator with the following code:
```python
from extras.validators import CustomValidator
from utilities.exceptions import AbortRequest
class IPAddressDeleteValidator(CustomValidator):
def validate(self, instance, request):
raise AbortRequest("Do not delete IP addresses!")
```
and store as `/opt/netbox/validators/test.py`
2. Add the custom validator as a protect rule for `IPAddress` objects:
```python
PROTECTION_RULES = {
"ipam.ipaddress": [
"validators.test.IPAddressDeleteValidator",
]
}
```
3. Navigate to IPAM/IP Addresses
4. Create an arbitrary IP address
5. Click on "Delete" in the new address's detail view and confirm deletion
### Expected Behavior
The IP address is not deleted, an error message is shown saying "Do not delete IP addresses!"
### Observed Behavior
The IP address is not deleted, but there is no error message.
The error message is, however, displayed when one tries to delete an IP address using the bulk edit view:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/utilities/htmx.py`
Content:
```
1 __all__ = (
2 'htmx_partial',
3 )
4
5 PAGE_CONTAINER_ID = 'page-content'
6
7
8 def htmx_partial(request):
9 """
10 Determines whether to render partial (versus complete) HTML content
11 in response to an HTMX request, based on the target element.
12 """
13 return request.htmx and request.htmx.target and request.htmx.target != PAGE_CONTAINER_ID
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/utilities/htmx.py b/netbox/utilities/htmx.py
--- a/netbox/utilities/htmx.py
+++ b/netbox/utilities/htmx.py
@@ -2,12 +2,10 @@
'htmx_partial',
)
-PAGE_CONTAINER_ID = 'page-content'
-
def htmx_partial(request):
"""
Determines whether to render partial (versus complete) HTML content
in response to an HTMX request, based on the target element.
"""
- return request.htmx and request.htmx.target and request.htmx.target != PAGE_CONTAINER_ID
+ return request.htmx and not request.htmx.boosted
|
{"golden_diff": "diff --git a/netbox/utilities/htmx.py b/netbox/utilities/htmx.py\n--- a/netbox/utilities/htmx.py\n+++ b/netbox/utilities/htmx.py\n@@ -2,12 +2,10 @@\n 'htmx_partial',\n )\n \n-PAGE_CONTAINER_ID = 'page-content'\n-\n \n def htmx_partial(request):\n \"\"\"\n Determines whether to render partial (versus complete) HTML content\n in response to an HTMX request, based on the target element.\n \"\"\"\n- return request.htmx and request.htmx.target and request.htmx.target != PAGE_CONTAINER_ID\n+ return request.htmx and not request.htmx.boosted\n", "issue": "PROTECTION_RULES: Custom Validator does not show error message on object deletion\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv4.0-beta1 (commit c7f6c206cf5068f890b89da9ca04d4d3583f5107)\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create a custom validator with the following code:\r\n```python\r\nfrom extras.validators import CustomValidator\r\nfrom utilities.exceptions import AbortRequest\r\n\r\n\r\nclass IPAddressDeleteValidator(CustomValidator):\r\n\r\n def validate(self, instance, request):\r\n raise AbortRequest(\"Do not delete IP addresses!\")\r\n```\r\nand store as `/opt/netbox/validators/test.py`\r\n\r\n2. Add the custom validator as a protect rule for `IPAddress` objects:\r\n```python\r\nPROTECTION_RULES = {\r\n \"ipam.ipaddress\": [\r\n \"validators.test.IPAddressDeleteValidator\",\r\n ]\r\n}\r\n```\r\n3. Navigate to IPAM/IP Addresses\r\n4. Create an arbitrary IP address\r\n5. Click on \"Delete\" in the new address's detail view and confirm deletion\r\n\r\n### Expected Behavior\r\n\r\nThe IP address is not deleted, an error message is shown saying \"Do not delete IP addresses!\"\r\n\r\n### Observed Behavior\r\n\r\nThe IP address is not deleted, but there is no error message. \r\n\r\nThe error message is, however, displayed when one tries to delete an IP address using the bulk edit view:\r\n\r\n\n", "before_files": [{"content": "__all__ = (\n 'htmx_partial',\n)\n\nPAGE_CONTAINER_ID = 'page-content'\n\n\ndef htmx_partial(request):\n \"\"\"\n Determines whether to render partial (versus complete) HTML content\n in response to an HTMX request, based on the target element.\n \"\"\"\n return request.htmx and request.htmx.target and request.htmx.target != PAGE_CONTAINER_ID\n", "path": "netbox/utilities/htmx.py"}], "after_files": [{"content": "__all__ = (\n 'htmx_partial',\n)\n\n\ndef htmx_partial(request):\n \"\"\"\n Determines whether to render partial (versus complete) HTML content\n in response to an HTMX request, based on the target element.\n \"\"\"\n return request.htmx and not request.htmx.boosted\n", "path": "netbox/utilities/htmx.py"}]}
| 728 | 150 |
gh_patches_debug_18066 | rasdani/github-patches | git_diff | magenta__magenta-624 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
issue training / generating with polyphony_rnn
Hello,
After (apparently successfully) learning an polyphony_rnn model, I cannot generate any sequence.
If I try to generate from the checkpoint, it returns the following:
> INFO:tensorflow:Checkpoint used: ./2017_05_04//run1/train/model.ckpt-3122
INFO:tensorflow:Restoring parameters from ./2017_05_04//run1/train/model.ckpt-3122
2017-05-04 18:18:18.663347: W tensorflow/core/framework/op_kernel.cc:1152] Not found: Key rnn/multi_rnn_cell/cell_2/basic_lstm_cell/weights not found in checkpoint
2017-05-04 18:18:18.666763: W tensorflow/core/framework/op_kernel.cc:1152] Not found: Key rnn/multi_rnn_cell/cell_2/basic_lstm_cell/biases not found in checkpoint
It is followed by a long traceback call message (that I'll provide if it helps).
Unsurprisingly I also cannot generate a bundle file from my checkpoint, for the same reason.
Any suggestion?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `magenta/models/polyphony_rnn/polyphony_rnn_generate.py`
Content:
```
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Generate polyphonic tracks from a trained checkpoint.
15
16 Uses flags to define operation.
17 """
18
19 import ast
20 import os
21 import time
22
23 # internal imports
24
25 import tensorflow as tf
26 import magenta
27
28 from magenta.models.polyphony_rnn import polyphony_model
29 from magenta.models.polyphony_rnn import polyphony_sequence_generator
30
31 from magenta.music import constants
32 from magenta.protobuf import generator_pb2
33 from magenta.protobuf import music_pb2
34
35 FLAGS = tf.app.flags.FLAGS
36 tf.app.flags.DEFINE_string(
37 'run_dir', None,
38 'Path to the directory where the latest checkpoint will be loaded from.')
39 tf.app.flags.DEFINE_string(
40 'bundle_file', None,
41 'Path to the bundle file. If specified, this will take priority over '
42 'run_dir, unless save_generator_bundle is True, in which case both this '
43 'flag and run_dir are required')
44 tf.app.flags.DEFINE_boolean(
45 'save_generator_bundle', False,
46 'If true, instead of generating a sequence, will save this generator as a '
47 'bundle file in the location specified by the bundle_file flag')
48 tf.app.flags.DEFINE_string(
49 'bundle_description', None,
50 'A short, human-readable text description of the bundle (e.g., training '
51 'data, hyper parameters, etc.).')
52 tf.app.flags.DEFINE_string(
53 'config', 'polyphony', 'Config to use.')
54 tf.app.flags.DEFINE_string(
55 'output_dir', '/tmp/polyphony_rnn/generated',
56 'The directory where MIDI files will be saved to.')
57 tf.app.flags.DEFINE_integer(
58 'num_outputs', 10,
59 'The number of tracks to generate. One MIDI file will be created for '
60 'each.')
61 tf.app.flags.DEFINE_integer(
62 'num_steps', 128,
63 'The total number of steps the generated track should be, priming '
64 'track length + generated steps. Each step is a 16th of a bar.')
65 tf.app.flags.DEFINE_string(
66 'primer_pitches', '',
67 'A string representation of a Python list of pitches that will be used as '
68 'a starting chord with a quarter note duration. For example: '
69 '"[60, 64, 67]"')
70 tf.app.flags.DEFINE_string(
71 'primer_melody', '',
72 'A string representation of a Python list of '
73 'magenta.music.Melody event values. For example: '
74 '"[60, -2, 60, -2, 67, -2, 67, -2]".')
75 tf.app.flags.DEFINE_string(
76 'primer_midi', '',
77 'The path to a MIDI file containing a polyphonic track that will be used '
78 'as a priming track.')
79 tf.app.flags.DEFINE_boolean(
80 'condition_on_primer', False,
81 'If set, the RNN will receive the primer as its input before it begins '
82 'generating a new sequence.')
83 tf.app.flags.DEFINE_boolean(
84 'inject_primer_during_generation', True,
85 'If set, the primer will be injected as a part of the generated sequence. '
86 'This option is useful if you want the model to harmonize an existing '
87 'melody.')
88 tf.app.flags.DEFINE_float(
89 'qpm', None,
90 'The quarters per minute to play generated output at. If a primer MIDI is '
91 'given, the qpm from that will override this flag. If qpm is None, qpm '
92 'will default to 120.')
93 tf.app.flags.DEFINE_float(
94 'temperature', 1.0,
95 'The randomness of the generated tracks. 1.0 uses the unaltered '
96 'softmax probabilities, greater than 1.0 makes tracks more random, less '
97 'than 1.0 makes tracks less random.')
98 tf.app.flags.DEFINE_integer(
99 'beam_size', 1,
100 'The beam size to use for beam search when generating tracks.')
101 tf.app.flags.DEFINE_integer(
102 'branch_factor', 1,
103 'The branch factor to use for beam search when generating tracks.')
104 tf.app.flags.DEFINE_integer(
105 'steps_per_iteration', 1,
106 'The number of steps to take per beam search iteration.')
107 tf.app.flags.DEFINE_string(
108 'log', 'INFO',
109 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
110 'or FATAL.')
111
112
113 def get_checkpoint():
114 """Get the training dir or checkpoint path to be used by the model."""
115 if FLAGS.run_dir and FLAGS.bundle_file and not FLAGS.save_generator_bundle:
116 raise magenta.music.SequenceGeneratorException(
117 'Cannot specify both bundle_file and run_dir')
118 if FLAGS.run_dir:
119 train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')
120 return train_dir
121 else:
122 return None
123
124
125 def get_bundle():
126 """Returns a generator_pb2.GeneratorBundle object based read from bundle_file.
127
128 Returns:
129 Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is
130 not set or the save_generator_bundle flag is set.
131 """
132 if FLAGS.save_generator_bundle:
133 return None
134 if FLAGS.bundle_file is None:
135 return None
136 bundle_file = os.path.expanduser(FLAGS.bundle_file)
137 return magenta.music.read_bundle_file(bundle_file)
138
139
140 def run_with_flags(generator):
141 """Generates polyphonic tracks and saves them as MIDI files.
142
143 Uses the options specified by the flags defined in this module.
144
145 Args:
146 generator: The PolyphonyRnnSequenceGenerator to use for generation.
147 """
148 if not FLAGS.output_dir:
149 tf.logging.fatal('--output_dir required')
150 return
151 output_dir = os.path.expanduser(FLAGS.output_dir)
152
153 primer_midi = None
154 if FLAGS.primer_midi:
155 primer_midi = os.path.expanduser(FLAGS.primer_midi)
156
157 if not tf.gfile.Exists(output_dir):
158 tf.gfile.MakeDirs(output_dir)
159
160 primer_sequence = None
161 qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE
162 if FLAGS.primer_pitches:
163 primer_sequence = music_pb2.NoteSequence()
164 primer_sequence.tempos.add().qpm = qpm
165 primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
166 for pitch in ast.literal_eval(FLAGS.primer_pitches):
167 note = primer_sequence.notes.add()
168 note.start_time = 0
169 note.end_time = 60.0 / qpm
170 note.pitch = pitch
171 note.velocity = 100
172 primer_sequence.total_time = primer_sequence.notes[-1].end_time
173 elif FLAGS.primer_melody:
174 primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))
175 primer_sequence = primer_melody.to_sequence(qpm=qpm)
176 elif primer_midi:
177 primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)
178 if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
179 qpm = primer_sequence.tempos[0].qpm
180 else:
181 tf.logging.warning(
182 'No priming sequence specified. Defaulting to empty sequence.')
183 primer_sequence = music_pb2.NoteSequence()
184 primer_sequence.tempos.add().qpm = qpm
185 primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ
186
187 # Derive the total number of seconds to generate.
188 seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
189 generate_end_time = FLAGS.num_steps * seconds_per_step
190
191 # Specify start/stop time for generation based on starting generation at the
192 # end of the priming sequence and continuing until the sequence is num_steps
193 # long.
194 generator_options = generator_pb2.GeneratorOptions()
195 # Set the start time to begin when the last note ends.
196 generate_section = generator_options.generate_sections.add(
197 start_time=primer_sequence.total_time,
198 end_time=generate_end_time)
199
200 if generate_section.start_time >= generate_section.end_time:
201 tf.logging.fatal(
202 'Priming sequence is longer than the total number of steps '
203 'requested: Priming sequence length: %s, Total length '
204 'requested: %s',
205 generate_section.start_time, generate_end_time)
206 return
207
208 generator_options.args['temperature'].float_value = FLAGS.temperature
209 generator_options.args['beam_size'].int_value = FLAGS.beam_size
210 generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
211 generator_options.args[
212 'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
213
214 generator_options.args['condition_on_primer'].bool_value = (
215 FLAGS.condition_on_primer)
216 generator_options.args['no_inject_primer_during_generation'].bool_value = (
217 not FLAGS.inject_primer_during_generation)
218
219 tf.logging.debug('primer_sequence: %s', primer_sequence)
220 tf.logging.debug('generator_options: %s', generator_options)
221
222 # Make the generate request num_outputs times and save the output as midi
223 # files.
224 date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
225 digits = len(str(FLAGS.num_outputs))
226 for i in range(FLAGS.num_outputs):
227 generated_sequence = generator.generate(primer_sequence, generator_options)
228
229 midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
230 midi_path = os.path.join(output_dir, midi_filename)
231 magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)
232
233 tf.logging.info('Wrote %d MIDI files to %s',
234 FLAGS.num_outputs, output_dir)
235
236
237 def main(unused_argv):
238 """Saves bundle or runs generator based on flags."""
239 tf.logging.set_verbosity(FLAGS.log)
240
241 config = polyphony_model.default_configs[FLAGS.config]
242
243 generator = polyphony_sequence_generator.PolyphonyRnnSequenceGenerator(
244 model=polyphony_model.PolyphonyRnnModel(config),
245 details=config.details,
246 steps_per_quarter=config.steps_per_quarter,
247 checkpoint=get_checkpoint(),
248 bundle=get_bundle())
249
250 if FLAGS.save_generator_bundle:
251 bundle_filename = os.path.expanduser(FLAGS.bundle_file)
252 if FLAGS.bundle_description is None:
253 tf.logging.warning('No bundle description provided.')
254 tf.logging.info('Saving generator bundle to %s', bundle_filename)
255 generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)
256 else:
257 run_with_flags(generator)
258
259
260 def console_entry_point():
261 tf.app.run(main)
262
263
264 if __name__ == '__main__':
265 console_entry_point()
266
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/magenta/models/polyphony_rnn/polyphony_rnn_generate.py b/magenta/models/polyphony_rnn/polyphony_rnn_generate.py
--- a/magenta/models/polyphony_rnn/polyphony_rnn_generate.py
+++ b/magenta/models/polyphony_rnn/polyphony_rnn_generate.py
@@ -108,6 +108,11 @@
'log', 'INFO',
'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
'or FATAL.')
+tf.app.flags.DEFINE_string(
+ 'hparams', '{}',
+ 'String representation of a Python dictionary containing hyperparameter '
+ 'to value mapping. This mapping is merged with the default '
+ 'hyperparameters.')
def get_checkpoint():
@@ -239,6 +244,7 @@
tf.logging.set_verbosity(FLAGS.log)
config = polyphony_model.default_configs[FLAGS.config]
+ config.hparams.parse(FLAGS.hparams)
generator = polyphony_sequence_generator.PolyphonyRnnSequenceGenerator(
model=polyphony_model.PolyphonyRnnModel(config),
|
{"golden_diff": "diff --git a/magenta/models/polyphony_rnn/polyphony_rnn_generate.py b/magenta/models/polyphony_rnn/polyphony_rnn_generate.py\n--- a/magenta/models/polyphony_rnn/polyphony_rnn_generate.py\n+++ b/magenta/models/polyphony_rnn/polyphony_rnn_generate.py\n@@ -108,6 +108,11 @@\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n+tf.app.flags.DEFINE_string(\n+ 'hparams', '{}',\n+ 'String representation of a Python dictionary containing hyperparameter '\n+ 'to value mapping. This mapping is merged with the default '\n+ 'hyperparameters.')\n \n \n def get_checkpoint():\n@@ -239,6 +244,7 @@\n tf.logging.set_verbosity(FLAGS.log)\n \n config = polyphony_model.default_configs[FLAGS.config]\n+ config.hparams.parse(FLAGS.hparams)\n \n generator = polyphony_sequence_generator.PolyphonyRnnSequenceGenerator(\n model=polyphony_model.PolyphonyRnnModel(config),\n", "issue": "issue training / generating with polyphony_rnn\nHello,\r\n\r\nAfter (apparently successfully) learning an polyphony_rnn model, I cannot generate any sequence.\r\nIf I try to generate from the checkpoint, it returns the following:\r\n\r\n> INFO:tensorflow:Checkpoint used: ./2017_05_04//run1/train/model.ckpt-3122\r\nINFO:tensorflow:Restoring parameters from ./2017_05_04//run1/train/model.ckpt-3122\r\n2017-05-04 18:18:18.663347: W tensorflow/core/framework/op_kernel.cc:1152] Not found: Key rnn/multi_rnn_cell/cell_2/basic_lstm_cell/weights not found in checkpoint\r\n2017-05-04 18:18:18.666763: W tensorflow/core/framework/op_kernel.cc:1152] Not found: Key rnn/multi_rnn_cell/cell_2/basic_lstm_cell/biases not found in checkpoint\r\n\r\nIt is followed by a long traceback call message (that I'll provide if it helps).\r\n\r\nUnsurprisingly I also cannot generate a bundle file from my checkpoint, for the same reason.\r\n\r\nAny suggestion?\r\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Generate polyphonic tracks from a trained checkpoint.\n\nUses flags to define operation.\n\"\"\"\n\nimport ast\nimport os\nimport time\n\n# internal imports\n\nimport tensorflow as tf\nimport magenta\n\nfrom magenta.models.polyphony_rnn import polyphony_model\nfrom magenta.models.polyphony_rnn import polyphony_sequence_generator\n\nfrom magenta.music import constants\nfrom magenta.protobuf import generator_pb2\nfrom magenta.protobuf import music_pb2\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string(\n 'run_dir', None,\n 'Path to the directory where the latest checkpoint will be loaded from.')\ntf.app.flags.DEFINE_string(\n 'bundle_file', None,\n 'Path to the bundle file. 
If specified, this will take priority over '\n 'run_dir, unless save_generator_bundle is True, in which case both this '\n 'flag and run_dir are required')\ntf.app.flags.DEFINE_boolean(\n 'save_generator_bundle', False,\n 'If true, instead of generating a sequence, will save this generator as a '\n 'bundle file in the location specified by the bundle_file flag')\ntf.app.flags.DEFINE_string(\n 'bundle_description', None,\n 'A short, human-readable text description of the bundle (e.g., training '\n 'data, hyper parameters, etc.).')\ntf.app.flags.DEFINE_string(\n 'config', 'polyphony', 'Config to use.')\ntf.app.flags.DEFINE_string(\n 'output_dir', '/tmp/polyphony_rnn/generated',\n 'The directory where MIDI files will be saved to.')\ntf.app.flags.DEFINE_integer(\n 'num_outputs', 10,\n 'The number of tracks to generate. One MIDI file will be created for '\n 'each.')\ntf.app.flags.DEFINE_integer(\n 'num_steps', 128,\n 'The total number of steps the generated track should be, priming '\n 'track length + generated steps. Each step is a 16th of a bar.')\ntf.app.flags.DEFINE_string(\n 'primer_pitches', '',\n 'A string representation of a Python list of pitches that will be used as '\n 'a starting chord with a quarter note duration. For example: '\n '\"[60, 64, 67]\"')\ntf.app.flags.DEFINE_string(\n 'primer_melody', '',\n 'A string representation of a Python list of '\n 'magenta.music.Melody event values. For example: '\n '\"[60, -2, 60, -2, 67, -2, 67, -2]\".')\ntf.app.flags.DEFINE_string(\n 'primer_midi', '',\n 'The path to a MIDI file containing a polyphonic track that will be used '\n 'as a priming track.')\ntf.app.flags.DEFINE_boolean(\n 'condition_on_primer', False,\n 'If set, the RNN will receive the primer as its input before it begins '\n 'generating a new sequence.')\ntf.app.flags.DEFINE_boolean(\n 'inject_primer_during_generation', True,\n 'If set, the primer will be injected as a part of the generated sequence. '\n 'This option is useful if you want the model to harmonize an existing '\n 'melody.')\ntf.app.flags.DEFINE_float(\n 'qpm', None,\n 'The quarters per minute to play generated output at. If a primer MIDI is '\n 'given, the qpm from that will override this flag. If qpm is None, qpm '\n 'will default to 120.')\ntf.app.flags.DEFINE_float(\n 'temperature', 1.0,\n 'The randomness of the generated tracks. 
1.0 uses the unaltered '\n 'softmax probabilities, greater than 1.0 makes tracks more random, less '\n 'than 1.0 makes tracks less random.')\ntf.app.flags.DEFINE_integer(\n 'beam_size', 1,\n 'The beam size to use for beam search when generating tracks.')\ntf.app.flags.DEFINE_integer(\n 'branch_factor', 1,\n 'The branch factor to use for beam search when generating tracks.')\ntf.app.flags.DEFINE_integer(\n 'steps_per_iteration', 1,\n 'The number of steps to take per beam search iteration.')\ntf.app.flags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\n\n\ndef get_checkpoint():\n \"\"\"Get the training dir or checkpoint path to be used by the model.\"\"\"\n if FLAGS.run_dir and FLAGS.bundle_file and not FLAGS.save_generator_bundle:\n raise magenta.music.SequenceGeneratorException(\n 'Cannot specify both bundle_file and run_dir')\n if FLAGS.run_dir:\n train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')\n return train_dir\n else:\n return None\n\n\ndef get_bundle():\n \"\"\"Returns a generator_pb2.GeneratorBundle object based read from bundle_file.\n\n Returns:\n Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is\n not set or the save_generator_bundle flag is set.\n \"\"\"\n if FLAGS.save_generator_bundle:\n return None\n if FLAGS.bundle_file is None:\n return None\n bundle_file = os.path.expanduser(FLAGS.bundle_file)\n return magenta.music.read_bundle_file(bundle_file)\n\n\ndef run_with_flags(generator):\n \"\"\"Generates polyphonic tracks and saves them as MIDI files.\n\n Uses the options specified by the flags defined in this module.\n\n Args:\n generator: The PolyphonyRnnSequenceGenerator to use for generation.\n \"\"\"\n if not FLAGS.output_dir:\n tf.logging.fatal('--output_dir required')\n return\n output_dir = os.path.expanduser(FLAGS.output_dir)\n\n primer_midi = None\n if FLAGS.primer_midi:\n primer_midi = os.path.expanduser(FLAGS.primer_midi)\n\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n primer_sequence = None\n qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE\n if FLAGS.primer_pitches:\n primer_sequence = music_pb2.NoteSequence()\n primer_sequence.tempos.add().qpm = qpm\n primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n for pitch in ast.literal_eval(FLAGS.primer_pitches):\n note = primer_sequence.notes.add()\n note.start_time = 0\n note.end_time = 60.0 / qpm\n note.pitch = pitch\n note.velocity = 100\n primer_sequence.total_time = primer_sequence.notes[-1].end_time\n elif FLAGS.primer_melody:\n primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))\n primer_sequence = primer_melody.to_sequence(qpm=qpm)\n elif primer_midi:\n primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)\n if primer_sequence.tempos and primer_sequence.tempos[0].qpm:\n qpm = primer_sequence.tempos[0].qpm\n else:\n tf.logging.warning(\n 'No priming sequence specified. 
Defaulting to empty sequence.')\n primer_sequence = music_pb2.NoteSequence()\n primer_sequence.tempos.add().qpm = qpm\n primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n\n # Derive the total number of seconds to generate.\n seconds_per_step = 60.0 / qpm / generator.steps_per_quarter\n generate_end_time = FLAGS.num_steps * seconds_per_step\n\n # Specify start/stop time for generation based on starting generation at the\n # end of the priming sequence and continuing until the sequence is num_steps\n # long.\n generator_options = generator_pb2.GeneratorOptions()\n # Set the start time to begin when the last note ends.\n generate_section = generator_options.generate_sections.add(\n start_time=primer_sequence.total_time,\n end_time=generate_end_time)\n\n if generate_section.start_time >= generate_section.end_time:\n tf.logging.fatal(\n 'Priming sequence is longer than the total number of steps '\n 'requested: Priming sequence length: %s, Total length '\n 'requested: %s',\n generate_section.start_time, generate_end_time)\n return\n\n generator_options.args['temperature'].float_value = FLAGS.temperature\n generator_options.args['beam_size'].int_value = FLAGS.beam_size\n generator_options.args['branch_factor'].int_value = FLAGS.branch_factor\n generator_options.args[\n 'steps_per_iteration'].int_value = FLAGS.steps_per_iteration\n\n generator_options.args['condition_on_primer'].bool_value = (\n FLAGS.condition_on_primer)\n generator_options.args['no_inject_primer_during_generation'].bool_value = (\n not FLAGS.inject_primer_during_generation)\n\n tf.logging.debug('primer_sequence: %s', primer_sequence)\n tf.logging.debug('generator_options: %s', generator_options)\n\n # Make the generate request num_outputs times and save the output as midi\n # files.\n date_and_time = time.strftime('%Y-%m-%d_%H%M%S')\n digits = len(str(FLAGS.num_outputs))\n for i in range(FLAGS.num_outputs):\n generated_sequence = generator.generate(primer_sequence, generator_options)\n\n midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))\n midi_path = os.path.join(output_dir, midi_filename)\n magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)\n\n tf.logging.info('Wrote %d MIDI files to %s',\n FLAGS.num_outputs, output_dir)\n\n\ndef main(unused_argv):\n \"\"\"Saves bundle or runs generator based on flags.\"\"\"\n tf.logging.set_verbosity(FLAGS.log)\n\n config = polyphony_model.default_configs[FLAGS.config]\n\n generator = polyphony_sequence_generator.PolyphonyRnnSequenceGenerator(\n model=polyphony_model.PolyphonyRnnModel(config),\n details=config.details,\n steps_per_quarter=config.steps_per_quarter,\n checkpoint=get_checkpoint(),\n bundle=get_bundle())\n\n if FLAGS.save_generator_bundle:\n bundle_filename = os.path.expanduser(FLAGS.bundle_file)\n if FLAGS.bundle_description is None:\n tf.logging.warning('No bundle description provided.')\n tf.logging.info('Saving generator bundle to %s', bundle_filename)\n generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)\n else:\n run_with_flags(generator)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/polyphony_rnn/polyphony_rnn_generate.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Generate polyphonic tracks from a trained checkpoint.\n\nUses flags to define operation.\n\"\"\"\n\nimport ast\nimport os\nimport time\n\n# internal imports\n\nimport tensorflow as tf\nimport magenta\n\nfrom magenta.models.polyphony_rnn import polyphony_model\nfrom magenta.models.polyphony_rnn import polyphony_sequence_generator\n\nfrom magenta.music import constants\nfrom magenta.protobuf import generator_pb2\nfrom magenta.protobuf import music_pb2\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string(\n 'run_dir', None,\n 'Path to the directory where the latest checkpoint will be loaded from.')\ntf.app.flags.DEFINE_string(\n 'bundle_file', None,\n 'Path to the bundle file. If specified, this will take priority over '\n 'run_dir, unless save_generator_bundle is True, in which case both this '\n 'flag and run_dir are required')\ntf.app.flags.DEFINE_boolean(\n 'save_generator_bundle', False,\n 'If true, instead of generating a sequence, will save this generator as a '\n 'bundle file in the location specified by the bundle_file flag')\ntf.app.flags.DEFINE_string(\n 'bundle_description', None,\n 'A short, human-readable text description of the bundle (e.g., training '\n 'data, hyper parameters, etc.).')\ntf.app.flags.DEFINE_string(\n 'config', 'polyphony', 'Config to use.')\ntf.app.flags.DEFINE_string(\n 'output_dir', '/tmp/polyphony_rnn/generated',\n 'The directory where MIDI files will be saved to.')\ntf.app.flags.DEFINE_integer(\n 'num_outputs', 10,\n 'The number of tracks to generate. One MIDI file will be created for '\n 'each.')\ntf.app.flags.DEFINE_integer(\n 'num_steps', 128,\n 'The total number of steps the generated track should be, priming '\n 'track length + generated steps. Each step is a 16th of a bar.')\ntf.app.flags.DEFINE_string(\n 'primer_pitches', '',\n 'A string representation of a Python list of pitches that will be used as '\n 'a starting chord with a quarter note duration. For example: '\n '\"[60, 64, 67]\"')\ntf.app.flags.DEFINE_string(\n 'primer_melody', '',\n 'A string representation of a Python list of '\n 'magenta.music.Melody event values. For example: '\n '\"[60, -2, 60, -2, 67, -2, 67, -2]\".')\ntf.app.flags.DEFINE_string(\n 'primer_midi', '',\n 'The path to a MIDI file containing a polyphonic track that will be used '\n 'as a priming track.')\ntf.app.flags.DEFINE_boolean(\n 'condition_on_primer', False,\n 'If set, the RNN will receive the primer as its input before it begins '\n 'generating a new sequence.')\ntf.app.flags.DEFINE_boolean(\n 'inject_primer_during_generation', True,\n 'If set, the primer will be injected as a part of the generated sequence. '\n 'This option is useful if you want the model to harmonize an existing '\n 'melody.')\ntf.app.flags.DEFINE_float(\n 'qpm', None,\n 'The quarters per minute to play generated output at. If a primer MIDI is '\n 'given, the qpm from that will override this flag. 
If qpm is None, qpm '\n 'will default to 120.')\ntf.app.flags.DEFINE_float(\n 'temperature', 1.0,\n 'The randomness of the generated tracks. 1.0 uses the unaltered '\n 'softmax probabilities, greater than 1.0 makes tracks more random, less '\n 'than 1.0 makes tracks less random.')\ntf.app.flags.DEFINE_integer(\n 'beam_size', 1,\n 'The beam size to use for beam search when generating tracks.')\ntf.app.flags.DEFINE_integer(\n 'branch_factor', 1,\n 'The branch factor to use for beam search when generating tracks.')\ntf.app.flags.DEFINE_integer(\n 'steps_per_iteration', 1,\n 'The number of steps to take per beam search iteration.')\ntf.app.flags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '\n 'or FATAL.')\ntf.app.flags.DEFINE_string(\n 'hparams', '{}',\n 'String representation of a Python dictionary containing hyperparameter '\n 'to value mapping. This mapping is merged with the default '\n 'hyperparameters.')\n\n\ndef get_checkpoint():\n \"\"\"Get the training dir or checkpoint path to be used by the model.\"\"\"\n if FLAGS.run_dir and FLAGS.bundle_file and not FLAGS.save_generator_bundle:\n raise magenta.music.SequenceGeneratorException(\n 'Cannot specify both bundle_file and run_dir')\n if FLAGS.run_dir:\n train_dir = os.path.join(os.path.expanduser(FLAGS.run_dir), 'train')\n return train_dir\n else:\n return None\n\n\ndef get_bundle():\n \"\"\"Returns a generator_pb2.GeneratorBundle object based read from bundle_file.\n\n Returns:\n Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is\n not set or the save_generator_bundle flag is set.\n \"\"\"\n if FLAGS.save_generator_bundle:\n return None\n if FLAGS.bundle_file is None:\n return None\n bundle_file = os.path.expanduser(FLAGS.bundle_file)\n return magenta.music.read_bundle_file(bundle_file)\n\n\ndef run_with_flags(generator):\n \"\"\"Generates polyphonic tracks and saves them as MIDI files.\n\n Uses the options specified by the flags defined in this module.\n\n Args:\n generator: The PolyphonyRnnSequenceGenerator to use for generation.\n \"\"\"\n if not FLAGS.output_dir:\n tf.logging.fatal('--output_dir required')\n return\n output_dir = os.path.expanduser(FLAGS.output_dir)\n\n primer_midi = None\n if FLAGS.primer_midi:\n primer_midi = os.path.expanduser(FLAGS.primer_midi)\n\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n primer_sequence = None\n qpm = FLAGS.qpm if FLAGS.qpm else magenta.music.DEFAULT_QUARTERS_PER_MINUTE\n if FLAGS.primer_pitches:\n primer_sequence = music_pb2.NoteSequence()\n primer_sequence.tempos.add().qpm = qpm\n primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n for pitch in ast.literal_eval(FLAGS.primer_pitches):\n note = primer_sequence.notes.add()\n note.start_time = 0\n note.end_time = 60.0 / qpm\n note.pitch = pitch\n note.velocity = 100\n primer_sequence.total_time = primer_sequence.notes[-1].end_time\n elif FLAGS.primer_melody:\n primer_melody = magenta.music.Melody(ast.literal_eval(FLAGS.primer_melody))\n primer_sequence = primer_melody.to_sequence(qpm=qpm)\n elif primer_midi:\n primer_sequence = magenta.music.midi_file_to_sequence_proto(primer_midi)\n if primer_sequence.tempos and primer_sequence.tempos[0].qpm:\n qpm = primer_sequence.tempos[0].qpm\n else:\n tf.logging.warning(\n 'No priming sequence specified. 
Defaulting to empty sequence.')\n primer_sequence = music_pb2.NoteSequence()\n primer_sequence.tempos.add().qpm = qpm\n primer_sequence.ticks_per_quarter = constants.STANDARD_PPQ\n\n # Derive the total number of seconds to generate.\n seconds_per_step = 60.0 / qpm / generator.steps_per_quarter\n generate_end_time = FLAGS.num_steps * seconds_per_step\n\n # Specify start/stop time for generation based on starting generation at the\n # end of the priming sequence and continuing until the sequence is num_steps\n # long.\n generator_options = generator_pb2.GeneratorOptions()\n # Set the start time to begin when the last note ends.\n generate_section = generator_options.generate_sections.add(\n start_time=primer_sequence.total_time,\n end_time=generate_end_time)\n\n if generate_section.start_time >= generate_section.end_time:\n tf.logging.fatal(\n 'Priming sequence is longer than the total number of steps '\n 'requested: Priming sequence length: %s, Total length '\n 'requested: %s',\n generate_section.start_time, generate_end_time)\n return\n\n generator_options.args['temperature'].float_value = FLAGS.temperature\n generator_options.args['beam_size'].int_value = FLAGS.beam_size\n generator_options.args['branch_factor'].int_value = FLAGS.branch_factor\n generator_options.args[\n 'steps_per_iteration'].int_value = FLAGS.steps_per_iteration\n\n generator_options.args['condition_on_primer'].bool_value = (\n FLAGS.condition_on_primer)\n generator_options.args['no_inject_primer_during_generation'].bool_value = (\n not FLAGS.inject_primer_during_generation)\n\n tf.logging.debug('primer_sequence: %s', primer_sequence)\n tf.logging.debug('generator_options: %s', generator_options)\n\n # Make the generate request num_outputs times and save the output as midi\n # files.\n date_and_time = time.strftime('%Y-%m-%d_%H%M%S')\n digits = len(str(FLAGS.num_outputs))\n for i in range(FLAGS.num_outputs):\n generated_sequence = generator.generate(primer_sequence, generator_options)\n\n midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))\n midi_path = os.path.join(output_dir, midi_filename)\n magenta.music.sequence_proto_to_midi_file(generated_sequence, midi_path)\n\n tf.logging.info('Wrote %d MIDI files to %s',\n FLAGS.num_outputs, output_dir)\n\n\ndef main(unused_argv):\n \"\"\"Saves bundle or runs generator based on flags.\"\"\"\n tf.logging.set_verbosity(FLAGS.log)\n\n config = polyphony_model.default_configs[FLAGS.config]\n config.hparams.parse(FLAGS.hparams)\n\n generator = polyphony_sequence_generator.PolyphonyRnnSequenceGenerator(\n model=polyphony_model.PolyphonyRnnModel(config),\n details=config.details,\n steps_per_quarter=config.steps_per_quarter,\n checkpoint=get_checkpoint(),\n bundle=get_bundle())\n\n if FLAGS.save_generator_bundle:\n bundle_filename = os.path.expanduser(FLAGS.bundle_file)\n if FLAGS.bundle_description is None:\n tf.logging.warning('No bundle description provided.')\n tf.logging.info('Saving generator bundle to %s', bundle_filename)\n generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)\n else:\n run_with_flags(generator)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/polyphony_rnn/polyphony_rnn_generate.py"}]}
| 3,654 | 241 |
gh_patches_debug_20687 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-3208 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CI: jupyter-server build fails since late september
The `test_singleuser_auth` step fails with the following error ([example failure](https://travis-ci.org/github/jupyterhub/jupyterhub/jobs/729518444))
```
404 Client Error: Not Found for url: http://127.0.0.1:59471/@/space%20word/user/nandy/api/spec.yaml?redirects=2
```
Has something change with regards to `@` symbols or spaces in words like `space word`? Yes it has, in `jupyter-server` it seems, because there have been releases in this time span.

## References
- [jupyter-server changelog](https://github.com/jupyter/jupyter_server/blob/master/CHANGELOG.md)
- [The only PR that I saw in the changelog with clear potential to cause our CI error](https://github.com/jupyter/jupyter_server/pull/304)
- [A seemingly related PR by, @minrk](https://github.com/jupyterhub/jupyterhub/pull/3168)
- [Another seemingly related PR, by @danlester](https://github.com/jupyterhub/jupyterhub/pull/3167)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/traitlets.py`
Content:
```
1 """
2 Traitlets that are used in JupyterHub
3 """
4 # Copyright (c) Jupyter Development Team.
5 # Distributed under the terms of the Modified BSD License.
6 import entrypoints
7 from traitlets import Integer
8 from traitlets import List
9 from traitlets import TraitError
10 from traitlets import TraitType
11 from traitlets import Type
12 from traitlets import Unicode
13
14
15 class URLPrefix(Unicode):
16 def validate(self, obj, value):
17 u = super().validate(obj, value)
18 if not u.startswith('/'):
19 u = '/' + u
20 if not u.endswith('/'):
21 u = u + '/'
22 return u
23
24
25 class Command(List):
26 """Traitlet for a command that should be a list of strings,
27 but allows it to be specified as a single string.
28 """
29
30 def __init__(self, default_value=None, **kwargs):
31 kwargs.setdefault('minlen', 1)
32 if isinstance(default_value, str):
33 default_value = [default_value]
34 super().__init__(Unicode(), default_value, **kwargs)
35
36 def validate(self, obj, value):
37 if isinstance(value, str):
38 value = [value]
39 return super().validate(obj, value)
40
41
42 class ByteSpecification(Integer):
43 """
44 Allow easily specifying bytes in units of 1024 with suffixes
45
46 Suffixes allowed are:
47 - K -> Kilobyte
48 - M -> Megabyte
49 - G -> Gigabyte
50 - T -> Terabyte
51 """
52
53 UNIT_SUFFIXES = {
54 'K': 1024,
55 'M': 1024 * 1024,
56 'G': 1024 * 1024 * 1024,
57 'T': 1024 * 1024 * 1024 * 1024,
58 }
59
60 # Default to allowing None as a value
61 allow_none = True
62
63 def validate(self, obj, value):
64 """
65 Validate that the passed in value is a valid memory specification
66
67 It could either be a pure int, when it is taken as a byte value.
68 If it has one of the suffixes, it is converted into the appropriate
69 pure byte value.
70 """
71 if isinstance(value, (int, float)):
72 return int(value)
73
74 try:
75 num = float(value[:-1])
76 except ValueError:
77 raise TraitError(
78 '{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T'.format(
79 val=value
80 )
81 )
82 suffix = value[-1]
83 if suffix not in self.UNIT_SUFFIXES:
84 raise TraitError(
85 '{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T'.format(
86 val=value
87 )
88 )
89 else:
90 return int(float(num) * self.UNIT_SUFFIXES[suffix])
91
92
93 class Callable(TraitType):
94 """
95 A trait which is callable.
96
97 Classes are callable, as are instances
98 with a __call__() method.
99 """
100
101 info_text = 'a callable'
102
103 def validate(self, obj, value):
104 if callable(value):
105 return value
106 else:
107 self.error(obj, value)
108
109
110 class EntryPointType(Type):
111 """Entry point-extended Type
112
113 classes can be registered via entry points
114 in addition to standard 'mypackage.MyClass' strings
115 """
116
117 _original_help = ''
118
119 def __init__(self, *args, entry_point_group, **kwargs):
120 self.entry_point_group = entry_point_group
121 super().__init__(*args, **kwargs)
122
123 @property
124 def help(self):
125 """Extend help by listing currently installed choices"""
126 chunks = [self._original_help]
127 chunks.append("Currently installed: ")
128 for key, entry_point in self.load_entry_points().items():
129 chunks.append(
130 " - {}: {}.{}".format(
131 key, entry_point.module_name, entry_point.object_name
132 )
133 )
134 return '\n'.join(chunks)
135
136 @help.setter
137 def help(self, value):
138 self._original_help = value
139
140 def load_entry_points(self):
141 """Load my entry point group"""
142 # load the group
143 group = entrypoints.get_group_named(self.entry_point_group)
144 # make it case-insensitive
145 return {key.lower(): value for key, value in group.items()}
146
147 def validate(self, obj, value):
148 if isinstance(value, str):
149 # first, look up in entry point registry
150 registry = self.load_entry_points()
151 key = value.lower()
152 if key in registry:
153 value = registry[key].load()
154 return super().validate(obj, value)
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jupyterhub/traitlets.py b/jupyterhub/traitlets.py
--- a/jupyterhub/traitlets.py
+++ b/jupyterhub/traitlets.py
@@ -9,6 +9,7 @@
from traitlets import TraitError
from traitlets import TraitType
from traitlets import Type
+from traitlets import Undefined
from traitlets import Unicode
@@ -27,11 +28,15 @@
but allows it to be specified as a single string.
"""
- def __init__(self, default_value=None, **kwargs):
+ def __init__(self, default_value=Undefined, **kwargs):
kwargs.setdefault('minlen', 1)
if isinstance(default_value, str):
default_value = [default_value]
- super().__init__(Unicode(), default_value, **kwargs)
+ if default_value is not Undefined and (
+ not (default_value is None and not kwargs.get("allow_none", False))
+ ):
+ kwargs["default_value"] = default_value
+ super().__init__(Unicode(), **kwargs)
def validate(self, obj, value):
if isinstance(value, str):
|
{"golden_diff": "diff --git a/jupyterhub/traitlets.py b/jupyterhub/traitlets.py\n--- a/jupyterhub/traitlets.py\n+++ b/jupyterhub/traitlets.py\n@@ -9,6 +9,7 @@\n from traitlets import TraitError\n from traitlets import TraitType\n from traitlets import Type\n+from traitlets import Undefined\n from traitlets import Unicode\n \n \n@@ -27,11 +28,15 @@\n but allows it to be specified as a single string.\n \"\"\"\n \n- def __init__(self, default_value=None, **kwargs):\n+ def __init__(self, default_value=Undefined, **kwargs):\n kwargs.setdefault('minlen', 1)\n if isinstance(default_value, str):\n default_value = [default_value]\n- super().__init__(Unicode(), default_value, **kwargs)\n+ if default_value is not Undefined and (\n+ not (default_value is None and not kwargs.get(\"allow_none\", False))\n+ ):\n+ kwargs[\"default_value\"] = default_value\n+ super().__init__(Unicode(), **kwargs)\n \n def validate(self, obj, value):\n if isinstance(value, str):\n", "issue": "CI: jupyter-server build fails since late september\nThe `test_singleuser_auth` step fails with the following error ([example failure](https://travis-ci.org/github/jupyterhub/jupyterhub/jobs/729518444))\r\n\r\n```\r\n404 Client Error: Not Found for url: http://127.0.0.1:59471/@/space%20word/user/nandy/api/spec.yaml?redirects=2\r\n```\r\n\r\nHas something change with regards to `@` symbols or spaces in words like `space word`? Yes it has, in `jupyter-server` it seems, because there have been releases in this time span.\r\n\r\n\r\n\r\n## References\r\n- [jupyter-server changelog](https://github.com/jupyter/jupyter_server/blob/master/CHANGELOG.md)\r\n- [The only PR that I saw in the changelog with clear potential to cause our CI error](https://github.com/jupyter/jupyter_server/pull/304)\r\n- [A seemingly related PR by, @minrk](https://github.com/jupyterhub/jupyterhub/pull/3168)\r\n- [Another seemingly related PR, by @danlester](https://github.com/jupyterhub/jupyterhub/pull/3167)\n", "before_files": [{"content": "\"\"\"\nTraitlets that are used in JupyterHub\n\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport entrypoints\nfrom traitlets import Integer\nfrom traitlets import List\nfrom traitlets import TraitError\nfrom traitlets import TraitType\nfrom traitlets import Type\nfrom traitlets import Unicode\n\n\nclass URLPrefix(Unicode):\n def validate(self, obj, value):\n u = super().validate(obj, value)\n if not u.startswith('/'):\n u = '/' + u\n if not u.endswith('/'):\n u = u + '/'\n return u\n\n\nclass Command(List):\n \"\"\"Traitlet for a command that should be a list of strings,\n but allows it to be specified as a single string.\n \"\"\"\n\n def __init__(self, default_value=None, **kwargs):\n kwargs.setdefault('minlen', 1)\n if isinstance(default_value, str):\n default_value = [default_value]\n super().__init__(Unicode(), default_value, **kwargs)\n\n def validate(self, obj, value):\n if isinstance(value, str):\n value = [value]\n return super().validate(obj, value)\n\n\nclass ByteSpecification(Integer):\n \"\"\"\n Allow easily specifying bytes in units of 1024 with suffixes\n\n Suffixes allowed are:\n - K -> Kilobyte\n - M -> Megabyte\n - G -> Gigabyte\n - T -> Terabyte\n \"\"\"\n\n UNIT_SUFFIXES = {\n 'K': 1024,\n 'M': 1024 * 1024,\n 'G': 1024 * 1024 * 1024,\n 'T': 1024 * 1024 * 1024 * 1024,\n }\n\n # Default to allowing None as a value\n allow_none = True\n\n def validate(self, obj, value):\n \"\"\"\n Validate that the passed in value is a valid memory 
specification\n\n It could either be a pure int, when it is taken as a byte value.\n If it has one of the suffixes, it is converted into the appropriate\n pure byte value.\n \"\"\"\n if isinstance(value, (int, float)):\n return int(value)\n\n try:\n num = float(value[:-1])\n except ValueError:\n raise TraitError(\n '{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T'.format(\n val=value\n )\n )\n suffix = value[-1]\n if suffix not in self.UNIT_SUFFIXES:\n raise TraitError(\n '{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T'.format(\n val=value\n )\n )\n else:\n return int(float(num) * self.UNIT_SUFFIXES[suffix])\n\n\nclass Callable(TraitType):\n \"\"\"\n A trait which is callable.\n\n Classes are callable, as are instances\n with a __call__() method.\n \"\"\"\n\n info_text = 'a callable'\n\n def validate(self, obj, value):\n if callable(value):\n return value\n else:\n self.error(obj, value)\n\n\nclass EntryPointType(Type):\n \"\"\"Entry point-extended Type\n\n classes can be registered via entry points\n in addition to standard 'mypackage.MyClass' strings\n \"\"\"\n\n _original_help = ''\n\n def __init__(self, *args, entry_point_group, **kwargs):\n self.entry_point_group = entry_point_group\n super().__init__(*args, **kwargs)\n\n @property\n def help(self):\n \"\"\"Extend help by listing currently installed choices\"\"\"\n chunks = [self._original_help]\n chunks.append(\"Currently installed: \")\n for key, entry_point in self.load_entry_points().items():\n chunks.append(\n \" - {}: {}.{}\".format(\n key, entry_point.module_name, entry_point.object_name\n )\n )\n return '\\n'.join(chunks)\n\n @help.setter\n def help(self, value):\n self._original_help = value\n\n def load_entry_points(self):\n \"\"\"Load my entry point group\"\"\"\n # load the group\n group = entrypoints.get_group_named(self.entry_point_group)\n # make it case-insensitive\n return {key.lower(): value for key, value in group.items()}\n\n def validate(self, obj, value):\n if isinstance(value, str):\n # first, look up in entry point registry\n registry = self.load_entry_points()\n key = value.lower()\n if key in registry:\n value = registry[key].load()\n return super().validate(obj, value)\n", "path": "jupyterhub/traitlets.py"}], "after_files": [{"content": "\"\"\"\nTraitlets that are used in JupyterHub\n\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport entrypoints\nfrom traitlets import Integer\nfrom traitlets import List\nfrom traitlets import TraitError\nfrom traitlets import TraitType\nfrom traitlets import Type\nfrom traitlets import Undefined\nfrom traitlets import Unicode\n\n\nclass URLPrefix(Unicode):\n def validate(self, obj, value):\n u = super().validate(obj, value)\n if not u.startswith('/'):\n u = '/' + u\n if not u.endswith('/'):\n u = u + '/'\n return u\n\n\nclass Command(List):\n \"\"\"Traitlet for a command that should be a list of strings,\n but allows it to be specified as a single string.\n \"\"\"\n\n def __init__(self, default_value=Undefined, **kwargs):\n kwargs.setdefault('minlen', 1)\n if isinstance(default_value, str):\n default_value = [default_value]\n if default_value is not Undefined and (\n not (default_value is None and not kwargs.get(\"allow_none\", False))\n ):\n kwargs[\"default_value\"] = default_value\n super().__init__(Unicode(), **kwargs)\n\n def validate(self, obj, value):\n if isinstance(value, str):\n value = [value]\n return 
super().validate(obj, value)\n\n\nclass ByteSpecification(Integer):\n \"\"\"\n Allow easily specifying bytes in units of 1024 with suffixes\n\n Suffixes allowed are:\n - K -> Kilobyte\n - M -> Megabyte\n - G -> Gigabyte\n - T -> Terabyte\n \"\"\"\n\n UNIT_SUFFIXES = {\n 'K': 1024,\n 'M': 1024 * 1024,\n 'G': 1024 * 1024 * 1024,\n 'T': 1024 * 1024 * 1024 * 1024,\n }\n\n # Default to allowing None as a value\n allow_none = True\n\n def validate(self, obj, value):\n \"\"\"\n Validate that the passed in value is a valid memory specification\n\n It could either be a pure int, when it is taken as a byte value.\n If it has one of the suffixes, it is converted into the appropriate\n pure byte value.\n \"\"\"\n if isinstance(value, (int, float)):\n return int(value)\n\n try:\n num = float(value[:-1])\n except ValueError:\n raise TraitError(\n '{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T'.format(\n val=value\n )\n )\n suffix = value[-1]\n if suffix not in self.UNIT_SUFFIXES:\n raise TraitError(\n '{val} is not a valid memory specification. Must be an int or a string with suffix K, M, G, T'.format(\n val=value\n )\n )\n else:\n return int(float(num) * self.UNIT_SUFFIXES[suffix])\n\n\nclass Callable(TraitType):\n \"\"\"\n A trait which is callable.\n\n Classes are callable, as are instances\n with a __call__() method.\n \"\"\"\n\n info_text = 'a callable'\n\n def validate(self, obj, value):\n if callable(value):\n return value\n else:\n self.error(obj, value)\n\n\nclass EntryPointType(Type):\n \"\"\"Entry point-extended Type\n\n classes can be registered via entry points\n in addition to standard 'mypackage.MyClass' strings\n \"\"\"\n\n _original_help = ''\n\n def __init__(self, *args, entry_point_group, **kwargs):\n self.entry_point_group = entry_point_group\n super().__init__(*args, **kwargs)\n\n @property\n def help(self):\n \"\"\"Extend help by listing currently installed choices\"\"\"\n chunks = [self._original_help]\n chunks.append(\"Currently installed: \")\n for key, entry_point in self.load_entry_points().items():\n chunks.append(\n \" - {}: {}.{}\".format(\n key, entry_point.module_name, entry_point.object_name\n )\n )\n return '\\n'.join(chunks)\n\n @help.setter\n def help(self, value):\n self._original_help = value\n\n def load_entry_points(self):\n \"\"\"Load my entry point group\"\"\"\n # load the group\n group = entrypoints.get_group_named(self.entry_point_group)\n # make it case-insensitive\n return {key.lower(): value for key, value in group.items()}\n\n def validate(self, obj, value):\n if isinstance(value, str):\n # first, look up in entry point registry\n registry = self.load_entry_points()\n key = value.lower()\n if key in registry:\n value = registry[key].load()\n return super().validate(obj, value)\n", "path": "jupyterhub/traitlets.py"}]}
| 2,000 | 263 |
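A minimal, self-contained sketch of the sentinel-default pattern used in the patch above, assuming the `traitlets` package is installed; the `ExampleSpawner` class and its command value are hypothetical and exist only to show a supplied default passing through while an omitted one is left to the trait machinery:

```python
from traitlets import HasTraits, List, Undefined, Unicode


class Command(List):
    """List-of-strings trait that also accepts a single string (sketch)."""

    def __init__(self, default_value=Undefined, **kwargs):
        kwargs.setdefault("minlen", 1)
        if isinstance(default_value, str):
            default_value = [default_value]
        # Forward default_value only when the caller actually supplied one,
        # so that None no longer clobbers class-level dynamic defaults.
        if default_value is not Undefined and not (
            default_value is None and not kwargs.get("allow_none", False)
        ):
            kwargs["default_value"] = default_value
        super().__init__(Unicode(), **kwargs)


class ExampleSpawner(HasTraits):
    cmd = Command("jupyterhub-singleuser")  # hypothetical trait for illustration


if __name__ == "__main__":
    print(ExampleSpawner().cmd)  # -> ['jupyterhub-singleuser']
```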
gh_patches_debug_24464
|
rasdani/github-patches
|
git_diff
|
geopandas__geopandas-1415
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Append to PostGIS table not working when table does not exist
@jorisvandenbossche @martinfleis
When the table does not exist in the PostGIS database, using `if_exists="append"` raises an error because the SRID check is attempted on a non-existing table.
Fix is on the way.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/io/sql.py`
Content:
```
1 import warnings
2
3 import pandas as pd
4
5 import shapely.wkb
6
7 from geopandas import GeoDataFrame
8
9 from .. import _compat as compat
10
11
12 def _read_postgis(
13 sql,
14 con,
15 geom_col="geom",
16 crs=None,
17 index_col=None,
18 coerce_float=True,
19 parse_dates=None,
20 params=None,
21 ):
22 """
23 Returns a GeoDataFrame corresponding to the result of the query
24 string, which must contain a geometry column in WKB representation.
25
26 Parameters
27 ----------
28 sql : string
29 SQL query to execute in selecting entries from database, or name
30 of the table to read from the database.
31 con : DB connection object or SQLAlchemy engine
32 Active connection to the database to query.
33 geom_col : string, default 'geom'
34 column name to convert to shapely geometries
35 crs : pyproj.CRS, optional
36 CRS to use for the returned GeoDataFrame. The value can be anything accepted
37 by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
38 such as an authority string (eg "EPSG:4326") or a WKT string.
39 If not set, tries to determine CRS from the SRID associated with the
40 first geometry in the database, and assigns that to all geometries.
41
42 See the documentation for pandas.read_sql for further explanation
43 of the following parameters:
44 index_col, coerce_float, parse_dates, params
45
46 Returns
47 -------
48 GeoDataFrame
49
50 Example
51 -------
52 PostGIS
53 >>> sql = "SELECT geom, kind FROM polygons"
54 SpatiaLite
55 >>> sql = "SELECT ST_AsBinary(geom) AS geom, kind FROM polygons"
56 >>> df = geopandas.read_postgis(sql, con)
57 """
58
59 df = pd.read_sql(
60 sql,
61 con,
62 index_col=index_col,
63 coerce_float=coerce_float,
64 parse_dates=parse_dates,
65 params=params,
66 )
67
68 if geom_col not in df:
69 raise ValueError("Query missing geometry column '{}'".format(geom_col))
70
71 geoms = df[geom_col].dropna()
72
73 if not geoms.empty:
74 load_geom_bytes = shapely.wkb.loads
75 """Load from Python 3 binary."""
76
77 def load_geom_buffer(x):
78 """Load from Python 2 binary."""
79 return shapely.wkb.loads(str(x))
80
81 def load_geom_text(x):
82 """Load from binary encoded as text."""
83 return shapely.wkb.loads(str(x), hex=True)
84
85 if isinstance(geoms.iat[0], bytes):
86 load_geom = load_geom_bytes
87 else:
88 load_geom = load_geom_text
89
90 df[geom_col] = geoms = geoms.apply(load_geom)
91 if crs is None:
92 srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)
93 # if no defined SRID in geodatabase, returns SRID of 0
94 if srid != 0:
95 crs = "epsg:{}".format(srid)
96
97 return GeoDataFrame(df, crs=crs, geometry=geom_col)
98
99
100 def read_postgis(*args, **kwargs):
101 import warnings
102
103 warnings.warn(
104 "geopandas.io.sql.read_postgis() is intended for internal "
105 "use only, and will be deprecated. Use geopandas.read_postgis() instead.",
106 DeprecationWarning,
107 stacklevel=2,
108 )
109
110 return _read_postgis(*args, **kwargs)
111
112
113 def _get_geometry_type(gdf):
114 """
115 Get basic geometry type of a GeoDataFrame. See more info from:
116 https://geoalchemy-2.readthedocs.io/en/latest/types.html#geoalchemy2.types._GISType
117
118 Following rules apply:
119 - if geometries all share the same geometry-type,
120 geometries are inserted with the given GeometryType with following types:
121 - Point, LineString, Polygon, MultiPoint, MultiLineString, MultiPolygon,
122 GeometryCollection.
123 - LinearRing geometries will be converted into LineString -objects.
124 - in all other cases, geometries will be inserted with type GEOMETRY:
125 - a mix of Polygons and MultiPolygons in GeoSeries
126 - a mix of Points and LineStrings in GeoSeries
127 - geometry is of type GeometryCollection,
128 such as GeometryCollection([Point, LineStrings])
129 - if any of the geometries has Z-coordinate, all records will
130 be written with 3D.
131 """
132 geom_types = list(gdf.geometry.geom_type.unique())
133 has_curve = False
134
135 for gt in geom_types:
136 if gt is None:
137 continue
138 elif "LinearRing" in gt:
139 has_curve = True
140
141 if len(geom_types) == 1:
142 if has_curve:
143 target_geom_type = "LINESTRING"
144 else:
145 if geom_types[0] is None:
146 raise ValueError("No valid geometries in the data.")
147 else:
148 target_geom_type = geom_types[0].upper()
149 else:
150 target_geom_type = "GEOMETRY"
151
152 # Check for 3D-coordinates
153 if any(gdf.geometry.has_z):
154 target_geom_type = target_geom_type + "Z"
155
156 return target_geom_type, has_curve
157
158
159 def _get_srid_from_crs(gdf):
160 """
161 Get EPSG code from CRS if available. If not, return -1.
162 """
163
164 # Use geoalchemy2 default for srid
165 # Note: undefined srid in PostGIS is 0
166 srid = -1
167 warning_msg = (
168 "Could not parse CRS from the GeoDataFrame. "
169 + "Inserting data without defined CRS.",
170 )
171 if gdf.crs is not None:
172 try:
173 srid = gdf.crs.to_epsg(min_confidence=25)
174 if srid is None:
175 srid = -1
176 warnings.warn(warning_msg, UserWarning, stacklevel=2)
177 except Exception:
178 warnings.warn(warning_msg, UserWarning, stacklevel=2)
179 return srid
180
181
182 def _convert_linearring_to_linestring(gdf, geom_name):
183 from shapely.geometry import LineString
184
185 # Todo: Use Pygeos function once it's implemented:
186 # https://github.com/pygeos/pygeos/issues/76
187
188 mask = gdf.geom_type == "LinearRing"
189 gdf.loc[mask, geom_name] = gdf.loc[mask, geom_name].apply(
190 lambda geom: LineString(geom)
191 )
192 return gdf
193
194
195 def _convert_to_ewkb(gdf, geom_name, srid):
196 """Convert geometries to ewkb. """
197 if compat.USE_PYGEOS:
198 from pygeos import set_srid, to_wkb
199
200 geoms = to_wkb(
201 set_srid(gdf[geom_name].values.data, srid=srid), hex=True, include_srid=True
202 )
203
204 else:
205 from shapely.wkb import dumps
206
207 geoms = [dumps(geom, srid=srid, hex=True) for geom in gdf[geom_name]]
208
209 gdf[geom_name] = geoms
210 return gdf
211
212
213 def _psql_insert_copy(tbl, conn, keys, data_iter):
214 import io
215 import csv
216
217 s_buf = io.StringIO()
218 writer = csv.writer(s_buf)
219 writer.writerows(data_iter)
220 s_buf.seek(0)
221
222 columns = ", ".join('"{}"'.format(k) for k in keys)
223
224 dbapi_conn = conn.connection
225 with dbapi_conn.cursor() as cur:
226 sql = "COPY {} ({}) FROM STDIN WITH CSV".format(tbl.table.fullname, columns)
227 cur.copy_expert(sql=sql, file=s_buf)
228
229
230 def _write_postgis(
231 gdf,
232 name,
233 con,
234 schema=None,
235 if_exists="fail",
236 index=False,
237 index_label=None,
238 chunksize=None,
239 dtype=None,
240 ):
241 """
242 Upload GeoDataFrame into PostGIS database.
243
244 This method requires SQLAlchemy and GeoAlchemy2, and a PostgreSQL
245 Python driver (e.g. psycopg2) to be installed.
246
247 Parameters
248 ----------
249 name : str
250 Name of the target table.
251 con : sqlalchemy.engine.Engine
252 Active connection to the PostGIS database.
253 if_exists : {'fail', 'replace', 'append'}, default 'fail'
254 How to behave if the table already exists:
255
256 - fail: Raise a ValueError.
257 - replace: Drop the table before inserting new values.
258 - append: Insert new values to the existing table.
259 schema : string, optional
260 Specify the schema. If None, use default schema: 'public'.
261 index : bool, default True
262 Write DataFrame index as a column.
263 Uses *index_label* as the column name in the table.
264 index_label : string or sequence, default None
265 Column label for index column(s).
266 If None is given (default) and index is True,
267 then the index names are used.
268 chunksize : int, optional
269 Rows will be written in batches of this size at a time.
270 By default, all rows will be written at once.
271 dtype : dict of column name to SQL type, default None
272 Specifying the datatype for columns.
273 The keys should be the column names and the values
274 should be the SQLAlchemy types.
275
276 Examples
277 --------
278
279 >>> from sqlalchemy import create_engine
280 >>> engine = create_engine("postgres://myusername:mypassword@myhost:5432\
281 /mydatabase";)
282 >>> gdf.to_postgis("my_table", engine)
283 """
284 try:
285 from geoalchemy2 import Geometry
286 except ImportError:
287 raise ImportError("'to_postgis()' requires geoalchemy2 package. ")
288
289 if not compat.SHAPELY_GE_17:
290 raise ImportError(
291 "'to_postgis()' requires newer version of Shapely "
292 "(>= '1.7.0').\nYou can update the library using "
293 "'pip install shapely --upgrade' or using "
294 "'conda update shapely' if using conda package manager."
295 )
296
297 gdf = gdf.copy()
298 geom_name = gdf.geometry.name
299
300 # Get srid
301 srid = _get_srid_from_crs(gdf)
302
303 # Get geometry type and info whether data contains LinearRing.
304 geometry_type, has_curve = _get_geometry_type(gdf)
305
306 # Build dtype with Geometry
307 if dtype is not None:
308 dtype[geom_name] = Geometry(geometry_type=geometry_type, srid=srid)
309 else:
310 dtype = {geom_name: Geometry(geometry_type=geometry_type, srid=srid)}
311
312 # Convert LinearRing geometries to LineString
313 if has_curve:
314 gdf = _convert_linearring_to_linestring(gdf, geom_name)
315
316 # Convert geometries to EWKB
317 gdf = _convert_to_ewkb(gdf, geom_name, srid)
318
319 if if_exists == "append":
320 # Check that the geometry srid matches with the current GeoDataFrame
321 with con.begin() as connection:
322 if schema is not None:
323 schema_name = schema
324 else:
325 schema_name = "public"
326
327 target_srid = connection.execute(
328 "SELECT Find_SRID('{schema}', '{table}', '{geom_col}');".format(
329 schema=schema_name, table=name, geom_col=geom_name
330 )
331 ).fetchone()[0]
332
333 if target_srid != srid:
334 msg = (
335 "The CRS of the target table (EPSG:{epsg_t}) differs from the "
336 "CRS of current GeoDataFrame (EPSG:{epsg_src}).".format(
337 epsg_t=target_srid, epsg_src=srid
338 )
339 )
340 raise ValueError(msg)
341
342 with con.begin() as connection:
343
344 gdf.to_sql(
345 name,
346 connection,
347 schema=schema,
348 if_exists=if_exists,
349 index=index,
350 index_label=index_label,
351 chunksize=chunksize,
352 dtype=dtype,
353 method=_psql_insert_copy,
354 )
355
356 return
357
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geopandas/io/sql.py b/geopandas/io/sql.py
--- a/geopandas/io/sql.py
+++ b/geopandas/io/sql.py
@@ -324,20 +324,22 @@
else:
schema_name = "public"
- target_srid = connection.execute(
- "SELECT Find_SRID('{schema}', '{table}', '{geom_col}');".format(
- schema=schema_name, table=name, geom_col=geom_name
- )
- ).fetchone()[0]
-
- if target_srid != srid:
- msg = (
- "The CRS of the target table (EPSG:{epsg_t}) differs from the "
- "CRS of current GeoDataFrame (EPSG:{epsg_src}).".format(
- epsg_t=target_srid, epsg_src=srid
+ # Only check SRID if table exists
+ if connection.run_callable(connection.dialect.has_table, name, schema):
+ target_srid = connection.execute(
+ "SELECT Find_SRID('{schema}', '{table}', '{geom_col}');".format(
+ schema=schema_name, table=name, geom_col=geom_name
)
- )
- raise ValueError(msg)
+ ).fetchone()[0]
+
+ if target_srid != srid:
+ msg = (
+ "The CRS of the target table (EPSG:{epsg_t}) differs from the "
+ "CRS of current GeoDataFrame (EPSG:{epsg_src}).".format(
+ epsg_t=target_srid, epsg_src=srid
+ )
+ )
+ raise ValueError(msg)
with con.begin() as connection:
|
{"golden_diff": "diff --git a/geopandas/io/sql.py b/geopandas/io/sql.py\n--- a/geopandas/io/sql.py\n+++ b/geopandas/io/sql.py\n@@ -324,20 +324,22 @@\n else:\n schema_name = \"public\"\n \n- target_srid = connection.execute(\n- \"SELECT Find_SRID('{schema}', '{table}', '{geom_col}');\".format(\n- schema=schema_name, table=name, geom_col=geom_name\n- )\n- ).fetchone()[0]\n-\n- if target_srid != srid:\n- msg = (\n- \"The CRS of the target table (EPSG:{epsg_t}) differs from the \"\n- \"CRS of current GeoDataFrame (EPSG:{epsg_src}).\".format(\n- epsg_t=target_srid, epsg_src=srid\n+ # Only check SRID if table exists\n+ if connection.run_callable(connection.dialect.has_table, name, schema):\n+ target_srid = connection.execute(\n+ \"SELECT Find_SRID('{schema}', '{table}', '{geom_col}');\".format(\n+ schema=schema_name, table=name, geom_col=geom_name\n )\n- )\n- raise ValueError(msg)\n+ ).fetchone()[0]\n+\n+ if target_srid != srid:\n+ msg = (\n+ \"The CRS of the target table (EPSG:{epsg_t}) differs from the \"\n+ \"CRS of current GeoDataFrame (EPSG:{epsg_src}).\".format(\n+ epsg_t=target_srid, epsg_src=srid\n+ )\n+ )\n+ raise ValueError(msg)\n \n with con.begin() as connection:\n", "issue": "Append to PostGIS table not working when table does not exist \n@jorisvandenbossche @martinfleis \r\nWhen table does not exist in the PostGIS database, using the `if_exists=\"append\"` raises an error because SRID check is attempted from non-existing table.\r\n\r\nFix is on the way. \n", "before_files": [{"content": "import warnings\n\nimport pandas as pd\n\nimport shapely.wkb\n\nfrom geopandas import GeoDataFrame\n\nfrom .. import _compat as compat\n\n\ndef _read_postgis(\n sql,\n con,\n geom_col=\"geom\",\n crs=None,\n index_col=None,\n coerce_float=True,\n parse_dates=None,\n params=None,\n):\n \"\"\"\n Returns a GeoDataFrame corresponding to the result of the query\n string, which must contain a geometry column in WKB representation.\n\n Parameters\n ----------\n sql : string\n SQL query to execute in selecting entries from database, or name\n of the table to read from the database.\n con : DB connection object or SQLAlchemy engine\n Active connection to the database to query.\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n crs : pyproj.CRS, optional\n CRS to use for the returned GeoDataFrame. 
The value can be anything accepted\n by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n If not set, tries to determine CRS from the SRID associated with the\n first geometry in the database, and assigns that to all geometries.\n\n See the documentation for pandas.read_sql for further explanation\n of the following parameters:\n index_col, coerce_float, parse_dates, params\n\n Returns\n -------\n GeoDataFrame\n\n Example\n -------\n PostGIS\n >>> sql = \"SELECT geom, kind FROM polygons\"\n SpatiaLite\n >>> sql = \"SELECT ST_AsBinary(geom) AS geom, kind FROM polygons\"\n >>> df = geopandas.read_postgis(sql, con)\n \"\"\"\n\n df = pd.read_sql(\n sql,\n con,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n params=params,\n )\n\n if geom_col not in df:\n raise ValueError(\"Query missing geometry column '{}'\".format(geom_col))\n\n geoms = df[geom_col].dropna()\n\n if not geoms.empty:\n load_geom_bytes = shapely.wkb.loads\n \"\"\"Load from Python 3 binary.\"\"\"\n\n def load_geom_buffer(x):\n \"\"\"Load from Python 2 binary.\"\"\"\n return shapely.wkb.loads(str(x))\n\n def load_geom_text(x):\n \"\"\"Load from binary encoded as text.\"\"\"\n return shapely.wkb.loads(str(x), hex=True)\n\n if isinstance(geoms.iat[0], bytes):\n load_geom = load_geom_bytes\n else:\n load_geom = load_geom_text\n\n df[geom_col] = geoms = geoms.apply(load_geom)\n if crs is None:\n srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)\n # if no defined SRID in geodatabase, returns SRID of 0\n if srid != 0:\n crs = \"epsg:{}\".format(srid)\n\n return GeoDataFrame(df, crs=crs, geometry=geom_col)\n\n\ndef read_postgis(*args, **kwargs):\n import warnings\n\n warnings.warn(\n \"geopandas.io.sql.read_postgis() is intended for internal \"\n \"use only, and will be deprecated. Use geopandas.read_postgis() instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return _read_postgis(*args, **kwargs)\n\n\ndef _get_geometry_type(gdf):\n \"\"\"\n Get basic geometry type of a GeoDataFrame. 
See more info from:\n https://geoalchemy-2.readthedocs.io/en/latest/types.html#geoalchemy2.types._GISType\n\n Following rules apply:\n - if geometries all share the same geometry-type,\n geometries are inserted with the given GeometryType with following types:\n - Point, LineString, Polygon, MultiPoint, MultiLineString, MultiPolygon,\n GeometryCollection.\n - LinearRing geometries will be converted into LineString -objects.\n - in all other cases, geometries will be inserted with type GEOMETRY:\n - a mix of Polygons and MultiPolygons in GeoSeries\n - a mix of Points and LineStrings in GeoSeries\n - geometry is of type GeometryCollection,\n such as GeometryCollection([Point, LineStrings])\n - if any of the geometries has Z-coordinate, all records will\n be written with 3D.\n \"\"\"\n geom_types = list(gdf.geometry.geom_type.unique())\n has_curve = False\n\n for gt in geom_types:\n if gt is None:\n continue\n elif \"LinearRing\" in gt:\n has_curve = True\n\n if len(geom_types) == 1:\n if has_curve:\n target_geom_type = \"LINESTRING\"\n else:\n if geom_types[0] is None:\n raise ValueError(\"No valid geometries in the data.\")\n else:\n target_geom_type = geom_types[0].upper()\n else:\n target_geom_type = \"GEOMETRY\"\n\n # Check for 3D-coordinates\n if any(gdf.geometry.has_z):\n target_geom_type = target_geom_type + \"Z\"\n\n return target_geom_type, has_curve\n\n\ndef _get_srid_from_crs(gdf):\n \"\"\"\n Get EPSG code from CRS if available. If not, return -1.\n \"\"\"\n\n # Use geoalchemy2 default for srid\n # Note: undefined srid in PostGIS is 0\n srid = -1\n warning_msg = (\n \"Could not parse CRS from the GeoDataFrame. \"\n + \"Inserting data without defined CRS.\",\n )\n if gdf.crs is not None:\n try:\n srid = gdf.crs.to_epsg(min_confidence=25)\n if srid is None:\n srid = -1\n warnings.warn(warning_msg, UserWarning, stacklevel=2)\n except Exception:\n warnings.warn(warning_msg, UserWarning, stacklevel=2)\n return srid\n\n\ndef _convert_linearring_to_linestring(gdf, geom_name):\n from shapely.geometry import LineString\n\n # Todo: Use Pygeos function once it's implemented:\n # https://github.com/pygeos/pygeos/issues/76\n\n mask = gdf.geom_type == \"LinearRing\"\n gdf.loc[mask, geom_name] = gdf.loc[mask, geom_name].apply(\n lambda geom: LineString(geom)\n )\n return gdf\n\n\ndef _convert_to_ewkb(gdf, geom_name, srid):\n \"\"\"Convert geometries to ewkb. \"\"\"\n if compat.USE_PYGEOS:\n from pygeos import set_srid, to_wkb\n\n geoms = to_wkb(\n set_srid(gdf[geom_name].values.data, srid=srid), hex=True, include_srid=True\n )\n\n else:\n from shapely.wkb import dumps\n\n geoms = [dumps(geom, srid=srid, hex=True) for geom in gdf[geom_name]]\n\n gdf[geom_name] = geoms\n return gdf\n\n\ndef _psql_insert_copy(tbl, conn, keys, data_iter):\n import io\n import csv\n\n s_buf = io.StringIO()\n writer = csv.writer(s_buf)\n writer.writerows(data_iter)\n s_buf.seek(0)\n\n columns = \", \".join('\"{}\"'.format(k) for k in keys)\n\n dbapi_conn = conn.connection\n with dbapi_conn.cursor() as cur:\n sql = \"COPY {} ({}) FROM STDIN WITH CSV\".format(tbl.table.fullname, columns)\n cur.copy_expert(sql=sql, file=s_buf)\n\n\ndef _write_postgis(\n gdf,\n name,\n con,\n schema=None,\n if_exists=\"fail\",\n index=False,\n index_label=None,\n chunksize=None,\n dtype=None,\n):\n \"\"\"\n Upload GeoDataFrame into PostGIS database.\n\n This method requires SQLAlchemy and GeoAlchemy2, and a PostgreSQL\n Python driver (e.g. 
psycopg2) to be installed.\n\n Parameters\n ----------\n name : str\n Name of the target table.\n con : sqlalchemy.engine.Engine\n Active connection to the PostGIS database.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists:\n\n - fail: Raise a ValueError.\n - replace: Drop the table before inserting new values.\n - append: Insert new values to the existing table.\n schema : string, optional\n Specify the schema. If None, use default schema: 'public'.\n index : bool, default True\n Write DataFrame index as a column.\n Uses *index_label* as the column name in the table.\n index_label : string or sequence, default None\n Column label for index column(s).\n If None is given (default) and index is True,\n then the index names are used.\n chunksize : int, optional\n Rows will be written in batches of this size at a time.\n By default, all rows will be written at once.\n dtype : dict of column name to SQL type, default None\n Specifying the datatype for columns.\n The keys should be the column names and the values\n should be the SQLAlchemy types.\n\n Examples\n --------\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine(\"postgres://myusername:mypassword@myhost:5432\\\n/mydatabase\";)\n >>> gdf.to_postgis(\"my_table\", engine)\n \"\"\"\n try:\n from geoalchemy2 import Geometry\n except ImportError:\n raise ImportError(\"'to_postgis()' requires geoalchemy2 package. \")\n\n if not compat.SHAPELY_GE_17:\n raise ImportError(\n \"'to_postgis()' requires newer version of Shapely \"\n \"(>= '1.7.0').\\nYou can update the library using \"\n \"'pip install shapely --upgrade' or using \"\n \"'conda update shapely' if using conda package manager.\"\n )\n\n gdf = gdf.copy()\n geom_name = gdf.geometry.name\n\n # Get srid\n srid = _get_srid_from_crs(gdf)\n\n # Get geometry type and info whether data contains LinearRing.\n geometry_type, has_curve = _get_geometry_type(gdf)\n\n # Build dtype with Geometry\n if dtype is not None:\n dtype[geom_name] = Geometry(geometry_type=geometry_type, srid=srid)\n else:\n dtype = {geom_name: Geometry(geometry_type=geometry_type, srid=srid)}\n\n # Convert LinearRing geometries to LineString\n if has_curve:\n gdf = _convert_linearring_to_linestring(gdf, geom_name)\n\n # Convert geometries to EWKB\n gdf = _convert_to_ewkb(gdf, geom_name, srid)\n\n if if_exists == \"append\":\n # Check that the geometry srid matches with the current GeoDataFrame\n with con.begin() as connection:\n if schema is not None:\n schema_name = schema\n else:\n schema_name = \"public\"\n\n target_srid = connection.execute(\n \"SELECT Find_SRID('{schema}', '{table}', '{geom_col}');\".format(\n schema=schema_name, table=name, geom_col=geom_name\n )\n ).fetchone()[0]\n\n if target_srid != srid:\n msg = (\n \"The CRS of the target table (EPSG:{epsg_t}) differs from the \"\n \"CRS of current GeoDataFrame (EPSG:{epsg_src}).\".format(\n epsg_t=target_srid, epsg_src=srid\n )\n )\n raise ValueError(msg)\n\n with con.begin() as connection:\n\n gdf.to_sql(\n name,\n connection,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=_psql_insert_copy,\n )\n\n return\n", "path": "geopandas/io/sql.py"}], "after_files": [{"content": "import warnings\n\nimport pandas as pd\n\nimport shapely.wkb\n\nfrom geopandas import GeoDataFrame\n\nfrom .. 
import _compat as compat\n\n\ndef _read_postgis(\n sql,\n con,\n geom_col=\"geom\",\n crs=None,\n index_col=None,\n coerce_float=True,\n parse_dates=None,\n params=None,\n):\n \"\"\"\n Returns a GeoDataFrame corresponding to the result of the query\n string, which must contain a geometry column in WKB representation.\n\n Parameters\n ----------\n sql : string\n SQL query to execute in selecting entries from database, or name\n of the table to read from the database.\n con : DB connection object or SQLAlchemy engine\n Active connection to the database to query.\n geom_col : string, default 'geom'\n column name to convert to shapely geometries\n crs : pyproj.CRS, optional\n CRS to use for the returned GeoDataFrame. The value can be anything accepted\n by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,\n such as an authority string (eg \"EPSG:4326\") or a WKT string.\n If not set, tries to determine CRS from the SRID associated with the\n first geometry in the database, and assigns that to all geometries.\n\n See the documentation for pandas.read_sql for further explanation\n of the following parameters:\n index_col, coerce_float, parse_dates, params\n\n Returns\n -------\n GeoDataFrame\n\n Example\n -------\n PostGIS\n >>> sql = \"SELECT geom, kind FROM polygons\"\n SpatiaLite\n >>> sql = \"SELECT ST_AsBinary(geom) AS geom, kind FROM polygons\"\n >>> df = geopandas.read_postgis(sql, con)\n \"\"\"\n\n df = pd.read_sql(\n sql,\n con,\n index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates,\n params=params,\n )\n\n if geom_col not in df:\n raise ValueError(\"Query missing geometry column '{}'\".format(geom_col))\n\n geoms = df[geom_col].dropna()\n\n if not geoms.empty:\n load_geom_bytes = shapely.wkb.loads\n \"\"\"Load from Python 3 binary.\"\"\"\n\n def load_geom_buffer(x):\n \"\"\"Load from Python 2 binary.\"\"\"\n return shapely.wkb.loads(str(x))\n\n def load_geom_text(x):\n \"\"\"Load from binary encoded as text.\"\"\"\n return shapely.wkb.loads(str(x), hex=True)\n\n if isinstance(geoms.iat[0], bytes):\n load_geom = load_geom_bytes\n else:\n load_geom = load_geom_text\n\n df[geom_col] = geoms = geoms.apply(load_geom)\n if crs is None:\n srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)\n # if no defined SRID in geodatabase, returns SRID of 0\n if srid != 0:\n crs = \"epsg:{}\".format(srid)\n\n return GeoDataFrame(df, crs=crs, geometry=geom_col)\n\n\ndef read_postgis(*args, **kwargs):\n import warnings\n\n warnings.warn(\n \"geopandas.io.sql.read_postgis() is intended for internal \"\n \"use only, and will be deprecated. Use geopandas.read_postgis() instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return _read_postgis(*args, **kwargs)\n\n\ndef _get_geometry_type(gdf):\n \"\"\"\n Get basic geometry type of a GeoDataFrame. 
See more info from:\n https://geoalchemy-2.readthedocs.io/en/latest/types.html#geoalchemy2.types._GISType\n\n Following rules apply:\n - if geometries all share the same geometry-type,\n geometries are inserted with the given GeometryType with following types:\n - Point, LineString, Polygon, MultiPoint, MultiLineString, MultiPolygon,\n GeometryCollection.\n - LinearRing geometries will be converted into LineString -objects.\n - in all other cases, geometries will be inserted with type GEOMETRY:\n - a mix of Polygons and MultiPolygons in GeoSeries\n - a mix of Points and LineStrings in GeoSeries\n - geometry is of type GeometryCollection,\n such as GeometryCollection([Point, LineStrings])\n - if any of the geometries has Z-coordinate, all records will\n be written with 3D.\n \"\"\"\n geom_types = list(gdf.geometry.geom_type.unique())\n has_curve = False\n\n for gt in geom_types:\n if gt is None:\n continue\n elif \"LinearRing\" in gt:\n has_curve = True\n\n if len(geom_types) == 1:\n if has_curve:\n target_geom_type = \"LINESTRING\"\n else:\n if geom_types[0] is None:\n raise ValueError(\"No valid geometries in the data.\")\n else:\n target_geom_type = geom_types[0].upper()\n else:\n target_geom_type = \"GEOMETRY\"\n\n # Check for 3D-coordinates\n if any(gdf.geometry.has_z):\n target_geom_type = target_geom_type + \"Z\"\n\n return target_geom_type, has_curve\n\n\ndef _get_srid_from_crs(gdf):\n \"\"\"\n Get EPSG code from CRS if available. If not, return -1.\n \"\"\"\n\n # Use geoalchemy2 default for srid\n # Note: undefined srid in PostGIS is 0\n srid = -1\n warning_msg = (\n \"Could not parse CRS from the GeoDataFrame. \"\n + \"Inserting data without defined CRS.\",\n )\n if gdf.crs is not None:\n try:\n srid = gdf.crs.to_epsg(min_confidence=25)\n if srid is None:\n srid = -1\n warnings.warn(warning_msg, UserWarning, stacklevel=2)\n except Exception:\n warnings.warn(warning_msg, UserWarning, stacklevel=2)\n return srid\n\n\ndef _convert_linearring_to_linestring(gdf, geom_name):\n from shapely.geometry import LineString\n\n # Todo: Use Pygeos function once it's implemented:\n # https://github.com/pygeos/pygeos/issues/76\n\n mask = gdf.geom_type == \"LinearRing\"\n gdf.loc[mask, geom_name] = gdf.loc[mask, geom_name].apply(\n lambda geom: LineString(geom)\n )\n return gdf\n\n\ndef _convert_to_ewkb(gdf, geom_name, srid):\n \"\"\"Convert geometries to ewkb. \"\"\"\n if compat.USE_PYGEOS:\n from pygeos import set_srid, to_wkb\n\n geoms = to_wkb(\n set_srid(gdf[geom_name].values.data, srid=srid), hex=True, include_srid=True\n )\n\n else:\n from shapely.wkb import dumps\n\n geoms = [dumps(geom, srid=srid, hex=True) for geom in gdf[geom_name]]\n\n gdf[geom_name] = geoms\n return gdf\n\n\ndef _psql_insert_copy(tbl, conn, keys, data_iter):\n import io\n import csv\n\n s_buf = io.StringIO()\n writer = csv.writer(s_buf)\n writer.writerows(data_iter)\n s_buf.seek(0)\n\n columns = \", \".join('\"{}\"'.format(k) for k in keys)\n\n dbapi_conn = conn.connection\n with dbapi_conn.cursor() as cur:\n sql = \"COPY {} ({}) FROM STDIN WITH CSV\".format(tbl.table.fullname, columns)\n cur.copy_expert(sql=sql, file=s_buf)\n\n\ndef _write_postgis(\n gdf,\n name,\n con,\n schema=None,\n if_exists=\"fail\",\n index=False,\n index_label=None,\n chunksize=None,\n dtype=None,\n):\n \"\"\"\n Upload GeoDataFrame into PostGIS database.\n\n This method requires SQLAlchemy and GeoAlchemy2, and a PostgreSQL\n Python driver (e.g. 
psycopg2) to be installed.\n\n Parameters\n ----------\n name : str\n Name of the target table.\n con : sqlalchemy.engine.Engine\n Active connection to the PostGIS database.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n How to behave if the table already exists:\n\n - fail: Raise a ValueError.\n - replace: Drop the table before inserting new values.\n - append: Insert new values to the existing table.\n schema : string, optional\n Specify the schema. If None, use default schema: 'public'.\n index : bool, default True\n Write DataFrame index as a column.\n Uses *index_label* as the column name in the table.\n index_label : string or sequence, default None\n Column label for index column(s).\n If None is given (default) and index is True,\n then the index names are used.\n chunksize : int, optional\n Rows will be written in batches of this size at a time.\n By default, all rows will be written at once.\n dtype : dict of column name to SQL type, default None\n Specifying the datatype for columns.\n The keys should be the column names and the values\n should be the SQLAlchemy types.\n\n Examples\n --------\n\n >>> from sqlalchemy import create_engine\n >>> engine = create_engine(\"postgres://myusername:mypassword@myhost:5432\\\n/mydatabase\";)\n >>> gdf.to_postgis(\"my_table\", engine)\n \"\"\"\n try:\n from geoalchemy2 import Geometry\n except ImportError:\n raise ImportError(\"'to_postgis()' requires geoalchemy2 package. \")\n\n if not compat.SHAPELY_GE_17:\n raise ImportError(\n \"'to_postgis()' requires newer version of Shapely \"\n \"(>= '1.7.0').\\nYou can update the library using \"\n \"'pip install shapely --upgrade' or using \"\n \"'conda update shapely' if using conda package manager.\"\n )\n\n gdf = gdf.copy()\n geom_name = gdf.geometry.name\n\n # Get srid\n srid = _get_srid_from_crs(gdf)\n\n # Get geometry type and info whether data contains LinearRing.\n geometry_type, has_curve = _get_geometry_type(gdf)\n\n # Build dtype with Geometry\n if dtype is not None:\n dtype[geom_name] = Geometry(geometry_type=geometry_type, srid=srid)\n else:\n dtype = {geom_name: Geometry(geometry_type=geometry_type, srid=srid)}\n\n # Convert LinearRing geometries to LineString\n if has_curve:\n gdf = _convert_linearring_to_linestring(gdf, geom_name)\n\n # Convert geometries to EWKB\n gdf = _convert_to_ewkb(gdf, geom_name, srid)\n\n if if_exists == \"append\":\n # Check that the geometry srid matches with the current GeoDataFrame\n with con.begin() as connection:\n if schema is not None:\n schema_name = schema\n else:\n schema_name = \"public\"\n\n # Only check SRID if table exists\n if connection.run_callable(connection.dialect.has_table, name, schema):\n target_srid = connection.execute(\n \"SELECT Find_SRID('{schema}', '{table}', '{geom_col}');\".format(\n schema=schema_name, table=name, geom_col=geom_name\n )\n ).fetchone()[0]\n\n if target_srid != srid:\n msg = (\n \"The CRS of the target table (EPSG:{epsg_t}) differs from the \"\n \"CRS of current GeoDataFrame (EPSG:{epsg_src}).\".format(\n epsg_t=target_srid, epsg_src=srid\n )\n )\n raise ValueError(msg)\n\n with con.begin() as connection:\n\n gdf.to_sql(\n name,\n connection,\n schema=schema,\n if_exists=if_exists,\n index=index,\n index_label=index_label,\n chunksize=chunksize,\n dtype=dtype,\n method=_psql_insert_copy,\n )\n\n return\n", "path": "geopandas/io/sql.py"}]}
| 3,982 | 377 |
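A minimal sketch of the existence guard the patch above adds before the SRID comparison, written here against SQLAlchemy 1.4+'s `inspect()` API rather than the older `run_callable()` call used in the diff; the connection string, table name, and schema are placeholders, and a reachable PostGIS database is assumed:

```python
from sqlalchemy import create_engine, inspect


def table_exists(engine, table, schema="public"):
    """Return True only if schema.table is already present in the database."""
    with engine.connect() as conn:
        return inspect(conn).has_table(table, schema=schema)


if __name__ == "__main__":
    # Placeholder DSN -- replace with a real PostGIS connection string.
    engine = create_engine("postgresql://user:password@localhost:5432/mydb")
    if table_exists(engine, "my_table"):
        # Safe to run Find_SRID() and compare it with the GeoDataFrame's SRID.
        print("table exists: check SRID before appending")
    else:
        # Let to_sql() create the table with the GeoDataFrame's own SRID.
        print("table missing: skip the SRID check")
```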
gh_patches_debug_12851
|
rasdani/github-patches
|
git_diff
|
wright-group__WrightTools-534
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove_nans_1D fails for list
```
>>> wt.kit.remove_nans_1D([np.nan, 1, 2, 2])
Traceback (most recent call last):
File "<input>", line 1, in <module>
wt.kit.remove_nans_1D([np.nan, 1, 2, 2])
File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in rem
ove_nans_1D
return tuple(a[goods] for a in args)
File "/home/kyle/wright/WrightTools/WrightTools/kit/_array.py", line 223, in <ge
nexpr>
return tuple(a[goods] for a in args)
TypeError: list indices must be integers or slices, not list
>>> wt.kit.remove_nans_1D(np.array([np.nan, 1, 2, 2]))
(array([1., 2., 2.]),)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/kit/_array.py`
Content:
```
1 """Array interaction tools."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import numpy as np
8
9 from .. import exceptions as wt_exceptions
10
11
12 # --- define --------------------------------------------------------------------------------------
13
14
15 __all__ = ['closest_pair',
16 'diff',
17 'fft',
18 'joint_shape',
19 'orthogonal',
20 'remove_nans_1D',
21 'share_nans',
22 'smooth_1D',
23 'unique',
24 'valid_index']
25
26
27 # --- functions -----------------------------------------------------------------------------------
28
29
30 def closest_pair(arr, give='indicies'):
31 """Find the pair of indices corresponding to the closest elements in an array.
32
33 If multiple pairs are equally close, both pairs of indicies are returned.
34 Optionally returns the closest distance itself.
35
36 I am sure that this could be written as a cheaper operation. I
37 wrote this as a quick and dirty method because I need it now to use on some
38 relatively small arrays. Feel free to refactor if you need this operation
39 done as fast as possible. - Blaise 2016-02-07
40
41 Parameters
42 ----------
43 arr : numpy.ndarray
44 The array to search.
45 give : {'indicies', 'distance'} (optional)
46 Toggle return behavior. If 'distance', returns a single float - the
47 closest distance itself. Default is indicies.
48
49 Returns
50 -------
51 list of lists of two tuples
52 List containing lists of two tuples: indicies the nearest pair in the
53 array.
54
55 >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])
56 >>> closest_pair(arr)
57 [[(1,), (8,)], [(3,), (4,)]]
58
59 """
60 idxs = [idx for idx in np.ndindex(arr.shape)]
61 outs = []
62 min_dist = arr.max() - arr.min()
63 for idxa in idxs:
64 for idxb in idxs:
65 if idxa == idxb:
66 continue
67 dist = abs(arr[idxa] - arr[idxb])
68 if dist == min_dist:
69 if not [idxb, idxa] in outs:
70 outs.append([idxa, idxb])
71 elif dist < min_dist:
72 min_dist = dist
73 outs = [[idxa, idxb]]
74 if give == 'indicies':
75 return outs
76 elif give == 'distance':
77 return min_dist
78 else:
79 raise KeyError('give not recognized in closest_pair')
80
81
82 def diff(xi, yi, order=1):
83 """Take the numerical derivative of a 1D array.
84
85 Output is mapped onto the original coordinates using linear interpolation.
86 Expects monotonic xi values.
87
88 Parameters
89 ----------
90 xi : 1D array-like
91 Coordinates.
92 yi : 1D array-like
93 Values.
94 order : positive integer (optional)
95 Order of differentiation.
96
97 Returns
98 -------
99 1D numpy array
100 Numerical derivative. Has the same shape as the input arrays.
101 """
102 yi = np.array(yi).copy()
103 flip = False
104 if xi[-1] < xi[0]:
105 xi = np.flipud(xi.copy())
106 yi = np.flipud(yi)
107 flip = True
108 midpoints = (xi[1:] + xi[:-1]) / 2
109 for _ in range(order):
110 d = np.diff(yi)
111 d /= np.diff(xi)
112 yi = np.interp(xi, midpoints, d)
113 if flip:
114 yi = np.flipud(yi)
115 return yi
116
117
118 def fft(xi, yi, axis=0):
119 """Take the 1D FFT of an N-dimensional array and return "sensible" properly shifted arrays.
120
121 Parameters
122 ----------
123 xi : numpy.ndarray
124 1D array over which the points to be FFT'ed are defined
125 yi : numpy.ndarray
126 ND array with values to FFT
127 axis : int
128 axis of yi to perform FFT over
129
130 Returns
131 -------
132 xi : 1D numpy.ndarray
133 1D array. Conjugate to input xi. Example: if input xi is in the time
134 domain, output xi is in frequency domain.
135 yi : ND numpy.ndarray
136 FFT. Has the same shape as the input array (yi).
137 """
138 # xi must be 1D
139 if xi.ndim != 1:
140 raise wt_exceptions.DimensionalityError(1, xi.ndim)
141 # xi must be evenly spaced
142 spacing = np.diff(xi)
143 if not np.allclose(spacing, spacing.mean()):
144 raise RuntimeError('WrightTools.kit.fft: argument xi must be evenly spaced')
145 # fft
146 yi = np.fft.fft(yi, axis=axis)
147 d = (xi.max() - xi.min()) / (xi.size - 1)
148 xi = np.fft.fftfreq(xi.size, d=d)
149 # shift
150 xi = np.fft.fftshift(xi)
151 yi = np.fft.fftshift(yi, axes=axis)
152 return xi, yi
153
154
155 def joint_shape(*args):
156 """Given a set of arrays, return the joint shape.
157
158 Parameters
159 ----------
160 args : array-likes
161
162 Returns
163 -------
164 tuple of int
165 Joint shape.
166 """
167 if len(args) == 0:
168 return ()
169 shape = []
170 shapes = [a.shape for a in args]
171 ndim = args[0].ndim
172 for i in range(ndim):
173 shape.append(max([s[i] for s in shapes]))
174 return tuple(shape)
175
176
177 def orthogonal(*args):
178 """Determine if a set of arrays are orthogonal.
179
180 Parameters
181 ----------
182 args : array-likes or array shapes
183
184 Returns
185 -------
186 bool
187 Array orthogonality condition.
188 """
189 for i, arg in enumerate(args):
190 if hasattr(arg, 'shape'):
191 args[i] = arg.shape
192 for s in zip(*args):
193 if np.product(s) != max(s):
194 return False
195 return True
196
197
198 def remove_nans_1D(*args):
199 """Remove nans in a set of 1D arrays.
200
201 Removes indicies in all arrays if any array is nan at that index.
202 All input arrays must have the same size.
203
204 Parameters
205 ----------
206 args : 1D arrays
207
208 Returns
209 -------
210 tuple
211 Tuple of 1D arrays in same order as given, with nan indicies removed.
212 """
213 # find all indicies to keep
214 bads = np.array([])
215 for arr in args:
216 bad = np.array(np.where(np.isnan(arr))).flatten()
217 bads = np.hstack((bad, bads))
218 if hasattr(args, 'shape') and len(args.shape) == 1:
219 goods = [i for i in np.arange(args.shape[0]) if i not in bads]
220 else:
221 goods = [i for i in np.arange(len(args[0])) if i not in bads]
222 # apply
223 return tuple(a[goods] for a in args)
224
225
226 def share_nans(*arrs):
227 """Take a list of nD arrays and return a new list of nD arrays.
228
229 The new list is in the same order as the old list.
230 If one indexed element in an old array is nan then every element for that
231 index in all new arrays in the list is then nan.
232
233 Parameters
234 ----------
235 *arrs : nD arrays.
236
237 Returns
238 -------
239 list
240 List of nD arrays in same order as given, with nan indicies syncronized.
241 """
242 nans = np.zeros((arrs[0].shape))
243 for arr in arrs:
244 nans *= arr
245 return tuple([a + nans for a in arrs])
246
247
248 def smooth_1D(arr, n=10):
249 """Smooth 1D data by 'running average'.
250
251 Parameters
252 ----------
253 n : int
254 number of points to average
255 """
256 for i in range(n, len(arr) - n):
257 window = arr[i - n:i + n].copy()
258 arr[i] = window.mean()
259 return arr
260
261
262 def unique(arr, tolerance=1e-6):
263 """Return unique elements in 1D array, within tolerance.
264
265 Parameters
266 ----------
267 arr : array_like
268 Input array. This will be flattened if it is not already 1D.
269 tolerance : number (optional)
270 The tolerance for uniqueness.
271
272 Returns
273 -------
274 array
275 The sorted unique values.
276 """
277 arr = sorted(arr.flatten())
278 unique = []
279 while len(arr) > 0:
280 current = arr[0]
281 lis = [xi for xi in arr if np.abs(current - xi) < tolerance]
282 arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]
283 xi_lis_average = sum(lis) / len(lis)
284 unique.append(xi_lis_average)
285 return np.array(unique)
286
287
288 def valid_index(index, shape):
289 """Get a valid index for a broadcastable shape.
290
291 Parameters
292 ----------
293 index : tuple
294 Given index.
295 shape : tuple of int
296 Shape.
297
298 Returns
299 -------
300 tuple
301 Valid index.
302 """
303 # append slices to index
304 index = list(index)
305 while len(index) < len(shape):
306 index.append(slice(None))
307 # fill out, in reverse
308 out = []
309 for i, s in zip(index[::-1], shape[::-1]):
310 if s == 1:
311 if isinstance(i, slice):
312 out.append(slice(None))
313 else:
314 out.append(0)
315 else:
316 out.append(i)
317 return tuple(out[::-1])
318
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py
--- a/WrightTools/kit/_array.py
+++ b/WrightTools/kit/_array.py
@@ -210,17 +210,10 @@
tuple
Tuple of 1D arrays in same order as given, with nan indicies removed.
"""
- # find all indicies to keep
- bads = np.array([])
- for arr in args:
- bad = np.array(np.where(np.isnan(arr))).flatten()
- bads = np.hstack((bad, bads))
- if hasattr(args, 'shape') and len(args.shape) == 1:
- goods = [i for i in np.arange(args.shape[0]) if i not in bads]
- else:
- goods = [i for i in np.arange(len(args[0])) if i not in bads]
- # apply
- return tuple(a[goods] for a in args)
+ vals = np.isnan(args[0])
+ for a in args:
+ vals |= np.isnan(a)
+ return tuple(np.array(a)[vals == False] for a in args)
def share_nans(*arrs):
|
{"golden_diff": "diff --git a/WrightTools/kit/_array.py b/WrightTools/kit/_array.py\n--- a/WrightTools/kit/_array.py\n+++ b/WrightTools/kit/_array.py\n@@ -210,17 +210,10 @@\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n- # find all indicies to keep\n- bads = np.array([])\n- for arr in args:\n- bad = np.array(np.where(np.isnan(arr))).flatten()\n- bads = np.hstack((bad, bads))\n- if hasattr(args, 'shape') and len(args.shape) == 1:\n- goods = [i for i in np.arange(args.shape[0]) if i not in bads]\n- else:\n- goods = [i for i in np.arange(len(args[0])) if i not in bads]\n- # apply\n- return tuple(a[goods] for a in args)\n+ vals = np.isnan(args[0])\n+ for a in args:\n+ vals |= np.isnan(a)\n+ return tuple(np.array(a)[vals == False] for a in args)\n \n \n def share_nans(*arrs):\n", "issue": "remove_nans_1D fails for list\n```\r\n>>> wt.kit.remove_nans_1D([np.nan, 1, 2, 2])\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n wt.kit.remove_nans_1D([np.nan, 1, 2, 2])\r\n File \"/home/kyle/wright/WrightTools/WrightTools/kit/_array.py\", line 223, in rem\r\nove_nans_1D\r\n return tuple(a[goods] for a in args)\r\n File \"/home/kyle/wright/WrightTools/WrightTools/kit/_array.py\", line 223, in <ge\r\nnexpr>\r\n return tuple(a[goods] for a in args)\r\nTypeError: list indices must be integers or slices, not list\r\n>>> wt.kit.remove_nans_1D(np.array([np.nan, 1, 2, 2]))\r\n(array([1., 2., 2.]),)\r\n```\n", "before_files": [{"content": "\"\"\"Array interaction tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = ['closest_pair',\n 'diff',\n 'fft',\n 'joint_shape',\n 'orthogonal',\n 'remove_nans_1D',\n 'share_nans',\n 'smooth_1D',\n 'unique',\n 'valid_index']\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef closest_pair(arr, give='indicies'):\n \"\"\"Find the pair of indices corresponding to the closest elements in an array.\n\n If multiple pairs are equally close, both pairs of indicies are returned.\n Optionally returns the closest distance itself.\n\n I am sure that this could be written as a cheaper operation. I\n wrote this as a quick and dirty method because I need it now to use on some\n relatively small arrays. Feel free to refactor if you need this operation\n done as fast as possible. - Blaise 2016-02-07\n\n Parameters\n ----------\n arr : numpy.ndarray\n The array to search.\n give : {'indicies', 'distance'} (optional)\n Toggle return behavior. If 'distance', returns a single float - the\n closest distance itself. 
Default is indicies.\n\n Returns\n -------\n list of lists of two tuples\n List containing lists of two tuples: indicies the nearest pair in the\n array.\n\n >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])\n >>> closest_pair(arr)\n [[(1,), (8,)], [(3,), (4,)]]\n\n \"\"\"\n idxs = [idx for idx in np.ndindex(arr.shape)]\n outs = []\n min_dist = arr.max() - arr.min()\n for idxa in idxs:\n for idxb in idxs:\n if idxa == idxb:\n continue\n dist = abs(arr[idxa] - arr[idxb])\n if dist == min_dist:\n if not [idxb, idxa] in outs:\n outs.append([idxa, idxb])\n elif dist < min_dist:\n min_dist = dist\n outs = [[idxa, idxb]]\n if give == 'indicies':\n return outs\n elif give == 'distance':\n return min_dist\n else:\n raise KeyError('give not recognized in closest_pair')\n\n\ndef diff(xi, yi, order=1):\n \"\"\"Take the numerical derivative of a 1D array.\n\n Output is mapped onto the original coordinates using linear interpolation.\n Expects monotonic xi values.\n\n Parameters\n ----------\n xi : 1D array-like\n Coordinates.\n yi : 1D array-like\n Values.\n order : positive integer (optional)\n Order of differentiation.\n\n Returns\n -------\n 1D numpy array\n Numerical derivative. Has the same shape as the input arrays.\n \"\"\"\n yi = np.array(yi).copy()\n flip = False\n if xi[-1] < xi[0]:\n xi = np.flipud(xi.copy())\n yi = np.flipud(yi)\n flip = True\n midpoints = (xi[1:] + xi[:-1]) / 2\n for _ in range(order):\n d = np.diff(yi)\n d /= np.diff(xi)\n yi = np.interp(xi, midpoints, d)\n if flip:\n yi = np.flipud(yi)\n return yi\n\n\ndef fft(xi, yi, axis=0):\n \"\"\"Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. 
Has the same shape as the input array (yi).\n \"\"\"\n # xi must be 1D\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n # xi must be evenly spaced\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError('WrightTools.kit.fft: argument xi must be evenly spaced')\n # fft\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n # shift\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi\n\n\ndef joint_shape(*args):\n \"\"\"Given a set of arrays, return the joint shape.\n\n Parameters\n ----------\n args : array-likes\n\n Returns\n -------\n tuple of int\n Joint shape.\n \"\"\"\n if len(args) == 0:\n return ()\n shape = []\n shapes = [a.shape for a in args]\n ndim = args[0].ndim\n for i in range(ndim):\n shape.append(max([s[i] for s in shapes]))\n return tuple(shape)\n\n\ndef orthogonal(*args):\n \"\"\"Determine if a set of arrays are orthogonal.\n\n Parameters\n ----------\n args : array-likes or array shapes\n\n Returns\n -------\n bool\n Array orthogonality condition.\n \"\"\"\n for i, arg in enumerate(args):\n if hasattr(arg, 'shape'):\n args[i] = arg.shape\n for s in zip(*args):\n if np.product(s) != max(s):\n return False\n return True\n\n\ndef remove_nans_1D(*args):\n \"\"\"Remove nans in a set of 1D arrays.\n\n Removes indicies in all arrays if any array is nan at that index.\n All input arrays must have the same size.\n\n Parameters\n ----------\n args : 1D arrays\n\n Returns\n -------\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n # find all indicies to keep\n bads = np.array([])\n for arr in args:\n bad = np.array(np.where(np.isnan(arr))).flatten()\n bads = np.hstack((bad, bads))\n if hasattr(args, 'shape') and len(args.shape) == 1:\n goods = [i for i in np.arange(args.shape[0]) if i not in bads]\n else:\n goods = [i for i in np.arange(len(args[0])) if i not in bads]\n # apply\n return tuple(a[goods] for a in args)\n\n\ndef share_nans(*arrs):\n \"\"\"Take a list of nD arrays and return a new list of nD arrays.\n\n The new list is in the same order as the old list.\n If one indexed element in an old array is nan then every element for that\n index in all new arrays in the list is then nan.\n\n Parameters\n ----------\n *arrs : nD arrays.\n\n Returns\n -------\n list\n List of nD arrays in same order as given, with nan indicies syncronized.\n \"\"\"\n nans = np.zeros((arrs[0].shape))\n for arr in arrs:\n nans *= arr\n return tuple([a + nans for a in arrs])\n\n\ndef smooth_1D(arr, n=10):\n \"\"\"Smooth 1D data by 'running average'.\n\n Parameters\n ----------\n n : int\n number of points to average\n \"\"\"\n for i in range(n, len(arr) - n):\n window = arr[i - n:i + n].copy()\n arr[i] = window.mean()\n return arr\n\n\ndef unique(arr, tolerance=1e-6):\n \"\"\"Return unique elements in 1D array, within tolerance.\n\n Parameters\n ----------\n arr : array_like\n Input array. 
This will be flattened if it is not already 1D.\n tolerance : number (optional)\n The tolerance for uniqueness.\n\n Returns\n -------\n array\n The sorted unique values.\n \"\"\"\n arr = sorted(arr.flatten())\n unique = []\n while len(arr) > 0:\n current = arr[0]\n lis = [xi for xi in arr if np.abs(current - xi) < tolerance]\n arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]\n xi_lis_average = sum(lis) / len(lis)\n unique.append(xi_lis_average)\n return np.array(unique)\n\n\ndef valid_index(index, shape):\n \"\"\"Get a valid index for a broadcastable shape.\n\n Parameters\n ----------\n index : tuple\n Given index.\n shape : tuple of int\n Shape.\n\n Returns\n -------\n tuple\n Valid index.\n \"\"\"\n # append slices to index\n index = list(index)\n while len(index) < len(shape):\n index.append(slice(None))\n # fill out, in reverse\n out = []\n for i, s in zip(index[::-1], shape[::-1]):\n if s == 1:\n if isinstance(i, slice):\n out.append(slice(None))\n else:\n out.append(0)\n else:\n out.append(i)\n return tuple(out[::-1])\n", "path": "WrightTools/kit/_array.py"}], "after_files": [{"content": "\"\"\"Array interaction tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = ['closest_pair',\n 'diff',\n 'fft',\n 'joint_shape',\n 'orthogonal',\n 'remove_nans_1D',\n 'share_nans',\n 'smooth_1D',\n 'unique',\n 'valid_index']\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef closest_pair(arr, give='indicies'):\n \"\"\"Find the pair of indices corresponding to the closest elements in an array.\n\n If multiple pairs are equally close, both pairs of indicies are returned.\n Optionally returns the closest distance itself.\n\n I am sure that this could be written as a cheaper operation. I\n wrote this as a quick and dirty method because I need it now to use on some\n relatively small arrays. Feel free to refactor if you need this operation\n done as fast as possible. - Blaise 2016-02-07\n\n Parameters\n ----------\n arr : numpy.ndarray\n The array to search.\n give : {'indicies', 'distance'} (optional)\n Toggle return behavior. If 'distance', returns a single float - the\n closest distance itself. 
Default is indicies.\n\n Returns\n -------\n list of lists of two tuples\n List containing lists of two tuples: indicies the nearest pair in the\n array.\n\n >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])\n >>> closest_pair(arr)\n [[(1,), (8,)], [(3,), (4,)]]\n\n \"\"\"\n idxs = [idx for idx in np.ndindex(arr.shape)]\n outs = []\n min_dist = arr.max() - arr.min()\n for idxa in idxs:\n for idxb in idxs:\n if idxa == idxb:\n continue\n dist = abs(arr[idxa] - arr[idxb])\n if dist == min_dist:\n if not [idxb, idxa] in outs:\n outs.append([idxa, idxb])\n elif dist < min_dist:\n min_dist = dist\n outs = [[idxa, idxb]]\n if give == 'indicies':\n return outs\n elif give == 'distance':\n return min_dist\n else:\n raise KeyError('give not recognized in closest_pair')\n\n\ndef diff(xi, yi, order=1):\n \"\"\"Take the numerical derivative of a 1D array.\n\n Output is mapped onto the original coordinates using linear interpolation.\n Expects monotonic xi values.\n\n Parameters\n ----------\n xi : 1D array-like\n Coordinates.\n yi : 1D array-like\n Values.\n order : positive integer (optional)\n Order of differentiation.\n\n Returns\n -------\n 1D numpy array\n Numerical derivative. Has the same shape as the input arrays.\n \"\"\"\n yi = np.array(yi).copy()\n flip = False\n if xi[-1] < xi[0]:\n xi = np.flipud(xi.copy())\n yi = np.flipud(yi)\n flip = True\n midpoints = (xi[1:] + xi[:-1]) / 2\n for _ in range(order):\n d = np.diff(yi)\n d /= np.diff(xi)\n yi = np.interp(xi, midpoints, d)\n if flip:\n yi = np.flipud(yi)\n return yi\n\n\ndef fft(xi, yi, axis=0):\n \"\"\"Take the 1D FFT of an N-dimensional array and return \"sensible\" properly shifted arrays.\n\n Parameters\n ----------\n xi : numpy.ndarray\n 1D array over which the points to be FFT'ed are defined\n yi : numpy.ndarray\n ND array with values to FFT\n axis : int\n axis of yi to perform FFT over\n\n Returns\n -------\n xi : 1D numpy.ndarray\n 1D array. Conjugate to input xi. Example: if input xi is in the time\n domain, output xi is in frequency domain.\n yi : ND numpy.ndarray\n FFT. 
Has the same shape as the input array (yi).\n \"\"\"\n # xi must be 1D\n if xi.ndim != 1:\n raise wt_exceptions.DimensionalityError(1, xi.ndim)\n # xi must be evenly spaced\n spacing = np.diff(xi)\n if not np.allclose(spacing, spacing.mean()):\n raise RuntimeError('WrightTools.kit.fft: argument xi must be evenly spaced')\n # fft\n yi = np.fft.fft(yi, axis=axis)\n d = (xi.max() - xi.min()) / (xi.size - 1)\n xi = np.fft.fftfreq(xi.size, d=d)\n # shift\n xi = np.fft.fftshift(xi)\n yi = np.fft.fftshift(yi, axes=axis)\n return xi, yi\n\n\ndef joint_shape(*args):\n \"\"\"Given a set of arrays, return the joint shape.\n\n Parameters\n ----------\n args : array-likes\n\n Returns\n -------\n tuple of int\n Joint shape.\n \"\"\"\n if len(args) == 0:\n return ()\n shape = []\n shapes = [a.shape for a in args]\n ndim = args[0].ndim\n for i in range(ndim):\n shape.append(max([s[i] for s in shapes]))\n return tuple(shape)\n\n\ndef orthogonal(*args):\n \"\"\"Determine if a set of arrays are orthogonal.\n\n Parameters\n ----------\n args : array-likes or array shapes\n\n Returns\n -------\n bool\n Array orthogonality condition.\n \"\"\"\n for i, arg in enumerate(args):\n if hasattr(arg, 'shape'):\n args[i] = arg.shape\n for s in zip(*args):\n if np.product(s) != max(s):\n return False\n return True\n\n\ndef remove_nans_1D(*args):\n \"\"\"Remove nans in a set of 1D arrays.\n\n Removes indicies in all arrays if any array is nan at that index.\n All input arrays must have the same size.\n\n Parameters\n ----------\n args : 1D arrays\n\n Returns\n -------\n tuple\n Tuple of 1D arrays in same order as given, with nan indicies removed.\n \"\"\"\n vals = np.isnan(args[0])\n for a in args:\n vals |= np.isnan(a)\n return tuple(np.array(a)[vals == False] for a in args)\n\n\ndef share_nans(*arrs):\n \"\"\"Take a list of nD arrays and return a new list of nD arrays.\n\n The new list is in the same order as the old list.\n If one indexed element in an old array is nan then every element for that\n index in all new arrays in the list is then nan.\n\n Parameters\n ----------\n *arrs : nD arrays.\n\n Returns\n -------\n list\n List of nD arrays in same order as given, with nan indicies syncronized.\n \"\"\"\n nans = np.zeros((arrs[0].shape))\n for arr in arrs:\n nans *= arr\n return tuple([a + nans for a in arrs])\n\n\ndef smooth_1D(arr, n=10):\n \"\"\"Smooth 1D data by 'running average'.\n\n Parameters\n ----------\n n : int\n number of points to average\n \"\"\"\n for i in range(n, len(arr) - n):\n window = arr[i - n:i + n].copy()\n arr[i] = window.mean()\n return arr\n\n\ndef unique(arr, tolerance=1e-6):\n \"\"\"Return unique elements in 1D array, within tolerance.\n\n Parameters\n ----------\n arr : array_like\n Input array. 
This will be flattened if it is not already 1D.\n tolerance : number (optional)\n The tolerance for uniqueness.\n\n Returns\n -------\n array\n The sorted unique values.\n \"\"\"\n arr = sorted(arr.flatten())\n unique = []\n while len(arr) > 0:\n current = arr[0]\n lis = [xi for xi in arr if np.abs(current - xi) < tolerance]\n arr = [xi for xi in arr if not np.abs(lis[0] - xi) < tolerance]\n xi_lis_average = sum(lis) / len(lis)\n unique.append(xi_lis_average)\n return np.array(unique)\n\n\ndef valid_index(index, shape):\n \"\"\"Get a valid index for a broadcastable shape.\n\n Parameters\n ----------\n index : tuple\n Given index.\n shape : tuple of int\n Shape.\n\n Returns\n -------\n tuple\n Valid index.\n \"\"\"\n # append slices to index\n index = list(index)\n while len(index) < len(shape):\n index.append(slice(None))\n # fill out, in reverse\n out = []\n for i, s in zip(index[::-1], shape[::-1]):\n if s == 1:\n if isinstance(i, slice):\n out.append(slice(None))\n else:\n out.append(0)\n else:\n out.append(i)\n return tuple(out[::-1])\n", "path": "WrightTools/kit/_array.py"}]}
| 3,463 | 273 |
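A minimal standalone sketch of the vectorized masking approach used in the patch above; the function name and sample inputs here are illustrative, not part of the WrightTools API:

```python
import numpy as np

def remove_nans_1d(*args):
    # Combine NaN positions across all inputs into one boolean mask.
    mask = np.isnan(args[0])
    for a in args:
        mask |= np.isnan(a)
    # np.asarray also accepts plain lists, which avoids the reported TypeError.
    return tuple(np.asarray(a)[~mask] for a in args)

print(remove_nans_1d([np.nan, 1, 2, 2]))
# (array([1., 2., 2.]),)
print(remove_nans_1d(np.array([0.0, np.nan, 2.0]), np.array([3.0, 4.0, np.nan])))
# (array([0.]), array([3.]))
```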
gh_patches_debug_16018
|
rasdani/github-patches
|
git_diff
|
fonttools__fonttools-1839
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing a TTFont from XML fails when LC_TIME is set
Importing a font from XML while LC_TIME locale is set to non-English, causes an error.
### How to reproduce?
This might be easy when a non-English locale is available in the system. I came across this, while using a package on top. The corresponding issue in their package is amueller/word_cloud#530. There is a script to reproduce, which only throws an error, when a non-English locale like 'de_DE' is set with e. g. `locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')` or just by opening Spyder-IDE.
**A simplified test is:**
```python
import locale
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8') # works if de_DE is available
from fontTools.misc.timeTools import timestampFromString,timestampToString,timestampNow
ts_now = timestampNow()
str_now = timestampToString(ts_now)
timestampFromString(str_now) # ValueError
```
Let's go into the cause of the error.
### Basics
The locale for LC_TIME can be checked with
```python
import locale
print(locale.getlocale(locale.LC_TIME))
```
This outputs `('de_DE', 'UTF-8')` in my case.
With this locale the following fails:
```python
import time
time.strptime('Mon', '%a')
# ValueError: unconverted data remains: n
```
`'Mo'` is the localized abbreviation in de_DE for Monday.
### TTFont
The method [`importXML`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ttLib/ttFont.py#L318) in `TTFont` receives the font object as XML. This can contain created and modified dates. The XML is parsed by the `XMLReader`, which somehow uses the [`fromXML`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ttLib/tables/_h_e_a_d.py#L107) method in `table__h_e_a_d`. There the created and modified dates are parsed using [`timestampFromString`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/misc/timeTools.py#L46) from timeTools. This helper function uses `time.strptime(value)`.
In my test case `value` is initialized from the 'created' attribute of a font as `'Mon Jan 8 12:28:04 2007'`, which throws the following error:
```
ValueError: time data 'Mon Jan 8 12:28:04 2007' does not match format '%a %b %d %H:%M:%S %Y'
```
### How to resolve?
I think the parsing should be done without locale, since the XML attribute is likely to be non-local. In the opposite function [`timestampToString`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/misc/timeTools.py#L43) `asctime` is used, which uses a fixed list of abbreviated week days and months. So that is not localized. Hence [`timestampFromString`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/misc/timeTools.py#L46) shouldn't be localized as well.
A simple solution could be
```python
def timestampFromString(value):
import locale
l = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, 'C')
try:
t = time.strptime(value)
finally:
locale.setlocale(locale.LC_TIME, l)
return calendar.timegm(t) - epoch_diff
```
However, changing the locale is not recommended. It's better to use a function that can parse a date with specified locale without changing it. You could use [dateparser](https://dateparser.readthedocs.io/en/latest/) for example, but I don't know about your dependencies and how you handle it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Lib/fontTools/misc/timeTools.py`
Content:
```
1 """fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.
2 """
3
4 from fontTools.misc.py23 import *
5 import os
6 import time
7 import calendar
8
9
10 epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
11
12 DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
13 MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
14 "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
15
16
17 def asctime(t=None):
18 """
19 Convert a tuple or struct_time representing a time as returned by gmtime()
20 or localtime() to a 24-character string of the following form:
21
22 >>> asctime(time.gmtime(0))
23 'Thu Jan 1 00:00:00 1970'
24
25 If t is not provided, the current time as returned by localtime() is used.
26 Locale information is not used by asctime().
27
28 This is meant to normalise the output of the built-in time.asctime() across
29 different platforms and Python versions.
30 In Python 3.x, the day of the month is right-justified, whereas on Windows
31 Python 2.7 it is padded with zeros.
32
33 See https://github.com/fonttools/fonttools/issues/455
34 """
35 if t is None:
36 t = time.localtime()
37 s = "%s %s %2s %s" % (
38 DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,
39 time.strftime("%H:%M:%S %Y", t))
40 return s
41
42
43 def timestampToString(value):
44 return asctime(time.gmtime(max(0, value + epoch_diff)))
45
46 def timestampFromString(value):
47 return calendar.timegm(time.strptime(value)) - epoch_diff
48
49 def timestampNow():
50 # https://reproducible-builds.org/specs/source-date-epoch/
51 source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
52 if source_date_epoch is not None:
53 return int(source_date_epoch) - epoch_diff
54 return int(time.time() - epoch_diff)
55
56 def timestampSinceEpoch(value):
57 return int(value - epoch_diff)
58
59
60 if __name__ == "__main__":
61 import sys
62 import doctest
63 sys.exit(doctest.testmod().failed)
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Lib/fontTools/misc/timeTools.py b/Lib/fontTools/misc/timeTools.py
--- a/Lib/fontTools/misc/timeTools.py
+++ b/Lib/fontTools/misc/timeTools.py
@@ -4,6 +4,7 @@
from fontTools.misc.py23 import *
import os
import time
+from datetime import datetime, timezone
import calendar
@@ -44,7 +45,12 @@
return asctime(time.gmtime(max(0, value + epoch_diff)))
def timestampFromString(value):
- return calendar.timegm(time.strptime(value)) - epoch_diff
+ wkday, mnth = value[:7].split()
+ t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')
+ t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
+ wkday_idx = DAYNAMES.index(wkday)
+ assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
+ return int(t.timestamp()) - epoch_diff
def timestampNow():
# https://reproducible-builds.org/specs/source-date-epoch/
|
{"golden_diff": "diff --git a/Lib/fontTools/misc/timeTools.py b/Lib/fontTools/misc/timeTools.py\n--- a/Lib/fontTools/misc/timeTools.py\n+++ b/Lib/fontTools/misc/timeTools.py\n@@ -4,6 +4,7 @@\n from fontTools.misc.py23 import *\n import os\n import time\n+from datetime import datetime, timezone\n import calendar\n \n \n@@ -44,7 +45,12 @@\n \treturn asctime(time.gmtime(max(0, value + epoch_diff)))\n \n def timestampFromString(value):\n-\treturn calendar.timegm(time.strptime(value)) - epoch_diff\n+\twkday, mnth = value[:7].split()\n+\tt = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')\n+\tt = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)\n+\twkday_idx = DAYNAMES.index(wkday)\n+\tassert t.weekday() == wkday_idx, '\"' + value + '\" has inconsistent weekday'\n+\treturn int(t.timestamp()) - epoch_diff\n \n def timestampNow():\n \t# https://reproducible-builds.org/specs/source-date-epoch/\n", "issue": "Importing a TTFont from XML fails when LC_TIME is set\nImporting a font from XML while LC_TIME locale is set to non-English, causes an error.\r\n\r\n### How to reproduce?\r\n\r\nThis might be easy when a non-English locale is available in the system. I came across this, while using a package on top. The corresponding issue in their package is amueller/word_cloud#530. There is a script to reproduce, which only throws an error, when a non-English locale like 'de_DE' is set with e. g. `locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')` or just by opening Spyder-IDE.\r\n\r\n**A simplified test is:**\r\n```python\r\nimport locale\r\nlocale.setlocale(locale.LC_TIME, 'de_DE.UTF-8') # works if de_DE is available\r\n\r\nfrom fontTools.misc.timeTools import timestampFromString,timestampToString,timestampNow\r\nts_now = timestampNow()\r\nstr_now = timestampToString(ts_now)\r\ntimestampFromString(str_now) # ValueError\r\n```\r\n\r\nLet's go into the cause of the error.\r\n\r\n### Basics\r\n\r\nThe locale for LC_TIME can be checked with\r\n```python\r\nimport locale\r\nprint(locale.getlocale(locale.LC_TIME))\r\n```\r\nThis outputs `('de_DE', 'UTF-8')` in my case.\r\n\r\nWith this locale the following fails:\r\n```python\r\nimport time\r\ntime.strptime('Mon', '%a')\r\n# ValueError: unconverted data remains: n\r\n```\r\n`'Mo'` is the localized abbreviation in de_DE for Monday.\r\n\r\n### TTFont\r\n\r\nThe method [`importXML`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ttLib/ttFont.py#L318) in `TTFont` receives the font object as XML. This can contain created and modified dates. The XML is parsed by the `XMLReader`, which somehow uses the [`fromXML`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/ttLib/tables/_h_e_a_d.py#L107) method in `table__h_e_a_d`. There the created and modified dates are parsed using [`timestampFromString`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/misc/timeTools.py#L46) from timeTools. This helper function uses `time.strptime(value)`.\r\n\r\nIn my test case `value` is initialized from the 'created' attribute of a font as `'Mon Jan 8 12:28:04 2007'`, which throws the following error:\r\n```\r\nValueError: time data 'Mon Jan 8 12:28:04 2007' does not match format '%a %b %d %H:%M:%S %Y'\r\n```\r\n\r\n\r\n### How to resolve?\r\n\r\nI think the parsing should be done without locale, since the XML attribute is likely to be non-local. 
In the opposite function [`timestampToString`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/misc/timeTools.py#L43) `asctime` is used, which uses a fixed list of abbreviated week days and months. So that is not localized. Hence [`timestampFromString`](https://github.com/fonttools/fonttools/blob/master/Lib/fontTools/misc/timeTools.py#L46) shouldn't be localized as well.\r\n\r\nA simple solution could be\r\n```python\r\ndef timestampFromString(value):\r\n\timport locale\r\n\tl = locale.getlocale(locale.LC_TIME)\r\n\tlocale.setlocale(locale.LC_TIME, 'C')\r\n\ttry:\r\n\t\tt = time.strptime(value)\r\n\tfinally:\r\n\t\tlocale.setlocale(locale.LC_TIME, l)\r\n\treturn calendar.timegm(t) - epoch_diff\r\n```\r\n\r\nHowever, changing the locale is not recommended. It's better to use a function that can parse a date with specified locale without changing it. You could use [dateparser](https://dateparser.readthedocs.io/en/latest/) for example, but I don't know about your dependencies and how you handle it.\n", "before_files": [{"content": "\"\"\"fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.\n\"\"\"\n\nfrom fontTools.misc.py23 import *\nimport os\nimport time\nimport calendar\n\n\nepoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))\n\nDAYNAMES = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\nMONTHNAMES = [None, \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\",\n\t\t\t \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n\n\ndef asctime(t=None):\n\t\"\"\"\n\tConvert a tuple or struct_time representing a time as returned by gmtime()\n\tor localtime() to a 24-character string of the following form:\n\n\t>>> asctime(time.gmtime(0))\n\t'Thu Jan 1 00:00:00 1970'\n\n\tIf t is not provided, the current time as returned by localtime() is used.\n\tLocale information is not used by asctime().\n\n\tThis is meant to normalise the output of the built-in time.asctime() across\n\tdifferent platforms and Python versions.\n\tIn Python 3.x, the day of the month is right-justified, whereas on Windows\n\tPython 2.7 it is padded with zeros.\n\n\tSee https://github.com/fonttools/fonttools/issues/455\n\t\"\"\"\n\tif t is None:\n\t\tt = time.localtime()\n\ts = \"%s %s %2s %s\" % (\n\t\tDAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,\n\t\ttime.strftime(\"%H:%M:%S %Y\", t))\n\treturn s\n\n\ndef timestampToString(value):\n\treturn asctime(time.gmtime(max(0, value + epoch_diff)))\n\ndef timestampFromString(value):\n\treturn calendar.timegm(time.strptime(value)) - epoch_diff\n\ndef timestampNow():\n\t# https://reproducible-builds.org/specs/source-date-epoch/\n\tsource_date_epoch = os.environ.get(\"SOURCE_DATE_EPOCH\")\n\tif source_date_epoch is not None:\n\t\treturn int(source_date_epoch) - epoch_diff\n\treturn int(time.time() - epoch_diff)\n\ndef timestampSinceEpoch(value):\n\treturn int(value - epoch_diff)\n\n\nif __name__ == \"__main__\":\n\timport sys\n\timport doctest\n\tsys.exit(doctest.testmod().failed)\n", "path": "Lib/fontTools/misc/timeTools.py"}], "after_files": [{"content": "\"\"\"fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.\n\"\"\"\n\nfrom fontTools.misc.py23 import *\nimport os\nimport time\nfrom datetime import datetime, timezone\nimport calendar\n\n\nepoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))\n\nDAYNAMES = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\nMONTHNAMES = [None, \"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\",\n\t\t\t \"Jul\", \"Aug\", \"Sep\", 
\"Oct\", \"Nov\", \"Dec\"]\n\n\ndef asctime(t=None):\n\t\"\"\"\n\tConvert a tuple or struct_time representing a time as returned by gmtime()\n\tor localtime() to a 24-character string of the following form:\n\n\t>>> asctime(time.gmtime(0))\n\t'Thu Jan 1 00:00:00 1970'\n\n\tIf t is not provided, the current time as returned by localtime() is used.\n\tLocale information is not used by asctime().\n\n\tThis is meant to normalise the output of the built-in time.asctime() across\n\tdifferent platforms and Python versions.\n\tIn Python 3.x, the day of the month is right-justified, whereas on Windows\n\tPython 2.7 it is padded with zeros.\n\n\tSee https://github.com/fonttools/fonttools/issues/455\n\t\"\"\"\n\tif t is None:\n\t\tt = time.localtime()\n\ts = \"%s %s %2s %s\" % (\n\t\tDAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,\n\t\ttime.strftime(\"%H:%M:%S %Y\", t))\n\treturn s\n\n\ndef timestampToString(value):\n\treturn asctime(time.gmtime(max(0, value + epoch_diff)))\n\ndef timestampFromString(value):\n\twkday, mnth = value[:7].split()\n\tt = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')\n\tt = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)\n\twkday_idx = DAYNAMES.index(wkday)\n\tassert t.weekday() == wkday_idx, '\"' + value + '\" has inconsistent weekday'\n\treturn int(t.timestamp()) - epoch_diff\n\ndef timestampNow():\n\t# https://reproducible-builds.org/specs/source-date-epoch/\n\tsource_date_epoch = os.environ.get(\"SOURCE_DATE_EPOCH\")\n\tif source_date_epoch is not None:\n\t\treturn int(source_date_epoch) - epoch_diff\n\treturn int(time.time() - epoch_diff)\n\ndef timestampSinceEpoch(value):\n\treturn int(value - epoch_diff)\n\n\nif __name__ == \"__main__\":\n\timport sys\n\timport doctest\n\tsys.exit(doctest.testmod().failed)\n", "path": "Lib/fontTools/misc/timeTools.py"}]}
| 1,779 | 250 |
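A self-contained sketch of the locale-independent parsing idea from the patch above; it omits fontTools' 1904 epoch offset, and the function name is illustrative:

```python
from datetime import datetime, timezone

DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]

def parse_asctime_utc(value):
    # The weekday/month abbreviations in this format are always English, so they
    # are matched against fixed tables instead of the LC_TIME-dependent %a/%b.
    wkday, month = value[:7].split()
    t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
    t = t.replace(month=MONTHNAMES.index(month), tzinfo=timezone.utc)
    assert t.weekday() == DAYNAMES.index(wkday), f"{value!r} has inconsistent weekday"
    return int(t.timestamp())

print(parse_asctime_utc("Mon Jan  8 12:28:04 2007"))  # seconds since the Unix epoch
```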
gh_patches_debug_3077
|
rasdani/github-patches
|
git_diff
|
SeldonIO__MLServer-1168
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expected XGBoost model file "model.bst" extension is undocumented?
On https://github.com/SeldonIO/MLServer/blob/master/runtimes/xgboost/mlserver_xgboost/xgboost.py#L21 you can see that MLServer is looking for an XGBoost model file called "model.bst". However, I cannot find any reference to that file extension in the XGBoost documentation. As far as I can see, XGBoost's documented file extensions are:
- ".json" added in 1.0.0, an "open format that can be easily reused"
- ".ubj" for Universal Binary JSON format, available in 1.6.0
- ".model" for the "old binary internal format" prior to 1.0.0, as shown in examples
Where does MLServer get the ".bst" extension from, and what model format does it use? Shouldn't it use one of the extensions mentioned in the XGBoost documentation instead, to avoid ambiguity?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `runtimes/xgboost/mlserver_xgboost/xgboost.py`
Content:
```
1 import xgboost as xgb
2
3 from typing import List
4 from xgboost.sklearn import XGBModel
5
6 from mlserver.errors import InferenceError
7 from mlserver.model import MLModel
8 from mlserver.utils import get_model_uri
9 from mlserver.codecs import NumpyRequestCodec, NumpyCodec
10 from mlserver.types import (
11 InferenceRequest,
12 InferenceResponse,
13 RequestOutput,
14 ResponseOutput,
15 )
16
17 PREDICT_OUTPUT = "predict"
18 PREDICT_PROBA_OUTPUT = "predict_proba"
19 VALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]
20
21 WELLKNOWN_MODEL_FILENAMES = ["model.bst", "model.json"]
22
23
24 def _load_sklearn_interface(model_uri: str) -> XGBModel:
25 try:
26 regressor = xgb.XGBRegressor()
27 regressor.load_model(model_uri)
28 return regressor
29 except TypeError:
30 # If there was an error, it's likely due to the model being a
31 # classifier
32 classifier = xgb.XGBClassifier()
33 classifier.load_model(model_uri)
34 return classifier
35
36
37 class XGBoostModel(MLModel):
38 """
39 Implementationof the MLModel interface to load and serve `xgboost` models.
40 """
41
42 async def load(self) -> bool:
43 model_uri = await get_model_uri(
44 self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES
45 )
46
47 self._model = _load_sklearn_interface(model_uri)
48
49 return True
50
51 def _check_request(self, payload: InferenceRequest) -> InferenceRequest:
52 if not payload.outputs:
53 # By default, only return the result of `predict()`
54 payload.outputs = [RequestOutput(name=PREDICT_OUTPUT)]
55 else:
56 for request_output in payload.outputs:
57 if request_output.name not in VALID_OUTPUTS:
58 raise InferenceError(
59 f"XGBoostModel only supports '{PREDICT_OUTPUT}' and "
60 f"'{PREDICT_PROBA_OUTPUT}' as outputs "
61 f"({request_output.name} was received)"
62 )
63
64 # Regression models do not support `predict_proba`
65 if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]:
66 if isinstance(self._model, xgb.XGBRegressor):
67 raise InferenceError(
68 f"XGBRegressor models do not support '{PREDICT_PROBA_OUTPUT}"
69 )
70
71 return payload
72
73 def _get_model_outputs(self, payload: InferenceRequest) -> List[ResponseOutput]:
74 decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec)
75
76 outputs = []
77 for request_output in payload.outputs: # type: ignore
78 predict_fn = getattr(self._model, request_output.name)
79 y = predict_fn(decoded_request)
80
81 output = self.encode(y, request_output, default_codec=NumpyCodec)
82 outputs.append(output)
83
84 return outputs
85
86 async def predict(self, payload: InferenceRequest) -> InferenceResponse:
87 payload = self._check_request(payload)
88 outputs = self._get_model_outputs(payload)
89
90 return InferenceResponse(
91 model_name=self.name,
92 model_version=self.version,
93 outputs=outputs,
94 )
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/runtimes/xgboost/mlserver_xgboost/xgboost.py b/runtimes/xgboost/mlserver_xgboost/xgboost.py
--- a/runtimes/xgboost/mlserver_xgboost/xgboost.py
+++ b/runtimes/xgboost/mlserver_xgboost/xgboost.py
@@ -18,7 +18,7 @@
PREDICT_PROBA_OUTPUT = "predict_proba"
VALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]
-WELLKNOWN_MODEL_FILENAMES = ["model.bst", "model.json"]
+WELLKNOWN_MODEL_FILENAMES = ["model.bst", "model.json", "model.ubj"]
def _load_sklearn_interface(model_uri: str) -> XGBModel:
|
{"golden_diff": "diff --git a/runtimes/xgboost/mlserver_xgboost/xgboost.py b/runtimes/xgboost/mlserver_xgboost/xgboost.py\n--- a/runtimes/xgboost/mlserver_xgboost/xgboost.py\n+++ b/runtimes/xgboost/mlserver_xgboost/xgboost.py\n@@ -18,7 +18,7 @@\n PREDICT_PROBA_OUTPUT = \"predict_proba\"\n VALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]\n \n-WELLKNOWN_MODEL_FILENAMES = [\"model.bst\", \"model.json\"]\n+WELLKNOWN_MODEL_FILENAMES = [\"model.bst\", \"model.json\", \"model.ubj\"]\n \n \n def _load_sklearn_interface(model_uri: str) -> XGBModel:\n", "issue": "Expected XGBoost model file \"model.bst\" extension is undocumented? \nOn https://github.com/SeldonIO/MLServer/blob/master/runtimes/xgboost/mlserver_xgboost/xgboost.py#L21 you can see that MLServer is looking for an XGBoost model file called \"model.bst\". However, I cannot find any reference to that file extension in the XGBoost documentation. As far as I can see, XGBoost's documented file extensions are:\r\n\r\n- \".json\" added in 1.0.0, an \"open format that can be easily reused\"\r\n- \".ubj\" for Universal Binary JSON format, available in 1.6.0\r\n- \".model\" for the \"old binary internal format\" prior to 1.0.0, as shown in examples\r\n\r\nWhere does MLServer get the \".bst\" extension from, and what model format does it use? Shouldn't it use one of the extensions mentioned in the XGBoost documentation instead, to avoid ambiguity?\n", "before_files": [{"content": "import xgboost as xgb\n\nfrom typing import List\nfrom xgboost.sklearn import XGBModel\n\nfrom mlserver.errors import InferenceError\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import NumpyRequestCodec, NumpyCodec\nfrom mlserver.types import (\n InferenceRequest,\n InferenceResponse,\n RequestOutput,\n ResponseOutput,\n)\n\nPREDICT_OUTPUT = \"predict\"\nPREDICT_PROBA_OUTPUT = \"predict_proba\"\nVALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]\n\nWELLKNOWN_MODEL_FILENAMES = [\"model.bst\", \"model.json\"]\n\n\ndef _load_sklearn_interface(model_uri: str) -> XGBModel:\n try:\n regressor = xgb.XGBRegressor()\n regressor.load_model(model_uri)\n return regressor\n except TypeError:\n # If there was an error, it's likely due to the model being a\n # classifier\n classifier = xgb.XGBClassifier()\n classifier.load_model(model_uri)\n return classifier\n\n\nclass XGBoostModel(MLModel):\n \"\"\"\n Implementationof the MLModel interface to load and serve `xgboost` models.\n \"\"\"\n\n async def load(self) -> bool:\n model_uri = await get_model_uri(\n self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES\n )\n\n self._model = _load_sklearn_interface(model_uri)\n\n return True\n\n def _check_request(self, payload: InferenceRequest) -> InferenceRequest:\n if not payload.outputs:\n # By default, only return the result of `predict()`\n payload.outputs = [RequestOutput(name=PREDICT_OUTPUT)]\n else:\n for request_output in payload.outputs:\n if request_output.name not in VALID_OUTPUTS:\n raise InferenceError(\n f\"XGBoostModel only supports '{PREDICT_OUTPUT}' and \"\n f\"'{PREDICT_PROBA_OUTPUT}' as outputs \"\n f\"({request_output.name} was received)\"\n )\n\n # Regression models do not support `predict_proba`\n if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]:\n if isinstance(self._model, xgb.XGBRegressor):\n raise InferenceError(\n f\"XGBRegressor models do not support '{PREDICT_PROBA_OUTPUT}\"\n )\n\n return payload\n\n def _get_model_outputs(self, payload: InferenceRequest) -> List[ResponseOutput]:\n 
decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec)\n\n outputs = []\n for request_output in payload.outputs: # type: ignore\n predict_fn = getattr(self._model, request_output.name)\n y = predict_fn(decoded_request)\n\n output = self.encode(y, request_output, default_codec=NumpyCodec)\n outputs.append(output)\n\n return outputs\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n payload = self._check_request(payload)\n outputs = self._get_model_outputs(payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=outputs,\n )\n", "path": "runtimes/xgboost/mlserver_xgboost/xgboost.py"}], "after_files": [{"content": "import xgboost as xgb\n\nfrom typing import List\nfrom xgboost.sklearn import XGBModel\n\nfrom mlserver.errors import InferenceError\nfrom mlserver.model import MLModel\nfrom mlserver.utils import get_model_uri\nfrom mlserver.codecs import NumpyRequestCodec, NumpyCodec\nfrom mlserver.types import (\n InferenceRequest,\n InferenceResponse,\n RequestOutput,\n ResponseOutput,\n)\n\nPREDICT_OUTPUT = \"predict\"\nPREDICT_PROBA_OUTPUT = \"predict_proba\"\nVALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]\n\nWELLKNOWN_MODEL_FILENAMES = [\"model.bst\", \"model.json\", \"model.ubj\"]\n\n\ndef _load_sklearn_interface(model_uri: str) -> XGBModel:\n try:\n regressor = xgb.XGBRegressor()\n regressor.load_model(model_uri)\n return regressor\n except TypeError:\n # If there was an error, it's likely due to the model being a\n # classifier\n classifier = xgb.XGBClassifier()\n classifier.load_model(model_uri)\n return classifier\n\n\nclass XGBoostModel(MLModel):\n \"\"\"\n Implementationof the MLModel interface to load and serve `xgboost` models.\n \"\"\"\n\n async def load(self) -> bool:\n model_uri = await get_model_uri(\n self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES\n )\n\n self._model = _load_sklearn_interface(model_uri)\n\n return True\n\n def _check_request(self, payload: InferenceRequest) -> InferenceRequest:\n if not payload.outputs:\n # By default, only return the result of `predict()`\n payload.outputs = [RequestOutput(name=PREDICT_OUTPUT)]\n else:\n for request_output in payload.outputs:\n if request_output.name not in VALID_OUTPUTS:\n raise InferenceError(\n f\"XGBoostModel only supports '{PREDICT_OUTPUT}' and \"\n f\"'{PREDICT_PROBA_OUTPUT}' as outputs \"\n f\"({request_output.name} was received)\"\n )\n\n # Regression models do not support `predict_proba`\n if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]:\n if isinstance(self._model, xgb.XGBRegressor):\n raise InferenceError(\n f\"XGBRegressor models do not support '{PREDICT_PROBA_OUTPUT}\"\n )\n\n return payload\n\n def _get_model_outputs(self, payload: InferenceRequest) -> List[ResponseOutput]:\n decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec)\n\n outputs = []\n for request_output in payload.outputs: # type: ignore\n predict_fn = getattr(self._model, request_output.name)\n y = predict_fn(decoded_request)\n\n output = self.encode(y, request_output, default_codec=NumpyCodec)\n outputs.append(output)\n\n return outputs\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n payload = self._check_request(payload)\n outputs = self._get_model_outputs(payload)\n\n return InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=outputs,\n )\n", "path": "runtimes/xgboost/mlserver_xgboost/xgboost.py"}]}
| 1,354 | 170 |
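A small sketch of producing model files that match the extended filename list; it assumes xgboost ≥ 1.6 (needed for the .ubj format) and uses synthetic data:

```python
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 4))
y = rng.integers(0, 2, size=100)

clf = xgb.XGBClassifier(n_estimators=5)
clf.fit(X, y)

# The file extension selects XGBoost's documented serialisation format, and both
# names are matched by WELLKNOWN_MODEL_FILENAMES once "model.ubj" is added.
clf.save_model("model.json")  # open JSON format
clf.save_model("model.ubj")   # Universal Binary JSON format
```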
gh_patches_debug_63210
|
rasdani/github-patches
|
git_diff
|
ManimCommunity__manim-1635
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
numpy not imported in `manim.mobject.probability`
## Description of bug / unexpected behavior
<!-- Add a clear and concise description of the problem you encountered. -->
When you try to use `BarChart` it raises an error saying `np is not defined`
## Expected behavior
<!-- Add a clear and concise description of what you expected to happen. -->
To not get the error and show the bar chart.
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
class Barchart(Scene):
def construct(self):
ls = [12,12,13,15,19,20,21]
bg = BarChart(ls)
self.add(bg)
```
</details>
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
<details><summary>Images/GIFs</summary>
<!-- PASTE MEDIA HERE -->
</details>
## Logs
<details><summary>Terminal output</summary>
<!-- Add "-v DEBUG" when calling manim to generate more detailed logs -->
```
<string> in <module>
<string> in construct(self)
/usr/local/lib/python3.7/dist-packages/manim/mobject/probability.py in add_axes(self, width, height)
197 x_axis = Line(self.tick_width * LEFT / 2, width * RIGHT)
198 y_axis = Line(MED_LARGE_BUFF * DOWN, height * UP)
--> 199 ticks = VGroup()
200 heights = np.linspace(0, height, self.n_ticks + 1)
201 values = np.linspace(0, self.max_value, self.n_ticks + 1)
NameError: name 'np' is not defined
```
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
</details>
## System specifications
<details><summary>System Details</summary>
- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):
- RAM:
- Python version (`python/py/python3 --version`):
- Installed modules (provide output from `pip list`):
```
Google Colab
```
</details>
<details><summary>LaTeX details</summary>
+ LaTeX distribution (e.g. TeX Live 2020):
+ Installed LaTeX packages:
<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->
</details>
<details><summary>FFMPEG</summary>
Output of `ffmpeg -version`:
```
PASTE HERE
```
</details>
## Additional comments
<!-- Add further context that you think might be relevant for this issue here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manim/mobject/probability.py`
Content:
```
1 """Mobjects representing objects from probability theory and statistics."""
2
3 __all__ = ["SampleSpace", "BarChart"]
4
5
6 from ..constants import *
7 from ..mobject.geometry import Line, Rectangle
8 from ..mobject.mobject import Mobject
9 from ..mobject.opengl_mobject import OpenGLMobject
10 from ..mobject.svg.brace import Brace
11 from ..mobject.svg.tex_mobject import MathTex, Tex
12 from ..mobject.types.vectorized_mobject import VGroup
13 from ..utils.color import (
14 BLUE,
15 BLUE_E,
16 DARK_GREY,
17 GREEN_E,
18 LIGHT_GREY,
19 MAROON_B,
20 YELLOW,
21 color_gradient,
22 )
23 from ..utils.iterables import tuplify
24
25 EPSILON = 0.0001
26
27
28 class SampleSpace(Rectangle):
29 def __init__(
30 self,
31 height=3,
32 width=3,
33 fill_color=DARK_GREY,
34 fill_opacity=1,
35 stroke_width=0.5,
36 stroke_color=LIGHT_GREY,
37 default_label_scale_val=1,
38 ):
39 Rectangle.__init__(
40 self,
41 height=height,
42 width=width,
43 fill_color=fill_color,
44 fill_opacity=fill_opacity,
45 stroke_width=stroke_width,
46 stroke_color=stroke_color,
47 )
48 self.default_label_scale_val = default_label_scale_val
49
50 def add_title(self, title="Sample space", buff=MED_SMALL_BUFF):
51 # TODO, should this really exist in SampleSpaceScene
52 title_mob = Tex(title)
53 if title_mob.width > self.width:
54 title_mob.width = self.width
55 title_mob.next_to(self, UP, buff=buff)
56 self.title = title_mob
57 self.add(title_mob)
58
59 def add_label(self, label):
60 self.label = label
61
62 def complete_p_list(self, p_list):
63 new_p_list = list(tuplify(p_list))
64 remainder = 1.0 - sum(new_p_list)
65 if abs(remainder) > EPSILON:
66 new_p_list.append(remainder)
67 return new_p_list
68
69 def get_division_along_dimension(self, p_list, dim, colors, vect):
70 p_list = self.complete_p_list(p_list)
71 colors = color_gradient(colors, len(p_list))
72
73 last_point = self.get_edge_center(-vect)
74 parts = VGroup()
75 for factor, color in zip(p_list, colors):
76 part = SampleSpace()
77 part.set_fill(color, 1)
78 part.replace(self, stretch=True)
79 part.stretch(factor, dim)
80 part.move_to(last_point, -vect)
81 last_point = part.get_edge_center(vect)
82 parts.add(part)
83 return parts
84
85 def get_horizontal_division(self, p_list, colors=[GREEN_E, BLUE_E], vect=DOWN):
86 return self.get_division_along_dimension(p_list, 1, colors, vect)
87
88 def get_vertical_division(self, p_list, colors=[MAROON_B, YELLOW], vect=RIGHT):
89 return self.get_division_along_dimension(p_list, 0, colors, vect)
90
91 def divide_horizontally(self, *args, **kwargs):
92 self.horizontal_parts = self.get_horizontal_division(*args, **kwargs)
93 self.add(self.horizontal_parts)
94
95 def divide_vertically(self, *args, **kwargs):
96 self.vertical_parts = self.get_vertical_division(*args, **kwargs)
97 self.add(self.vertical_parts)
98
99 def get_subdivision_braces_and_labels(
100 self, parts, labels, direction, buff=SMALL_BUFF, min_num_quads=1
101 ):
102 label_mobs = VGroup()
103 braces = VGroup()
104 for label, part in zip(labels, parts):
105 brace = Brace(part, direction, min_num_quads=min_num_quads, buff=buff)
106 if isinstance(label, (Mobject, OpenGLMobject)):
107 label_mob = label
108 else:
109 label_mob = MathTex(label)
110 label_mob.scale(self.default_label_scale_val)
111 label_mob.next_to(brace, direction, buff)
112
113 braces.add(brace)
114 label_mobs.add(label_mob)
115 parts.braces = braces
116 parts.labels = label_mobs
117 parts.label_kwargs = {
118 "labels": label_mobs.copy(),
119 "direction": direction,
120 "buff": buff,
121 }
122 return VGroup(parts.braces, parts.labels)
123
124 def get_side_braces_and_labels(self, labels, direction=LEFT, **kwargs):
125 assert hasattr(self, "horizontal_parts")
126 parts = self.horizontal_parts
127 return self.get_subdivision_braces_and_labels(
128 parts, labels, direction, **kwargs
129 )
130
131 def get_top_braces_and_labels(self, labels, **kwargs):
132 assert hasattr(self, "vertical_parts")
133 parts = self.vertical_parts
134 return self.get_subdivision_braces_and_labels(parts, labels, UP, **kwargs)
135
136 def get_bottom_braces_and_labels(self, labels, **kwargs):
137 assert hasattr(self, "vertical_parts")
138 parts = self.vertical_parts
139 return self.get_subdivision_braces_and_labels(parts, labels, DOWN, **kwargs)
140
141 def add_braces_and_labels(self):
142 for attr in "horizontal_parts", "vertical_parts":
143 if not hasattr(self, attr):
144 continue
145 parts = getattr(self, attr)
146 for subattr in "braces", "labels":
147 if hasattr(parts, subattr):
148 self.add(getattr(parts, subattr))
149
150 def __getitem__(self, index):
151 if hasattr(self, "horizontal_parts"):
152 return self.horizontal_parts[index]
153 elif hasattr(self, "vertical_parts"):
154 return self.vertical_parts[index]
155 return self.split()[index]
156
157
158 class BarChart(VGroup):
159 def __init__(
160 self,
161 values,
162 height=4,
163 width=6,
164 n_ticks=4,
165 tick_width=0.2,
166 label_y_axis=True,
167 y_axis_label_height=0.25,
168 max_value=1,
169 bar_colors=[BLUE, YELLOW],
170 bar_fill_opacity=0.8,
171 bar_stroke_width=3,
172 bar_names=[],
173 bar_label_scale_val=0.75,
174 **kwargs
175 ):
176 VGroup.__init__(self, **kwargs)
177 self.n_ticks = n_ticks
178 self.tick_width = tick_width
179 self.label_y_axis = label_y_axis
180 self.y_axis_label_height = y_axis_label_height
181 self.max_value = max_value
182 self.bar_colors = bar_colors
183 self.bar_fill_opacity = bar_fill_opacity
184 self.bar_stroke_width = bar_stroke_width
185 self.bar_names = bar_names
186 self.bar_label_scale_val = bar_label_scale_val
187
188 if self.max_value is None:
189 self.max_value = max(values)
190
191 self.add_axes(width, height)
192 self.add_bars(values, width, height)
193 self.center()
194
195 def add_axes(self, width, height):
196 x_axis = Line(self.tick_width * LEFT / 2, width * RIGHT)
197 y_axis = Line(MED_LARGE_BUFF * DOWN, height * UP)
198 ticks = VGroup()
199 heights = np.linspace(0, height, self.n_ticks + 1)
200 values = np.linspace(0, self.max_value, self.n_ticks + 1)
201 for y, _value in zip(heights, values):
202 tick = Line(LEFT, RIGHT)
203 tick.width = self.tick_width
204 tick.move_to(y * UP)
205 ticks.add(tick)
206 y_axis.add(ticks)
207
208 self.add(x_axis, y_axis)
209 self.x_axis, self.y_axis = x_axis, y_axis
210
211 if self.label_y_axis:
212 labels = VGroup()
213 for tick, value in zip(ticks, values):
214 label = MathTex(str(np.round(value, 2)))
215 label.height = self.y_axis_label_height
216 label.next_to(tick, LEFT, SMALL_BUFF)
217 labels.add(label)
218 self.y_axis_labels = labels
219 self.add(labels)
220
221 def add_bars(self, values, width, height):
222 buff = float(width) / (2 * len(values) + 1)
223 bars = VGroup()
224 for i, value in enumerate(values):
225 bar = Rectangle(
226 height=(value / self.max_value) * height,
227 width=buff,
228 stroke_width=self.bar_stroke_width,
229 fill_opacity=self.bar_fill_opacity,
230 )
231 bar.move_to((2 * i + 1) * buff * RIGHT, DOWN + LEFT)
232 bars.add(bar)
233 bars.set_color_by_gradient(*self.bar_colors)
234
235 bar_labels = VGroup()
236 for bar, name in zip(bars, self.bar_names):
237 label = MathTex(str(name))
238 label.scale(self.bar_label_scale_val)
239 label.next_to(bar, DOWN, SMALL_BUFF)
240 bar_labels.add(label)
241
242 self.add(bars, bar_labels)
243 self.bars = bars
244 self.bar_labels = bar_labels
245
246 def change_bar_values(self, values):
247 for bar, value in zip(self.bars, values):
248 bar_bottom = bar.get_bottom()
249 bar.stretch_to_fit_height((value / self.max_value) * self.height)
250 bar.move_to(bar_bottom, DOWN)
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/manim/mobject/probability.py b/manim/mobject/probability.py
--- a/manim/mobject/probability.py
+++ b/manim/mobject/probability.py
@@ -2,6 +2,7 @@
__all__ = ["SampleSpace", "BarChart"]
+import numpy as np
from ..constants import *
from ..mobject.geometry import Line, Rectangle
|
{"golden_diff": "diff --git a/manim/mobject/probability.py b/manim/mobject/probability.py\n--- a/manim/mobject/probability.py\n+++ b/manim/mobject/probability.py\n@@ -2,6 +2,7 @@\n \n __all__ = [\"SampleSpace\", \"BarChart\"]\n \n+import numpy as np\n \n from ..constants import *\n from ..mobject.geometry import Line, Rectangle\n", "issue": "numpy not imported in `manim.mobject.probability`\n## Description of bug / unexpected behavior\r\n<!-- Add a clear and concise description of the problem you encountered. -->\r\nWhen you try to use `BarChart` it raises an error saying `np is not defined`\r\n\r\n## Expected behavior\r\n<!-- Add a clear and concise description of what you expected to happen. -->\r\nTo not get the error and show the bar chart.\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nclass Barchart(Scene):\r\n def construct(self):\r\n ls = [12,12,13,15,19,20,21]\r\n bg = BarChart(ls)\r\n self.add(bg)\r\n```\r\n\r\n</details>\r\n\r\n\r\n## Additional media files\r\n<!-- Paste in the files manim produced on rendering the code above. -->\r\n\r\n<details><summary>Images/GIFs</summary>\r\n\r\n<!-- PASTE MEDIA HERE -->\r\n\r\n</details>\r\n\r\n\r\n## Logs\r\n<details><summary>Terminal output</summary>\r\n<!-- Add \"-v DEBUG\" when calling manim to generate more detailed logs -->\r\n\r\n```\r\n<string> in <module>\r\n\r\n<string> in construct(self)\r\n\r\n/usr/local/lib/python3.7/dist-packages/manim/mobject/probability.py in add_axes(self, width, height)\r\n 197 x_axis = Line(self.tick_width * LEFT / 2, width * RIGHT)\r\n 198 y_axis = Line(MED_LARGE_BUFF * DOWN, height * UP)\r\n--> 199 ticks = VGroup()\r\n 200 heights = np.linspace(0, height, self.n_ticks + 1)\r\n 201 values = np.linspace(0, self.max_value, self.n_ticks + 1)\r\n\r\nNameError: name 'np' is not defined\r\n```\r\n\r\n<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->\r\n\r\n</details>\r\n\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):\r\n- RAM:\r\n- Python version (`python/py/python3 --version`):\r\n- Installed modules (provide output from `pip list`):\r\n```\r\nGoogle Colab\r\n```\r\n</details>\r\n\r\n<details><summary>LaTeX details</summary>\r\n\r\n+ LaTeX distribution (e.g. TeX Live 2020):\r\n+ Installed LaTeX packages:\r\n<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->\r\n</details>\r\n\r\n<details><summary>FFMPEG</summary>\r\n\r\nOutput of `ffmpeg -version`:\r\n\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant for this issue here. 
-->\r\n\n", "before_files": [{"content": "\"\"\"Mobjects representing objects from probability theory and statistics.\"\"\"\n\n__all__ = [\"SampleSpace\", \"BarChart\"]\n\n\nfrom ..constants import *\nfrom ..mobject.geometry import Line, Rectangle\nfrom ..mobject.mobject import Mobject\nfrom ..mobject.opengl_mobject import OpenGLMobject\nfrom ..mobject.svg.brace import Brace\nfrom ..mobject.svg.tex_mobject import MathTex, Tex\nfrom ..mobject.types.vectorized_mobject import VGroup\nfrom ..utils.color import (\n BLUE,\n BLUE_E,\n DARK_GREY,\n GREEN_E,\n LIGHT_GREY,\n MAROON_B,\n YELLOW,\n color_gradient,\n)\nfrom ..utils.iterables import tuplify\n\nEPSILON = 0.0001\n\n\nclass SampleSpace(Rectangle):\n def __init__(\n self,\n height=3,\n width=3,\n fill_color=DARK_GREY,\n fill_opacity=1,\n stroke_width=0.5,\n stroke_color=LIGHT_GREY,\n default_label_scale_val=1,\n ):\n Rectangle.__init__(\n self,\n height=height,\n width=width,\n fill_color=fill_color,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n stroke_color=stroke_color,\n )\n self.default_label_scale_val = default_label_scale_val\n\n def add_title(self, title=\"Sample space\", buff=MED_SMALL_BUFF):\n # TODO, should this really exist in SampleSpaceScene\n title_mob = Tex(title)\n if title_mob.width > self.width:\n title_mob.width = self.width\n title_mob.next_to(self, UP, buff=buff)\n self.title = title_mob\n self.add(title_mob)\n\n def add_label(self, label):\n self.label = label\n\n def complete_p_list(self, p_list):\n new_p_list = list(tuplify(p_list))\n remainder = 1.0 - sum(new_p_list)\n if abs(remainder) > EPSILON:\n new_p_list.append(remainder)\n return new_p_list\n\n def get_division_along_dimension(self, p_list, dim, colors, vect):\n p_list = self.complete_p_list(p_list)\n colors = color_gradient(colors, len(p_list))\n\n last_point = self.get_edge_center(-vect)\n parts = VGroup()\n for factor, color in zip(p_list, colors):\n part = SampleSpace()\n part.set_fill(color, 1)\n part.replace(self, stretch=True)\n part.stretch(factor, dim)\n part.move_to(last_point, -vect)\n last_point = part.get_edge_center(vect)\n parts.add(part)\n return parts\n\n def get_horizontal_division(self, p_list, colors=[GREEN_E, BLUE_E], vect=DOWN):\n return self.get_division_along_dimension(p_list, 1, colors, vect)\n\n def get_vertical_division(self, p_list, colors=[MAROON_B, YELLOW], vect=RIGHT):\n return self.get_division_along_dimension(p_list, 0, colors, vect)\n\n def divide_horizontally(self, *args, **kwargs):\n self.horizontal_parts = self.get_horizontal_division(*args, **kwargs)\n self.add(self.horizontal_parts)\n\n def divide_vertically(self, *args, **kwargs):\n self.vertical_parts = self.get_vertical_division(*args, **kwargs)\n self.add(self.vertical_parts)\n\n def get_subdivision_braces_and_labels(\n self, parts, labels, direction, buff=SMALL_BUFF, min_num_quads=1\n ):\n label_mobs = VGroup()\n braces = VGroup()\n for label, part in zip(labels, parts):\n brace = Brace(part, direction, min_num_quads=min_num_quads, buff=buff)\n if isinstance(label, (Mobject, OpenGLMobject)):\n label_mob = label\n else:\n label_mob = MathTex(label)\n label_mob.scale(self.default_label_scale_val)\n label_mob.next_to(brace, direction, buff)\n\n braces.add(brace)\n label_mobs.add(label_mob)\n parts.braces = braces\n parts.labels = label_mobs\n parts.label_kwargs = {\n \"labels\": label_mobs.copy(),\n \"direction\": direction,\n \"buff\": buff,\n }\n return VGroup(parts.braces, parts.labels)\n\n def get_side_braces_and_labels(self, labels, direction=LEFT, 
**kwargs):\n assert hasattr(self, \"horizontal_parts\")\n parts = self.horizontal_parts\n return self.get_subdivision_braces_and_labels(\n parts, labels, direction, **kwargs\n )\n\n def get_top_braces_and_labels(self, labels, **kwargs):\n assert hasattr(self, \"vertical_parts\")\n parts = self.vertical_parts\n return self.get_subdivision_braces_and_labels(parts, labels, UP, **kwargs)\n\n def get_bottom_braces_and_labels(self, labels, **kwargs):\n assert hasattr(self, \"vertical_parts\")\n parts = self.vertical_parts\n return self.get_subdivision_braces_and_labels(parts, labels, DOWN, **kwargs)\n\n def add_braces_and_labels(self):\n for attr in \"horizontal_parts\", \"vertical_parts\":\n if not hasattr(self, attr):\n continue\n parts = getattr(self, attr)\n for subattr in \"braces\", \"labels\":\n if hasattr(parts, subattr):\n self.add(getattr(parts, subattr))\n\n def __getitem__(self, index):\n if hasattr(self, \"horizontal_parts\"):\n return self.horizontal_parts[index]\n elif hasattr(self, \"vertical_parts\"):\n return self.vertical_parts[index]\n return self.split()[index]\n\n\nclass BarChart(VGroup):\n def __init__(\n self,\n values,\n height=4,\n width=6,\n n_ticks=4,\n tick_width=0.2,\n label_y_axis=True,\n y_axis_label_height=0.25,\n max_value=1,\n bar_colors=[BLUE, YELLOW],\n bar_fill_opacity=0.8,\n bar_stroke_width=3,\n bar_names=[],\n bar_label_scale_val=0.75,\n **kwargs\n ):\n VGroup.__init__(self, **kwargs)\n self.n_ticks = n_ticks\n self.tick_width = tick_width\n self.label_y_axis = label_y_axis\n self.y_axis_label_height = y_axis_label_height\n self.max_value = max_value\n self.bar_colors = bar_colors\n self.bar_fill_opacity = bar_fill_opacity\n self.bar_stroke_width = bar_stroke_width\n self.bar_names = bar_names\n self.bar_label_scale_val = bar_label_scale_val\n\n if self.max_value is None:\n self.max_value = max(values)\n\n self.add_axes(width, height)\n self.add_bars(values, width, height)\n self.center()\n\n def add_axes(self, width, height):\n x_axis = Line(self.tick_width * LEFT / 2, width * RIGHT)\n y_axis = Line(MED_LARGE_BUFF * DOWN, height * UP)\n ticks = VGroup()\n heights = np.linspace(0, height, self.n_ticks + 1)\n values = np.linspace(0, self.max_value, self.n_ticks + 1)\n for y, _value in zip(heights, values):\n tick = Line(LEFT, RIGHT)\n tick.width = self.tick_width\n tick.move_to(y * UP)\n ticks.add(tick)\n y_axis.add(ticks)\n\n self.add(x_axis, y_axis)\n self.x_axis, self.y_axis = x_axis, y_axis\n\n if self.label_y_axis:\n labels = VGroup()\n for tick, value in zip(ticks, values):\n label = MathTex(str(np.round(value, 2)))\n label.height = self.y_axis_label_height\n label.next_to(tick, LEFT, SMALL_BUFF)\n labels.add(label)\n self.y_axis_labels = labels\n self.add(labels)\n\n def add_bars(self, values, width, height):\n buff = float(width) / (2 * len(values) + 1)\n bars = VGroup()\n for i, value in enumerate(values):\n bar = Rectangle(\n height=(value / self.max_value) * height,\n width=buff,\n stroke_width=self.bar_stroke_width,\n fill_opacity=self.bar_fill_opacity,\n )\n bar.move_to((2 * i + 1) * buff * RIGHT, DOWN + LEFT)\n bars.add(bar)\n bars.set_color_by_gradient(*self.bar_colors)\n\n bar_labels = VGroup()\n for bar, name in zip(bars, self.bar_names):\n label = MathTex(str(name))\n label.scale(self.bar_label_scale_val)\n label.next_to(bar, DOWN, SMALL_BUFF)\n bar_labels.add(label)\n\n self.add(bars, bar_labels)\n self.bars = bars\n self.bar_labels = bar_labels\n\n def change_bar_values(self, values):\n for bar, value in zip(self.bars, values):\n 
bar_bottom = bar.get_bottom()\n bar.stretch_to_fit_height((value / self.max_value) * self.height)\n bar.move_to(bar_bottom, DOWN)\n", "path": "manim/mobject/probability.py"}], "after_files": [{"content": "\"\"\"Mobjects representing objects from probability theory and statistics.\"\"\"\n\n__all__ = [\"SampleSpace\", \"BarChart\"]\n\nimport numpy as np\n\nfrom ..constants import *\nfrom ..mobject.geometry import Line, Rectangle\nfrom ..mobject.mobject import Mobject\nfrom ..mobject.opengl_mobject import OpenGLMobject\nfrom ..mobject.svg.brace import Brace\nfrom ..mobject.svg.tex_mobject import MathTex, Tex\nfrom ..mobject.types.vectorized_mobject import VGroup\nfrom ..utils.color import (\n BLUE,\n BLUE_E,\n DARK_GREY,\n GREEN_E,\n LIGHT_GREY,\n MAROON_B,\n YELLOW,\n color_gradient,\n)\nfrom ..utils.iterables import tuplify\n\nEPSILON = 0.0001\n\n\nclass SampleSpace(Rectangle):\n def __init__(\n self,\n height=3,\n width=3,\n fill_color=DARK_GREY,\n fill_opacity=1,\n stroke_width=0.5,\n stroke_color=LIGHT_GREY,\n default_label_scale_val=1,\n ):\n Rectangle.__init__(\n self,\n height=height,\n width=width,\n fill_color=fill_color,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n stroke_color=stroke_color,\n )\n self.default_label_scale_val = default_label_scale_val\n\n def add_title(self, title=\"Sample space\", buff=MED_SMALL_BUFF):\n # TODO, should this really exist in SampleSpaceScene\n title_mob = Tex(title)\n if title_mob.width > self.width:\n title_mob.width = self.width\n title_mob.next_to(self, UP, buff=buff)\n self.title = title_mob\n self.add(title_mob)\n\n def add_label(self, label):\n self.label = label\n\n def complete_p_list(self, p_list):\n new_p_list = list(tuplify(p_list))\n remainder = 1.0 - sum(new_p_list)\n if abs(remainder) > EPSILON:\n new_p_list.append(remainder)\n return new_p_list\n\n def get_division_along_dimension(self, p_list, dim, colors, vect):\n p_list = self.complete_p_list(p_list)\n colors = color_gradient(colors, len(p_list))\n\n last_point = self.get_edge_center(-vect)\n parts = VGroup()\n for factor, color in zip(p_list, colors):\n part = SampleSpace()\n part.set_fill(color, 1)\n part.replace(self, stretch=True)\n part.stretch(factor, dim)\n part.move_to(last_point, -vect)\n last_point = part.get_edge_center(vect)\n parts.add(part)\n return parts\n\n def get_horizontal_division(self, p_list, colors=[GREEN_E, BLUE_E], vect=DOWN):\n return self.get_division_along_dimension(p_list, 1, colors, vect)\n\n def get_vertical_division(self, p_list, colors=[MAROON_B, YELLOW], vect=RIGHT):\n return self.get_division_along_dimension(p_list, 0, colors, vect)\n\n def divide_horizontally(self, *args, **kwargs):\n self.horizontal_parts = self.get_horizontal_division(*args, **kwargs)\n self.add(self.horizontal_parts)\n\n def divide_vertically(self, *args, **kwargs):\n self.vertical_parts = self.get_vertical_division(*args, **kwargs)\n self.add(self.vertical_parts)\n\n def get_subdivision_braces_and_labels(\n self, parts, labels, direction, buff=SMALL_BUFF, min_num_quads=1\n ):\n label_mobs = VGroup()\n braces = VGroup()\n for label, part in zip(labels, parts):\n brace = Brace(part, direction, min_num_quads=min_num_quads, buff=buff)\n if isinstance(label, (Mobject, OpenGLMobject)):\n label_mob = label\n else:\n label_mob = MathTex(label)\n label_mob.scale(self.default_label_scale_val)\n label_mob.next_to(brace, direction, buff)\n\n braces.add(brace)\n label_mobs.add(label_mob)\n parts.braces = braces\n parts.labels = label_mobs\n parts.label_kwargs = {\n 
\"labels\": label_mobs.copy(),\n \"direction\": direction,\n \"buff\": buff,\n }\n return VGroup(parts.braces, parts.labels)\n\n def get_side_braces_and_labels(self, labels, direction=LEFT, **kwargs):\n assert hasattr(self, \"horizontal_parts\")\n parts = self.horizontal_parts\n return self.get_subdivision_braces_and_labels(\n parts, labels, direction, **kwargs\n )\n\n def get_top_braces_and_labels(self, labels, **kwargs):\n assert hasattr(self, \"vertical_parts\")\n parts = self.vertical_parts\n return self.get_subdivision_braces_and_labels(parts, labels, UP, **kwargs)\n\n def get_bottom_braces_and_labels(self, labels, **kwargs):\n assert hasattr(self, \"vertical_parts\")\n parts = self.vertical_parts\n return self.get_subdivision_braces_and_labels(parts, labels, DOWN, **kwargs)\n\n def add_braces_and_labels(self):\n for attr in \"horizontal_parts\", \"vertical_parts\":\n if not hasattr(self, attr):\n continue\n parts = getattr(self, attr)\n for subattr in \"braces\", \"labels\":\n if hasattr(parts, subattr):\n self.add(getattr(parts, subattr))\n\n def __getitem__(self, index):\n if hasattr(self, \"horizontal_parts\"):\n return self.horizontal_parts[index]\n elif hasattr(self, \"vertical_parts\"):\n return self.vertical_parts[index]\n return self.split()[index]\n\n\nclass BarChart(VGroup):\n def __init__(\n self,\n values,\n height=4,\n width=6,\n n_ticks=4,\n tick_width=0.2,\n label_y_axis=True,\n y_axis_label_height=0.25,\n max_value=1,\n bar_colors=[BLUE, YELLOW],\n bar_fill_opacity=0.8,\n bar_stroke_width=3,\n bar_names=[],\n bar_label_scale_val=0.75,\n **kwargs\n ):\n VGroup.__init__(self, **kwargs)\n self.n_ticks = n_ticks\n self.tick_width = tick_width\n self.label_y_axis = label_y_axis\n self.y_axis_label_height = y_axis_label_height\n self.max_value = max_value\n self.bar_colors = bar_colors\n self.bar_fill_opacity = bar_fill_opacity\n self.bar_stroke_width = bar_stroke_width\n self.bar_names = bar_names\n self.bar_label_scale_val = bar_label_scale_val\n\n if self.max_value is None:\n self.max_value = max(values)\n\n self.add_axes(width, height)\n self.add_bars(values, width, height)\n self.center()\n\n def add_axes(self, width, height):\n x_axis = Line(self.tick_width * LEFT / 2, width * RIGHT)\n y_axis = Line(MED_LARGE_BUFF * DOWN, height * UP)\n ticks = VGroup()\n heights = np.linspace(0, height, self.n_ticks + 1)\n values = np.linspace(0, self.max_value, self.n_ticks + 1)\n for y, _value in zip(heights, values):\n tick = Line(LEFT, RIGHT)\n tick.width = self.tick_width\n tick.move_to(y * UP)\n ticks.add(tick)\n y_axis.add(ticks)\n\n self.add(x_axis, y_axis)\n self.x_axis, self.y_axis = x_axis, y_axis\n\n if self.label_y_axis:\n labels = VGroup()\n for tick, value in zip(ticks, values):\n label = MathTex(str(np.round(value, 2)))\n label.height = self.y_axis_label_height\n label.next_to(tick, LEFT, SMALL_BUFF)\n labels.add(label)\n self.y_axis_labels = labels\n self.add(labels)\n\n def add_bars(self, values, width, height):\n buff = float(width) / (2 * len(values) + 1)\n bars = VGroup()\n for i, value in enumerate(values):\n bar = Rectangle(\n height=(value / self.max_value) * height,\n width=buff,\n stroke_width=self.bar_stroke_width,\n fill_opacity=self.bar_fill_opacity,\n )\n bar.move_to((2 * i + 1) * buff * RIGHT, DOWN + LEFT)\n bars.add(bar)\n bars.set_color_by_gradient(*self.bar_colors)\n\n bar_labels = VGroup()\n for bar, name in zip(bars, self.bar_names):\n label = MathTex(str(name))\n label.scale(self.bar_label_scale_val)\n label.next_to(bar, DOWN, SMALL_BUFF)\n 
bar_labels.add(label)\n\n self.add(bars, bar_labels)\n self.bars = bars\n self.bar_labels = bar_labels\n\n def change_bar_values(self, values):\n for bar, value in zip(self.bars, values):\n bar_bottom = bar.get_bottom()\n bar.stretch_to_fit_height((value / self.max_value) * self.height)\n bar.move_to(bar_bottom, DOWN)\n", "path": "manim/mobject/probability.py"}]}
| 3,511 | 89 |
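The one-line golden diff in the record above works because `BarChart.add_axes` calls `np.linspace` and `np.round` only at runtime, so the missing module-level import surfaces as a `NameError` the first time a chart is constructed. Below is a minimal sketch of that failure mode, not manim code: `add_axes_sketch` is a hypothetical stand-in that mirrors the failing calls from `probability.py` and needs only numpy to run.

```python
import numpy as np  # the single line added by the patch above


def add_axes_sketch(height=4, n_ticks=4, max_value=1):
    # Mirrors the calls in BarChart.add_axes that raised
    # "NameError: name 'np' is not defined" before the import existed.
    heights = np.linspace(0, height, n_ticks + 1)
    values = np.linspace(0, max_value, n_ticks + 1)
    return [(float(h), float(np.round(v, 2))) for h, v in zip(heights, values)]


print(add_axes_sketch())
```

Commenting out the import reproduces the `NameError` shown in the issue's traceback, which is why the fix belongs at module level rather than inside the method.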
gh_patches_debug_16400
|
rasdani/github-patches
|
git_diff
|
networkx__networkx-2950
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
networkx 2.1 error building doc with sphinx 1.7.2
Hello,
when building the doc with sphinx 1.7.2 I got this error:
```
sphinx-build -b html -d build/doctrees . build/html
Running Sphinx v1.6.6
making output directory...
/usr/lib/python2.7/dist-packages/IPython/nbconvert.py:13: ShimWarning: The `IPython.nbconvert` package has been deprecated since IPython 4.0. You should import from nbconvert instead.
"You should import from nbconvert instead.", ShimWarning)
Change of translator for the pyfile builder.
Change of translator for the ipynb builder.
loading pickled environment... not yet created
[autosummary] generating autosummary for: bibliography.rst, citing.rst, credits.rst, developer/contribute.rst, developer/gitwash/configure_git.rst, developer/gitwash/development_workflow.rst, developer/gitwash/following_latest.rst, developer/gitwash/forking_hell.rst, developer/gitwash/git_development.rst, developer/gitwash/git_install.rst, ..., release/api_1.7.rst, release/api_1.8.rst, release/api_1.9.rst, release/index.rst, release/migration_guide_from_1.x_to_2.0.rst, release/release_2.0.rst, release/release_2.1.rst, release/release_dev.rst, release/release_template.rst, tutorial.rst
[autosummary] generating autosummary for: /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clique.clique_removal.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clique.large_clique_size.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clique.max_clique.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clustering_coefficient.average_clustering.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.connectivity.all_pairs_node_connectivity.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.connectivity.local_node_connectivity.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.connectivity.node_connectivity.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.dominating_set.min_edge_dominating_set.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.dominating_set.min_weighted_dominating_set.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.independent_set.maximum_independent_set.rst, ..., /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.nx_shp.write_shp.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.nx_yaml.read_yaml.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.nx_yaml.write_yaml.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.pajek.parse_pajek.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.pajek.read_pajek.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.pajek.write_pajek.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.from_sparse6_bytes.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.read_sparse6.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.to_sparse6_bytes.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.write_sparse6.rst
loading intersphinx inventory from ../../debian/python.org_objects.inv...
WARNING: intersphinx inventory '../../debian/python.org_objects.inv' not fetchable due to <type 'exceptions.IOError'>: [Errno 2] No such file or directory: u'/home/morph/deb/build-area/python-networkx-2.1/doc/../../debian/python.org_objects.inv'
loading intersphinx inventory from ../../debian/scipy.org_numpy_objects.inv...
WARNING: intersphinx inventory '../../debian/scipy.org_numpy_objects.inv' not fetchable due to <type 'exceptions.IOError'>: [Errno 2] No such file or directory: u'/home/morph/deb/build-area/python-networkx-2.1/doc/../../debian/scipy.org_numpy_objects.inv'
generating gallery...
Exception occurred:
File "/usr/lib/python2.7/dist-packages/sphinx_gallery/gen_gallery.py", line 222, in generate_gallery_rst
.format(examples_dir))
IOError: Main example directory /home/morph/deb/build-area/python-networkx-2.1/doc/../examples does not have a README.txt file. Please write one to introduce your gallery.
The full traceback has been saved in /tmp/sphinx-err-SnsvwK.log, if you want to report the issue to the developers.
```
content of `/tmp/sphinx-err-SnsvwK.log` is:
```
# Sphinx version: 1.6.6
# Python version: 2.7.14+ (CPython)
# Docutils version: 0.14
# Jinja2 version: 2.10
# Last messages:
# Loaded extensions:
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/sphinx/cmdline.py", line 305, in main
opts.warningiserror, opts.tags, opts.verbosity, opts.jobs)
File "/usr/lib/python2.7/dist-packages/sphinx/application.py", line 234, in __init__
self._init_builder()
File "/usr/lib/python2.7/dist-packages/sphinx/application.py", line 312, in _init_builder
self.emit('builder-inited')
File "/usr/lib/python2.7/dist-packages/sphinx/application.py", line 489, in emit
return self.events.emit(event, self, *args)
File "/usr/lib/python2.7/dist-packages/sphinx/events.py", line 79, in emit
results.append(callback(*args))
File "/usr/lib/python2.7/dist-packages/sphinx_gallery/gen_gallery.py", line 222, in generate_gallery_rst
.format(examples_dir))
IOError: Main example directory /home/morph/deb/build-area/python-networkx-2.1/doc/../examples does not have a README.txt file. Please write one to introduce your gallery.
```
can you have a look?
thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Setup script for networkx
5
6 You can install networkx with
7
8 python setup.py install
9 """
10 from glob import glob
11 import os
12 import sys
13 if os.path.exists('MANIFEST'):
14 os.remove('MANIFEST')
15
16 from setuptools import setup
17
18 if sys.argv[-1] == 'setup.py':
19 print("To install, run 'python setup.py install'")
20 print()
21
22 if sys.version_info[:2] < (2, 7):
23 print("NetworkX requires Python 2.7 or later (%d.%d detected)." %
24 sys.version_info[:2])
25 sys.exit(-1)
26
27 # Write the version information.
28 sys.path.insert(0, 'networkx')
29 import release
30 version = release.write_versionfile()
31 sys.path.pop(0)
32
33 packages = ["networkx",
34 "networkx.algorithms",
35 "networkx.algorithms.assortativity",
36 "networkx.algorithms.bipartite",
37 "networkx.algorithms.node_classification",
38 "networkx.algorithms.centrality",
39 "networkx.algorithms.community",
40 "networkx.algorithms.components",
41 "networkx.algorithms.connectivity",
42 "networkx.algorithms.coloring",
43 "networkx.algorithms.flow",
44 "networkx.algorithms.traversal",
45 "networkx.algorithms.isomorphism",
46 "networkx.algorithms.shortest_paths",
47 "networkx.algorithms.link_analysis",
48 "networkx.algorithms.operators",
49 "networkx.algorithms.approximation",
50 "networkx.algorithms.tree",
51 "networkx.classes",
52 "networkx.generators",
53 "networkx.drawing",
54 "networkx.linalg",
55 "networkx.readwrite",
56 "networkx.readwrite.json_graph",
57 "networkx.tests",
58 "networkx.testing",
59 "networkx.utils"]
60
61 docdirbase = 'share/doc/networkx-%s' % version
62 # add basic documentation
63 data = [(docdirbase, glob("*.txt"))]
64 # add examples
65 for d in ['advanced',
66 'algorithms',
67 'basic',
68 '3d_drawing',
69 'drawing',
70 'graph',
71 'javascript',
72 'jit',
73 'pygraphviz',
74 'subclass']:
75 dd = os.path.join(docdirbase, 'examples', d)
76 pp = os.path.join('examples', d)
77 data.append((dd, glob(os.path.join(pp, "*.py"))))
78 data.append((dd, glob(os.path.join(pp, "*.bz2"))))
79 data.append((dd, glob(os.path.join(pp, "*.gz"))))
80 data.append((dd, glob(os.path.join(pp, "*.mbox"))))
81 data.append((dd, glob(os.path.join(pp, "*.edgelist"))))
82
83 # add the tests
84 package_data = {
85 'networkx': ['tests/*.py'],
86 'networkx.algorithms': ['tests/*.py'],
87 'networkx.algorithms.assortativity': ['tests/*.py'],
88 'networkx.algorithms.bipartite': ['tests/*.py'],
89 'networkx.algorithms.node_classification': ['tests/*.py'],
90 'networkx.algorithms.centrality': ['tests/*.py'],
91 'networkx.algorithms.community': ['tests/*.py'],
92 'networkx.algorithms.components': ['tests/*.py'],
93 'networkx.algorithms.connectivity': ['tests/*.py'],
94 'networkx.algorithms.coloring': ['tests/*.py'],
95 'networkx.algorithms.flow': ['tests/*.py', 'tests/*.bz2'],
96 'networkx.algorithms.isomorphism': ['tests/*.py', 'tests/*.*99'],
97 'networkx.algorithms.link_analysis': ['tests/*.py'],
98 'networkx.algorithms.approximation': ['tests/*.py'],
99 'networkx.algorithms.operators': ['tests/*.py'],
100 'networkx.algorithms.shortest_paths': ['tests/*.py'],
101 'networkx.algorithms.traversal': ['tests/*.py'],
102 'networkx.algorithms.tree': ['tests/*.py'],
103 'networkx.classes': ['tests/*.py'],
104 'networkx.generators': ['tests/*.py', 'atlas.dat.gz'],
105 'networkx.drawing': ['tests/*.py'],
106 'networkx.linalg': ['tests/*.py'],
107 'networkx.readwrite': ['tests/*.py'],
108 'networkx.readwrite.json_graph': ['tests/*.py'],
109 'networkx.testing': ['tests/*.py'],
110 'networkx.utils': ['tests/*.py']
111 }
112
113 install_requires = ['decorator>=4.1.0']
114 extras_require = {'all': ['numpy', 'scipy', 'pandas', 'matplotlib',
115 'pygraphviz', 'pydot', 'pyyaml', 'gdal', 'lxml']}
116
117 if __name__ == "__main__":
118
119 setup(
120 name=release.name.lower(),
121 version=version,
122 maintainer=release.maintainer,
123 maintainer_email=release.maintainer_email,
124 author=release.authors['Hagberg'][0],
125 author_email=release.authors['Hagberg'][1],
126 description=release.description,
127 keywords=release.keywords,
128 long_description=release.long_description,
129 license=release.license,
130 platforms=release.platforms,
131 url=release.url,
132 download_url=release.download_url,
133 classifiers=release.classifiers,
134 packages=packages,
135 data_files=data,
136 package_data=package_data,
137 install_requires=install_requires,
138 extras_require=extras_require,
139 test_suite='nose.collector',
140 tests_require=['nose>=0.10.1'],
141 zip_safe=False
142 )
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -62,7 +62,8 @@
# add basic documentation
data = [(docdirbase, glob("*.txt"))]
# add examples
-for d in ['advanced',
+for d in ['.',
+ 'advanced',
'algorithms',
'basic',
'3d_drawing',
@@ -74,6 +75,7 @@
'subclass']:
dd = os.path.join(docdirbase, 'examples', d)
pp = os.path.join('examples', d)
+ data.append((dd, glob(os.path.join(pp, "*.txt"))))
data.append((dd, glob(os.path.join(pp, "*.py"))))
data.append((dd, glob(os.path.join(pp, "*.bz2"))))
data.append((dd, glob(os.path.join(pp, "*.gz"))))
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -62,7 +62,8 @@\n # add basic documentation\n data = [(docdirbase, glob(\"*.txt\"))]\n # add examples\n-for d in ['advanced',\n+for d in ['.',\n+ 'advanced',\n 'algorithms',\n 'basic',\n '3d_drawing',\n@@ -74,6 +75,7 @@\n 'subclass']:\n dd = os.path.join(docdirbase, 'examples', d)\n pp = os.path.join('examples', d)\n+ data.append((dd, glob(os.path.join(pp, \"*.txt\"))))\n data.append((dd, glob(os.path.join(pp, \"*.py\"))))\n data.append((dd, glob(os.path.join(pp, \"*.bz2\"))))\n data.append((dd, glob(os.path.join(pp, \"*.gz\"))))\n", "issue": "networkx 2.1 error building doc with sphinx 1.7.2\nHello,\r\nwhen building the doc with sphinx 1.7.2 i got this error:\r\n\r\n```\r\nsphinx-build -b html -d build/doctrees . build/html\r\nRunning Sphinx v1.6.6\r\nmaking output directory...\r\n/usr/lib/python2.7/dist-packages/IPython/nbconvert.py:13: ShimWarning: The `IPython.nbconvert` package has been deprecated since IPython 4.0. You should import from nbconvert instead.\r\n \"You should import from nbconvert instead.\", ShimWarning)\r\nChange of translator for the pyfile builder.\r\nChange of translator for the ipynb builder.\r\nloading pickled environment... not yet created\r\n[autosummary] generating autosummary for: bibliography.rst, citing.rst, credits.rst, developer/contribute.rst, developer/gitwash/configure_git.rst, developer/gitwash/development_workflow.rst, developer/gitwash/following_latest.rst, developer/gitwash/forking_hell.rst, developer/gitwash/git_development.rst, developer/gitwash/git_install.rst, ..., release/api_1.7.rst, release/api_1.8.rst, release/api_1.9.rst, release/index.rst, release/migration_guide_from_1.x_to_2.0.rst, release/release_2.0.rst, release/release_2.1.rst, release/release_dev.rst, release/release_template.rst, tutorial.rst\r\n[autosummary] generating autosummary for: /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clique.clique_removal.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clique.large_clique_size.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clique.max_clique.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.clustering_coefficient.average_clustering.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.connectivity.all_pairs_node_connectivity.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.connectivity.local_node_connectivity.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.connectivity.node_connectivity.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.dominating_set.min_edge_dominating_set.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.dominating_set.min_weighted_dominating_set.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/algorithms/generated/networkx.algorithms.approximation.independent_set.maximum_independent_set.rst, ..., 
/home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.nx_shp.write_shp.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.nx_yaml.read_yaml.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.nx_yaml.write_yaml.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.pajek.parse_pajek.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.pajek.read_pajek.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.pajek.write_pajek.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.from_sparse6_bytes.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.read_sparse6.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.to_sparse6_bytes.rst, /home/morph/deb/build-area/python-networkx-2.1/doc/reference/readwrite/generated/networkx.readwrite.sparse6.write_sparse6.rst\r\nloading intersphinx inventory from ../../debian/python.org_objects.inv...\r\nWARNING: intersphinx inventory '../../debian/python.org_objects.inv' not fetchable due to <type 'exceptions.IOError'>: [Errno 2] No such file or directory: u'/home/morph/deb/build-area/python-networkx-2.1/doc/../../debian/python.org_objects.inv'\r\nloading intersphinx inventory from ../../debian/scipy.org_numpy_objects.inv...\r\nWARNING: intersphinx inventory '../../debian/scipy.org_numpy_objects.inv' not fetchable due to <type 'exceptions.IOError'>: [Errno 2] No such file or directory: u'/home/morph/deb/build-area/python-networkx-2.1/doc/../../debian/scipy.org_numpy_objects.inv'\r\ngenerating gallery...\r\n\r\nException occurred:\r\n File \"/usr/lib/python2.7/dist-packages/sphinx_gallery/gen_gallery.py\", line 222, in generate_gallery_rst\r\n .format(examples_dir))\r\nIOError: Main example directory /home/morph/deb/build-area/python-networkx-2.1/doc/../examples does not have a README.txt file. 
Please write one to introduce your gallery.\r\nThe full traceback has been saved in /tmp/sphinx-err-SnsvwK.log, if you want to report the issue to the developers.\r\n```\r\n\r\ncontent of `/tmp/sphinx-err-SnsvwK.log` is:\r\n\r\n```\r\n# Sphinx version: 1.6.6\r\n# Python version: 2.7.14+ (CPython)\r\n# Docutils version: 0.14 \r\n# Jinja2 version: 2.10\r\n# Last messages:\r\n\r\n# Loaded extensions:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/dist-packages/sphinx/cmdline.py\", line 305, in main\r\n opts.warningiserror, opts.tags, opts.verbosity, opts.jobs)\r\n File \"/usr/lib/python2.7/dist-packages/sphinx/application.py\", line 234, in __init__\r\n self._init_builder()\r\n File \"/usr/lib/python2.7/dist-packages/sphinx/application.py\", line 312, in _init_builder\r\n self.emit('builder-inited')\r\n File \"/usr/lib/python2.7/dist-packages/sphinx/application.py\", line 489, in emit\r\n return self.events.emit(event, self, *args)\r\n File \"/usr/lib/python2.7/dist-packages/sphinx/events.py\", line 79, in emit\r\n results.append(callback(*args))\r\n File \"/usr/lib/python2.7/dist-packages/sphinx_gallery/gen_gallery.py\", line 222, in generate_gallery_rst\r\n .format(examples_dir))\r\nIOError: Main example directory /home/morph/deb/build-area/python-networkx-2.1/doc/../examples does not have a README.txt file. Please write one to introduce your gallery.\r\n```\r\n\r\ncan you have a look?\r\n\r\nthanks!\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSetup script for networkx\n\nYou can install networkx with\n\npython setup.py install\n\"\"\"\nfrom glob import glob\nimport os\nimport sys\nif os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\nfrom setuptools import setup\n\nif sys.argv[-1] == 'setup.py':\n print(\"To install, run 'python setup.py install'\")\n print()\n\nif sys.version_info[:2] < (2, 7):\n print(\"NetworkX requires Python 2.7 or later (%d.%d detected).\" %\n sys.version_info[:2])\n sys.exit(-1)\n\n# Write the version information.\nsys.path.insert(0, 'networkx')\nimport release\nversion = release.write_versionfile()\nsys.path.pop(0)\n\npackages = [\"networkx\",\n \"networkx.algorithms\",\n \"networkx.algorithms.assortativity\",\n \"networkx.algorithms.bipartite\",\n \"networkx.algorithms.node_classification\",\n \"networkx.algorithms.centrality\",\n \"networkx.algorithms.community\",\n \"networkx.algorithms.components\",\n \"networkx.algorithms.connectivity\",\n \"networkx.algorithms.coloring\",\n \"networkx.algorithms.flow\",\n \"networkx.algorithms.traversal\",\n \"networkx.algorithms.isomorphism\",\n \"networkx.algorithms.shortest_paths\",\n \"networkx.algorithms.link_analysis\",\n \"networkx.algorithms.operators\",\n \"networkx.algorithms.approximation\",\n \"networkx.algorithms.tree\",\n \"networkx.classes\",\n \"networkx.generators\",\n \"networkx.drawing\",\n \"networkx.linalg\",\n \"networkx.readwrite\",\n \"networkx.readwrite.json_graph\",\n \"networkx.tests\",\n \"networkx.testing\",\n \"networkx.utils\"]\n\ndocdirbase = 'share/doc/networkx-%s' % version\n# add basic documentation\ndata = [(docdirbase, glob(\"*.txt\"))]\n# add examples\nfor d in ['advanced',\n 'algorithms',\n 'basic',\n '3d_drawing',\n 'drawing',\n 'graph',\n 'javascript',\n 'jit',\n 'pygraphviz',\n 'subclass']:\n dd = os.path.join(docdirbase, 'examples', d)\n pp = os.path.join('examples', d)\n data.append((dd, glob(os.path.join(pp, \"*.py\"))))\n data.append((dd, glob(os.path.join(pp, \"*.bz2\"))))\n data.append((dd, 
glob(os.path.join(pp, \"*.gz\"))))\n data.append((dd, glob(os.path.join(pp, \"*.mbox\"))))\n data.append((dd, glob(os.path.join(pp, \"*.edgelist\"))))\n\n# add the tests\npackage_data = {\n 'networkx': ['tests/*.py'],\n 'networkx.algorithms': ['tests/*.py'],\n 'networkx.algorithms.assortativity': ['tests/*.py'],\n 'networkx.algorithms.bipartite': ['tests/*.py'],\n 'networkx.algorithms.node_classification': ['tests/*.py'],\n 'networkx.algorithms.centrality': ['tests/*.py'],\n 'networkx.algorithms.community': ['tests/*.py'],\n 'networkx.algorithms.components': ['tests/*.py'],\n 'networkx.algorithms.connectivity': ['tests/*.py'],\n 'networkx.algorithms.coloring': ['tests/*.py'],\n 'networkx.algorithms.flow': ['tests/*.py', 'tests/*.bz2'],\n 'networkx.algorithms.isomorphism': ['tests/*.py', 'tests/*.*99'],\n 'networkx.algorithms.link_analysis': ['tests/*.py'],\n 'networkx.algorithms.approximation': ['tests/*.py'],\n 'networkx.algorithms.operators': ['tests/*.py'],\n 'networkx.algorithms.shortest_paths': ['tests/*.py'],\n 'networkx.algorithms.traversal': ['tests/*.py'],\n 'networkx.algorithms.tree': ['tests/*.py'],\n 'networkx.classes': ['tests/*.py'],\n 'networkx.generators': ['tests/*.py', 'atlas.dat.gz'],\n 'networkx.drawing': ['tests/*.py'],\n 'networkx.linalg': ['tests/*.py'],\n 'networkx.readwrite': ['tests/*.py'],\n 'networkx.readwrite.json_graph': ['tests/*.py'],\n 'networkx.testing': ['tests/*.py'],\n 'networkx.utils': ['tests/*.py']\n}\n\ninstall_requires = ['decorator>=4.1.0']\nextras_require = {'all': ['numpy', 'scipy', 'pandas', 'matplotlib',\n 'pygraphviz', 'pydot', 'pyyaml', 'gdal', 'lxml']}\n\nif __name__ == \"__main__\":\n\n setup(\n name=release.name.lower(),\n version=version,\n maintainer=release.maintainer,\n maintainer_email=release.maintainer_email,\n author=release.authors['Hagberg'][0],\n author_email=release.authors['Hagberg'][1],\n description=release.description,\n keywords=release.keywords,\n long_description=release.long_description,\n license=release.license,\n platforms=release.platforms,\n url=release.url,\n download_url=release.download_url,\n classifiers=release.classifiers,\n packages=packages,\n data_files=data,\n package_data=package_data,\n install_requires=install_requires,\n extras_require=extras_require,\n test_suite='nose.collector',\n tests_require=['nose>=0.10.1'],\n zip_safe=False\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSetup script for networkx\n\nYou can install networkx with\n\npython setup.py install\n\"\"\"\nfrom glob import glob\nimport os\nimport sys\nif os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\nfrom setuptools import setup\n\nif sys.argv[-1] == 'setup.py':\n print(\"To install, run 'python setup.py install'\")\n print()\n\nif sys.version_info[:2] < (2, 7):\n print(\"NetworkX requires Python 2.7 or later (%d.%d detected).\" %\n sys.version_info[:2])\n sys.exit(-1)\n\n# Write the version information.\nsys.path.insert(0, 'networkx')\nimport release\nversion = release.write_versionfile()\nsys.path.pop(0)\n\npackages = [\"networkx\",\n \"networkx.algorithms\",\n \"networkx.algorithms.assortativity\",\n \"networkx.algorithms.bipartite\",\n \"networkx.algorithms.node_classification\",\n \"networkx.algorithms.centrality\",\n \"networkx.algorithms.community\",\n \"networkx.algorithms.components\",\n \"networkx.algorithms.connectivity\",\n \"networkx.algorithms.coloring\",\n \"networkx.algorithms.flow\",\n \"networkx.algorithms.traversal\",\n 
\"networkx.algorithms.isomorphism\",\n \"networkx.algorithms.shortest_paths\",\n \"networkx.algorithms.link_analysis\",\n \"networkx.algorithms.operators\",\n \"networkx.algorithms.approximation\",\n \"networkx.algorithms.tree\",\n \"networkx.classes\",\n \"networkx.generators\",\n \"networkx.drawing\",\n \"networkx.linalg\",\n \"networkx.readwrite\",\n \"networkx.readwrite.json_graph\",\n \"networkx.tests\",\n \"networkx.testing\",\n \"networkx.utils\"]\n\ndocdirbase = 'share/doc/networkx-%s' % version\n# add basic documentation\ndata = [(docdirbase, glob(\"*.txt\"))]\n# add examples\nfor d in ['.',\n 'advanced',\n 'algorithms',\n 'basic',\n '3d_drawing',\n 'drawing',\n 'graph',\n 'javascript',\n 'jit',\n 'pygraphviz',\n 'subclass']:\n dd = os.path.join(docdirbase, 'examples', d)\n pp = os.path.join('examples', d)\n data.append((dd, glob(os.path.join(pp, \"*.txt\"))))\n data.append((dd, glob(os.path.join(pp, \"*.py\"))))\n data.append((dd, glob(os.path.join(pp, \"*.bz2\"))))\n data.append((dd, glob(os.path.join(pp, \"*.gz\"))))\n data.append((dd, glob(os.path.join(pp, \"*.mbox\"))))\n data.append((dd, glob(os.path.join(pp, \"*.edgelist\"))))\n\n# add the tests\npackage_data = {\n 'networkx': ['tests/*.py'],\n 'networkx.algorithms': ['tests/*.py'],\n 'networkx.algorithms.assortativity': ['tests/*.py'],\n 'networkx.algorithms.bipartite': ['tests/*.py'],\n 'networkx.algorithms.node_classification': ['tests/*.py'],\n 'networkx.algorithms.centrality': ['tests/*.py'],\n 'networkx.algorithms.community': ['tests/*.py'],\n 'networkx.algorithms.components': ['tests/*.py'],\n 'networkx.algorithms.connectivity': ['tests/*.py'],\n 'networkx.algorithms.coloring': ['tests/*.py'],\n 'networkx.algorithms.flow': ['tests/*.py', 'tests/*.bz2'],\n 'networkx.algorithms.isomorphism': ['tests/*.py', 'tests/*.*99'],\n 'networkx.algorithms.link_analysis': ['tests/*.py'],\n 'networkx.algorithms.approximation': ['tests/*.py'],\n 'networkx.algorithms.operators': ['tests/*.py'],\n 'networkx.algorithms.shortest_paths': ['tests/*.py'],\n 'networkx.algorithms.traversal': ['tests/*.py'],\n 'networkx.algorithms.tree': ['tests/*.py'],\n 'networkx.classes': ['tests/*.py'],\n 'networkx.generators': ['tests/*.py', 'atlas.dat.gz'],\n 'networkx.drawing': ['tests/*.py'],\n 'networkx.linalg': ['tests/*.py'],\n 'networkx.readwrite': ['tests/*.py'],\n 'networkx.readwrite.json_graph': ['tests/*.py'],\n 'networkx.testing': ['tests/*.py'],\n 'networkx.utils': ['tests/*.py']\n}\n\ninstall_requires = ['decorator>=4.1.0']\nextras_require = {'all': ['numpy', 'scipy', 'pandas', 'matplotlib',\n 'pygraphviz', 'pydot', 'pyyaml', 'gdal', 'lxml']}\n\nif __name__ == \"__main__\":\n\n setup(\n name=release.name.lower(),\n version=version,\n maintainer=release.maintainer,\n maintainer_email=release.maintainer_email,\n author=release.authors['Hagberg'][0],\n author_email=release.authors['Hagberg'][1],\n description=release.description,\n keywords=release.keywords,\n long_description=release.long_description,\n license=release.license,\n platforms=release.platforms,\n url=release.url,\n download_url=release.download_url,\n classifiers=release.classifiers,\n packages=packages,\n data_files=data,\n package_data=package_data,\n install_requires=install_requires,\n extras_require=extras_require,\n test_suite='nose.collector',\n tests_require=['nose>=0.10.1'],\n zip_safe=False\n )\n", "path": "setup.py"}]}
| 3,449 | 191 |
gh_patches_debug_6405
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-1467
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken headlines using typogrify (caused by insertion of ` `)
Maybe we should prevent typogrify from running on h-elements because otherwise your headings won't wrap like you expect on mobile displays. I have created an [issue](https://github.com/mintchaos/typogrify/issues/40) with a more detailed description in the typogrify repo. This is not a real typogrify "bug", but we could implement a workaround in [filters.py](https://github.com/getnikola/nikola/blob/master/nikola/filters.py) on line 163, because I don't think that the current behaviour is what most nikola users would expect.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/filters.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Utility functions to help you run filters on files."""
28
29 from .utils import req_missing
30 from functools import wraps
31 import os
32 import io
33 import shutil
34 import subprocess
35 import tempfile
36 import shlex
37
38 try:
39 import typogrify.filters as typo
40 except ImportError:
41 typo = None # NOQA
42
43
44 def apply_to_binary_file(f):
45 """Take a function f that transforms a data argument, and returns
46 a function that takes a filename and applies f to the contents,
47 in place. Reads files in binary mode."""
48 @wraps(f)
49 def f_in_file(fname):
50 with open(fname, 'rb') as inf:
51 data = inf.read()
52 data = f(data)
53 with open(fname, 'wb+') as outf:
54 outf.write(data)
55
56 return f_in_file
57
58
59 def apply_to_text_file(f):
60 """Take a function f that transforms a data argument, and returns
61 a function that takes a filename and applies f to the contents,
62 in place. Reads files in UTF-8."""
63 @wraps(f)
64 def f_in_file(fname):
65 with io.open(fname, 'r', encoding='utf-8') as inf:
66 data = inf.read()
67 data = f(data)
68 with io.open(fname, 'w+', encoding='utf-8') as outf:
69 outf.write(data)
70
71 return f_in_file
72
73
74 def list_replace(the_list, find, replacement):
75 "Replace all occurrences of ``find`` with ``replacement`` in ``the_list``"
76 for i, v in enumerate(the_list):
77 if v == find:
78 the_list[i] = replacement
79
80
81 def runinplace(command, infile):
82 """Run a command in-place on a file.
83
84 command is a string of the form: "commandname %1 %2" and
85 it will be execed with infile as %1 and a temporary file
86 as %2. Then, that temporary file will be moved over %1.
87
88 Example usage:
89
90 runinplace("yui-compressor %1 -o %2", "myfile.css")
91
92 That will replace myfile.css with a minified version.
93
94 You can also supply command as a list.
95 """
96
97 if not isinstance(command, list):
98 command = shlex.split(command)
99
100 tmpdir = None
101
102 if "%2" in command:
103 tmpdir = tempfile.mkdtemp(prefix="nikola")
104 tmpfname = os.path.join(tmpdir, os.path.basename(infile))
105
106 try:
107 list_replace(command, "%1", infile)
108 if tmpdir:
109 list_replace(command, "%2", tmpfname)
110
111 subprocess.check_call(command)
112
113 if tmpdir:
114 shutil.move(tmpfname, infile)
115 finally:
116 if tmpdir:
117 shutil.rmtree(tmpdir)
118
119
120 def yui_compressor(infile):
121 yuicompressor = False
122 try:
123 subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
124 yuicompressor = 'yui-compressor'
125 except Exception:
126 pass
127 if not yuicompressor:
128 try:
129 subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
130 yuicompressor = 'yuicompressor'
131 except:
132 raise Exception("yui-compressor is not installed.")
133 return False
134
135 return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)
136
137
138 def closure_compiler(infile):
139 return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)
140
141
142 def optipng(infile):
143 return runinplace(r"optipng -preserve -o2 -quiet %1", infile)
144
145
146 def jpegoptim(infile):
147 return runinplace(r"jpegoptim -p --strip-all -q %1", infile)
148
149
150 @apply_to_text_file
151 def minify_lines(data):
152 datalines = data.splitlines()
153 datalines = [line.lstrip() for line in datalines if not (line.strip() == "")]
154 return "\n".join(datalines)
155
156
157 @apply_to_text_file
158 def typogrify(data):
159 if typo is None:
160 req_missing(['typogrify'], 'use the typogrify filter')
161
162 data = typo.amp(data)
163 data = typo.widont(data)
164 data = typo.smartypants(data)
165 # Disabled because of typogrify bug where it breaks <title>
166 # data = typo.caps(data)
167 data = typo.initial_quotes(data)
168 return data
169
170
171 @apply_to_text_file
172 def php_template_injection(data):
173 import re
174 template = re.search('<\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\:(.*) checksum\:(.*)__ -->', data)
175 if template:
176 source = template.group(1)
177 with io.open(source, "r", encoding="utf-8") as in_file:
178 phpdata = in_file.read()
179 _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
180 phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]
181 phpdata = re.sub(template.group(0), phpdata, data)
182 return phpdata
183 else:
184 return data
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/filters.py b/nikola/filters.py
--- a/nikola/filters.py
+++ b/nikola/filters.py
@@ -160,7 +160,8 @@
req_missing(['typogrify'], 'use the typogrify filter')
data = typo.amp(data)
- data = typo.widont(data)
+ # disabled because typogrify widow prevention caused broken headline wrapping, see issue #1465
+ # data = typo.widont(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
|
{"golden_diff": "diff --git a/nikola/filters.py b/nikola/filters.py\n--- a/nikola/filters.py\n+++ b/nikola/filters.py\n@@ -160,7 +160,8 @@\n req_missing(['typogrify'], 'use the typogrify filter')\n \n data = typo.amp(data)\n- data = typo.widont(data)\n+ # disabled because typogrify widow prevention caused broken headline wrapping, see issue #1465\n+ # data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n", "issue": "Broken headlines using typogrify (caused by insertion of ` `)\nMaybe we should prevent typogrify on running on h-elements because otherwise you headings won't wrap like you expect on mobile displays. I have created an [issue](https://github.com/mintchaos/typogrify/issues/40) with a more detailed description in the typogrify repo. This is not a real typogrify \"bug\", but we could implement a workaround in the [filters.py](https://github.com/getnikola/nikola/blob/master/nikola/filters.py) on line 163, because I don't think that the current behaviour is what most nikola users would expect.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help you run filters on files.\"\"\"\n\nfrom .utils import req_missing\nfrom functools import wraps\nimport os\nimport io\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\n\n\ndef apply_to_binary_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in binary mode.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. 
Reads files in UTF-8.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with io.open(fname, 'r', encoding='utf-8') as inf:\n data = inf.read()\n data = f(data)\n with io.open(fname, 'w+', encoding='utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef closure_compiler(infile):\n return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)\n\n\ndef optipng(infile):\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\n@apply_to_text_file\ndef minify_lines(data):\n datalines = data.splitlines()\n datalines = [line.lstrip() for line in datalines if not (line.strip() == \"\")]\n return \"\\n\".join(datalines)\n\n\n@apply_to_text_file\ndef typogrify(data):\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef php_template_injection(data):\n import re\n template = re.search('<\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\:(.*) checksum\\:(.*)__ -->', data)\n if template:\n source = template.group(1)\n with io.open(source, \"r\", encoding=\"utf-8\") as in_file:\n phpdata = in_file.read()\n _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\\n' * 2) + '|' + (\"\\r\\n\" * 2) + ')'\n phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]\n phpdata = re.sub(template.group(0), phpdata, data)\n return phpdata\n else:\n return data\n", "path": "nikola/filters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a 
copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Utility functions to help you run filters on files.\"\"\"\n\nfrom .utils import req_missing\nfrom functools import wraps\nimport os\nimport io\nimport shutil\nimport subprocess\nimport tempfile\nimport shlex\n\ntry:\n import typogrify.filters as typo\nexcept ImportError:\n typo = None # NOQA\n\n\ndef apply_to_binary_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in binary mode.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with open(fname, 'rb') as inf:\n data = inf.read()\n data = f(data)\n with open(fname, 'wb+') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef apply_to_text_file(f):\n \"\"\"Take a function f that transforms a data argument, and returns\n a function that takes a filename and applies f to the contents,\n in place. Reads files in UTF-8.\"\"\"\n @wraps(f)\n def f_in_file(fname):\n with io.open(fname, 'r', encoding='utf-8') as inf:\n data = inf.read()\n data = f(data)\n with io.open(fname, 'w+', encoding='utf-8') as outf:\n outf.write(data)\n\n return f_in_file\n\n\ndef list_replace(the_list, find, replacement):\n \"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``\"\n for i, v in enumerate(the_list):\n if v == find:\n the_list[i] = replacement\n\n\ndef runinplace(command, infile):\n \"\"\"Run a command in-place on a file.\n\n command is a string of the form: \"commandname %1 %2\" and\n it will be execed with infile as %1 and a temporary file\n as %2. 
Then, that temporary file will be moved over %1.\n\n Example usage:\n\n runinplace(\"yui-compressor %1 -o %2\", \"myfile.css\")\n\n That will replace myfile.css with a minified version.\n\n You can also supply command as a list.\n \"\"\"\n\n if not isinstance(command, list):\n command = shlex.split(command)\n\n tmpdir = None\n\n if \"%2\" in command:\n tmpdir = tempfile.mkdtemp(prefix=\"nikola\")\n tmpfname = os.path.join(tmpdir, os.path.basename(infile))\n\n try:\n list_replace(command, \"%1\", infile)\n if tmpdir:\n list_replace(command, \"%2\", tmpfname)\n\n subprocess.check_call(command)\n\n if tmpdir:\n shutil.move(tmpfname, infile)\n finally:\n if tmpdir:\n shutil.rmtree(tmpdir)\n\n\ndef yui_compressor(infile):\n yuicompressor = False\n try:\n subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yui-compressor'\n except Exception:\n pass\n if not yuicompressor:\n try:\n subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))\n yuicompressor = 'yuicompressor'\n except:\n raise Exception(\"yui-compressor is not installed.\")\n return False\n\n return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)\n\n\ndef closure_compiler(infile):\n return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)\n\n\ndef optipng(infile):\n return runinplace(r\"optipng -preserve -o2 -quiet %1\", infile)\n\n\ndef jpegoptim(infile):\n return runinplace(r\"jpegoptim -p --strip-all -q %1\", infile)\n\n\n@apply_to_text_file\ndef minify_lines(data):\n datalines = data.splitlines()\n datalines = [line.lstrip() for line in datalines if not (line.strip() == \"\")]\n return \"\\n\".join(datalines)\n\n\n@apply_to_text_file\ndef typogrify(data):\n if typo is None:\n req_missing(['typogrify'], 'use the typogrify filter')\n\n data = typo.amp(data)\n # disabled because typogrify widow prevention caused broken headline wrapping, see issue #1465\n # data = typo.widont(data)\n data = typo.smartypants(data)\n # Disabled because of typogrify bug where it breaks <title>\n # data = typo.caps(data)\n data = typo.initial_quotes(data)\n return data\n\n\n@apply_to_text_file\ndef php_template_injection(data):\n import re\n template = re.search('<\\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\\:(.*) checksum\\:(.*)__ -->', data)\n if template:\n source = template.group(1)\n with io.open(source, \"r\", encoding=\"utf-8\") as in_file:\n phpdata = in_file.read()\n _META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\\n' * 2) + '|' + (\"\\r\\n\" * 2) + ')'\n phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]\n phpdata = re.sub(template.group(0), phpdata, data)\n return phpdata\n else:\n return data\n", "path": "nikola/filters.py"}]}
| 2,297 | 152 |
gh_patches_debug_37674
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-200
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
new release for mne-bids
We should make a new release for mne-bids
refs:
https://github.com/mne-tools/mne-bids/commit/c43822ef754b58b28ccc8d90af565d1681ac5851
and
https://github.com/mne-tools/mne-bids/commit/eec284cbff44425c0c6fbdad1e32809c247cec05
and
https://github.com/mne-tools/mne-python/wiki/How-to-make-a-release
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2 from setuptools import setup, find_packages
3
4 descr = """Experimental code for BIDS using MNE."""
5
6 DISTNAME = 'mne-bids'
7 DESCRIPTION = descr
8 MAINTAINER = 'Mainak Jas'
9 MAINTAINER_EMAIL = '[email protected]'
10 URL = 'https://mne-tools.github.io/mne-bids/'
11 LICENSE = 'BSD (3-clause)'
12 DOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'
13 VERSION = '0.2.dev0'
14
15 if __name__ == "__main__":
16 setup(name=DISTNAME,
17 maintainer=MAINTAINER,
18 maintainer_email=MAINTAINER_EMAIL,
19 description=DESCRIPTION,
20 license=LICENSE,
21 url=URL,
22 version=VERSION,
23 download_url=DOWNLOAD_URL,
24 long_description=open('README.rst').read(),
25 long_description_content_type='text/x-rst',
26 classifiers=[
27 'Intended Audience :: Science/Research',
28 'Intended Audience :: Developers',
29 'License :: OSI Approved',
30 'Programming Language :: Python',
31 'Topic :: Software Development',
32 'Topic :: Scientific/Engineering',
33 'Operating System :: Microsoft :: Windows',
34 'Operating System :: POSIX',
35 'Operating System :: Unix',
36 'Operating System :: MacOS',
37 ],
38 platforms='any',
39 packages=find_packages(),
40 scripts=['bin/mne_bids']
41 )
42
```
Path: `mne_bids/__init__.py`
Content:
```
1 """MNE software for easily interacting with BIDS compatible datasets."""
2
3 __version__ = '0.2.dev0'
4
5
6 from .write import (write_raw_bids, make_bids_folders, make_bids_basename, # noqa: E501 F401
7 make_dataset_description) # noqa: F401
8 from .read import read_raw_bids # noqa: F401
9
```
Path: `doc/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # mne_bids documentation build configuration file, created by
4 # sphinx-quickstart on Wed Sep 6 04:42:26 2017.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 # import os
20 # import sys
21 # sys.path.insert(0, os.path.abspath('.'))
22
23 from datetime import date
24 import sphinx_gallery # noqa
25 import sphinx_bootstrap_theme
26
27 # -- General configuration ------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #
31 # needs_sphinx = '1.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 'sphinx.ext.autodoc',
38 'sphinx.ext.mathjax',
39 'sphinx.ext.viewcode',
40 'numpydoc',
41 'sphinx.ext.autosummary',
42 'sphinx.ext.doctest',
43 'sphinx_gallery.gen_gallery'
44 ]
45
46 # generate autosummary even if no references
47 autosummary_generate = True
48
49 # Add any paths that contain templates here, relative to this directory.
50 templates_path = ['_templates']
51
52 # The suffix(es) of source filenames.
53 # You can specify multiple suffix as a list of string:
54 #
55 # source_suffix = ['.rst', '.md']
56 source_suffix = '.rst'
57
58 # The master toctree document.
59 master_doc = 'index'
60
61 # General information about the project.
62 project = u'mne_bids'
63 td = date.today()
64 copyright = u'%s, MNE Developers. Last updated on %s' % (td.year,
65 td.isoformat())
66
67 author = u'Mainak Jas'
68
69 # The version info for the project you're documenting, acts as replacement for
70 # |version| and |release|, also used in various other places throughout the
71 # built documents.
72 #
73 # The short X.Y version.
74 version = u'0.2.dev0'
75 # The full version, including alpha/beta/rc tags.
76 release = u'0.2.dev0'
77
78 # The language for content autogenerated by Sphinx. Refer to documentation
79 # for a list of supported languages.
80 #
81 # This is also used if you do content translation via gettext catalogs.
82 # Usually you set "language" from the command line for these cases.
83 language = None
84
85 # List of patterns, relative to source directory, that match files and
86 # directories to ignore when looking for source files.
87 # This patterns also effect to html_static_path and html_extra_path
88 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
89
90 # The name of the Pygments (syntax highlighting) style to use.
91 pygments_style = 'sphinx'
92
93 # If true, `todo` and `todoList` produce output, else they produce nothing.
94 todo_include_todos = False
95
96
97 # -- Options for HTML output ----------------------------------------------
98
99 # The theme to use for HTML and HTML Help pages. See the documentation for
100 # a list of builtin themes.
101 #
102 html_theme = 'bootstrap'
103 html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
104
105 # Theme options are theme-specific and customize the look and feel of a theme
106 # further. For a list of options available for each theme, see the
107 # documentation.
108 #
109 html_theme_options = {
110 'navbar_title': 'MNE-BIDS',
111 'bootswatch_theme': "flatly",
112 'navbar_sidebarrel': False,
113 'bootstrap_version': "3",
114 'navbar_links': [
115 ("Gallery", "auto_examples/index"),
116 ("API", "api"),
117 ("What's new", "whats_new"),
118 ("Github", "https://github.com/mne-tools/mne-bids", True),
119 ]}
120
121 # Add any paths that contain custom static files (such as style sheets) here,
122 # relative to this directory. They are copied after the builtin static files,
123 # so a file named "default.css" will overwrite the builtin "default.css".
124 html_static_path = ['_static']
125
126
127 # -- Options for HTMLHelp output ------------------------------------------
128
129 # Output file base name for HTML help builder.
130 htmlhelp_basename = 'mne_bidsdoc'
131
132
133 # -- Options for LaTeX output ---------------------------------------------
134
135 latex_elements = {
136 # The paper size ('letterpaper' or 'a4paper').
137 #
138 # 'papersize': 'letterpaper',
139
140 # The font size ('10pt', '11pt' or '12pt').
141 #
142 # 'pointsize': '10pt',
143
144 # Additional stuff for the LaTeX preamble.
145 #
146 # 'preamble': '',
147
148 # Latex figure (float) alignment
149 #
150 # 'figure_align': 'htbp',
151 }
152
153 # Grouping the document tree into LaTeX files. List of tuples
154 # (source start file, target name, title,
155 # author, documentclass [howto, manual, or own class]).
156 latex_documents = [
157 (master_doc, 'mne_bids.tex', u'mne\\_bids Documentation',
158 u'Mainak Jas', 'manual'),
159 ]
160
161
162 # -- Options for manual page output ---------------------------------------
163
164 # One entry per manual page. List of tuples
165 # (source start file, name, description, authors, manual section).
166 man_pages = [
167 (master_doc, 'mne_bids', u'mne_bids Documentation',
168 [author], 1)
169 ]
170
171
172 # -- Options for Texinfo output -------------------------------------------
173
174 # Grouping the document tree into Texinfo files. List of tuples
175 # (source start file, target name, title, author,
176 # dir menu entry, description, category)
177 texinfo_documents = [
178 (master_doc, 'mne_bids', u'mne_bids Documentation',
179 author, 'mne_bids', 'One line description of project.',
180 'Miscellaneous'),
181 ]
182
183
184 sphinx_gallery_conf = {
185 'examples_dirs': '../examples',
186 'gallery_dirs': 'auto_examples',
187 'filename_pattern': '^((?!sgskip).)*$',
188 'backreferences_dir': 'generated',
189 'reference_url': {
190 'mne': 'http://mne-tools.github.io/stable/',
191 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',
192 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'
193 }
194 }
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -24,6 +24,8 @@
import sphinx_gallery # noqa
import sphinx_bootstrap_theme
+import mne_bids
+
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
@@ -64,16 +66,16 @@
copyright = u'%s, MNE Developers. Last updated on %s' % (td.year,
td.isoformat())
-author = u'Mainak Jas'
+author = u'MNE Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = u'0.2.dev0'
+version = mne_bids.__version__
# The full version, including alpha/beta/rc tags.
-release = u'0.2.dev0'
+release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py
--- a/mne_bids/__init__.py
+++ b/mne_bids/__init__.py
@@ -1,6 +1,6 @@
"""MNE software for easily interacting with BIDS compatible datasets."""
-__version__ = '0.2.dev0'
+__version__ = '0.2'
from .write import (write_raw_bids, make_bids_folders, make_bids_basename, # noqa: E501 F401
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,19 @@
#! /usr/bin/env python
+"""Setup MNE-BIDS."""
+import os
from setuptools import setup, find_packages
+# get the version
+version = None
+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
+ for line in (line.strip() for line in fid):
+ if line.startswith('__version__'):
+ version = line.split('=')[1].strip().strip('\'')
+ break
+if version is None:
+ raise RuntimeError('Could not determine version')
+
+
descr = """Experimental code for BIDS using MNE."""
DISTNAME = 'mne-bids'
@@ -10,7 +23,7 @@
URL = 'https://mne-tools.github.io/mne-bids/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'
-VERSION = '0.2.dev0'
+VERSION = version
if __name__ == "__main__":
setup(name=DISTNAME,
|
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -24,6 +24,8 @@\n import sphinx_gallery # noqa\n import sphinx_bootstrap_theme\n \n+import mne_bids\n+\n # -- General configuration ------------------------------------------------\n \n # If your documentation needs a minimal Sphinx version, state it here.\n@@ -64,16 +66,16 @@\n copyright = u'%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n \n-author = u'Mainak Jas'\n+author = u'MNE Developers'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n # built documents.\n #\n # The short X.Y version.\n-version = u'0.2.dev0'\n+version = mne_bids.__version__\n # The full version, including alpha/beta/rc tags.\n-release = u'0.2.dev0'\n+release = version\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/mne_bids/__init__.py b/mne_bids/__init__.py\n--- a/mne_bids/__init__.py\n+++ b/mne_bids/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n \n-__version__ = '0.2.dev0'\n+__version__ = '0.2'\n \n \n from .write import (write_raw_bids, make_bids_folders, make_bids_basename, # noqa: E501 F401\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,19 @@\n #! /usr/bin/env python\n+\"\"\"Setup MNE-BIDS.\"\"\"\n+import os\n from setuptools import setup, find_packages\n \n+# get the version\n+version = None\n+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n+ for line in (line.strip() for line in fid):\n+ if line.startswith('__version__'):\n+ version = line.split('=')[1].strip().strip('\\'')\n+ break\n+if version is None:\n+ raise RuntimeError('Could not determine version')\n+\n+\n descr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n \n DISTNAME = 'mne-bids'\n@@ -10,7 +23,7 @@\n URL = 'https://mne-tools.github.io/mne-bids/'\n LICENSE = 'BSD (3-clause)'\n DOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\n-VERSION = '0.2.dev0'\n+VERSION = version\n \n if __name__ == \"__main__\":\n setup(name=DISTNAME,\n", "issue": "new release for mne-bids\nWe should make a new release for mne-bids\r\n\r\nrefs:\r\n\r\nhttps://github.com/mne-tools/mne-bids/commit/c43822ef754b58b28ccc8d90af565d1681ac5851\r\n\r\nand\r\n\r\nhttps://github.com/mne-tools/mne-bids/commit/eec284cbff44425c0c6fbdad1e32809c247cec05\r\n\r\nand\r\n\r\nhttps://github.com/mne-tools/mne-python/wiki/How-to-make-a-release\r\n\n", "before_files": [{"content": "#! 
/usr/bin/env python\nfrom setuptools import setup, find_packages\n\ndescr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne-tools.github.io/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = '0.2.dev0'\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids']\n )\n", "path": "setup.py"}, {"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = '0.2.dev0'\n\n\nfrom .write import (write_raw_bids, make_bids_folders, make_bids_basename, # noqa: E501 F401\n make_dataset_description) # noqa: F401\nfrom .read import read_raw_bids # noqa: F401\n", "path": "mne_bids/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# mne_bids documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 6 04:42:26 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\nimport sphinx_gallery # noqa\nimport sphinx_bootstrap_theme\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'numpydoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx_gallery.gen_gallery'\n]\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'mne_bids'\ntd = date.today()\ncopyright = u'%s, MNE Developers. 
Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'Mainak Jas'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = u'0.2.dev0'\n# The full version, including alpha/beta/rc tags.\nrelease = u'0.2.dev0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'bootstrap'\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'navbar_title': 'MNE-BIDS',\n 'bootswatch_theme': \"flatly\",\n 'navbar_sidebarrel': False,\n 'bootstrap_version': \"3\",\n 'navbar_links': [\n (\"Gallery\", \"auto_examples/index\"),\n (\"API\", \"api\"),\n (\"What's new\", \"whats_new\"),\n (\"Github\", \"https://github.com/mne-tools/mne-bids\", True),\n ]}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'mne_bidsdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'mne_bids.tex', u'mne\\\\_bids Documentation',\n u'Mainak Jas', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n author, 'mne_bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\nsphinx_gallery_conf = {\n 'examples_dirs': '../examples',\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'backreferences_dir': 'generated',\n 'reference_url': {\n 'mne': 'http://mne-tools.github.io/stable/',\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',\n 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'\n }\n}\n", "path": "doc/conf.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = \"\"\"Experimental code for BIDS using MNE.\"\"\"\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne-tools.github.io/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'http://github.com/mne-tools/mne-bids'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n scripts=['bin/mne_bids']\n )\n", "path": "setup.py"}, {"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = '0.2'\n\n\nfrom .write import (write_raw_bids, make_bids_folders, make_bids_basename, # noqa: E501 F401\n make_dataset_description) # noqa: F401\nfrom .read import read_raw_bids # noqa: F401\n", "path": "mne_bids/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# mne_bids documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 6 04:42:26 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom datetime import date\nimport sphinx_gallery # noqa\nimport sphinx_bootstrap_theme\n\nimport mne_bids\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'numpydoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx_gallery.gen_gallery'\n]\n\n# generate autosummary even if no references\nautosummary_generate = True\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'mne_bids'\ntd = date.today()\ncopyright = u'%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'bootstrap'\nhtml_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n 'navbar_title': 'MNE-BIDS',\n 'bootswatch_theme': \"flatly\",\n 'navbar_sidebarrel': False,\n 'bootstrap_version': \"3\",\n 'navbar_links': [\n (\"Gallery\", \"auto_examples/index\"),\n (\"API\", \"api\"),\n (\"What's new\", \"whats_new\"),\n (\"Github\", \"https://github.com/mne-tools/mne-bids\", True),\n ]}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'mne_bidsdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'mne_bids.tex', u'mne\\\\_bids Documentation',\n u'Mainak Jas', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'mne_bids', u'mne_bids Documentation',\n author, 'mne_bids', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\nsphinx_gallery_conf = {\n 'examples_dirs': '../examples',\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'backreferences_dir': 'generated',\n 'reference_url': {\n 'mne': 'http://mne-tools.github.io/stable/',\n 'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1',\n 'scipy': 'http://docs.scipy.org/doc/scipy-0.17.0/reference'\n }\n}\n", "path": "doc/conf.py"}]}
| 2,869 | 632 |
gh_patches_debug_3405
|
rasdani/github-patches
|
git_diff
|
microsoft__PubSec-Info-Assistant-170
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error: Diagnostic settings does not support retention for new diagnostic settings
After a couple of attempts to deploy the PubSec suite (deploy, delete, repeat switching from australia east to eastus), I began to encounter this error 'Diagnostic settings does not support retention for new diagnostic settings.' and the deployment would fail. With each attempt I had deleted all of the services created by the previous attempt, and changed the WORKSPACE="" to be unique.
I happened to come across this article:
https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/migrate-to-azure-storage-lifecycle-policy
After changing lines 138, 146 and 156 in the main.bicep file, setting the days value to 0 (instead of the default value of 30) the deployment completed successfully.
based on the information in the article, we'll need to update these setting after September when the deprecation comes into effect.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/backend/app.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # Licensed under the MIT license.
3
4 import logging
5 import mimetypes
6 import os
7 import urllib.parse
8 from datetime import datetime, timedelta
9
10 import openai
11 from approaches.chatreadretrieveread import ChatReadRetrieveReadApproach
12 from azure.core.credentials import AzureKeyCredential
13 from azure.identity import DefaultAzureCredential
14 from azure.search.documents import SearchClient
15 from azure.storage.blob import (
16 AccountSasPermissions,
17 BlobServiceClient,
18 ResourceTypes,
19 generate_account_sas,
20 )
21 from flask import Flask, jsonify, request
22 from shared_code.status_log import State, StatusLog
23
24 # Replace these with your own values, either in environment variables or directly here
25 AZURE_BLOB_STORAGE_ACCOUNT = (
26 os.environ.get("AZURE_BLOB_STORAGE_ACCOUNT") or "mystorageaccount"
27 )
28 AZURE_BLOB_STORAGE_KEY = os.environ.get("AZURE_BLOB_STORAGE_KEY")
29 AZURE_BLOB_STORAGE_CONTAINER = (
30 os.environ.get("AZURE_BLOB_STORAGE_CONTAINER") or "content"
31 )
32 AZURE_SEARCH_SERVICE = os.environ.get("AZURE_SEARCH_SERVICE") or "gptkb"
33 AZURE_SEARCH_SERVICE_KEY = os.environ.get("AZURE_SEARCH_SERVICE_KEY")
34 AZURE_SEARCH_INDEX = os.environ.get("AZURE_SEARCH_INDEX") or "gptkbindex"
35 AZURE_OPENAI_SERVICE = os.environ.get("AZURE_OPENAI_SERVICE") or "myopenai"
36 AZURE_OPENAI_CHATGPT_DEPLOYMENT = (
37 os.environ.get("AZURE_OPENAI_CHATGPT_DEPLOYMENT") or "chat"
38 )
39 AZURE_OPENAI_SERVICE_KEY = os.environ.get("AZURE_OPENAI_SERVICE_KEY")
40
41 KB_FIELDS_CONTENT = os.environ.get("KB_FIELDS_CONTENT") or "merged_content"
42 KB_FIELDS_CATEGORY = os.environ.get("KB_FIELDS_CATEGORY") or "category"
43 KB_FIELDS_SOURCEPAGE = os.environ.get("KB_FIELDS_SOURCEPAGE") or "file_storage_path"
44
45 COSMOSDB_URL = os.environ.get("COSMOSDB_URL")
46 COSMODB_KEY = os.environ.get("COSMOSDB_KEY")
47 COSMOSDB_DATABASE_NAME = os.environ.get("COSMOSDB_DATABASE_NAME") or "statusdb"
48 COSMOSDB_CONTAINER_NAME = os.environ.get("COSMOSDB_CONTAINER_NAME") or "statuscontainer"
49
50 QUERY_TERM_LANGUAGE = os.environ.get("QUERY_TERM_LANGUAGE") or "English"
51
52 # Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed,
53 # just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the
54 # keys for each service
55 # If you encounter a blocking error during a DefaultAzureCredntial resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True)
56 azure_credential = DefaultAzureCredential()
57 azure_search_key_credential = AzureKeyCredential(AZURE_SEARCH_SERVICE_KEY)
58
59 # Used by the OpenAI SDK
60 openai.api_type = "azure"
61 openai.api_base = f"https://{AZURE_OPENAI_SERVICE}.openai.azure.com"
62 openai.api_version = "2023-06-01-preview"
63
64 # Setup StatusLog to allow access to CosmosDB for logging
65 statusLog = StatusLog(
66 COSMOSDB_URL, COSMODB_KEY, COSMOSDB_DATABASE_NAME, COSMOSDB_CONTAINER_NAME
67 )
68
69 # Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead
70 # openai.api_type = "azure_ad"
71 # openai_token = azure_credential.get_token("https://cognitiveservices.azure.com/.default")
72 openai.api_key = AZURE_OPENAI_SERVICE_KEY
73
74 # Set up clients for Cognitive Search and Storage
75 search_client = SearchClient(
76 endpoint=f"https://{AZURE_SEARCH_SERVICE}.search.windows.net",
77 index_name=AZURE_SEARCH_INDEX,
78 credential=azure_search_key_credential,
79 )
80 blob_client = BlobServiceClient(
81 account_url=f"https://{AZURE_BLOB_STORAGE_ACCOUNT}.blob.core.windows.net",
82 credential=AZURE_BLOB_STORAGE_KEY,
83 )
84 blob_container = blob_client.get_container_client(AZURE_BLOB_STORAGE_CONTAINER)
85
86
87 chat_approaches = {
88 "rrr": ChatReadRetrieveReadApproach(
89 search_client,
90 AZURE_OPENAI_SERVICE,
91 AZURE_OPENAI_SERVICE_KEY,
92 AZURE_OPENAI_CHATGPT_DEPLOYMENT,
93 KB_FIELDS_SOURCEPAGE,
94 KB_FIELDS_CONTENT,
95 blob_client,
96 QUERY_TERM_LANGUAGE,
97 )
98 }
99
100 app = Flask(__name__)
101
102
103 @app.route("/", defaults={"path": "index.html"})
104 @app.route("/<path:path>")
105 def static_file(path):
106 return app.send_static_file(path)
107
108
109 # Return blob path with SAS token for citation access
110 @app.route("/content/<path:path>")
111 def content_file(path):
112 blob = blob_container.get_blob_client(path).download_blob()
113 mime_type = blob.properties["content_settings"]["content_type"]
114 file_extension = blob.properties["name"].split(".")[-1:]
115 if mime_type == "application/octet-stream":
116 mime_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
117 if mime_type == "text/plain" and file_extension[0] in ["htm", "html"]:
118 mime_type = "text/html"
119 print(
120 "Using mime type: "
121 + mime_type
122 + "for file with extension: "
123 + file_extension[0]
124 )
125 return (
126 blob.readall(),
127 200,
128 {
129 "Content-Type": mime_type,
130 "Content-Disposition": f"inline; filename={urllib.parse.quote(path, safe='')}",
131 },
132 )
133
134
135 @app.route("/chat", methods=["POST"])
136 def chat():
137 approach = request.json["approach"]
138 try:
139 impl = chat_approaches.get(approach)
140 if not impl:
141 return jsonify({"error": "unknown approach"}), 400
142 r = impl.run(request.json["history"], request.json.get("overrides") or {})
143
144 # return jsonify(r)
145 # To fix citation bug,below code is added.aparmar
146 return jsonify(
147 {
148 "data_points": r["data_points"],
149 "answer": r["answer"],
150 "thoughts": r["thoughts"],
151 "citation_lookup": r["citation_lookup"],
152 }
153 )
154
155 except Exception as e:
156 logging.exception("Exception in /chat")
157 return jsonify({"error": str(e)}), 500
158
159
160 @app.route("/getblobclienturl")
161 def get_blob_client_url():
162 sas_token = generate_account_sas(
163 AZURE_BLOB_STORAGE_ACCOUNT,
164 AZURE_BLOB_STORAGE_KEY,
165 resource_types=ResourceTypes(object=True, service=True, container=True),
166 permission=AccountSasPermissions(
167 read=True,
168 write=True,
169 list=True,
170 delete=False,
171 add=True,
172 create=True,
173 update=True,
174 process=False,
175 ),
176 expiry=datetime.utcnow() + timedelta(hours=1),
177 )
178 return jsonify({"url": f"{blob_client.url}?{sas_token}"})
179
180
181 if __name__ == "__main__":
182 app.run()
183
184
185 @app.route("/getalluploadstatus", methods=["POST"])
186 def get_all_upload_status():
187 timeframe = request.json["timeframe"]
188 state = request.json["state"]
189 try:
190 results = statusLog.read_files_status_by_timeframe(timeframe, State[state])
191 except Exception as e:
192 logging.exception("Exception in /getalluploadstatus")
193 return jsonify({"error": str(e)}), 500
194 return jsonify(results)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/backend/app.py b/app/backend/app.py
--- a/app/backend/app.py
+++ b/app/backend/app.py
@@ -192,3 +192,10 @@
logging.exception("Exception in /getalluploadstatus")
return jsonify({"error": str(e)}), 500
return jsonify(results)
+
+
+# Return AZURE_OPENAI_CHATGPT_DEPLOYMENT
[email protected]("/getInfoData")
+def get_info_data():
+ response = jsonify({"AZURE_OPENAI_CHATGPT_DEPLOYMENT": f"{AZURE_OPENAI_CHATGPT_DEPLOYMENT}"})
+ return response
\ No newline at end of file
|
{"golden_diff": "diff --git a/app/backend/app.py b/app/backend/app.py\n--- a/app/backend/app.py\n+++ b/app/backend/app.py\n@@ -192,3 +192,10 @@\n logging.exception(\"Exception in /getalluploadstatus\")\n return jsonify({\"error\": str(e)}), 500\n return jsonify(results)\n+\n+\n+# Return AZURE_OPENAI_CHATGPT_DEPLOYMENT\[email protected](\"/getInfoData\")\n+def get_info_data():\n+ response = jsonify({\"AZURE_OPENAI_CHATGPT_DEPLOYMENT\": f\"{AZURE_OPENAI_CHATGPT_DEPLOYMENT}\"})\n+ return response\n\\ No newline at end of file\n", "issue": "Error: Diagnostic settings does not support retention for new diagnostic settings\nAfter a couple of attempts to deploy the PubSec suite (deploy, delete, repeat switching from australia east to eastus), I began to encounter this error 'Diagnostic settings does not support retention for new diagnostic settings.' and the deployment would fail. With each attempt I had deleted all of the services created by the previous attempt, and changed the WORKSPACE=\"\" to be unique. \r\n\r\nI happened to come across this article:\r\nhttps://learn.microsoft.com/en-us/azure/azure-monitor/essentials/migrate-to-azure-storage-lifecycle-policy\r\n\r\nAfter changing lines 138, 146 and 156 in the main.bicep file, setting the days value to 0 (instead of the default value of 30) the deployment completed successfully. \r\n\r\nbased on the information in the article, we'll need to update these setting after September when the deprecation comes into effect.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nimport mimetypes\nimport os\nimport urllib.parse\nfrom datetime import datetime, timedelta\n\nimport openai\nfrom approaches.chatreadretrieveread import ChatReadRetrieveReadApproach\nfrom azure.core.credentials import AzureKeyCredential\nfrom azure.identity import DefaultAzureCredential\nfrom azure.search.documents import SearchClient\nfrom azure.storage.blob import (\n AccountSasPermissions,\n BlobServiceClient,\n ResourceTypes,\n generate_account_sas,\n)\nfrom flask import Flask, jsonify, request\nfrom shared_code.status_log import State, StatusLog\n\n# Replace these with your own values, either in environment variables or directly here\nAZURE_BLOB_STORAGE_ACCOUNT = (\n os.environ.get(\"AZURE_BLOB_STORAGE_ACCOUNT\") or \"mystorageaccount\"\n)\nAZURE_BLOB_STORAGE_KEY = os.environ.get(\"AZURE_BLOB_STORAGE_KEY\")\nAZURE_BLOB_STORAGE_CONTAINER = (\n os.environ.get(\"AZURE_BLOB_STORAGE_CONTAINER\") or \"content\"\n)\nAZURE_SEARCH_SERVICE = os.environ.get(\"AZURE_SEARCH_SERVICE\") or \"gptkb\"\nAZURE_SEARCH_SERVICE_KEY = os.environ.get(\"AZURE_SEARCH_SERVICE_KEY\")\nAZURE_SEARCH_INDEX = os.environ.get(\"AZURE_SEARCH_INDEX\") or \"gptkbindex\"\nAZURE_OPENAI_SERVICE = os.environ.get(\"AZURE_OPENAI_SERVICE\") or \"myopenai\"\nAZURE_OPENAI_CHATGPT_DEPLOYMENT = (\n os.environ.get(\"AZURE_OPENAI_CHATGPT_DEPLOYMENT\") or \"chat\"\n)\nAZURE_OPENAI_SERVICE_KEY = os.environ.get(\"AZURE_OPENAI_SERVICE_KEY\")\n\nKB_FIELDS_CONTENT = os.environ.get(\"KB_FIELDS_CONTENT\") or \"merged_content\"\nKB_FIELDS_CATEGORY = os.environ.get(\"KB_FIELDS_CATEGORY\") or \"category\"\nKB_FIELDS_SOURCEPAGE = os.environ.get(\"KB_FIELDS_SOURCEPAGE\") or \"file_storage_path\"\n\nCOSMOSDB_URL = os.environ.get(\"COSMOSDB_URL\")\nCOSMODB_KEY = os.environ.get(\"COSMOSDB_KEY\")\nCOSMOSDB_DATABASE_NAME = os.environ.get(\"COSMOSDB_DATABASE_NAME\") or \"statusdb\"\nCOSMOSDB_CONTAINER_NAME = os.environ.get(\"COSMOSDB_CONTAINER_NAME\") or 
\"statuscontainer\"\n\nQUERY_TERM_LANGUAGE = os.environ.get(\"QUERY_TERM_LANGUAGE\") or \"English\"\n\n# Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed,\n# just use 'az login' locally, and managed identity when deployed on Azure). If you need to use keys, use separate AzureKeyCredential instances with the\n# keys for each service\n# If you encounter a blocking error during a DefaultAzureCredntial resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True)\nazure_credential = DefaultAzureCredential()\nazure_search_key_credential = AzureKeyCredential(AZURE_SEARCH_SERVICE_KEY)\n\n# Used by the OpenAI SDK\nopenai.api_type = \"azure\"\nopenai.api_base = f\"https://{AZURE_OPENAI_SERVICE}.openai.azure.com\"\nopenai.api_version = \"2023-06-01-preview\"\n\n# Setup StatusLog to allow access to CosmosDB for logging\nstatusLog = StatusLog(\n COSMOSDB_URL, COSMODB_KEY, COSMOSDB_DATABASE_NAME, COSMOSDB_CONTAINER_NAME\n)\n\n# Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead\n# openai.api_type = \"azure_ad\"\n# openai_token = azure_credential.get_token(\"https://cognitiveservices.azure.com/.default\")\nopenai.api_key = AZURE_OPENAI_SERVICE_KEY\n\n# Set up clients for Cognitive Search and Storage\nsearch_client = SearchClient(\n endpoint=f\"https://{AZURE_SEARCH_SERVICE}.search.windows.net\",\n index_name=AZURE_SEARCH_INDEX,\n credential=azure_search_key_credential,\n)\nblob_client = BlobServiceClient(\n account_url=f\"https://{AZURE_BLOB_STORAGE_ACCOUNT}.blob.core.windows.net\",\n credential=AZURE_BLOB_STORAGE_KEY,\n)\nblob_container = blob_client.get_container_client(AZURE_BLOB_STORAGE_CONTAINER)\n\n\nchat_approaches = {\n \"rrr\": ChatReadRetrieveReadApproach(\n search_client,\n AZURE_OPENAI_SERVICE,\n AZURE_OPENAI_SERVICE_KEY,\n AZURE_OPENAI_CHATGPT_DEPLOYMENT,\n KB_FIELDS_SOURCEPAGE,\n KB_FIELDS_CONTENT,\n blob_client,\n QUERY_TERM_LANGUAGE,\n )\n}\n\napp = Flask(__name__)\n\n\[email protected](\"/\", defaults={\"path\": \"index.html\"})\[email protected](\"/<path:path>\")\ndef static_file(path):\n return app.send_static_file(path)\n\n\n# Return blob path with SAS token for citation access\[email protected](\"/content/<path:path>\")\ndef content_file(path):\n blob = blob_container.get_blob_client(path).download_blob()\n mime_type = blob.properties[\"content_settings\"][\"content_type\"]\n file_extension = blob.properties[\"name\"].split(\".\")[-1:]\n if mime_type == \"application/octet-stream\":\n mime_type = mimetypes.guess_type(path)[0] or \"application/octet-stream\"\n if mime_type == \"text/plain\" and file_extension[0] in [\"htm\", \"html\"]:\n mime_type = \"text/html\"\n print(\n \"Using mime type: \"\n + mime_type\n + \"for file with extension: \"\n + file_extension[0]\n )\n return (\n blob.readall(),\n 200,\n {\n \"Content-Type\": mime_type,\n \"Content-Disposition\": f\"inline; filename={urllib.parse.quote(path, safe='')}\",\n },\n )\n\n\[email protected](\"/chat\", methods=[\"POST\"])\ndef chat():\n approach = request.json[\"approach\"]\n try:\n impl = chat_approaches.get(approach)\n if not impl:\n return jsonify({\"error\": \"unknown approach\"}), 400\n r = impl.run(request.json[\"history\"], request.json.get(\"overrides\") or {})\n\n # return jsonify(r)\n # To fix citation bug,below code is added.aparmar\n return jsonify(\n {\n \"data_points\": r[\"data_points\"],\n \"answer\": r[\"answer\"],\n 
\"thoughts\": r[\"thoughts\"],\n \"citation_lookup\": r[\"citation_lookup\"],\n }\n )\n\n except Exception as e:\n logging.exception(\"Exception in /chat\")\n return jsonify({\"error\": str(e)}), 500\n\n\[email protected](\"/getblobclienturl\")\ndef get_blob_client_url():\n sas_token = generate_account_sas(\n AZURE_BLOB_STORAGE_ACCOUNT,\n AZURE_BLOB_STORAGE_KEY,\n resource_types=ResourceTypes(object=True, service=True, container=True),\n permission=AccountSasPermissions(\n read=True,\n write=True,\n list=True,\n delete=False,\n add=True,\n create=True,\n update=True,\n process=False,\n ),\n expiry=datetime.utcnow() + timedelta(hours=1),\n )\n return jsonify({\"url\": f\"{blob_client.url}?{sas_token}\"})\n\n\nif __name__ == \"__main__\":\n app.run()\n\n\[email protected](\"/getalluploadstatus\", methods=[\"POST\"])\ndef get_all_upload_status():\n timeframe = request.json[\"timeframe\"]\n state = request.json[\"state\"]\n try:\n results = statusLog.read_files_status_by_timeframe(timeframe, State[state])\n except Exception as e:\n logging.exception(\"Exception in /getalluploadstatus\")\n return jsonify({\"error\": str(e)}), 500\n return jsonify(results)\n", "path": "app/backend/app.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nimport logging\nimport mimetypes\nimport os\nimport urllib.parse\nfrom datetime import datetime, timedelta\n\nimport openai\nfrom approaches.chatreadretrieveread import ChatReadRetrieveReadApproach\nfrom azure.core.credentials import AzureKeyCredential\nfrom azure.identity import DefaultAzureCredential\nfrom azure.search.documents import SearchClient\nfrom azure.storage.blob import (\n AccountSasPermissions,\n BlobServiceClient,\n ResourceTypes,\n generate_account_sas,\n)\nfrom flask import Flask, jsonify, request\nfrom shared_code.status_log import State, StatusLog\n\n# Replace these with your own values, either in environment variables or directly here\nAZURE_BLOB_STORAGE_ACCOUNT = (\n os.environ.get(\"AZURE_BLOB_STORAGE_ACCOUNT\") or \"mystorageaccount\"\n)\nAZURE_BLOB_STORAGE_KEY = os.environ.get(\"AZURE_BLOB_STORAGE_KEY\")\nAZURE_BLOB_STORAGE_CONTAINER = (\n os.environ.get(\"AZURE_BLOB_STORAGE_CONTAINER\") or \"content\"\n)\nAZURE_SEARCH_SERVICE = os.environ.get(\"AZURE_SEARCH_SERVICE\") or \"gptkb\"\nAZURE_SEARCH_SERVICE_KEY = os.environ.get(\"AZURE_SEARCH_SERVICE_KEY\")\nAZURE_SEARCH_INDEX = os.environ.get(\"AZURE_SEARCH_INDEX\") or \"gptkbindex\"\nAZURE_OPENAI_SERVICE = os.environ.get(\"AZURE_OPENAI_SERVICE\") or \"myopenai\"\nAZURE_OPENAI_CHATGPT_DEPLOYMENT = (\n os.environ.get(\"AZURE_OPENAI_CHATGPT_DEPLOYMENT\") or \"chat\"\n)\nAZURE_OPENAI_SERVICE_KEY = os.environ.get(\"AZURE_OPENAI_SERVICE_KEY\")\n\nKB_FIELDS_CONTENT = os.environ.get(\"KB_FIELDS_CONTENT\") or \"merged_content\"\nKB_FIELDS_CATEGORY = os.environ.get(\"KB_FIELDS_CATEGORY\") or \"category\"\nKB_FIELDS_SOURCEPAGE = os.environ.get(\"KB_FIELDS_SOURCEPAGE\") or \"file_storage_path\"\n\nCOSMOSDB_URL = os.environ.get(\"COSMOSDB_URL\")\nCOSMODB_KEY = os.environ.get(\"COSMOSDB_KEY\")\nCOSMOSDB_DATABASE_NAME = os.environ.get(\"COSMOSDB_DATABASE_NAME\") or \"statusdb\"\nCOSMOSDB_CONTAINER_NAME = os.environ.get(\"COSMOSDB_CONTAINER_NAME\") or \"statuscontainer\"\n\nQUERY_TERM_LANGUAGE = os.environ.get(\"QUERY_TERM_LANGUAGE\") or \"English\"\n\n# Use the current user identity to authenticate with Azure OpenAI, Cognitive Search and Blob Storage (no secrets needed,\n# just use 'az login' locally, and managed identity when deployed on Azure). 
If you need to use keys, use separate AzureKeyCredential instances with the\n# keys for each service\n# If you encounter a blocking error during a DefaultAzureCredntial resolution, you can exclude the problematic credential by using a parameter (ex. exclude_shared_token_cache_credential=True)\nazure_credential = DefaultAzureCredential()\nazure_search_key_credential = AzureKeyCredential(AZURE_SEARCH_SERVICE_KEY)\n\n# Used by the OpenAI SDK\nopenai.api_type = \"azure\"\nopenai.api_base = f\"https://{AZURE_OPENAI_SERVICE}.openai.azure.com\"\nopenai.api_version = \"2023-06-01-preview\"\n\n# Setup StatusLog to allow access to CosmosDB for logging\nstatusLog = StatusLog(\n COSMOSDB_URL, COSMODB_KEY, COSMOSDB_DATABASE_NAME, COSMOSDB_CONTAINER_NAME\n)\n\n# Comment these two lines out if using keys, set your API key in the OPENAI_API_KEY environment variable instead\n# openai.api_type = \"azure_ad\"\n# openai_token = azure_credential.get_token(\"https://cognitiveservices.azure.com/.default\")\nopenai.api_key = AZURE_OPENAI_SERVICE_KEY\n\n# Set up clients for Cognitive Search and Storage\nsearch_client = SearchClient(\n endpoint=f\"https://{AZURE_SEARCH_SERVICE}.search.windows.net\",\n index_name=AZURE_SEARCH_INDEX,\n credential=azure_search_key_credential,\n)\nblob_client = BlobServiceClient(\n account_url=f\"https://{AZURE_BLOB_STORAGE_ACCOUNT}.blob.core.windows.net\",\n credential=AZURE_BLOB_STORAGE_KEY,\n)\nblob_container = blob_client.get_container_client(AZURE_BLOB_STORAGE_CONTAINER)\n\n\nchat_approaches = {\n \"rrr\": ChatReadRetrieveReadApproach(\n search_client,\n AZURE_OPENAI_SERVICE,\n AZURE_OPENAI_SERVICE_KEY,\n AZURE_OPENAI_CHATGPT_DEPLOYMENT,\n KB_FIELDS_SOURCEPAGE,\n KB_FIELDS_CONTENT,\n blob_client,\n QUERY_TERM_LANGUAGE,\n )\n}\n\napp = Flask(__name__)\n\n\[email protected](\"/\", defaults={\"path\": \"index.html\"})\[email protected](\"/<path:path>\")\ndef static_file(path):\n return app.send_static_file(path)\n\n\n# Return blob path with SAS token for citation access\[email protected](\"/content/<path:path>\")\ndef content_file(path):\n blob = blob_container.get_blob_client(path).download_blob()\n mime_type = blob.properties[\"content_settings\"][\"content_type\"]\n file_extension = blob.properties[\"name\"].split(\".\")[-1:]\n if mime_type == \"application/octet-stream\":\n mime_type = mimetypes.guess_type(path)[0] or \"application/octet-stream\"\n if mime_type == \"text/plain\" and file_extension[0] in [\"htm\", \"html\"]:\n mime_type = \"text/html\"\n print(\n \"Using mime type: \"\n + mime_type\n + \"for file with extension: \"\n + file_extension[0]\n )\n return (\n blob.readall(),\n 200,\n {\n \"Content-Type\": mime_type,\n \"Content-Disposition\": f\"inline; filename={urllib.parse.quote(path, safe='')}\",\n },\n )\n\n\[email protected](\"/chat\", methods=[\"POST\"])\ndef chat():\n approach = request.json[\"approach\"]\n try:\n impl = chat_approaches.get(approach)\n if not impl:\n return jsonify({\"error\": \"unknown approach\"}), 400\n r = impl.run(request.json[\"history\"], request.json.get(\"overrides\") or {})\n\n # return jsonify(r)\n # To fix citation bug,below code is added.aparmar\n return jsonify(\n {\n \"data_points\": r[\"data_points\"],\n \"answer\": r[\"answer\"],\n \"thoughts\": r[\"thoughts\"],\n \"citation_lookup\": r[\"citation_lookup\"],\n }\n )\n\n except Exception as e:\n logging.exception(\"Exception in /chat\")\n return jsonify({\"error\": str(e)}), 500\n\n\[email protected](\"/getblobclienturl\")\ndef get_blob_client_url():\n sas_token = 
generate_account_sas(\n AZURE_BLOB_STORAGE_ACCOUNT,\n AZURE_BLOB_STORAGE_KEY,\n resource_types=ResourceTypes(object=True, service=True, container=True),\n permission=AccountSasPermissions(\n read=True,\n write=True,\n list=True,\n delete=False,\n add=True,\n create=True,\n update=True,\n process=False,\n ),\n expiry=datetime.utcnow() + timedelta(hours=1),\n )\n return jsonify({\"url\": f\"{blob_client.url}?{sas_token}\"})\n\n\nif __name__ == \"__main__\":\n app.run()\n\n\[email protected](\"/getalluploadstatus\", methods=[\"POST\"])\ndef get_all_upload_status():\n timeframe = request.json[\"timeframe\"]\n state = request.json[\"state\"]\n try:\n results = statusLog.read_files_status_by_timeframe(timeframe, State[state])\n except Exception as e:\n logging.exception(\"Exception in /getalluploadstatus\")\n return jsonify({\"error\": str(e)}), 500\n return jsonify(results)\n\n\n# Return AZURE_OPENAI_CHATGPT_DEPLOYMENT\[email protected](\"/getInfoData\")\ndef get_info_data():\n response = jsonify({\"AZURE_OPENAI_CHATGPT_DEPLOYMENT\": f\"{AZURE_OPENAI_CHATGPT_DEPLOYMENT}\"})\n return response", "path": "app/backend/app.py"}]}
| 2,572 | 149 |
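A minimal, self-contained sketch of the pattern this record's golden diff introduces (a read-only `/getInfoData` route that reports the configured chat deployment). It is not the project's actual `app/backend/app.py`; only Flask is assumed, and the environment-variable default used here is a placeholder.

```python
import os

from flask import Flask, jsonify

app = Flask(__name__)

# Placeholder default; the real backend reads AZURE_OPENAI_CHATGPT_DEPLOYMENT
# from its deployment environment.
CHATGPT_DEPLOYMENT = os.environ.get("AZURE_OPENAI_CHATGPT_DEPLOYMENT", "chat")


@app.route("/getInfoData")
def get_info_data():
    # Return the configured deployment name as JSON, mirroring the patched route.
    return jsonify({"AZURE_OPENAI_CHATGPT_DEPLOYMENT": CHATGPT_DEPLOYMENT})


if __name__ == "__main__":
    app.run()
```

Running this and requesting `GET /getInfoData` returns a small JSON object, which is all the patched route does; no search, storage, or OpenAI clients are needed for that part.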
gh_patches_debug_18799
|
rasdani/github-patches
|
git_diff
|
mindee__doctr-30
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[documents] Add basic document reader
For documents to be analyzed, we first need to add a utility for document reading (PDF mostly). The following specs would be nice to have:
- inherit for a shared reader class ("DocumentReader" for instance)
- to be located in the `doctr.documents.reader` module
The following formats should be handled:
- [x] PDF (#8, #25): this resource would be nice to check: https://github.com/pymupdf/PyMuPDF
- [x] PNG (#30)
- [x] JPG (#30)
cc @charlesmindee
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doctr/documents/reader.py`
Content:
```
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 import fitz
7 import numpy as np
8 import cv2
9 from typing import List, Tuple, Optional, Any
10
11 __all__ = ['read_pdf']
12
13
14 def read_pdf(file_path: str, **kwargs: Any) -> List[np.ndarray]:
15 """Read a PDF file and convert it into an image in numpy format
16
17 Example::
18 >>> from doctr.documents import read_pdf
19 >>> doc = read_pdf("path/to/your/doc.pdf")
20
21 Args:
22 file_path: the path to the PDF file
23 Returns:
24 the list of pages decoded as numpy ndarray of shape H x W x 3
25 """
26
27 # Read pages with fitz and convert them to numpy ndarrays
28 return [convert_page_to_numpy(page, **kwargs) for page in fitz.open(file_path)]
29
30
31 def convert_page_to_numpy(
32 page: fitz.fitz.Page,
33 output_size: Optional[Tuple[int, int]] = None,
34 rgb_output: bool = True,
35 ) -> np.ndarray:
36 """Convert a fitz page to a numpy-formatted image
37
38 Args:
39 page: the page of a file read with PyMuPDF
40 output_size: the expected output size of each page in format H x W
41 rgb_output: whether the output ndarray channel order should be RGB instead of BGR.
42
43 Returns:
44 the rendered image in numpy format
45 """
46
47 transform_matrix = None
48
49 # If no output size is specified, keep the origin one
50 if output_size is not None:
51 scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3])
52 transform_matrix = fitz.Matrix(*scales)
53
54 # Generate the pixel map using the transformation matrix
55 stream = page.getPixmap(matrix=transform_matrix).getImageData()
56 # Decode it into a numpy
57 img = cv2.imdecode(np.frombuffer(stream, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
58
59 # Switch the channel order
60 if rgb_output:
61 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
62
63 return img
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/doctr/documents/reader.py b/doctr/documents/reader.py
--- a/doctr/documents/reader.py
+++ b/doctr/documents/reader.py
@@ -8,7 +8,36 @@
import cv2
from typing import List, Tuple, Optional, Any
-__all__ = ['read_pdf']
+__all__ = ['read_pdf', 'read_img']
+
+
+def read_img(
+ file_path: str,
+ output_size: Optional[Tuple[int, int]] = None,
+ rgb_output: bool = True,
+) -> np.ndarray:
+ """Read an image file into numpy format
+
+ Example::
+ >>> from doctr.documents import read_img
+ >>> page = read_img("path/to/your/doc.jpg")
+
+ Args:
+ file_path: the path to the image file
+ output_size: the expected output size of each page in format H x W
+ rgb_output: whether the output ndarray channel order should be RGB instead of BGR.
+ Returns:
+ the page decoded as numpy ndarray of shape H x W x 3
+ """
+
+ img = cv2.imread(file_path, cv2.IMREAD_COLOR)
+ # Resizing
+ if isinstance(output_size, tuple):
+ img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)
+ # Switch the channel order
+ if rgb_output:
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+ return img
def read_pdf(file_path: str, **kwargs: Any) -> List[np.ndarray]:
|
{"golden_diff": "diff --git a/doctr/documents/reader.py b/doctr/documents/reader.py\n--- a/doctr/documents/reader.py\n+++ b/doctr/documents/reader.py\n@@ -8,7 +8,36 @@\n import cv2\n from typing import List, Tuple, Optional, Any\n \n-__all__ = ['read_pdf']\n+__all__ = ['read_pdf', 'read_img']\n+\n+\n+def read_img(\n+ file_path: str,\n+ output_size: Optional[Tuple[int, int]] = None,\n+ rgb_output: bool = True,\n+) -> np.ndarray:\n+ \"\"\"Read an image file into numpy format\n+\n+ Example::\n+ >>> from doctr.documents import read_img\n+ >>> page = read_img(\"path/to/your/doc.jpg\")\n+\n+ Args:\n+ file_path: the path to the image file\n+ output_size: the expected output size of each page in format H x W\n+ rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n+ Returns:\n+ the page decoded as numpy ndarray of shape H x W x 3\n+ \"\"\"\n+\n+ img = cv2.imread(file_path, cv2.IMREAD_COLOR)\n+ # Resizing\n+ if isinstance(output_size, tuple):\n+ img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)\n+ # Switch the channel order\n+ if rgb_output:\n+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n+ return img\n \n \n def read_pdf(file_path: str, **kwargs: Any) -> List[np.ndarray]:\n", "issue": "[documents] Add basic document reader\nFor documents to be analyzed, we first need to add a utility for document reading (PDF mostly). The following specs would be nice to have:\r\n- inherit for a shared reader class (\"DocumentReader\" for instance)\r\n- to be located in the `doctr.documents.reader` module\r\n\r\nThe following formats should be handled:\r\n- [x] PDF (#8, #25): this resource would be nice to check: https://github.com/pymupdf/PyMuPDF\r\n- [x] PNG (#30)\r\n- [x] JPG (#30)\r\n\r\n\r\ncc @charlesmindee \n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport fitz\nimport numpy as np\nimport cv2\nfrom typing import List, Tuple, Optional, Any\n\n__all__ = ['read_pdf']\n\n\ndef read_pdf(file_path: str, **kwargs: Any) -> List[np.ndarray]:\n \"\"\"Read a PDF file and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import read_pdf\n >>> doc = read_pdf(\"path/to/your/doc.pdf\")\n\n Args:\n file_path: the path to the PDF file\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n # Read pages with fitz and convert them to numpy ndarrays\n return [convert_page_to_numpy(page, **kwargs) for page in fitz.open(file_path)]\n\n\ndef convert_page_to_numpy(\n page: fitz.fitz.Page,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Convert a fitz page to a numpy-formatted image\n\n Args:\n page: the page of a file read with PyMuPDF\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n\n Returns:\n the rendered image in numpy format\n \"\"\"\n\n transform_matrix = None\n\n # If no output size is specified, keep the origin one\n if output_size is not None:\n scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3])\n transform_matrix = fitz.Matrix(*scales)\n\n # Generate the pixel map using the transformation matrix\n stream = page.getPixmap(matrix=transform_matrix).getImageData()\n # Decode it into a numpy\n img = cv2.imdecode(np.frombuffer(stream, 
dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return img\n", "path": "doctr/documents/reader.py"}], "after_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nimport fitz\nimport numpy as np\nimport cv2\nfrom typing import List, Tuple, Optional, Any\n\n__all__ = ['read_pdf', 'read_img']\n\n\ndef read_img(\n file_path: str,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Read an image file into numpy format\n\n Example::\n >>> from doctr.documents import read_img\n >>> page = read_img(\"path/to/your/doc.jpg\")\n\n Args:\n file_path: the path to the image file\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n Returns:\n the page decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n img = cv2.imread(file_path, cv2.IMREAD_COLOR)\n # Resizing\n if isinstance(output_size, tuple):\n img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\ndef read_pdf(file_path: str, **kwargs: Any) -> List[np.ndarray]:\n \"\"\"Read a PDF file and convert it into an image in numpy format\n\n Example::\n >>> from doctr.documents import read_pdf\n >>> doc = read_pdf(\"path/to/your/doc.pdf\")\n\n Args:\n file_path: the path to the PDF file\n Returns:\n the list of pages decoded as numpy ndarray of shape H x W x 3\n \"\"\"\n\n # Read pages with fitz and convert them to numpy ndarrays\n return [convert_page_to_numpy(page, **kwargs) for page in fitz.open(file_path)]\n\n\ndef convert_page_to_numpy(\n page: fitz.fitz.Page,\n output_size: Optional[Tuple[int, int]] = None,\n rgb_output: bool = True,\n) -> np.ndarray:\n \"\"\"Convert a fitz page to a numpy-formatted image\n\n Args:\n page: the page of a file read with PyMuPDF\n output_size: the expected output size of each page in format H x W\n rgb_output: whether the output ndarray channel order should be RGB instead of BGR.\n\n Returns:\n the rendered image in numpy format\n \"\"\"\n\n transform_matrix = None\n\n # If no output size is specified, keep the origin one\n if output_size is not None:\n scales = (output_size[1] / page.MediaBox[2], output_size[0] / page.MediaBox[3])\n transform_matrix = fitz.Matrix(*scales)\n\n # Generate the pixel map using the transformation matrix\n stream = page.getPixmap(matrix=transform_matrix).getImageData()\n # Decode it into a numpy\n img = cv2.imdecode(np.frombuffer(stream, dtype=np.uint8), cv2.IMREAD_UNCHANGED)\n\n # Switch the channel order\n if rgb_output:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n return img\n", "path": "doctr/documents/reader.py"}]}
| 1,021 | 352 |
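A usage sketch for the `read_img` helper added by the golden diff above. The function body mirrors the patch; the `None` check, the example path, and the example size are illustrative assumptions, not part of doctr's published API.

```python
import cv2


def read_img_sketch(file_path, output_size=None, rgb_output=True):
    # Mirror of the patched helper: decode, optionally resize to (H, W),
    # then convert OpenCV's BGR channel order to RGB if requested.
    img = cv2.imread(file_path, cv2.IMREAD_COLOR)
    if img is None:
        # Guard added for this sketch only; the patch itself does not raise here.
        raise FileNotFoundError(f"could not read image: {file_path}")
    if isinstance(output_size, tuple):
        # cv2.resize expects (width, height), hence the reversed tuple.
        img = cv2.resize(img, output_size[::-1], interpolation=cv2.INTER_LINEAR)
    if rgb_output:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img


if __name__ == "__main__":
    page = read_img_sketch("path/to/your/doc.jpg", output_size=(1024, 768))
    print(page.shape, page.dtype)  # expected: (1024, 768, 3) uint8
```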
gh_patches_debug_29138
|
rasdani/github-patches
|
git_diff
|
bentoml__BentoML-4136
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: Triton Integration failed with an AttributeError
### Describe the bug
I try to integrate Triton Inference Server into BentoML and I follow both the official documentation(https://docs.bentoml.org/en/latest/integrations/triton.html) and the following example(https://github.com/bentoml/BentoML/tree/main/examples/triton/onnx). However, I get an AttributeError with the following message:
" '_TritonRunner' object has no attribute 'onnx_fp_16'", where onnx_fp_16 is my model deployed in Triton.
The code in my service.py file is:
```
import bentoml
from bentoml.io import JSON
from src.tokenizer import RDATokenizer
from src.request import RDARequest
from src.response import RDAResponse
from src.config import load_config
from src.utils.model import load_topics_and_mapping, get_multi_label_binarizer, post_predict, load_taxonomy_root,\
load_parent_child_relationship
configs = load_config()
rda_triton_runner = bentoml.triton.Runner("rdav2",
model_repository="src/model_repository",
cli_args=["--model-control-mode=explicit",
"--load-model=onnx_fp_16",
"--log-verbose=1"])
svc = bentoml.Service("rdav2", runners=[rda_triton_runner])
tokenizer = RDATokenizer(tokenizer_path=configs.tokenizer_path)
labels_binarizer = get_multi_label_binarizer(config=configs)
topics, topics_mapping = load_topics_and_mapping(config=configs)
tax_root = load_taxonomy_root(configs.tax_root_path)
parent_child = load_parent_child_relationship(config=configs)
id_to_fos = {id_: fos for fos, id_ in topics_mapping.items()}
@svc.api(input=JSON(pydantic_model=RDARequest), output=JSON(pydantic_model=RDAResponse))
async def predict(rda_request: RDARequest) -> RDAResponse:
"""
The input will be the title, abstract, k and, threshold and the output will be the prediction of the model
:param rda_request: the request object
:return: a dictionary where the key is the metadata name and the value is the value of the metadata.
the metadata will be a list with the tags, the tags ids, the probabilities, the ancestros and the ancestors id
"""
title = rda_request.title
abstract = rda_request.abstract
k = rda_request.k
threshold = rda_request.threshold
text = title + ". " + abstract
encoded_text = await tokenizer.tokenize(text=text)
logits = await rda_triton_runner.onnx_fp_16.async_run(encoded_text)
logits = logits[0].squeeze()
response = await post_predict(logits=logits,
labels_binarizer=labels_binarizer,
k=k,
threshold=threshold,
topics=topics,
topics_mapping=topics_mapping,
tax_roots=tax_root,
parent_child=parent_child,
id_to_fos=id_to_fos)
return response
```
My _bentofile.yaml_ is the following:
```
service: "service:svc"
include:
- "*.py"
- "/model_repository"
- "/configs"
- "/checkpoints"
python:
requirements_txt: "requirements.txt"
docker:
base_image: nvcr.io/nvidia/tritonserver:22.12-py3
```
I tried to run the service with a container and the commands I typed to do so were the followings:
- bentoml build --version 0.0.2
- bentoml containerize rdav2:0.0.2
- docker run -it --rm -p 3000:3000 rdav2:0.0.2 serve --production
I guess there is something wrong in __rda_triton_runner__ object, because when I debug the service, I get an empty list in the __models__ parameter. I am not sure about that, and that's the reason I opened this issue.
### To reproduce
_No response_
### Expected behavior
_No response_
### Environment
bentoml[triton]: 1.0.32
python: 3.8
platform: Ubuntu: 20.10
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/bentoml/triton.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 import typing as t
5 from functools import cached_property
6
7 import attr
8 from simple_di import Provide as _Provide
9 from simple_di import inject as _inject
10
11 from ._internal.configuration import get_debug_mode as _get_debug_mode
12 from ._internal.configuration.containers import BentoMLContainer as _BentoMLContainer
13 from ._internal.runner.runnable import RunnableMethodConfig as _RunnableMethodConfig
14 from ._internal.runner.runner import AbstractRunner as _AbstractRunner
15 from ._internal.runner.runner import RunnerMethod as _RunnerMethod
16 from ._internal.runner.runner import object_setattr as _object_setattr
17 from ._internal.runner.runner_handle import DummyRunnerHandle as _DummyRunnerHandle
18 from ._internal.runner.runner_handle.remote import TRITON_EXC_MSG as _TRITON_EXC_MSG
19 from ._internal.runner.runner_handle.remote import (
20 handle_triton_exception as _handle_triton_exception,
21 )
22 from ._internal.utils import LazyLoader as _LazyLoader
23
24 if t.TYPE_CHECKING:
25 import tritonclient.grpc.aio as _tritongrpcclient
26 import tritonclient.http.aio as _tritonhttpclient
27
28 from ._internal.runner.runner_handle import RunnerHandle
29
30 _P = t.ParamSpec("_P")
31
32 _LogFormat = t.Literal["default", "ISO8601"]
33 _GrpcInferResponseCompressionLevel = t.Literal["none", "low", "medium", "high"]
34 _TraceLevel = t.Literal["OFF", "TIMESTAMPS", "TENSORS"]
35 _RateLimit = t.Literal["execution_count", "off"]
36 _TritonServerType = t.Literal["grpc", "http"]
37
38 _ClientMethod = t.Literal[
39 "get_cuda_shared_memory_status",
40 "get_inference_statistics",
41 "get_log_settings",
42 "get_model_config",
43 "get_model_metadata",
44 "get_model_repository_index",
45 "get_server_metadata",
46 "get_system_shared_memory_status",
47 "get_trace_settings",
48 "infer",
49 "is_model_ready",
50 "is_server_live",
51 "is_server_ready",
52 "load_model",
53 "register_cuda_shared_memory",
54 "register_system_shared_memory",
55 "stream_infer",
56 "unload_model",
57 "unregister_cuda_shared_memory",
58 "unregister_system_shared_memory",
59 "update_log_settings",
60 "update_trace_settings",
61 ]
62 _ModelName = t.Annotated[str, t.LiteralString]
63
64 else:
65 _P = t.TypeVar("_P")
66
67 _LogFormat = _GrpcInferResponseCompressionLevel = _TraceLevel = _RateLimit = str
68
69 _tritongrpcclient = _LazyLoader(
70 "_tritongrpcclient", globals(), "tritonclient.grpc.aio", exc_msg=_TRITON_EXC_MSG
71 )
72 _tritonhttpclient = _LazyLoader(
73 "_tritonhttpclient", globals(), "tritonclient.http.aio", exc_msg=_TRITON_EXC_MSG
74 )
75
76 _logger = logging.getLogger(__name__)
77
78 __all__ = ["Runner"]
79
80
81 @attr.define(slots=False, frozen=True, eq=False)
82 class _TritonRunner(_AbstractRunner):
83 repository_path: str
84
85 tritonserver_type: _TritonServerType = attr.field(
86 default="grpc", validator=attr.validators.in_(["grpc", "http"])
87 )
88 cli_args: list[str] = attr.field(factory=list)
89
90 _runner_handle: RunnerHandle = attr.field(init=False, factory=_DummyRunnerHandle)
91
92 @_inject
93 async def runner_handle_is_ready(
94 self,
95 timeout: int = _Provide[
96 _BentoMLContainer.api_server_config.runner_probe.timeout
97 ],
98 ) -> bool:
99 """
100 Check if given runner handle is ready. This will be used as readiness probe in Kubernetes.
101 """
102 return await self._runner_handle.is_ready(timeout)
103
104 def __init__(
105 self,
106 name: str,
107 model_repository: str,
108 tritonserver_type: _TritonServerType = "grpc",
109 cli_args: list[str] | None = None,
110 ):
111 if cli_args is None:
112 cli_args = []
113
114 cli_args.append(f"--model-repository={model_repository}")
115
116 if tritonserver_type == "http":
117 cli_args.extend(
118 [
119 "--allow-grpc=False",
120 "--http-address=127.0.0.1",
121 ]
122 )
123 elif tritonserver_type == "grpc":
124 cli_args.extend(
125 [
126 "--reuse-grpc-port=1",
127 "--allow-http=False",
128 "--grpc-address=0.0.0.0",
129 ]
130 )
131
132 # default settings, disable metrics
133 cli_args.extend([f"--log-verbose={1 if _get_debug_mode() else 0}"])
134
135 if not all(s.startswith("--") for s in cli_args):
136 raise ValueError(
137 "cli_args should be a list of strings starting with '--' for TritonRunner."
138 )
139
140 self.__attrs_init__(
141 name=name,
142 models=None,
143 resource_config=None,
144 runnable_class=self.__class__,
145 repository_path=model_repository,
146 tritonserver_type=tritonserver_type,
147 cli_args=cli_args,
148 embedded=False, # NOTE: TritonRunner shouldn't be used as embedded.
149 )
150
151 @cached_property
152 def protocol_address(self):
153 from ._internal.utils import reserve_free_port
154
155 if self.tritonserver_type == "http":
156 with reserve_free_port(host="127.0.0.1") as port:
157 pass
158 return f"127.0.0.1:{port}"
159 elif self.tritonserver_type == "grpc":
160 with reserve_free_port(host="0.0.0.0", enable_so_reuseport=True) as port:
161 pass
162 return f"0.0.0.0:{port}"
163 else:
164 raise ValueError(f"Invalid Triton Server type: {self.tritonserver_type}")
165
166 def init_local(self, quiet: bool = False) -> None:
167 _logger.warning(
168 "TritonRunner '%s' will not be available for development mode.", self.name
169 )
170
171 def init_client(
172 self,
173 handle_class: type[RunnerHandle] | None = None,
174 *args: t.Any,
175 **kwargs: t.Any,
176 ):
177 from ._internal.runner.runner_handle.remote import TritonRunnerHandle
178
179 if handle_class is None:
180 handle_class = TritonRunnerHandle
181
182 super().init_client(handle_class=handle_class, *args, **kwargs)
183
184 def destroy(self):
185 _object_setattr(self, "_runner_handle", _DummyRunnerHandle())
186
187 # Even though the below overload overlaps, it is ok to ignore the warning since types
188 # for TritonRunner can handle both function from client and LiteralString from model name.
189 @t.overload
190 def __getattr__(self, item: t.Literal["__attrs_init__"]) -> t.Callable[..., None]: # type: ignore (overload warning)
191 ...
192
193 @t.overload
194 def __getattr__(
195 self, item: _ClientMethod
196 ) -> t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]]:
197 ...
198
199 @t.overload
200 def __getattr__(
201 self, item: _ModelName
202 ) -> _RunnerMethod[
203 t.Any, _P, _tritongrpcclient.InferResult | _tritonhttpclient.InferResult
204 ]:
205 ...
206
207 def __getattr__(self, item: str) -> t.Any:
208 from ._internal.runner.runner_handle.remote import TritonRunnerHandle
209
210 if isinstance(self._runner_handle, TritonRunnerHandle):
211 if item in self._runner_handle.client_methods:
212 # NOTE: auto wrap triton methods to its respective clients
213 if self.tritonserver_type == "grpc":
214 return _handle_triton_exception(
215 getattr(self._runner_handle.grpc_client, item)
216 )
217 else:
218 return _handle_triton_exception(
219 getattr(self._runner_handle.http_client, item)
220 )
221 else:
222 # if given item is not a client method, then we assume it is a model name.
223 # Hence, we will return a RunnerMethod that will be responsible for this model handle.
224 RT = (
225 _tritonhttpclient.InferResult
226 if self.tritonserver_type == "http"
227 else _tritongrpcclient.InferResult
228 )
229 return _RunnerMethod[t.Any, _P, RT](
230 runner=self,
231 name=item,
232 config=_RunnableMethodConfig(batchable=True, batch_dim=(0, 0)),
233 max_batch_size=0,
234 max_latency_ms=10000,
235 )
236
237 return super().__getattribute__(item)
238
239
240 Runner = _TritonRunner
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/bentoml/triton.py b/src/bentoml/triton.py
--- a/src/bentoml/triton.py
+++ b/src/bentoml/triton.py
@@ -20,6 +20,7 @@
handle_triton_exception as _handle_triton_exception,
)
from ._internal.utils import LazyLoader as _LazyLoader
+from .exceptions import StateException as _StateException
if t.TYPE_CHECKING:
import tritonclient.grpc.aio as _tritongrpcclient
@@ -168,18 +169,27 @@
"TritonRunner '%s' will not be available for development mode.", self.name
)
+ def _set_handle(
+ self, handle_class: type[RunnerHandle], *args: t.Any, **kwargs: t.Any
+ ) -> None:
+ if not isinstance(self._runner_handle, _DummyRunnerHandle):
+ raise _StateException("Runner already initialized")
+
+ runner_handle = handle_class(self, *args, **kwargs)
+ _object_setattr(self, "_runner_handle", runner_handle)
+
def init_client(
self,
handle_class: type[RunnerHandle] | None = None,
*args: t.Any,
**kwargs: t.Any,
):
- from ._internal.runner.runner_handle.remote import TritonRunnerHandle
-
if handle_class is None:
- handle_class = TritonRunnerHandle
+ from ._internal.runner.runner_handle.remote import TritonRunnerHandle
- super().init_client(handle_class=handle_class, *args, **kwargs)
+ self._set_handle(TritonRunnerHandle)
+ else:
+ self._set_handle(handle_class, *args, **kwargs)
def destroy(self):
_object_setattr(self, "_runner_handle", _DummyRunnerHandle())
|
{"golden_diff": "diff --git a/src/bentoml/triton.py b/src/bentoml/triton.py\n--- a/src/bentoml/triton.py\n+++ b/src/bentoml/triton.py\n@@ -20,6 +20,7 @@\n handle_triton_exception as _handle_triton_exception,\n )\n from ._internal.utils import LazyLoader as _LazyLoader\n+from .exceptions import StateException as _StateException\n \n if t.TYPE_CHECKING:\n import tritonclient.grpc.aio as _tritongrpcclient\n@@ -168,18 +169,27 @@\n \"TritonRunner '%s' will not be available for development mode.\", self.name\n )\n \n+ def _set_handle(\n+ self, handle_class: type[RunnerHandle], *args: t.Any, **kwargs: t.Any\n+ ) -> None:\n+ if not isinstance(self._runner_handle, _DummyRunnerHandle):\n+ raise _StateException(\"Runner already initialized\")\n+\n+ runner_handle = handle_class(self, *args, **kwargs)\n+ _object_setattr(self, \"_runner_handle\", runner_handle)\n+\n def init_client(\n self,\n handle_class: type[RunnerHandle] | None = None,\n *args: t.Any,\n **kwargs: t.Any,\n ):\n- from ._internal.runner.runner_handle.remote import TritonRunnerHandle\n-\n if handle_class is None:\n- handle_class = TritonRunnerHandle\n+ from ._internal.runner.runner_handle.remote import TritonRunnerHandle\n \n- super().init_client(handle_class=handle_class, *args, **kwargs)\n+ self._set_handle(TritonRunnerHandle)\n+ else:\n+ self._set_handle(handle_class, *args, **kwargs)\n \n def destroy(self):\n _object_setattr(self, \"_runner_handle\", _DummyRunnerHandle())\n", "issue": "bug: Triton Integration failed with an AttributeError\n### Describe the bug\n\nI try to integrate Triton Inference Server into BentoML and I follow both the official documentation(https://docs.bentoml.org/en/latest/integrations/triton.html) and the following example(https://github.com/bentoml/BentoML/tree/main/examples/triton/onnx). 
However, I get an AttributeError with the following message:\r\n\" '_TritonRunner' object has no attribute 'onnx_fp_16'\", where onnx_fp_16 is my model deployed in Triton.\r\nThe code in my service.py file is:\r\n```\r\nimport bentoml\r\nfrom bentoml.io import JSON\r\nfrom src.tokenizer import RDATokenizer\r\nfrom src.request import RDARequest\r\nfrom src.response import RDAResponse\r\nfrom src.config import load_config\r\nfrom src.utils.model import load_topics_and_mapping, get_multi_label_binarizer, post_predict, load_taxonomy_root,\\\r\n load_parent_child_relationship\r\n\r\nconfigs = load_config()\r\nrda_triton_runner = bentoml.triton.Runner(\"rdav2\",\r\n model_repository=\"src/model_repository\",\r\n cli_args=[\"--model-control-mode=explicit\",\r\n \"--load-model=onnx_fp_16\",\r\n \"--log-verbose=1\"])\r\n\r\nsvc = bentoml.Service(\"rdav2\", runners=[rda_triton_runner])\r\ntokenizer = RDATokenizer(tokenizer_path=configs.tokenizer_path)\r\nlabels_binarizer = get_multi_label_binarizer(config=configs)\r\ntopics, topics_mapping = load_topics_and_mapping(config=configs)\r\ntax_root = load_taxonomy_root(configs.tax_root_path)\r\nparent_child = load_parent_child_relationship(config=configs)\r\nid_to_fos = {id_: fos for fos, id_ in topics_mapping.items()}\r\n\r\n\r\[email protected](input=JSON(pydantic_model=RDARequest), output=JSON(pydantic_model=RDAResponse))\r\nasync def predict(rda_request: RDARequest) -> RDAResponse:\r\n \"\"\"\r\n The input will be the title, abstract, k and, threshold and the output will be the prediction of the model\r\n :param rda_request: the request object\r\n :return: a dictionary where the key is the metadata name and the value is the value of the metadata.\r\n the metadata will be a list with the tags, the tags ids, the probabilities, the ancestros and the ancestors id\r\n \"\"\"\r\n title = rda_request.title\r\n abstract = rda_request.abstract\r\n k = rda_request.k\r\n threshold = rda_request.threshold\r\n text = title + \". \" + abstract\r\n encoded_text = await tokenizer.tokenize(text=text)\r\n logits = await rda_triton_runner.onnx_fp_16.async_run(encoded_text)\r\n logits = logits[0].squeeze()\r\n response = await post_predict(logits=logits,\r\n labels_binarizer=labels_binarizer,\r\n k=k,\r\n threshold=threshold,\r\n topics=topics,\r\n topics_mapping=topics_mapping,\r\n tax_roots=tax_root,\r\n parent_child=parent_child,\r\n id_to_fos=id_to_fos)\r\n return response\r\n```\r\nMy _bentofile.yaml_ is the following:\r\n```\r\nservice: \"service:svc\"\r\ninclude:\r\n- \"*.py\"\r\n- \"/model_repository\"\r\n- \"/configs\"\r\n- \"/checkpoints\"\r\npython:\r\n requirements_txt: \"requirements.txt\"\r\ndocker:\r\n base_image: nvcr.io/nvidia/tritonserver:22.12-py3\r\n```\r\nI tried to run the service with a container and the commands I typed to do so were the followings:\r\n- bentoml build --version 0.0.2\r\n- bentoml containerize rdav2:0.0.2\r\n- docker run -it --rm -p 3000:3000 rdav2:0.0.2 serve --production\r\n\r\nI guess there is something wrong in __rda_triton_runner__ object, because when I debug the service, I get an empty list in the __models__ parameter. 
I am not sure about that, and that's the reason I opened this issue.\n\n### To reproduce\n\n_No response_\n\n### Expected behavior\n\n_No response_\n\n### Environment\n\nbentoml[triton]: 1.0.32\r\npython: 3.8\r\nplatform: Ubuntu: 20.10\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport typing as t\nfrom functools import cached_property\n\nimport attr\nfrom simple_di import Provide as _Provide\nfrom simple_di import inject as _inject\n\nfrom ._internal.configuration import get_debug_mode as _get_debug_mode\nfrom ._internal.configuration.containers import BentoMLContainer as _BentoMLContainer\nfrom ._internal.runner.runnable import RunnableMethodConfig as _RunnableMethodConfig\nfrom ._internal.runner.runner import AbstractRunner as _AbstractRunner\nfrom ._internal.runner.runner import RunnerMethod as _RunnerMethod\nfrom ._internal.runner.runner import object_setattr as _object_setattr\nfrom ._internal.runner.runner_handle import DummyRunnerHandle as _DummyRunnerHandle\nfrom ._internal.runner.runner_handle.remote import TRITON_EXC_MSG as _TRITON_EXC_MSG\nfrom ._internal.runner.runner_handle.remote import (\n handle_triton_exception as _handle_triton_exception,\n)\nfrom ._internal.utils import LazyLoader as _LazyLoader\n\nif t.TYPE_CHECKING:\n import tritonclient.grpc.aio as _tritongrpcclient\n import tritonclient.http.aio as _tritonhttpclient\n\n from ._internal.runner.runner_handle import RunnerHandle\n\n _P = t.ParamSpec(\"_P\")\n\n _LogFormat = t.Literal[\"default\", \"ISO8601\"]\n _GrpcInferResponseCompressionLevel = t.Literal[\"none\", \"low\", \"medium\", \"high\"]\n _TraceLevel = t.Literal[\"OFF\", \"TIMESTAMPS\", \"TENSORS\"]\n _RateLimit = t.Literal[\"execution_count\", \"off\"]\n _TritonServerType = t.Literal[\"grpc\", \"http\"]\n\n _ClientMethod = t.Literal[\n \"get_cuda_shared_memory_status\",\n \"get_inference_statistics\",\n \"get_log_settings\",\n \"get_model_config\",\n \"get_model_metadata\",\n \"get_model_repository_index\",\n \"get_server_metadata\",\n \"get_system_shared_memory_status\",\n \"get_trace_settings\",\n \"infer\",\n \"is_model_ready\",\n \"is_server_live\",\n \"is_server_ready\",\n \"load_model\",\n \"register_cuda_shared_memory\",\n \"register_system_shared_memory\",\n \"stream_infer\",\n \"unload_model\",\n \"unregister_cuda_shared_memory\",\n \"unregister_system_shared_memory\",\n \"update_log_settings\",\n \"update_trace_settings\",\n ]\n _ModelName = t.Annotated[str, t.LiteralString]\n\nelse:\n _P = t.TypeVar(\"_P\")\n\n _LogFormat = _GrpcInferResponseCompressionLevel = _TraceLevel = _RateLimit = str\n\n _tritongrpcclient = _LazyLoader(\n \"_tritongrpcclient\", globals(), \"tritonclient.grpc.aio\", exc_msg=_TRITON_EXC_MSG\n )\n _tritonhttpclient = _LazyLoader(\n \"_tritonhttpclient\", globals(), \"tritonclient.http.aio\", exc_msg=_TRITON_EXC_MSG\n )\n\n_logger = logging.getLogger(__name__)\n\n__all__ = [\"Runner\"]\n\n\[email protected](slots=False, frozen=True, eq=False)\nclass _TritonRunner(_AbstractRunner):\n repository_path: str\n\n tritonserver_type: _TritonServerType = attr.field(\n default=\"grpc\", validator=attr.validators.in_([\"grpc\", \"http\"])\n )\n cli_args: list[str] = attr.field(factory=list)\n\n _runner_handle: RunnerHandle = attr.field(init=False, factory=_DummyRunnerHandle)\n\n @_inject\n async def runner_handle_is_ready(\n self,\n timeout: int = _Provide[\n _BentoMLContainer.api_server_config.runner_probe.timeout\n ],\n ) -> bool:\n \"\"\"\n Check if given runner handle is ready. 
This will be used as readiness probe in Kubernetes.\n \"\"\"\n return await self._runner_handle.is_ready(timeout)\n\n def __init__(\n self,\n name: str,\n model_repository: str,\n tritonserver_type: _TritonServerType = \"grpc\",\n cli_args: list[str] | None = None,\n ):\n if cli_args is None:\n cli_args = []\n\n cli_args.append(f\"--model-repository={model_repository}\")\n\n if tritonserver_type == \"http\":\n cli_args.extend(\n [\n \"--allow-grpc=False\",\n \"--http-address=127.0.0.1\",\n ]\n )\n elif tritonserver_type == \"grpc\":\n cli_args.extend(\n [\n \"--reuse-grpc-port=1\",\n \"--allow-http=False\",\n \"--grpc-address=0.0.0.0\",\n ]\n )\n\n # default settings, disable metrics\n cli_args.extend([f\"--log-verbose={1 if _get_debug_mode() else 0}\"])\n\n if not all(s.startswith(\"--\") for s in cli_args):\n raise ValueError(\n \"cli_args should be a list of strings starting with '--' for TritonRunner.\"\n )\n\n self.__attrs_init__(\n name=name,\n models=None,\n resource_config=None,\n runnable_class=self.__class__,\n repository_path=model_repository,\n tritonserver_type=tritonserver_type,\n cli_args=cli_args,\n embedded=False, # NOTE: TritonRunner shouldn't be used as embedded.\n )\n\n @cached_property\n def protocol_address(self):\n from ._internal.utils import reserve_free_port\n\n if self.tritonserver_type == \"http\":\n with reserve_free_port(host=\"127.0.0.1\") as port:\n pass\n return f\"127.0.0.1:{port}\"\n elif self.tritonserver_type == \"grpc\":\n with reserve_free_port(host=\"0.0.0.0\", enable_so_reuseport=True) as port:\n pass\n return f\"0.0.0.0:{port}\"\n else:\n raise ValueError(f\"Invalid Triton Server type: {self.tritonserver_type}\")\n\n def init_local(self, quiet: bool = False) -> None:\n _logger.warning(\n \"TritonRunner '%s' will not be available for development mode.\", self.name\n )\n\n def init_client(\n self,\n handle_class: type[RunnerHandle] | None = None,\n *args: t.Any,\n **kwargs: t.Any,\n ):\n from ._internal.runner.runner_handle.remote import TritonRunnerHandle\n\n if handle_class is None:\n handle_class = TritonRunnerHandle\n\n super().init_client(handle_class=handle_class, *args, **kwargs)\n\n def destroy(self):\n _object_setattr(self, \"_runner_handle\", _DummyRunnerHandle())\n\n # Even though the below overload overlaps, it is ok to ignore the warning since types\n # for TritonRunner can handle both function from client and LiteralString from model name.\n @t.overload\n def __getattr__(self, item: t.Literal[\"__attrs_init__\"]) -> t.Callable[..., None]: # type: ignore (overload warning)\n ...\n\n @t.overload\n def __getattr__(\n self, item: _ClientMethod\n ) -> t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]]:\n ...\n\n @t.overload\n def __getattr__(\n self, item: _ModelName\n ) -> _RunnerMethod[\n t.Any, _P, _tritongrpcclient.InferResult | _tritonhttpclient.InferResult\n ]:\n ...\n\n def __getattr__(self, item: str) -> t.Any:\n from ._internal.runner.runner_handle.remote import TritonRunnerHandle\n\n if isinstance(self._runner_handle, TritonRunnerHandle):\n if item in self._runner_handle.client_methods:\n # NOTE: auto wrap triton methods to its respective clients\n if self.tritonserver_type == \"grpc\":\n return _handle_triton_exception(\n getattr(self._runner_handle.grpc_client, item)\n )\n else:\n return _handle_triton_exception(\n getattr(self._runner_handle.http_client, item)\n )\n else:\n # if given item is not a client method, then we assume it is a model name.\n # Hence, we will return a RunnerMethod that will be responsible for this model 
handle.\n RT = (\n _tritonhttpclient.InferResult\n if self.tritonserver_type == \"http\"\n else _tritongrpcclient.InferResult\n )\n return _RunnerMethod[t.Any, _P, RT](\n runner=self,\n name=item,\n config=_RunnableMethodConfig(batchable=True, batch_dim=(0, 0)),\n max_batch_size=0,\n max_latency_ms=10000,\n )\n\n return super().__getattribute__(item)\n\n\nRunner = _TritonRunner\n", "path": "src/bentoml/triton.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport typing as t\nfrom functools import cached_property\n\nimport attr\nfrom simple_di import Provide as _Provide\nfrom simple_di import inject as _inject\n\nfrom ._internal.configuration import get_debug_mode as _get_debug_mode\nfrom ._internal.configuration.containers import BentoMLContainer as _BentoMLContainer\nfrom ._internal.runner.runnable import RunnableMethodConfig as _RunnableMethodConfig\nfrom ._internal.runner.runner import AbstractRunner as _AbstractRunner\nfrom ._internal.runner.runner import RunnerMethod as _RunnerMethod\nfrom ._internal.runner.runner import object_setattr as _object_setattr\nfrom ._internal.runner.runner_handle import DummyRunnerHandle as _DummyRunnerHandle\nfrom ._internal.runner.runner_handle.remote import TRITON_EXC_MSG as _TRITON_EXC_MSG\nfrom ._internal.runner.runner_handle.remote import (\n handle_triton_exception as _handle_triton_exception,\n)\nfrom ._internal.utils import LazyLoader as _LazyLoader\nfrom .exceptions import StateException as _StateException\n\nif t.TYPE_CHECKING:\n import tritonclient.grpc.aio as _tritongrpcclient\n import tritonclient.http.aio as _tritonhttpclient\n\n from ._internal.runner.runner_handle import RunnerHandle\n\n _P = t.ParamSpec(\"_P\")\n\n _LogFormat = t.Literal[\"default\", \"ISO8601\"]\n _GrpcInferResponseCompressionLevel = t.Literal[\"none\", \"low\", \"medium\", \"high\"]\n _TraceLevel = t.Literal[\"OFF\", \"TIMESTAMPS\", \"TENSORS\"]\n _RateLimit = t.Literal[\"execution_count\", \"off\"]\n _TritonServerType = t.Literal[\"grpc\", \"http\"]\n\n _ClientMethod = t.Literal[\n \"get_cuda_shared_memory_status\",\n \"get_inference_statistics\",\n \"get_log_settings\",\n \"get_model_config\",\n \"get_model_metadata\",\n \"get_model_repository_index\",\n \"get_server_metadata\",\n \"get_system_shared_memory_status\",\n \"get_trace_settings\",\n \"infer\",\n \"is_model_ready\",\n \"is_server_live\",\n \"is_server_ready\",\n \"load_model\",\n \"register_cuda_shared_memory\",\n \"register_system_shared_memory\",\n \"stream_infer\",\n \"unload_model\",\n \"unregister_cuda_shared_memory\",\n \"unregister_system_shared_memory\",\n \"update_log_settings\",\n \"update_trace_settings\",\n ]\n _ModelName = t.Annotated[str, t.LiteralString]\n\nelse:\n _P = t.TypeVar(\"_P\")\n\n _LogFormat = _GrpcInferResponseCompressionLevel = _TraceLevel = _RateLimit = str\n\n _tritongrpcclient = _LazyLoader(\n \"_tritongrpcclient\", globals(), \"tritonclient.grpc.aio\", exc_msg=_TRITON_EXC_MSG\n )\n _tritonhttpclient = _LazyLoader(\n \"_tritonhttpclient\", globals(), \"tritonclient.http.aio\", exc_msg=_TRITON_EXC_MSG\n )\n\n_logger = logging.getLogger(__name__)\n\n__all__ = [\"Runner\"]\n\n\[email protected](slots=False, frozen=True, eq=False)\nclass _TritonRunner(_AbstractRunner):\n repository_path: str\n\n tritonserver_type: _TritonServerType = attr.field(\n default=\"grpc\", validator=attr.validators.in_([\"grpc\", \"http\"])\n )\n cli_args: list[str] = attr.field(factory=list)\n\n _runner_handle: RunnerHandle = attr.field(init=False, 
factory=_DummyRunnerHandle)\n\n @_inject\n async def runner_handle_is_ready(\n self,\n timeout: int = _Provide[\n _BentoMLContainer.api_server_config.runner_probe.timeout\n ],\n ) -> bool:\n \"\"\"\n Check if given runner handle is ready. This will be used as readiness probe in Kubernetes.\n \"\"\"\n return await self._runner_handle.is_ready(timeout)\n\n def __init__(\n self,\n name: str,\n model_repository: str,\n tritonserver_type: _TritonServerType = \"grpc\",\n cli_args: list[str] | None = None,\n ):\n if cli_args is None:\n cli_args = []\n\n cli_args.append(f\"--model-repository={model_repository}\")\n\n if tritonserver_type == \"http\":\n cli_args.extend(\n [\n \"--allow-grpc=False\",\n \"--http-address=127.0.0.1\",\n ]\n )\n elif tritonserver_type == \"grpc\":\n cli_args.extend(\n [\n \"--reuse-grpc-port=1\",\n \"--allow-http=False\",\n \"--grpc-address=0.0.0.0\",\n ]\n )\n\n # default settings, disable metrics\n cli_args.extend([f\"--log-verbose={1 if _get_debug_mode() else 0}\"])\n\n if not all(s.startswith(\"--\") for s in cli_args):\n raise ValueError(\n \"cli_args should be a list of strings starting with '--' for TritonRunner.\"\n )\n\n self.__attrs_init__(\n name=name,\n models=None,\n resource_config=None,\n runnable_class=self.__class__,\n repository_path=model_repository,\n tritonserver_type=tritonserver_type,\n cli_args=cli_args,\n embedded=False, # NOTE: TritonRunner shouldn't be used as embedded.\n )\n\n @cached_property\n def protocol_address(self):\n from ._internal.utils import reserve_free_port\n\n if self.tritonserver_type == \"http\":\n with reserve_free_port(host=\"127.0.0.1\") as port:\n pass\n return f\"127.0.0.1:{port}\"\n elif self.tritonserver_type == \"grpc\":\n with reserve_free_port(host=\"0.0.0.0\", enable_so_reuseport=True) as port:\n pass\n return f\"0.0.0.0:{port}\"\n else:\n raise ValueError(f\"Invalid Triton Server type: {self.tritonserver_type}\")\n\n def init_local(self, quiet: bool = False) -> None:\n _logger.warning(\n \"TritonRunner '%s' will not be available for development mode.\", self.name\n )\n\n def _set_handle(\n self, handle_class: type[RunnerHandle], *args: t.Any, **kwargs: t.Any\n ) -> None:\n if not isinstance(self._runner_handle, _DummyRunnerHandle):\n raise _StateException(\"Runner already initialized\")\n\n runner_handle = handle_class(self, *args, **kwargs)\n _object_setattr(self, \"_runner_handle\", runner_handle)\n\n def init_client(\n self,\n handle_class: type[RunnerHandle] | None = None,\n *args: t.Any,\n **kwargs: t.Any,\n ):\n if handle_class is None:\n from ._internal.runner.runner_handle.remote import TritonRunnerHandle\n\n self._set_handle(TritonRunnerHandle)\n else:\n self._set_handle(handle_class, *args, **kwargs)\n\n def destroy(self):\n _object_setattr(self, \"_runner_handle\", _DummyRunnerHandle())\n\n # Even though the below overload overlaps, it is ok to ignore the warning since types\n # for TritonRunner can handle both function from client and LiteralString from model name.\n @t.overload\n def __getattr__(self, item: t.Literal[\"__attrs_init__\"]) -> t.Callable[..., None]: # type: ignore (overload warning)\n ...\n\n @t.overload\n def __getattr__(\n self, item: _ClientMethod\n ) -> t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]]:\n ...\n\n @t.overload\n def __getattr__(\n self, item: _ModelName\n ) -> _RunnerMethod[\n t.Any, _P, _tritongrpcclient.InferResult | _tritonhttpclient.InferResult\n ]:\n ...\n\n def __getattr__(self, item: str) -> t.Any:\n from ._internal.runner.runner_handle.remote import 
TritonRunnerHandle\n\n if isinstance(self._runner_handle, TritonRunnerHandle):\n if item in self._runner_handle.client_methods:\n # NOTE: auto wrap triton methods to its respective clients\n if self.tritonserver_type == \"grpc\":\n return _handle_triton_exception(\n getattr(self._runner_handle.grpc_client, item)\n )\n else:\n return _handle_triton_exception(\n getattr(self._runner_handle.http_client, item)\n )\n else:\n # if given item is not a client method, then we assume it is a model name.\n # Hence, we will return a RunnerMethod that will be responsible for this model handle.\n RT = (\n _tritonhttpclient.InferResult\n if self.tritonserver_type == \"http\"\n else _tritongrpcclient.InferResult\n )\n return _RunnerMethod[t.Any, _P, RT](\n runner=self,\n name=item,\n config=_RunnableMethodConfig(batchable=True, batch_dim=(0, 0)),\n max_batch_size=0,\n max_latency_ms=10000,\n )\n\n return super().__getattribute__(item)\n\n\nRunner = _TritonRunner\n", "path": "src/bentoml/triton.py"}]}
| 3,736 | 404 |
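The essence of the fix above is a "set the handle exactly once" guard: the runner starts with a dummy sentinel handle, and `_set_handle` refuses to overwrite anything that is not the sentinel. The sketch below reproduces only that pattern with stand-in classes; none of these names are BentoML's real classes or signatures.

```python
class StateException(Exception):
    """Raised when a runner is initialized twice."""


class DummyHandle:
    """Sentinel meaning 'no real handle has been attached yet'."""


class RealHandle:
    def __init__(self, runner):
        self.runner = runner


class Runner:
    def __init__(self, name):
        self.name = name
        self._handle = DummyHandle()

    def _set_handle(self, handle_class, *args, **kwargs):
        # Mirrors the patched _TritonRunner._set_handle: only a dummy handle
        # may be replaced; a second initialization is an error.
        if not isinstance(self._handle, DummyHandle):
            raise StateException("Runner already initialized")
        self._handle = handle_class(self, *args, **kwargs)

    def init_client(self, handle_class=None, *args, **kwargs):
        self._set_handle(handle_class or RealHandle, *args, **kwargs)


runner = Runner("rdav2")
runner.init_client()
print(type(runner._handle).__name__)  # RealHandle

try:
    runner.init_client()
except StateException as exc:
    print("second init rejected:", exc)
```

In the record above, `__getattr__` only treats attribute names as Triton model names once the handle is a real `TritonRunnerHandle`, so ensuring `init_client` actually attaches one is what lets `runner.onnx_fp_16`-style access resolve.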
gh_patches_debug_11528
|
rasdani/github-patches
|
git_diff
|
mlcommons__GaNDLF-590
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyTorch security vulnerability
See https://github.com/advisories/GHSA-47fc-vmwq-366v
Need to upgrade to PyTorch 1.13.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import sys, re
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 try:
13 with open("README.md") as readme_file:
14 readme = readme_file.read()
15 except Exception as error:
16 readme = "No README information found."
17 sys.stderr.write("Warning: Could not open '%s' due %s\n" % ("README.md", error))
18
19
20 class CustomInstallCommand(install):
21 def run(self):
22 install.run(self)
23
24
25 class CustomDevelopCommand(develop):
26 def run(self):
27 develop.run(self)
28
29
30 class CustomEggInfoCommand(egg_info):
31 def run(self):
32 egg_info.run(self)
33
34
35 try:
36 filepath = "GANDLF/version.py"
37 version_file = open(filepath)
38 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
39
40 except Exception as error:
41 __version__ = "0.0.1"
42 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
43
44 requirements = [
45 "black",
46 "numpy==1.22.0",
47 "scipy",
48 "SimpleITK!=2.0.*",
49 "SimpleITK!=2.2.1", # https://github.com/mlcommons/GaNDLF/issues/536
50 "torchvision",
51 "tqdm",
52 "torchio==0.18.75",
53 "pandas",
54 "scikit-learn>=0.23.2",
55 "scikit-image>=0.19.1",
56 "setuptools",
57 "seaborn",
58 "pyyaml",
59 "tiffslide",
60 "matplotlib",
61 "requests>=2.25.0",
62 "pytest",
63 "coverage",
64 "pytest-cov",
65 "psutil",
66 "medcam",
67 "opencv-python",
68 "torchmetrics==0.5.1", # newer versions have changed api for f1 invocation
69 "OpenPatchMiner==0.1.8",
70 "zarr==2.10.3",
71 "pydicom",
72 "onnx",
73 "torchinfo==1.7.0",
74 "segmentation-models-pytorch==0.3.0",
75 "ACSConv==0.1.1",
76 "docker",
77 "dicom-anonymizer",
78 "twine",
79 "zarr",
80 "keyring",
81 ]
82
83 # pytorch doesn't have LTS support on OSX - https://github.com/mlcommons/GaNDLF/issues/389
84 if sys.platform == "darwin":
85 requirements.append("torch==1.11.0")
86 else:
87 requirements.append("torch==1.11.0")
88
89 if __name__ == "__main__":
90 setup(
91 name="GANDLF",
92 version=__version__,
93 author="MLCommons",
94 author_email="[email protected]",
95 python_requires=">=3.8",
96 packages=find_packages(),
97 cmdclass={
98 "install": CustomInstallCommand,
99 "develop": CustomDevelopCommand,
100 "egg_info": CustomEggInfoCommand,
101 },
102 scripts=[
103 "gandlf_run",
104 "gandlf_constructCSV",
105 "gandlf_collectStats",
106 "gandlf_patchMiner",
107 "gandlf_preprocess",
108 "gandlf_anonymizer",
109 "gandlf_verifyInstall",
110 "gandlf_configGenerator",
111 "gandlf_recoverConfig",
112 "gandlf_deploy",
113 ],
114 classifiers=[
115 "Development Status :: 3 - Alpha",
116 "Intended Audience :: Science/Research",
117 "License :: OSI Approved :: Apache Software License",
118 "Natural Language :: English",
119 "Operating System :: OS Independent",
120 "Programming Language :: Python :: 3.8",
121 "Programming Language :: Python :: 3.9",
122 "Programming Language :: Python :: 3.10",
123 "Topic :: Scientific/Engineering :: Medical Science Apps",
124 ],
125 description=(
126 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
127 ),
128 install_requires=requirements,
129 license="Apache-2.0",
130 long_description=readme,
131 long_description_content_type="text/markdown",
132 include_package_data=True,
133 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch",
134 zip_safe=False,
135 )
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,6 +42,7 @@
sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
requirements = [
+ "torch==1.13.1",
"black",
"numpy==1.22.0",
"scipy",
@@ -80,12 +81,6 @@
"keyring",
]
-# pytorch doesn't have LTS support on OSX - https://github.com/mlcommons/GaNDLF/issues/389
-if sys.platform == "darwin":
- requirements.append("torch==1.11.0")
-else:
- requirements.append("torch==1.11.0")
-
if __name__ == "__main__":
setup(
name="GANDLF",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,6 +42,7 @@\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n \n requirements = [\n+ \"torch==1.13.1\",\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n@@ -80,12 +81,6 @@\n \"keyring\",\n ]\n \n-# pytorch doesn't have LTS support on OSX - https://github.com/mlcommons/GaNDLF/issues/389\n-if sys.platform == \"darwin\":\n- requirements.append(\"torch==1.11.0\")\n-else:\n- requirements.append(\"torch==1.11.0\")\n-\n if __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n", "issue": "PyTorch security vulnerability\nSee https://github.com/advisories/GHSA-47fc-vmwq-366v\r\n\r\nNeed to upgrade to PyTorch 1.13.1\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error))\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.75\",\n \"pandas\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.0\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/mlcommons/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.11.0\")\nelse:\n requirements.append(\"torch==1.11.0\")\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">=3.8\",\n packages=find_packages(),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n 
\"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport sys, re\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\ntry:\n with open(\"README.md\") as readme_file:\n readme = readme_file.read()\nexcept Exception as error:\n readme = \"No README information found.\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (\"README.md\", error))\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"torch==1.13.1\",\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"SimpleITK!=2.2.1\", # https://github.com/mlcommons/GaNDLF/issues/536\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.75\",\n \"pandas\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n \"segmentation-models-pytorch==0.3.0\",\n \"ACSConv==0.1.1\",\n \"docker\",\n \"dicom-anonymizer\",\n \"twine\",\n \"zarr\",\n \"keyring\",\n]\n\nif __name__ == \"__main__\":\n setup(\n name=\"GANDLF\",\n version=__version__,\n author=\"MLCommons\",\n author_email=\"[email protected]\",\n python_requires=\">=3.8\",\n packages=find_packages(),\n cmdclass={\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n \"gandlf_configGenerator\",\n \"gandlf_recoverConfig\",\n \"gandlf_deploy\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: 
English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"Apache-2.0\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging, clinical-workflows, deep-learning, pytorch\",\n zip_safe=False,\n )\n", "path": "setup.py"}]}
| 1,610 | 197 |
gh_patches_debug_16711
|
rasdani/github-patches
|
git_diff
|
google__TensorNetwork-489
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tn.set_default_backend should raise exception
`tn.set_default_backend(backend_name)` should raise if `backend_name` is not a valid backend.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensornetwork/backend_contextmanager.py`
Content:
```
1 from typing import Text, Union
2 from tensornetwork.backends.base_backend import BaseBackend
3
4 class DefaultBackend():
5 """Context manager for setting up backend for nodes"""
6
7 def __init__(self, backend: Union[Text, BaseBackend]) -> None:
8 if not isinstance(backend, (Text, BaseBackend)):
9 raise ValueError("Item passed to DefaultBackend "
10 "must be Text or BaseBackend")
11 self.backend = backend
12
13 def __enter__(self):
14 _default_backend_stack.stack.append(self)
15
16 def __exit__(self, exc_type, exc_val, exc_tb):
17 _default_backend_stack.stack.pop()
18
19 class _DefaultBackendStack():
20 """A stack to keep track default backends context manager"""
21
22 def __init__(self):
23 self.stack = []
24 self.default_backend = "numpy"
25
26 def get_current_backend(self):
27 return self.stack[-1].backend if self.stack else self.default_backend
28
29 _default_backend_stack = _DefaultBackendStack()
30
31 def get_default_backend():
32 return _default_backend_stack.get_current_backend()
33
34 def set_default_backend(backend: Union[Text, BaseBackend]) -> None:
35 if _default_backend_stack.stack:
36 raise AssertionError("The default backend should not be changed "
37 "inside the backend context manager")
38 if not isinstance(backend, (Text, BaseBackend)):
39 raise ValueError("Item passed to set_default_backend "
40 "must be Text or BaseBackend")
41 _default_backend_stack.default_backend = backend
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensornetwork/backend_contextmanager.py b/tensornetwork/backend_contextmanager.py
--- a/tensornetwork/backend_contextmanager.py
+++ b/tensornetwork/backend_contextmanager.py
@@ -1,5 +1,6 @@
from typing import Text, Union
from tensornetwork.backends.base_backend import BaseBackend
+from tensornetwork.backends import backend_factory
class DefaultBackend():
"""Context manager for setting up backend for nodes"""
@@ -38,4 +39,6 @@
if not isinstance(backend, (Text, BaseBackend)):
raise ValueError("Item passed to set_default_backend "
"must be Text or BaseBackend")
+ if isinstance(backend, Text) and backend not in backend_factory._BACKENDS:
+ raise ValueError(f"Backend '{backend}' was not found.")
_default_backend_stack.default_backend = backend
|
{"golden_diff": "diff --git a/tensornetwork/backend_contextmanager.py b/tensornetwork/backend_contextmanager.py\n--- a/tensornetwork/backend_contextmanager.py\n+++ b/tensornetwork/backend_contextmanager.py\n@@ -1,5 +1,6 @@\n from typing import Text, Union\n from tensornetwork.backends.base_backend import BaseBackend\n+from tensornetwork.backends import backend_factory\n \n class DefaultBackend():\n \"\"\"Context manager for setting up backend for nodes\"\"\"\n@@ -38,4 +39,6 @@\n if not isinstance(backend, (Text, BaseBackend)):\n raise ValueError(\"Item passed to set_default_backend \"\n \"must be Text or BaseBackend\")\n+ if isinstance(backend, Text) and backend not in backend_factory._BACKENDS:\n+ raise ValueError(f\"Backend '{backend}' was not found.\")\n _default_backend_stack.default_backend = backend\n", "issue": "tn.set_default_backend should raise exception\n`tn.set_default_backend(backend_name)` should raise if `backend_name` is not a valid backend.\n", "before_files": [{"content": "from typing import Text, Union\nfrom tensornetwork.backends.base_backend import BaseBackend\n\nclass DefaultBackend():\n \"\"\"Context manager for setting up backend for nodes\"\"\"\n\n def __init__(self, backend: Union[Text, BaseBackend]) -> None:\n if not isinstance(backend, (Text, BaseBackend)):\n raise ValueError(\"Item passed to DefaultBackend \"\n \"must be Text or BaseBackend\")\n self.backend = backend\n\n def __enter__(self):\n _default_backend_stack.stack.append(self)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n _default_backend_stack.stack.pop()\n\nclass _DefaultBackendStack():\n \"\"\"A stack to keep track default backends context manager\"\"\"\n\n def __init__(self):\n self.stack = []\n self.default_backend = \"numpy\"\n\n def get_current_backend(self):\n return self.stack[-1].backend if self.stack else self.default_backend\n\n_default_backend_stack = _DefaultBackendStack()\n\ndef get_default_backend():\n return _default_backend_stack.get_current_backend()\n\ndef set_default_backend(backend: Union[Text, BaseBackend]) -> None:\n if _default_backend_stack.stack:\n raise AssertionError(\"The default backend should not be changed \"\n \"inside the backend context manager\")\n if not isinstance(backend, (Text, BaseBackend)):\n raise ValueError(\"Item passed to set_default_backend \"\n \"must be Text or BaseBackend\")\n _default_backend_stack.default_backend = backend\n", "path": "tensornetwork/backend_contextmanager.py"}], "after_files": [{"content": "from typing import Text, Union\nfrom tensornetwork.backends.base_backend import BaseBackend\nfrom tensornetwork.backends import backend_factory\n\nclass DefaultBackend():\n \"\"\"Context manager for setting up backend for nodes\"\"\"\n\n def __init__(self, backend: Union[Text, BaseBackend]) -> None:\n if not isinstance(backend, (Text, BaseBackend)):\n raise ValueError(\"Item passed to DefaultBackend \"\n \"must be Text or BaseBackend\")\n self.backend = backend\n\n def __enter__(self):\n _default_backend_stack.stack.append(self)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n _default_backend_stack.stack.pop()\n\nclass _DefaultBackendStack():\n \"\"\"A stack to keep track default backends context manager\"\"\"\n\n def __init__(self):\n self.stack = []\n self.default_backend = \"numpy\"\n\n def get_current_backend(self):\n return self.stack[-1].backend if self.stack else self.default_backend\n\n_default_backend_stack = _DefaultBackendStack()\n\ndef get_default_backend():\n return _default_backend_stack.get_current_backend()\n\ndef 
set_default_backend(backend: Union[Text, BaseBackend]) -> None:\n if _default_backend_stack.stack:\n raise AssertionError(\"The default backend should not be changed \"\n \"inside the backend context manager\")\n if not isinstance(backend, (Text, BaseBackend)):\n raise ValueError(\"Item passed to set_default_backend \"\n \"must be Text or BaseBackend\")\n if isinstance(backend, Text) and backend not in backend_factory._BACKENDS:\n raise ValueError(f\"Backend '{backend}' was not found.\")\n _default_backend_stack.default_backend = backend\n", "path": "tensornetwork/backend_contextmanager.py"}]}
| 682 | 186 |
gh_patches_debug_9249
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-490
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] CamelCase instead of snake_case
`documentChanges` argument on the left https://github.com/tomv564/LSP/blob/5a472ba6f23d70f6f8f1ebaabb83015c066ce198/plugin/rename.py#L69
should be `document_changes`, like `LspApplyWorkspaceEditCommand` expects:
https://github.com/tomv564/LSP/blob/5a472ba6f23d70f6f8f1ebaabb83015c066ce198/plugin/core/edit.py#L19
When doing a rename, this popped up in the console
```
LSP: --> textDocument/rename
Traceback (most recent call last):
File "/opt/sublime_text/sublime_plugin.py", line 1034, in run_
return self.run(**args)
TypeError: run() got an unexpected keyword argument 'documentChanges'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/rename.py`
Content:
```
1 import sublime_plugin
2 from .core.registry import client_for_view, LspTextCommand
3 from .core.protocol import Request
4 from .core.documents import get_document_position, get_position, is_at_word
5 try:
6 from typing import List, Dict, Optional
7 assert List and Dict and Optional
8 except ImportError:
9 pass
10
11
12 class RenameSymbolInputHandler(sublime_plugin.TextInputHandler):
13 def __init__(self, view):
14 self.view = view
15
16 def name(self):
17 return "new_name"
18
19 def placeholder(self):
20 return self.get_current_symbol_name()
21
22 def initial_text(self):
23 return self.get_current_symbol_name()
24
25 def validate(self, name):
26 return len(name) > 0
27
28 def get_current_symbol_name(self):
29 pos = get_position(self.view)
30 current_name = self.view.substr(self.view.word(pos))
31 # Is this check necessary?
32 if not current_name:
33 current_name = ""
34 return current_name
35
36
37 class LspSymbolRenameCommand(LspTextCommand):
38 def __init__(self, view):
39 super().__init__(view)
40
41 def is_enabled(self, event=None):
42 # TODO: check what kind of scope we're in.
43 if self.has_client_with_capability('renameProvider'):
44 return is_at_word(self.view, event)
45 return False
46
47 def input(self, args):
48 if "new_name" not in args:
49 return RenameSymbolInputHandler(self.view)
50 else:
51 return None
52
53 def run(self, edit, new_name, event=None):
54 pos = get_position(self.view, event)
55 params = get_document_position(self.view, pos)
56
57 self.request_rename(params, new_name)
58
59 def request_rename(self, params, new_name) -> None:
60 client = client_for_view(self.view)
61 if client:
62 params["newName"] = new_name
63 client.send_request(Request.rename(params), self.handle_response)
64
65 def handle_response(self, response: 'Optional[Dict]') -> None:
66 if response:
67 self.view.window().run_command('lsp_apply_workspace_edit',
68 {'changes': response.get('changes'),
69 'documentChanges': response.get('documentChanges')})
70 else:
71 self.view.window().status_message('No rename edits returned')
72
73 def want_event(self):
74 return True
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/rename.py b/plugin/rename.py
--- a/plugin/rename.py
+++ b/plugin/rename.py
@@ -66,7 +66,7 @@
if response:
self.view.window().run_command('lsp_apply_workspace_edit',
{'changes': response.get('changes'),
- 'documentChanges': response.get('documentChanges')})
+ 'document_changes': response.get('documentChanges')})
else:
self.view.window().status_message('No rename edits returned')
|
{"golden_diff": "diff --git a/plugin/rename.py b/plugin/rename.py\n--- a/plugin/rename.py\n+++ b/plugin/rename.py\n@@ -66,7 +66,7 @@\n if response:\n self.view.window().run_command('lsp_apply_workspace_edit',\n {'changes': response.get('changes'),\n- 'documentChanges': response.get('documentChanges')})\n+ 'document_changes': response.get('documentChanges')})\n else:\n self.view.window().status_message('No rename edits returned')\n", "issue": "[bug] CamelCase instead of snace_case \n`documentChanges` argument on the left https://github.com/tomv564/LSP/blob/5a472ba6f23d70f6f8f1ebaabb83015c066ce198/plugin/rename.py#L69\r\nshould be `document_changes`, like `LspApplyWorkspaceEditCommand` expects:\r\nhttps://github.com/tomv564/LSP/blob/5a472ba6f23d70f6f8f1ebaabb83015c066ce198/plugin/core/edit.py#L19\r\n\r\nWhen doing a rename, this popped up in the console\r\n```\r\nLSP: --> textDocument/rename\r\nTraceback (most recent call last):\r\n File \"/opt/sublime_text/sublime_plugin.py\", line 1034, in run_\r\n return self.run(**args)\r\nTypeError: run() got an unexpected keyword argument 'documentChanges'\r\n```\n", "before_files": [{"content": "import sublime_plugin\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.protocol import Request\nfrom .core.documents import get_document_position, get_position, is_at_word\ntry:\n from typing import List, Dict, Optional\n assert List and Dict and Optional\nexcept ImportError:\n pass\n\n\nclass RenameSymbolInputHandler(sublime_plugin.TextInputHandler):\n def __init__(self, view):\n self.view = view\n\n def name(self):\n return \"new_name\"\n\n def placeholder(self):\n return self.get_current_symbol_name()\n\n def initial_text(self):\n return self.get_current_symbol_name()\n\n def validate(self, name):\n return len(name) > 0\n\n def get_current_symbol_name(self):\n pos = get_position(self.view)\n current_name = self.view.substr(self.view.word(pos))\n # Is this check necessary?\n if not current_name:\n current_name = \"\"\n return current_name\n\n\nclass LspSymbolRenameCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n # TODO: check what kind of scope we're in.\n if self.has_client_with_capability('renameProvider'):\n return is_at_word(self.view, event)\n return False\n\n def input(self, args):\n if \"new_name\" not in args:\n return RenameSymbolInputHandler(self.view)\n else:\n return None\n\n def run(self, edit, new_name, event=None):\n pos = get_position(self.view, event)\n params = get_document_position(self.view, pos)\n\n self.request_rename(params, new_name)\n\n def request_rename(self, params, new_name) -> None:\n client = client_for_view(self.view)\n if client:\n params[\"newName\"] = new_name\n client.send_request(Request.rename(params), self.handle_response)\n\n def handle_response(self, response: 'Optional[Dict]') -> None:\n if response:\n self.view.window().run_command('lsp_apply_workspace_edit',\n {'changes': response.get('changes'),\n 'documentChanges': response.get('documentChanges')})\n else:\n self.view.window().status_message('No rename edits returned')\n\n def want_event(self):\n return True\n", "path": "plugin/rename.py"}], "after_files": [{"content": "import sublime_plugin\nfrom .core.registry import client_for_view, LspTextCommand\nfrom .core.protocol import Request\nfrom .core.documents import get_document_position, get_position, is_at_word\ntry:\n from typing import List, Dict, Optional\n assert List and Dict and Optional\nexcept ImportError:\n 
pass\n\n\nclass RenameSymbolInputHandler(sublime_plugin.TextInputHandler):\n def __init__(self, view):\n self.view = view\n\n def name(self):\n return \"new_name\"\n\n def placeholder(self):\n return self.get_current_symbol_name()\n\n def initial_text(self):\n return self.get_current_symbol_name()\n\n def validate(self, name):\n return len(name) > 0\n\n def get_current_symbol_name(self):\n pos = get_position(self.view)\n current_name = self.view.substr(self.view.word(pos))\n # Is this check necessary?\n if not current_name:\n current_name = \"\"\n return current_name\n\n\nclass LspSymbolRenameCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_enabled(self, event=None):\n # TODO: check what kind of scope we're in.\n if self.has_client_with_capability('renameProvider'):\n return is_at_word(self.view, event)\n return False\n\n def input(self, args):\n if \"new_name\" not in args:\n return RenameSymbolInputHandler(self.view)\n else:\n return None\n\n def run(self, edit, new_name, event=None):\n pos = get_position(self.view, event)\n params = get_document_position(self.view, pos)\n\n self.request_rename(params, new_name)\n\n def request_rename(self, params, new_name) -> None:\n client = client_for_view(self.view)\n if client:\n params[\"newName\"] = new_name\n client.send_request(Request.rename(params), self.handle_response)\n\n def handle_response(self, response: 'Optional[Dict]') -> None:\n if response:\n self.view.window().run_command('lsp_apply_workspace_edit',\n {'changes': response.get('changes'),\n 'document_changes': response.get('documentChanges')})\n else:\n self.view.window().status_message('No rename edits returned')\n\n def want_event(self):\n return True\n", "path": "plugin/rename.py"}]}
| 1,120 | 109 |
gh_patches_debug_34991
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-411
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Convolve should have sampling rate
For events with duration shorter than 1/50, `Convolve` from a sparse variable will produce all 0s. This can be fixed by inserting `ToDense(sampling_rate=200)` (or whatever), but this should be immediately accessible from `Convolve`.
cc @yarikoptic @AdinaWagner
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bids/analysis/transformations/compute.py`
Content:
```
1 '''
2 Transformations that primarily involve numerical computation on variables.
3 '''
4
5 import numpy as np
6 import pandas as pd
7 from bids.utils import listify
8 from .base import Transformation
9 from bids.analysis import hrf
10 from bids.variables import SparseRunVariable, DenseRunVariable
11
12
13 class Convolve(Transformation):
14 """Convolve the input variable with an HRF.
15
16 Args:
17 var (Variable): The variable to convolve.
18 model (str): The name of the HRF model to apply. Must be one of 'spm',
19 'glover', or 'fir'.
20 derivative (bool): Whether or not to include the temporal derivative.
21 dispersion (bool): Whether or not to include the dispersion derivative.
22 fir_delays (iterable): A list or iterable of delays to use if model is
23 'fir' (ignored otherwise). Spacing between delays must be fixed.
24
25 Note: Uses the HRF convolution functions implemented in nistats.
26 """
27
28 _input_type = 'variable'
29 _return_type = 'variable'
30
31 def _transform(self, var, model='spm', derivative=False, dispersion=False,
32 fir_delays=None):
33
34 model = model.lower()
35
36 if isinstance(var, SparseRunVariable):
37 sr = self.collection.sampling_rate
38 var = var.to_dense(sr)
39
40 df = var.to_df(entities=False)
41 onsets = df['onset'].values
42 vals = df[['onset', 'duration', 'amplitude']].values.T
43
44 if model in ['spm', 'glover']:
45 if derivative:
46 model += ' + derivative'
47 if dispersion:
48 model += ' + dispersion'
49 elif model != 'fir':
50 raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")
51
52 convolved = hrf.compute_regressor(vals, model, onsets,
53 fir_delays=fir_delays, min_onset=0)
54
55 return DenseRunVariable(name=var.name, values=convolved[0], run_info=var.run_info,
56 source=var.source, sampling_rate=var.sampling_rate)
57
58
59 class Demean(Transformation):
60
61 def _transform(self, data):
62 return data - data.mean()
63
64
65 class Orthogonalize(Transformation):
66
67 _variables_used = ('variables', 'other')
68 _densify = ('variables', 'other')
69 _align = ('other')
70
71 def _transform(self, var, other):
72
73 other = listify(other)
74
75 # Set up X matrix and slice into it based on target variable indices
76 X = np.array([self._variables[c].values.values.squeeze()
77 for c in other]).T
78 X = X[var.index, :]
79 assert len(X) == len(var)
80 y = var.values
81 _aX = np.c_[np.ones(len(y)), X]
82 coefs, resids, rank, s = np.linalg.lstsq(_aX, y)
83 result = pd.DataFrame(y - X.dot(coefs[1:]), index=var.index)
84 return result
85
86
87 class Product(Transformation):
88
89 _loopable = False
90 _groupable = False
91 _align = True
92 _output_required = True
93
94 def _transform(self, data):
95 data = pd.concat(data, axis=1, sort=True)
96 return data.product(1)
97
98
99 class Scale(Transformation):
100 ''' Scale a variable.
101
102 Args:
103 data (Series/DF): The variables to scale.
104 demean (bool): If True, demean each column.
105 rescale (bool): If True, divide variables by their standard deviation.
106 replace_na (str): Whether/when to replace missing values with 0. If
107 None, no replacement is performed. If 'before', missing values are
108 replaced with 0's before scaling. If 'after', missing values are
109 replaced with 0 after scaling.
110
111 '''
112
113 def _transform(self, data, demean=True, rescale=True, replace_na=None):
114 if replace_na == 'before':
115 data = data.fillna(0.)
116 if demean:
117 data -= data.mean()
118 if rescale:
119 data /= data.std()
120 if replace_na == 'after':
121 data = data.fillna(0.)
122 return data
123
124
125 class Sum(Transformation):
126
127 _loopable = False
128 _groupable = False
129 _align = True
130 _output_required = True
131
132 def _transform(self, data, weights=None):
133 data = pd.concat(data, axis=1, sort=True)
134 if weights is None:
135 weights = np.ones(data.shape[1])
136 else:
137 weights = np.array(weights)
138 if len(weights.ravel()) != data.shape[1]:
139 raise ValueError("If weights are passed to sum(), the number "
140 "of elements must equal number of variables"
141 "being summed.")
142 return (data * weights).sum(axis=1)
143
144
145
146 class Threshold(Transformation):
147 ''' Threshold and/or binarize a variable.
148
149 Args:
150 data (Series/DF): The pandas structure to threshold.
151 threshold (float): The value to binarize around (values above will
152 be assigned 1, values below will be assigned 0).
153 binarize (bool): If True, binarizes all non-zero values (i.e., every
154 non-zero value will be set to 1).
155 above (bool): Specifies which values to retain with respect to the
156 cut-off. If True, all value above the threshold will be kept; if
157 False, all values below the threshold will be kept. Defaults to
158 True.
159 signed (bool): Specifies whether to treat the threshold as signed
160 (default) or unsigned. For example, when passing above=True and
161 threshold=3, if signed=True, all and only values above +3 would be
162 retained. If signed=False, all absolute values > 3 would be retained
163 (i.e.,values in the range -3 < X < 3 would be set to 0).
164
165 '''
166
167 _groupable = False
168
169 def _transform(self, data, threshold=0., binarize=False, above=True,
170 signed=True):
171 if not signed:
172 threshold = np.abs(threshold)
173 data = data.abs()
174 keep = data >= threshold if above else data <= threshold
175 data[~keep] = 0
176 if binarize:
177 data[keep] = 1
178 return data
179
180
181 class And(Transformation):
182 ''' Logical AND on two or more variables.
183
184 Args:
185 dfs (list of DFs): variables to enter into the conjunction.
186 '''
187
188 _loopable = False
189 _groupable = False
190 _output_required = True
191
192 def _transform(self, dfs):
193 df = pd.concat(dfs, axis=1, sort=True)
194 return df.all(axis=1).astype(int)
195
196
197 class Not(Transformation):
198 ''' Logical negation of a variable.
199
200 Args:
201 var (Series): Variable to negate. Must be convertible to bool.
202 '''
203
204 _loopable = True
205 _groupable = False
206
207 def _transform(self, var):
208 return ~var.astype(bool)
209
210
211 class Or(Transformation):
212 ''' Logical OR (inclusive) on two or more variables.
213
214 Args:
215 dfs (list of DFs): variables to enter into the disjunction.
216 '''
217
218 _loopable = False
219 _groupable = False
220 _output_required = True
221
222 def _transform(self, dfs):
223 df = pd.concat(dfs, axis=1, sort=True)
224 return df.any(axis=1).astype(int)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bids/analysis/transformations/compute.py b/bids/analysis/transformations/compute.py
--- a/bids/analysis/transformations/compute.py
+++ b/bids/analysis/transformations/compute.py
@@ -1,7 +1,8 @@
'''
Transformations that primarily involve numerical computation on variables.
'''
-
+from __future__ import division
+import math
import numpy as np
import pandas as pd
from bids.utils import listify
@@ -33,12 +34,18 @@
model = model.lower()
+ df = var.to_df(entities=False)
+
if isinstance(var, SparseRunVariable):
- sr = self.collection.sampling_rate
- var = var.to_dense(sr)
+ sampling_rate = self.collection.sampling_rate
+ dur = var.get_duration()
+ resample_frames = np.linspace(
+ 0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)
+
+ else:
+ resample_frames = df['onset'].values
+ sampling_rate = var.sampling_rate
- df = var.to_df(entities=False)
- onsets = df['onset'].values
vals = df[['onset', 'duration', 'amplitude']].values.T
if model in ['spm', 'glover']:
@@ -49,11 +56,23 @@
elif model != 'fir':
raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.")
- convolved = hrf.compute_regressor(vals, model, onsets,
- fir_delays=fir_delays, min_onset=0)
-
- return DenseRunVariable(name=var.name, values=convolved[0], run_info=var.run_info,
- source=var.source, sampling_rate=var.sampling_rate)
+ # Minimum interval between event onsets/duration
+ # Used to compute oversampling factor to prevent information loss
+ unique_onsets = np.unique(np.sort(df.onset))
+ if len(unique_onsets) > 1:
+ min_interval = min(np.ediff1d(unique_onsets).min(),
+ df.duration.min())
+ oversampling = np.ceil(2*(1 / (min_interval * sampling_rate)))
+ else:
+ oversampling = 2
+ convolved = hrf.compute_regressor(
+ vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,
+ oversampling=oversampling
+ )
+
+ return DenseRunVariable(
+ name=var.name, values=convolved[0], run_info=var.run_info,
+ source=var.source, sampling_rate=sampling_rate)
class Demean(Transformation):
|
{"golden_diff": "diff --git a/bids/analysis/transformations/compute.py b/bids/analysis/transformations/compute.py\n--- a/bids/analysis/transformations/compute.py\n+++ b/bids/analysis/transformations/compute.py\n@@ -1,7 +1,8 @@\n '''\n Transformations that primarily involve numerical computation on variables.\n '''\n-\n+from __future__ import division\n+import math\n import numpy as np\n import pandas as pd\n from bids.utils import listify\n@@ -33,12 +34,18 @@\n \n model = model.lower()\n \n+ df = var.to_df(entities=False)\n+\n if isinstance(var, SparseRunVariable):\n- sr = self.collection.sampling_rate\n- var = var.to_dense(sr)\n+ sampling_rate = self.collection.sampling_rate\n+ dur = var.get_duration()\n+ resample_frames = np.linspace(\n+ 0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)\n+\n+ else:\n+ resample_frames = df['onset'].values\n+ sampling_rate = var.sampling_rate\n \n- df = var.to_df(entities=False)\n- onsets = df['onset'].values\n vals = df[['onset', 'duration', 'amplitude']].values.T\n \n if model in ['spm', 'glover']:\n@@ -49,11 +56,23 @@\n elif model != 'fir':\n raise ValueError(\"Model must be one of 'spm', 'glover', or 'fir'.\")\n \n- convolved = hrf.compute_regressor(vals, model, onsets,\n- fir_delays=fir_delays, min_onset=0)\n-\n- return DenseRunVariable(name=var.name, values=convolved[0], run_info=var.run_info,\n- source=var.source, sampling_rate=var.sampling_rate)\n+ # Minimum interval between event onsets/duration\n+ # Used to compute oversampling factor to prevent information loss\n+ unique_onsets = np.unique(np.sort(df.onset))\n+ if len(unique_onsets) > 1:\n+ min_interval = min(np.ediff1d(unique_onsets).min(),\n+ df.duration.min())\n+ oversampling = np.ceil(2*(1 / (min_interval * sampling_rate)))\n+ else:\n+ oversampling = 2\n+ convolved = hrf.compute_regressor(\n+ vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,\n+ oversampling=oversampling\n+ )\n+\n+ return DenseRunVariable(\n+ name=var.name, values=convolved[0], run_info=var.run_info,\n+ source=var.source, sampling_rate=sampling_rate)\n \n \n class Demean(Transformation):\n", "issue": "Convolve should have sampling rate\nFor events with duration shorter than 1/50, `Convolve` from a sparse variable will produce all 0s. This can be fixed by inserting `ToDense(sampling_rate=200)` (or whatever), but this should be immediately accessible from `Convolve`.\r\n\r\ncc @yarikoptic @AdinaWagner \n", "before_files": [{"content": "'''\nTransformations that primarily involve numerical computation on variables.\n'''\n\nimport numpy as np\nimport pandas as pd\nfrom bids.utils import listify\nfrom .base import Transformation\nfrom bids.analysis import hrf\nfrom bids.variables import SparseRunVariable, DenseRunVariable\n\n\nclass Convolve(Transformation):\n \"\"\"Convolve the input variable with an HRF.\n\n Args:\n var (Variable): The variable to convolve.\n model (str): The name of the HRF model to apply. Must be one of 'spm',\n 'glover', or 'fir'.\n derivative (bool): Whether or not to include the temporal derivative.\n dispersion (bool): Whether or not to include the dispersion derivative.\n fir_delays (iterable): A list or iterable of delays to use if model is\n 'fir' (ignored otherwise). 
Spacing between delays must be fixed.\n\n Note: Uses the HRF convolution functions implemented in nistats.\n \"\"\"\n\n _input_type = 'variable'\n _return_type = 'variable'\n\n def _transform(self, var, model='spm', derivative=False, dispersion=False,\n fir_delays=None):\n\n model = model.lower()\n\n if isinstance(var, SparseRunVariable):\n sr = self.collection.sampling_rate\n var = var.to_dense(sr)\n\n df = var.to_df(entities=False)\n onsets = df['onset'].values\n vals = df[['onset', 'duration', 'amplitude']].values.T\n\n if model in ['spm', 'glover']:\n if derivative:\n model += ' + derivative'\n if dispersion:\n model += ' + dispersion'\n elif model != 'fir':\n raise ValueError(\"Model must be one of 'spm', 'glover', or 'fir'.\")\n\n convolved = hrf.compute_regressor(vals, model, onsets,\n fir_delays=fir_delays, min_onset=0)\n\n return DenseRunVariable(name=var.name, values=convolved[0], run_info=var.run_info,\n source=var.source, sampling_rate=var.sampling_rate)\n\n\nclass Demean(Transformation):\n\n def _transform(self, data):\n return data - data.mean()\n\n\nclass Orthogonalize(Transformation):\n\n _variables_used = ('variables', 'other')\n _densify = ('variables', 'other')\n _align = ('other')\n\n def _transform(self, var, other):\n\n other = listify(other)\n\n # Set up X matrix and slice into it based on target variable indices\n X = np.array([self._variables[c].values.values.squeeze()\n for c in other]).T\n X = X[var.index, :]\n assert len(X) == len(var)\n y = var.values\n _aX = np.c_[np.ones(len(y)), X]\n coefs, resids, rank, s = np.linalg.lstsq(_aX, y)\n result = pd.DataFrame(y - X.dot(coefs[1:]), index=var.index)\n return result\n\n\nclass Product(Transformation):\n\n _loopable = False\n _groupable = False\n _align = True\n _output_required = True\n\n def _transform(self, data):\n data = pd.concat(data, axis=1, sort=True)\n return data.product(1)\n\n\nclass Scale(Transformation):\n ''' Scale a variable.\n\n Args:\n data (Series/DF): The variables to scale.\n demean (bool): If True, demean each column.\n rescale (bool): If True, divide variables by their standard deviation.\n replace_na (str): Whether/when to replace missing values with 0. If\n None, no replacement is performed. If 'before', missing values are\n replaced with 0's before scaling. 
If 'after', missing values are\n replaced with 0 after scaling.\n\n '''\n\n def _transform(self, data, demean=True, rescale=True, replace_na=None):\n if replace_na == 'before':\n data = data.fillna(0.)\n if demean:\n data -= data.mean()\n if rescale:\n data /= data.std()\n if replace_na == 'after':\n data = data.fillna(0.)\n return data\n\n\nclass Sum(Transformation):\n\n _loopable = False\n _groupable = False\n _align = True\n _output_required = True\n\n def _transform(self, data, weights=None):\n data = pd.concat(data, axis=1, sort=True)\n if weights is None:\n weights = np.ones(data.shape[1])\n else:\n weights = np.array(weights)\n if len(weights.ravel()) != data.shape[1]:\n raise ValueError(\"If weights are passed to sum(), the number \"\n \"of elements must equal number of variables\"\n \"being summed.\")\n return (data * weights).sum(axis=1)\n\n\n\nclass Threshold(Transformation):\n ''' Threshold and/or binarize a variable.\n\n Args:\n data (Series/DF): The pandas structure to threshold.\n threshold (float): The value to binarize around (values above will\n be assigned 1, values below will be assigned 0).\n binarize (bool): If True, binarizes all non-zero values (i.e., every\n non-zero value will be set to 1).\n above (bool): Specifies which values to retain with respect to the\n cut-off. If True, all value above the threshold will be kept; if\n False, all values below the threshold will be kept. Defaults to\n True.\n signed (bool): Specifies whether to treat the threshold as signed\n (default) or unsigned. For example, when passing above=True and\n threshold=3, if signed=True, all and only values above +3 would be\n retained. If signed=False, all absolute values > 3 would be retained\n (i.e.,values in the range -3 < X < 3 would be set to 0).\n\n '''\n\n _groupable = False\n\n def _transform(self, data, threshold=0., binarize=False, above=True,\n signed=True):\n if not signed:\n threshold = np.abs(threshold)\n data = data.abs()\n keep = data >= threshold if above else data <= threshold\n data[~keep] = 0\n if binarize:\n data[keep] = 1\n return data\n\n\nclass And(Transformation):\n ''' Logical AND on two or more variables.\n\n Args:\n dfs (list of DFs): variables to enter into the conjunction.\n '''\n\n _loopable = False\n _groupable = False\n _output_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.all(axis=1).astype(int)\n\n\nclass Not(Transformation):\n ''' Logical negation of a variable.\n\n Args:\n var (Series): Variable to negate. 
Must be convertible to bool.\n '''\n\n _loopable = True\n _groupable = False\n\n def _transform(self, var):\n return ~var.astype(bool)\n\n\nclass Or(Transformation):\n ''' Logical OR (inclusive) on two or more variables.\n\n Args:\n dfs (list of DFs): variables to enter into the disjunction.\n '''\n\n _loopable = False\n _groupable = False\n _output_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.any(axis=1).astype(int)\n", "path": "bids/analysis/transformations/compute.py"}], "after_files": [{"content": "'''\nTransformations that primarily involve numerical computation on variables.\n'''\nfrom __future__ import division\nimport math\nimport numpy as np\nimport pandas as pd\nfrom bids.utils import listify\nfrom .base import Transformation\nfrom bids.analysis import hrf\nfrom bids.variables import SparseRunVariable, DenseRunVariable\n\n\nclass Convolve(Transformation):\n \"\"\"Convolve the input variable with an HRF.\n\n Args:\n var (Variable): The variable to convolve.\n model (str): The name of the HRF model to apply. Must be one of 'spm',\n 'glover', or 'fir'.\n derivative (bool): Whether or not to include the temporal derivative.\n dispersion (bool): Whether or not to include the dispersion derivative.\n fir_delays (iterable): A list or iterable of delays to use if model is\n 'fir' (ignored otherwise). Spacing between delays must be fixed.\n\n Note: Uses the HRF convolution functions implemented in nistats.\n \"\"\"\n\n _input_type = 'variable'\n _return_type = 'variable'\n\n def _transform(self, var, model='spm', derivative=False, dispersion=False,\n fir_delays=None):\n\n model = model.lower()\n\n df = var.to_df(entities=False)\n\n if isinstance(var, SparseRunVariable):\n sampling_rate = self.collection.sampling_rate\n dur = var.get_duration()\n resample_frames = np.linspace(\n 0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)\n\n else:\n resample_frames = df['onset'].values\n sampling_rate = var.sampling_rate\n\n vals = df[['onset', 'duration', 'amplitude']].values.T\n\n if model in ['spm', 'glover']:\n if derivative:\n model += ' + derivative'\n if dispersion:\n model += ' + dispersion'\n elif model != 'fir':\n raise ValueError(\"Model must be one of 'spm', 'glover', or 'fir'.\")\n\n # Minimum interval between event onsets/duration\n # Used to compute oversampling factor to prevent information loss\n unique_onsets = np.unique(np.sort(df.onset))\n if len(unique_onsets) > 1:\n min_interval = min(np.ediff1d(unique_onsets).min(),\n df.duration.min())\n oversampling = np.ceil(2*(1 / (min_interval * sampling_rate)))\n else:\n oversampling = 2\n convolved = hrf.compute_regressor(\n vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,\n oversampling=oversampling\n )\n\n return DenseRunVariable(\n name=var.name, values=convolved[0], run_info=var.run_info,\n source=var.source, sampling_rate=sampling_rate)\n\n\nclass Demean(Transformation):\n\n def _transform(self, data):\n return data - data.mean()\n\n\nclass Orthogonalize(Transformation):\n\n _variables_used = ('variables', 'other')\n _densify = ('variables', 'other')\n _align = ('other')\n\n def _transform(self, var, other):\n\n other = listify(other)\n\n # Set up X matrix and slice into it based on target variable indices\n X = np.array([self._variables[c].values.values.squeeze()\n for c in other]).T\n X = X[var.index, :]\n assert len(X) == len(var)\n y = var.values\n _aX = np.c_[np.ones(len(y)), X]\n coefs, resids, rank, s = np.linalg.lstsq(_aX, y)\n result = 
pd.DataFrame(y - X.dot(coefs[1:]), index=var.index)\n return result\n\n\nclass Product(Transformation):\n\n _loopable = False\n _groupable = False\n _align = True\n _output_required = True\n\n def _transform(self, data):\n data = pd.concat(data, axis=1, sort=True)\n return data.product(1)\n\n\nclass Scale(Transformation):\n ''' Scale a variable.\n\n Args:\n data (Series/DF): The variables to scale.\n demean (bool): If True, demean each column.\n rescale (bool): If True, divide variables by their standard deviation.\n replace_na (str): Whether/when to replace missing values with 0. If\n None, no replacement is performed. If 'before', missing values are\n replaced with 0's before scaling. If 'after', missing values are\n replaced with 0 after scaling.\n\n '''\n\n def _transform(self, data, demean=True, rescale=True, replace_na=None):\n if replace_na == 'before':\n data = data.fillna(0.)\n if demean:\n data -= data.mean()\n if rescale:\n data /= data.std()\n if replace_na == 'after':\n data = data.fillna(0.)\n return data\n\n\nclass Sum(Transformation):\n\n _loopable = False\n _groupable = False\n _align = True\n _output_required = True\n\n def _transform(self, data, weights=None):\n data = pd.concat(data, axis=1, sort=True)\n if weights is None:\n weights = np.ones(data.shape[1])\n else:\n weights = np.array(weights)\n if len(weights.ravel()) != data.shape[1]:\n raise ValueError(\"If weights are passed to sum(), the number \"\n \"of elements must equal number of variables\"\n \"being summed.\")\n return (data * weights).sum(axis=1)\n\n\n\nclass Threshold(Transformation):\n ''' Threshold and/or binarize a variable.\n\n Args:\n data (Series/DF): The pandas structure to threshold.\n threshold (float): The value to binarize around (values above will\n be assigned 1, values below will be assigned 0).\n binarize (bool): If True, binarizes all non-zero values (i.e., every\n non-zero value will be set to 1).\n above (bool): Specifies which values to retain with respect to the\n cut-off. If True, all value above the threshold will be kept; if\n False, all values below the threshold will be kept. Defaults to\n True.\n signed (bool): Specifies whether to treat the threshold as signed\n (default) or unsigned. For example, when passing above=True and\n threshold=3, if signed=True, all and only values above +3 would be\n retained. If signed=False, all absolute values > 3 would be retained\n (i.e.,values in the range -3 < X < 3 would be set to 0).\n\n '''\n\n _groupable = False\n\n def _transform(self, data, threshold=0., binarize=False, above=True,\n signed=True):\n if not signed:\n threshold = np.abs(threshold)\n data = data.abs()\n keep = data >= threshold if above else data <= threshold\n data[~keep] = 0\n if binarize:\n data[keep] = 1\n return data\n\n\nclass And(Transformation):\n ''' Logical AND on two or more variables.\n\n Args:\n dfs (list of DFs): variables to enter into the conjunction.\n '''\n\n _loopable = False\n _groupable = False\n _output_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.all(axis=1).astype(int)\n\n\nclass Not(Transformation):\n ''' Logical negation of a variable.\n\n Args:\n var (Series): Variable to negate. 
Must be convertible to bool.\n '''\n\n _loopable = True\n _groupable = False\n\n def _transform(self, var):\n return ~var.astype(bool)\n\n\nclass Or(Transformation):\n ''' Logical OR (inclusive) on two or more variables.\n\n Args:\n dfs (list of DFs): variables to enter into the disjunction.\n '''\n\n _loopable = False\n _groupable = False\n _output_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.any(axis=1).astype(int)\n", "path": "bids/analysis/transformations/compute.py"}]}
| 2,572 | 609 |
gh_patches_debug_20606
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-279
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remote follow silently failing
When I try to follow a remote user, the remote user is not notified and the relationship is not confirmed. No errors show up locally that I've found.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/signatures.py`
Content:
```
1 ''' signs activitypub activities '''
2 import hashlib
3 from urllib.parse import urlparse
4 import datetime
5 from base64 import b64encode, b64decode
6
7 from Crypto import Random
8 from Crypto.PublicKey import RSA
9 from Crypto.Signature import pkcs1_15 #pylint: disable=no-name-in-module
10 from Crypto.Hash import SHA256
11
12 MAX_SIGNATURE_AGE = 300
13
14 def create_key_pair():
15 ''' a new public/private key pair, used for creating new users '''
16 random_generator = Random.new().read
17 key = RSA.generate(1024, random_generator)
18 private_key = key.export_key().decode('utf8')
19 public_key = key.publickey().export_key().decode('utf8')
20
21 return private_key, public_key
22
23
24 def make_signature(sender, destination, date, digest):
25 ''' uses a private key to sign an outgoing message '''
26 inbox_parts = urlparse(destination)
27 signature_headers = [
28 '(request-target): post %s' % inbox_parts.path,
29 'host: %s' % inbox_parts.netloc,
30 'date: %s' % date,
31 'digest: %s' % digest,
32 ]
33 message_to_sign = '\n'.join(signature_headers)
34 signer = pkcs1_15.new(RSA.import_key(sender.private_key))
35 signed_message = signer.sign(SHA256.new(message_to_sign.encode('utf8')))
36 signature = {
37 'keyId': '%s#main-key' % sender.remote_id,
38 'algorithm': 'rsa-sha256',
39 'headers': '(request-target) host date digest',
40 'signature': b64encode(signed_message).decode('utf8'),
41 }
42 return ','.join('%s="%s"' % (k, v) for (k, v) in signature.items())
43
44
45 def make_digest(data):
46 ''' creates a message digest for signing '''
47 return 'SHA-256=' + b64encode(hashlib.sha256(data).digest()).decode('utf-8')
48
49
50 def verify_digest(request):
51 ''' checks if a digest is syntactically valid and matches the message '''
52 algorithm, digest = request.headers['digest'].split('=', 1)
53 if algorithm == 'SHA-256':
54 hash_function = hashlib.sha256
55 elif algorithm == 'SHA-512':
56 hash_function = hashlib.sha512
57 else:
58 raise ValueError("Unsupported hash function: {}".format(algorithm))
59
60 expected = hash_function(request.body).digest()
61 if b64decode(digest) != expected:
62 raise ValueError("Invalid HTTP Digest header")
63
64 class Signature:
65 ''' read and validate incoming signatures '''
66 def __init__(self, key_id, headers, signature):
67 self.key_id = key_id
68 self.headers = headers
69 self.signature = signature
70
71 @classmethod
72 def parse(cls, request):
73 ''' extract and parse a signature from an http request '''
74 signature_dict = {}
75 for pair in request.headers['Signature'].split(','):
76 k, v = pair.split('=', 1)
77 v = v.replace('"', '')
78 signature_dict[k] = v
79
80 try:
81 key_id = signature_dict['keyId']
82 headers = signature_dict['headers']
83 signature = b64decode(signature_dict['signature'])
84 except KeyError:
85 raise ValueError('Invalid auth header')
86
87 return cls(key_id, headers, signature)
88
89 def verify(self, public_key, request):
90 ''' verify rsa signature '''
91 if http_date_age(request.headers['date']) > MAX_SIGNATURE_AGE:
92 raise ValueError(
93 "Request too old: %s" % (request.headers['date'],))
94 public_key = RSA.import_key(public_key)
95
96 comparison_string = []
97 for signed_header_name in self.headers.split(' '):
98 if signed_header_name == '(request-target)':
99 comparison_string.append(
100 '(request-target): post %s' % request.path)
101 else:
102 if signed_header_name == 'digest':
103 verify_digest(request)
104 comparison_string.append('%s: %s' % (
105 signed_header_name,
106 request.headers[signed_header_name]
107 ))
108 comparison_string = '\n'.join(comparison_string)
109
110 signer = pkcs1_15.new(public_key)
111 digest = SHA256.new()
112 digest.update(comparison_string.encode())
113
114 # raises a ValueError if it fails
115 signer.verify(digest, self.signature)
116
117
118 def http_date_age(datestr):
119 ''' age of a signature in seconds '''
120 parsed = datetime.datetime.strptime(datestr, '%a, %d %b %Y %H:%M:%S GMT')
121 delta = datetime.datetime.utcnow() - parsed
122 return delta.total_seconds()
123
```
Path: `bookwyrm/broadcast.py`
Content:
```
1 ''' send out activitypub messages '''
2 import json
3 from django.utils.http import http_date
4 import requests
5
6 from bookwyrm import models
7 from bookwyrm.activitypub import ActivityEncoder
8 from bookwyrm.tasks import app
9 from bookwyrm.signatures import make_signature, make_digest
10
11
12 def get_public_recipients(user, software=None):
13 ''' everybody and their public inboxes '''
14 followers = user.followers.filter(local=False)
15 if software:
16 followers = followers.filter(bookwyrm_user=(software == 'bookwyrm'))
17
18 # we want shared inboxes when available
19 shared = followers.filter(
20 shared_inbox__isnull=False
21 ).values_list('shared_inbox', flat=True).distinct()
22
23 # if a user doesn't have a shared inbox, we need their personal inbox
24 # iirc pixelfed doesn't have shared inboxes
25 inboxes = followers.filter(
26 shared_inbox__isnull=True
27 ).values_list('inbox', flat=True)
28
29 return list(shared) + list(inboxes)
30
31
32 def broadcast(sender, activity, software=None, \
33 privacy='public', direct_recipients=None):
34 ''' send out an event '''
35 # start with parsing the direct recipients
36 recipients = [u.inbox for u in direct_recipients or []]
37 # and then add any other recipients
38 if privacy == 'public':
39 recipients += get_public_recipients(sender, software=software)
40 broadcast_task.delay(
41 sender.id,
42 json.dumps(activity, cls=ActivityEncoder),
43 recipients
44 )
45
46
47 @app.task
48 def broadcast_task(sender_id, activity, recipients):
49 ''' the celery task for broadcast '''
50 sender = models.User.objects.get(id=sender_id)
51 errors = []
52 for recipient in recipients:
53 try:
54 sign_and_send(sender, activity, recipient)
55 except requests.exceptions.HTTPError as e:
56 errors.append({
57 'error': str(e),
58 'recipient': recipient,
59 'activity': activity,
60 })
61 return errors
62
63
64 def sign_and_send(sender, activity, destination):
65 ''' crpyto whatever and http junk '''
66 now = http_date()
67
68 if not sender.private_key:
69 # this shouldn't happen. it would be bad if it happened.
70 raise ValueError('No private key found for sender')
71
72 data = json.dumps(activity).encode('utf-8')
73 digest = make_digest(data)
74
75 response = requests.post(
76 destination,
77 data=data,
78 headers={
79 'Date': now,
80 'Digest': digest,
81 'Signature': make_signature(sender, destination, now, digest),
82 'Content-Type': 'application/activity+json; charset=utf-8',
83 },
84 )
85 if not response.ok:
86 response.raise_for_status()
87 return response
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/broadcast.py b/bookwyrm/broadcast.py
--- a/bookwyrm/broadcast.py
+++ b/bookwyrm/broadcast.py
@@ -61,7 +61,7 @@
return errors
-def sign_and_send(sender, activity, destination):
+def sign_and_send(sender, data, destination):
''' crpyto whatever and http junk '''
now = http_date()
@@ -69,7 +69,6 @@
# this shouldn't happen. it would be bad if it happened.
raise ValueError('No private key found for sender')
- data = json.dumps(activity).encode('utf-8')
digest = make_digest(data)
response = requests.post(
diff --git a/bookwyrm/signatures.py b/bookwyrm/signatures.py
--- a/bookwyrm/signatures.py
+++ b/bookwyrm/signatures.py
@@ -44,7 +44,8 @@
def make_digest(data):
''' creates a message digest for signing '''
- return 'SHA-256=' + b64encode(hashlib.sha256(data).digest()).decode('utf-8')
+ return 'SHA-256=' + b64encode(hashlib.sha256(data.encode('utf-8'))\
+ .digest()).decode('utf-8')
def verify_digest(request):
|
{"golden_diff": "diff --git a/bookwyrm/broadcast.py b/bookwyrm/broadcast.py\n--- a/bookwyrm/broadcast.py\n+++ b/bookwyrm/broadcast.py\n@@ -61,7 +61,7 @@\n return errors\n \n \n-def sign_and_send(sender, activity, destination):\n+def sign_and_send(sender, data, destination):\n ''' crpyto whatever and http junk '''\n now = http_date()\n \n@@ -69,7 +69,6 @@\n # this shouldn't happen. it would be bad if it happened.\n raise ValueError('No private key found for sender')\n \n- data = json.dumps(activity).encode('utf-8')\n digest = make_digest(data)\n \n response = requests.post(\ndiff --git a/bookwyrm/signatures.py b/bookwyrm/signatures.py\n--- a/bookwyrm/signatures.py\n+++ b/bookwyrm/signatures.py\n@@ -44,7 +44,8 @@\n \n def make_digest(data):\n ''' creates a message digest for signing '''\n- return 'SHA-256=' + b64encode(hashlib.sha256(data).digest()).decode('utf-8')\n+ return 'SHA-256=' + b64encode(hashlib.sha256(data.encode('utf-8'))\\\n+ .digest()).decode('utf-8')\n \n \n def verify_digest(request):\n", "issue": "Remote follow silently failing\nWhen I try to follow a remote user, the remote user is not notified and the relationship is not confirmed. No errors show up locally that I've found.\n", "before_files": [{"content": "''' signs activitypub activities '''\nimport hashlib\nfrom urllib.parse import urlparse\nimport datetime\nfrom base64 import b64encode, b64decode\n\nfrom Crypto import Random\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import pkcs1_15 #pylint: disable=no-name-in-module\nfrom Crypto.Hash import SHA256\n\nMAX_SIGNATURE_AGE = 300\n\ndef create_key_pair():\n ''' a new public/private key pair, used for creating new users '''\n random_generator = Random.new().read\n key = RSA.generate(1024, random_generator)\n private_key = key.export_key().decode('utf8')\n public_key = key.publickey().export_key().decode('utf8')\n\n return private_key, public_key\n\n\ndef make_signature(sender, destination, date, digest):\n ''' uses a private key to sign an outgoing message '''\n inbox_parts = urlparse(destination)\n signature_headers = [\n '(request-target): post %s' % inbox_parts.path,\n 'host: %s' % inbox_parts.netloc,\n 'date: %s' % date,\n 'digest: %s' % digest,\n ]\n message_to_sign = '\\n'.join(signature_headers)\n signer = pkcs1_15.new(RSA.import_key(sender.private_key))\n signed_message = signer.sign(SHA256.new(message_to_sign.encode('utf8')))\n signature = {\n 'keyId': '%s#main-key' % sender.remote_id,\n 'algorithm': 'rsa-sha256',\n 'headers': '(request-target) host date digest',\n 'signature': b64encode(signed_message).decode('utf8'),\n }\n return ','.join('%s=\"%s\"' % (k, v) for (k, v) in signature.items())\n\n\ndef make_digest(data):\n ''' creates a message digest for signing '''\n return 'SHA-256=' + b64encode(hashlib.sha256(data).digest()).decode('utf-8')\n\n\ndef verify_digest(request):\n ''' checks if a digest is syntactically valid and matches the message '''\n algorithm, digest = request.headers['digest'].split('=', 1)\n if algorithm == 'SHA-256':\n hash_function = hashlib.sha256\n elif algorithm == 'SHA-512':\n hash_function = hashlib.sha512\n else:\n raise ValueError(\"Unsupported hash function: {}\".format(algorithm))\n\n expected = hash_function(request.body).digest()\n if b64decode(digest) != expected:\n raise ValueError(\"Invalid HTTP Digest header\")\n\nclass Signature:\n ''' read and validate incoming signatures '''\n def __init__(self, key_id, headers, signature):\n self.key_id = key_id\n self.headers = headers\n self.signature = signature\n\n 
@classmethod\n def parse(cls, request):\n ''' extract and parse a signature from an http request '''\n signature_dict = {}\n for pair in request.headers['Signature'].split(','):\n k, v = pair.split('=', 1)\n v = v.replace('\"', '')\n signature_dict[k] = v\n\n try:\n key_id = signature_dict['keyId']\n headers = signature_dict['headers']\n signature = b64decode(signature_dict['signature'])\n except KeyError:\n raise ValueError('Invalid auth header')\n\n return cls(key_id, headers, signature)\n\n def verify(self, public_key, request):\n ''' verify rsa signature '''\n if http_date_age(request.headers['date']) > MAX_SIGNATURE_AGE:\n raise ValueError(\n \"Request too old: %s\" % (request.headers['date'],))\n public_key = RSA.import_key(public_key)\n\n comparison_string = []\n for signed_header_name in self.headers.split(' '):\n if signed_header_name == '(request-target)':\n comparison_string.append(\n '(request-target): post %s' % request.path)\n else:\n if signed_header_name == 'digest':\n verify_digest(request)\n comparison_string.append('%s: %s' % (\n signed_header_name,\n request.headers[signed_header_name]\n ))\n comparison_string = '\\n'.join(comparison_string)\n\n signer = pkcs1_15.new(public_key)\n digest = SHA256.new()\n digest.update(comparison_string.encode())\n\n # raises a ValueError if it fails\n signer.verify(digest, self.signature)\n\n\ndef http_date_age(datestr):\n ''' age of a signature in seconds '''\n parsed = datetime.datetime.strptime(datestr, '%a, %d %b %Y %H:%M:%S GMT')\n delta = datetime.datetime.utcnow() - parsed\n return delta.total_seconds()\n", "path": "bookwyrm/signatures.py"}, {"content": "''' send out activitypub messages '''\nimport json\nfrom django.utils.http import http_date\nimport requests\n\nfrom bookwyrm import models\nfrom bookwyrm.activitypub import ActivityEncoder\nfrom bookwyrm.tasks import app\nfrom bookwyrm.signatures import make_signature, make_digest\n\n\ndef get_public_recipients(user, software=None):\n ''' everybody and their public inboxes '''\n followers = user.followers.filter(local=False)\n if software:\n followers = followers.filter(bookwyrm_user=(software == 'bookwyrm'))\n\n # we want shared inboxes when available\n shared = followers.filter(\n shared_inbox__isnull=False\n ).values_list('shared_inbox', flat=True).distinct()\n\n # if a user doesn't have a shared inbox, we need their personal inbox\n # iirc pixelfed doesn't have shared inboxes\n inboxes = followers.filter(\n shared_inbox__isnull=True\n ).values_list('inbox', flat=True)\n\n return list(shared) + list(inboxes)\n\n\ndef broadcast(sender, activity, software=None, \\\n privacy='public', direct_recipients=None):\n ''' send out an event '''\n # start with parsing the direct recipients\n recipients = [u.inbox for u in direct_recipients or []]\n # and then add any other recipients\n if privacy == 'public':\n recipients += get_public_recipients(sender, software=software)\n broadcast_task.delay(\n sender.id,\n json.dumps(activity, cls=ActivityEncoder),\n recipients\n )\n\n\[email protected]\ndef broadcast_task(sender_id, activity, recipients):\n ''' the celery task for broadcast '''\n sender = models.User.objects.get(id=sender_id)\n errors = []\n for recipient in recipients:\n try:\n sign_and_send(sender, activity, recipient)\n except requests.exceptions.HTTPError as e:\n errors.append({\n 'error': str(e),\n 'recipient': recipient,\n 'activity': activity,\n })\n return errors\n\n\ndef sign_and_send(sender, activity, destination):\n ''' crpyto whatever and http junk '''\n now = 
http_date()\n\n if not sender.private_key:\n # this shouldn't happen. it would be bad if it happened.\n raise ValueError('No private key found for sender')\n\n data = json.dumps(activity).encode('utf-8')\n digest = make_digest(data)\n\n response = requests.post(\n destination,\n data=data,\n headers={\n 'Date': now,\n 'Digest': digest,\n 'Signature': make_signature(sender, destination, now, digest),\n 'Content-Type': 'application/activity+json; charset=utf-8',\n },\n )\n if not response.ok:\n response.raise_for_status()\n return response\n", "path": "bookwyrm/broadcast.py"}], "after_files": [{"content": "''' signs activitypub activities '''\nimport hashlib\nfrom urllib.parse import urlparse\nimport datetime\nfrom base64 import b64encode, b64decode\n\nfrom Crypto import Random\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import pkcs1_15 #pylint: disable=no-name-in-module\nfrom Crypto.Hash import SHA256\n\nMAX_SIGNATURE_AGE = 300\n\ndef create_key_pair():\n ''' a new public/private key pair, used for creating new users '''\n random_generator = Random.new().read\n key = RSA.generate(1024, random_generator)\n private_key = key.export_key().decode('utf8')\n public_key = key.publickey().export_key().decode('utf8')\n\n return private_key, public_key\n\n\ndef make_signature(sender, destination, date, digest):\n ''' uses a private key to sign an outgoing message '''\n inbox_parts = urlparse(destination)\n signature_headers = [\n '(request-target): post %s' % inbox_parts.path,\n 'host: %s' % inbox_parts.netloc,\n 'date: %s' % date,\n 'digest: %s' % digest,\n ]\n message_to_sign = '\\n'.join(signature_headers)\n signer = pkcs1_15.new(RSA.import_key(sender.private_key))\n signed_message = signer.sign(SHA256.new(message_to_sign.encode('utf8')))\n signature = {\n 'keyId': '%s#main-key' % sender.remote_id,\n 'algorithm': 'rsa-sha256',\n 'headers': '(request-target) host date digest',\n 'signature': b64encode(signed_message).decode('utf8'),\n }\n return ','.join('%s=\"%s\"' % (k, v) for (k, v) in signature.items())\n\n\ndef make_digest(data):\n ''' creates a message digest for signing '''\n return 'SHA-256=' + b64encode(hashlib.sha256(data.encode('utf-8'))\\\n .digest()).decode('utf-8')\n\n\ndef verify_digest(request):\n ''' checks if a digest is syntactically valid and matches the message '''\n algorithm, digest = request.headers['digest'].split('=', 1)\n if algorithm == 'SHA-256':\n hash_function = hashlib.sha256\n elif algorithm == 'SHA-512':\n hash_function = hashlib.sha512\n else:\n raise ValueError(\"Unsupported hash function: {}\".format(algorithm))\n\n expected = hash_function(request.body).digest()\n if b64decode(digest) != expected:\n raise ValueError(\"Invalid HTTP Digest header\")\n\nclass Signature:\n ''' read and validate incoming signatures '''\n def __init__(self, key_id, headers, signature):\n self.key_id = key_id\n self.headers = headers\n self.signature = signature\n\n @classmethod\n def parse(cls, request):\n ''' extract and parse a signature from an http request '''\n signature_dict = {}\n for pair in request.headers['Signature'].split(','):\n k, v = pair.split('=', 1)\n v = v.replace('\"', '')\n signature_dict[k] = v\n\n try:\n key_id = signature_dict['keyId']\n headers = signature_dict['headers']\n signature = b64decode(signature_dict['signature'])\n except KeyError:\n raise ValueError('Invalid auth header')\n\n return cls(key_id, headers, signature)\n\n def verify(self, public_key, request):\n ''' verify rsa signature '''\n if http_date_age(request.headers['date']) > 
MAX_SIGNATURE_AGE:\n raise ValueError(\n \"Request too old: %s\" % (request.headers['date'],))\n public_key = RSA.import_key(public_key)\n\n comparison_string = []\n for signed_header_name in self.headers.split(' '):\n if signed_header_name == '(request-target)':\n comparison_string.append(\n '(request-target): post %s' % request.path)\n else:\n if signed_header_name == 'digest':\n verify_digest(request)\n comparison_string.append('%s: %s' % (\n signed_header_name,\n request.headers[signed_header_name]\n ))\n comparison_string = '\\n'.join(comparison_string)\n\n signer = pkcs1_15.new(public_key)\n digest = SHA256.new()\n digest.update(comparison_string.encode())\n\n # raises a ValueError if it fails\n signer.verify(digest, self.signature)\n\n\ndef http_date_age(datestr):\n ''' age of a signature in seconds '''\n parsed = datetime.datetime.strptime(datestr, '%a, %d %b %Y %H:%M:%S GMT')\n delta = datetime.datetime.utcnow() - parsed\n return delta.total_seconds()\n", "path": "bookwyrm/signatures.py"}, {"content": "''' send out activitypub messages '''\nimport json\nfrom django.utils.http import http_date\nimport requests\n\nfrom bookwyrm import models\nfrom bookwyrm.activitypub import ActivityEncoder\nfrom bookwyrm.tasks import app\nfrom bookwyrm.signatures import make_signature, make_digest\n\n\ndef get_public_recipients(user, software=None):\n ''' everybody and their public inboxes '''\n followers = user.followers.filter(local=False)\n if software:\n followers = followers.filter(bookwyrm_user=(software == 'bookwyrm'))\n\n # we want shared inboxes when available\n shared = followers.filter(\n shared_inbox__isnull=False\n ).values_list('shared_inbox', flat=True).distinct()\n\n # if a user doesn't have a shared inbox, we need their personal inbox\n # iirc pixelfed doesn't have shared inboxes\n inboxes = followers.filter(\n shared_inbox__isnull=True\n ).values_list('inbox', flat=True)\n\n return list(shared) + list(inboxes)\n\n\ndef broadcast(sender, activity, software=None, \\\n privacy='public', direct_recipients=None):\n ''' send out an event '''\n # start with parsing the direct recipients\n recipients = [u.inbox for u in direct_recipients or []]\n # and then add any other recipients\n if privacy == 'public':\n recipients += get_public_recipients(sender, software=software)\n broadcast_task.delay(\n sender.id,\n json.dumps(activity, cls=ActivityEncoder),\n recipients\n )\n\n\[email protected]\ndef broadcast_task(sender_id, activity, recipients):\n ''' the celery task for broadcast '''\n sender = models.User.objects.get(id=sender_id)\n errors = []\n for recipient in recipients:\n try:\n sign_and_send(sender, activity, recipient)\n except requests.exceptions.HTTPError as e:\n errors.append({\n 'error': str(e),\n 'recipient': recipient,\n 'activity': activity,\n })\n return errors\n\n\ndef sign_and_send(sender, data, destination):\n ''' crpyto whatever and http junk '''\n now = http_date()\n\n if not sender.private_key:\n # this shouldn't happen. it would be bad if it happened.\n raise ValueError('No private key found for sender')\n\n digest = make_digest(data)\n\n response = requests.post(\n destination,\n data=data,\n headers={\n 'Date': now,\n 'Digest': digest,\n 'Signature': make_signature(sender, destination, now, digest),\n 'Content-Type': 'application/activity+json; charset=utf-8',\n },\n )\n if not response.ok:\n response.raise_for_status()\n return response\n", "path": "bookwyrm/broadcast.py"}]}
| 2,347 | 298 |
gh_patches_debug_9490
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2255
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide a "slow" transport for Postfix
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [ ] docker swarm
### Versions
1.9
## Description
Orange, a mainstream French ISP, and a few others, have a rate limit: without a slow transport, I get deferred messages with this: "Too many connections, slow down." It is a known issue: https://blog.network-studio.fr/2011/06/30/too-many-connections-slow-down/
I managed to get it done with the overrides/ files:
overrides/postfix.cf :
```
transport_maps = socketmap:unix:/tmp/podop.socket:transport lmdb:/etc/postfix/transport.map
slow_destination_concurrency_limit = 1
slow_destination_recipient_limit = 20
slow_destination_rate_delay = 5s
slow_destination_concurrency_failed_cohort_limit=10
```
overrides/postfix.master :
```
slow/unix= slow unix - - n - 5 smtp -o syslog_name=postfix-slow
```
overrides/transport.map :
```
wanadoo.com slow:
wanadoo.fr slow:
orange.com slow:
orange.fr slow:
laposte.net slow:
free.fr slow:
hotmail.fr slow:
outlook.fr slow:
yahoo.fr slow:
```
I did not have time to fully test it, but it seems to work. Configuration values may need some fine tuning...
It would be nice to have such a "slow" transport built into Mailu, with the possibility to override the domain list.
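A rough sketch of what a built-in version could look like, in the style of Mailu's existing start.py (the POSTFIX_SLOW_DOMAINS variable name is an invented example here, not an actual Mailu setting):

```python
import os

# Hypothetical setting, e.g. POSTFIX_SLOW_DOMAINS="orange.fr,wanadoo.fr,laposte.net,free.fr"
slow_domains = [d.strip() for d in os.environ.get("POSTFIX_SLOW_DOMAINS", "").split(",") if d.strip()]

# Write one "domain slow:" line per entry, then compile the map the same way
# start.py already handles the other Postfix lookup tables.
with open("/etc/postfix/transport.map", "w") as handle:
    for domain in slow_domains:
        handle.write("{} slow:\n".format(domain))

os.system("postmap /etc/postfix/transport.map")
```

The override files above would keep working either way, since start.py still copies and postmaps anything dropped in overrides/.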
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/postfix/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9
10 from podop import run_server
11 from pwd import getpwnam
12 from socrate import system, conf
13
14 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
15
16 def start_podop():
17 os.setuid(getpwnam('postfix').pw_uid)
18 os.mkdir('/dev/shm/postfix',mode=0o700)
19 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
20 # TODO: Remove verbosity setting from Podop?
21 run_server(0, "postfix", "/tmp/podop.socket", [
22 ("transport", "url", url + "transport/§"),
23 ("alias", "url", url + "alias/§"),
24 ("dane", "url", url + "dane/§"),
25 ("domain", "url", url + "domain/§"),
26 ("mailbox", "url", url + "mailbox/§"),
27 ("recipientmap", "url", url + "recipient/map/§"),
28 ("sendermap", "url", url + "sender/map/§"),
29 ("senderaccess", "url", url + "sender/access/§"),
30 ("senderlogin", "url", url + "sender/login/§"),
31 ("senderrate", "url", url + "sender/rate/§")
32 ])
33
34 def start_mta_sts_daemon():
35 os.chmod("/root/", 0o755) # read access to /root/.netrc required
36 os.setuid(getpwnam('postfix').pw_uid)
37 from postfix_mta_sts_resolver import daemon
38 daemon.main()
39
40 def is_valid_postconf_line(line):
41 return not line.startswith("#") \
42 and not line == ''
43
44 # Actual startup script
45 os.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'
46 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
47 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
48 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
49 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
50 os.environ["POSTFIX_LOG_SYSLOG"] = os.environ.get("POSTFIX_LOG_SYSLOG","local")
51 os.environ["POSTFIX_LOG_FILE"] = os.environ.get("POSTFIX_LOG_FILE", "")
52
53 for postfix_file in glob.glob("/conf/*.cf"):
54 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
55
56 if os.path.exists("/overrides/postfix.cf"):
57 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
58 if is_valid_postconf_line(line):
59 os.system('postconf -e "{}"'.format(line))
60
61 if os.path.exists("/overrides/postfix.master"):
62 for line in open("/overrides/postfix.master").read().strip().split("\n"):
63 if is_valid_postconf_line(line):
64 os.system('postconf -Me "{}"'.format(line))
65
66 for map_file in glob.glob("/overrides/*.map"):
67 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
68 shutil.copyfile(map_file, destination)
69 os.system("postmap {}".format(destination))
70 os.remove(destination)
71
72 if os.path.exists("/overrides/mta-sts-daemon.yml"):
73 shutil.copyfile("/overrides/mta-sts-daemon.yml", "/etc/mta-sts-daemon.yml")
74 else:
75 conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml")
76
77 if not os.path.exists("/etc/postfix/tls_policy.map.lmdb"):
78 open("/etc/postfix/tls_policy.map", "a").close()
79 os.system("postmap /etc/postfix/tls_policy.map")
80
81 if "RELAYUSER" in os.environ:
82 path = "/etc/postfix/sasl_passwd"
83 conf.jinja("/conf/sasl_passwd", os.environ, path)
84 os.system("postmap {}".format(path))
85
86 # Configure and start local rsyslog server
87 conf.jinja("/conf/rsyslog.conf", os.environ, "/etc/rsyslog.conf")
88 os.system("/usr/sbin/rsyslogd -niNONE &")
89 # Configure logrotate and start crond
90 if os.environ["POSTFIX_LOG_FILE"] != "":
91 conf.jinja("/conf/logrotate.conf", os.environ, "/etc/logrotate.d/postfix.conf")
92 os.system("/usr/sbin/crond")
93 if os.path.exists("/overrides/logrotate.conf"):
94 shutil.copyfile("/overrides/logrotate.conf", "/etc/logrotate.d/postfix.conf")
95
96 # Run Podop and Postfix
97 multiprocessing.Process(target=start_podop).start()
98 multiprocessing.Process(target=start_mta_sts_daemon).start()
99 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
100 # Before starting postfix, we need to check permissions on /queue
101 # in the event that postfix,postdrop id have changed
102 os.system("postfix set-permissions")
103 os.system("postfix start-fg")
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -74,9 +74,10 @@
else:
conf.jinja("/conf/mta-sts-daemon.yml", os.environ, "/etc/mta-sts-daemon.yml")
-if not os.path.exists("/etc/postfix/tls_policy.map.lmdb"):
- open("/etc/postfix/tls_policy.map", "a").close()
- os.system("postmap /etc/postfix/tls_policy.map")
+for policy in ['tls_policy', 'transport']:
+ if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):
+ open(f'/etc/postfix/{policy}.map', 'a').close()
+ os.system(f'postmap /etc/postfix/{policy}.map')
if "RELAYUSER" in os.environ:
path = "/etc/postfix/sasl_passwd"
|
{"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -74,9 +74,10 @@\n else:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n \n-if not os.path.exists(\"/etc/postfix/tls_policy.map.lmdb\"):\n- open(\"/etc/postfix/tls_policy.map\", \"a\").close()\n- os.system(\"postmap /etc/postfix/tls_policy.map\")\n+for policy in ['tls_policy', 'transport']:\n+ if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):\n+ open(f'/etc/postfix/{policy}.map', 'a').close()\n+ os.system(f'postmap /etc/postfix/{policy}.map')\n \n if \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n", "issue": "Provide a \"slow\" transport for Postfix\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [ ] docker swarm\r\n\r\n### Versions\r\n1.9\r\n\r\n## Description\r\nOrange, a mainstream french ISP, and a few others, have a rate limit : without a slow transport, I get deferred messages with this : \"Too many connections, slow down.\" It is a known issue https://blog.network-studio.fr/2011/06/30/too-many-connections-slow-down/\r\n\r\nI managed to get it done with the overrides/ files :\r\n\r\noverrides/postfix.cf :\r\n\r\n```\r\ntransport_maps = socketmap:unix:/tmp/podop.socket:transport lmdb:/etc/postfix/transport.map\r\n\r\nslow_destination_concurrency_limit = 1\r\nslow_destination_recipient_limit = 20\r\nslow_destination_rate_delay = 5s\r\nslow_destination_concurrency_failed_cohort_limit=10\r\n\r\n```\r\noverrides/postfix.master :\r\n\r\n```\r\nslow/unix= slow unix - - n - 5 smtp -o syslog_name=postfix-slow\r\n```\r\n\r\noverrides/transport.map :\r\n\r\n```\r\nwanadoo.com slow:\r\nwanadoo.fr slow:\r\norange.com slow:\r\norange.fr slow:\r\nlaposte.net slow:\r\nfree.fr slow:\r\nhotmail.fr slow:\r\noutlook.fr slow:\r\nyahoo.fr slow:\r\n```\r\nI did not have time to fully test it, but it seems to work. 
Configuration values may need a fine tuning...\r\n\r\nIt would be nice to have such \"slow\" transport built in in Mailu, with an override possibility to edit the domain list.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom pwd import getpwnam\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.mkdir('/dev/shm/postfix',mode=0o700)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n os.setuid(getpwnam('postfix').pw_uid)\n from postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\nos.environ[\"POSTFIX_LOG_SYSLOG\"] = os.environ.get(\"POSTFIX_LOG_SYSLOG\",\"local\")\nos.environ[\"POSTFIX_LOG_FILE\"] = os.environ.get(\"POSTFIX_LOG_FILE\", \"\")\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nif not os.path.exists(\"/etc/postfix/tls_policy.map.lmdb\"):\n open(\"/etc/postfix/tls_policy.map\", \"a\").close()\n 
os.system(\"postmap /etc/postfix/tls_policy.map\")\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Configure and start local rsyslog server\nconf.jinja(\"/conf/rsyslog.conf\", os.environ, \"/etc/rsyslog.conf\")\nos.system(\"/usr/sbin/rsyslogd -niNONE &\")\n# Configure logrotate and start crond\nif os.environ[\"POSTFIX_LOG_FILE\"] != \"\":\n conf.jinja(\"/conf/logrotate.conf\", os.environ, \"/etc/logrotate.d/postfix.conf\")\n os.system(\"/usr/sbin/crond\")\n if os.path.exists(\"/overrides/logrotate.conf\"):\n shutil.copyfile(\"/overrides/logrotate.conf\", \"/etc/logrotate.d/postfix.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom pwd import getpwnam\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(getpwnam('postfix').pw_uid)\n os.mkdir('/dev/shm/postfix',mode=0o700)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n (\"transport\", \"url\", url + \"transport/\u00a7\"),\n (\"alias\", \"url\", url + \"alias/\u00a7\"),\n (\"dane\", \"url\", url + \"dane/\u00a7\"),\n (\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\"),\n (\"senderrate\", \"url\", url + \"sender/rate/\u00a7\")\n ])\n\ndef start_mta_sts_daemon():\n os.chmod(\"/root/\", 0o755) # read access to /root/.netrc required\n os.setuid(getpwnam('postfix').pw_uid)\n from postfix_mta_sts_resolver import daemon\n daemon.main()\n\ndef is_valid_postconf_line(line):\n return not line.startswith(\"#\") \\\n and not line == ''\n\n# Actual startup script\nos.environ['DEFER_ON_TLS_ERROR'] = os.environ['DEFER_ON_TLS_ERROR'] if 'DEFER_ON_TLS_ERROR' in os.environ else 'True'\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\nos.environ[\"POSTFIX_LOG_SYSLOG\"] = os.environ.get(\"POSTFIX_LOG_SYSLOG\",\"local\")\nos.environ[\"POSTFIX_LOG_FILE\"] = os.environ.get(\"POSTFIX_LOG_FILE\", \"\")\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif 
os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n if is_valid_postconf_line(line):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif os.path.exists(\"/overrides/mta-sts-daemon.yml\"):\n shutil.copyfile(\"/overrides/mta-sts-daemon.yml\", \"/etc/mta-sts-daemon.yml\")\nelse:\n conf.jinja(\"/conf/mta-sts-daemon.yml\", os.environ, \"/etc/mta-sts-daemon.yml\")\n\nfor policy in ['tls_policy', 'transport']:\n if not os.path.exists(f'/etc/postfix/{policy}.map.lmdb'):\n open(f'/etc/postfix/{policy}.map', 'a').close()\n os.system(f'postmap /etc/postfix/{policy}.map')\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap {}\".format(path))\n\n# Configure and start local rsyslog server\nconf.jinja(\"/conf/rsyslog.conf\", os.environ, \"/etc/rsyslog.conf\")\nos.system(\"/usr/sbin/rsyslogd -niNONE &\")\n# Configure logrotate and start crond\nif os.environ[\"POSTFIX_LOG_FILE\"] != \"\":\n conf.jinja(\"/conf/logrotate.conf\", os.environ, \"/etc/logrotate.d/postfix.conf\")\n os.system(\"/usr/sbin/crond\")\n if os.path.exists(\"/overrides/logrotate.conf\"):\n shutil.copyfile(\"/overrides/logrotate.conf\", \"/etc/logrotate.d/postfix.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nmultiprocessing.Process(target=start_mta_sts_daemon).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\n# Before starting postfix, we need to check permissions on /queue\n# in the event that postfix,postdrop id have changed\nos.system(\"postfix set-permissions\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]}
| 1,974 | 212 |
gh_patches_debug_41023
|
rasdani/github-patches
|
git_diff
|
pyload__pyload-180
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implemented StreamcloudEu plugin based on XFileSharingPro
Resolves #128
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `module/plugins/hoster/StreamcloudEu.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
3 import re
4
5 class StreamcloudEu(XFileSharingPro):
6 __name__ = "StreamcloudEu"
7 __type__ = "hoster"
8 __pattern__ = r"http://(www\.)?streamcloud\.eu/\S+"
9 __version__ = "0.01"
10 __description__ = """Streamcloud.eu hoster plugin"""
11 __author_name__ = ("seoester")
12 __author_mail__ = ("[email protected]")
13
14 HOSTER_NAME = "streamcloud.eu"
15 DIRECT_LINK_PATTERN = r'file: "(http://(stor|cdn)\d+\.streamcloud.eu:?\d*/.*/video\.mp4)",'
16
17 def setup(self):
18 super(XFileSharingPro, self).setup()
19 self.multiDL = True
20
21 def getDownloadLink(self):
22 found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)
23 if found:
24 return found.group(1)
25
26 return super(XFileSharingPro, self).getDownloadLink()
27
28 getInfo = create_getInfo(StreamcloudEu)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/module/plugins/hoster/StreamcloudEu.py b/module/plugins/hoster/StreamcloudEu.py
--- a/module/plugins/hoster/StreamcloudEu.py
+++ b/module/plugins/hoster/StreamcloudEu.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
+from module.network.HTTPRequest import HTTPRequest
+from time import sleep
import re
class StreamcloudEu(XFileSharingPro):
@@ -15,7 +17,7 @@
DIRECT_LINK_PATTERN = r'file: "(http://(stor|cdn)\d+\.streamcloud.eu:?\d*/.*/video\.mp4)",'
def setup(self):
- super(XFileSharingPro, self).setup()
+ super(StreamcloudEu, self).setup()
self.multiDL = True
def getDownloadLink(self):
@@ -23,6 +25,87 @@
if found:
return found.group(1)
- return super(XFileSharingPro, self).getDownloadLink()
+ for i in range(5):
+ self.logDebug("Getting download link: #%d" % i)
+ data = self.getPostParameters()
+ httpRequest = HTTPRequest(options=self.req.options)
+ httpRequest.cj = self.req.cj
+ sleep(10)
+ self.html = httpRequest.load(self.pyfile.url, post = data, referer=False, cookies=True, decode = True)
+ self.header = httpRequest.header
+
+ found = re.search("Location\s*:\s*(.*)", self.header, re.I)
+ if found:
+ break
+
+ found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)
+ if found:
+ break
+
+ else:
+ if self.errmsg and 'captcha' in self.errmsg:
+ self.fail("No valid captcha code entered")
+ else:
+ self.fail("Download link not found")
+
+ return found.group(1)
+
+ def getPostParameters(self):
+ for i in range(3):
+ if not self.errmsg: self.checkErrors()
+
+ if hasattr(self,"FORM_PATTERN"):
+ action, inputs = self.parseHtmlForm(self.FORM_PATTERN)
+ else:
+ action, inputs = self.parseHtmlForm(input_names={"op": re.compile("^download")})
+
+ if not inputs:
+ action, inputs = self.parseHtmlForm('F1')
+ if not inputs:
+ if self.errmsg:
+ self.retry()
+ else:
+ self.parseError("Form not found")
+
+ self.logDebug(self.HOSTER_NAME, inputs)
+
+ if 'op' in inputs and inputs['op'] in ('download1', 'download2', 'download3'):
+ if "password" in inputs:
+ if self.passwords:
+ inputs['password'] = self.passwords.pop(0)
+ else:
+ self.fail("No or invalid passport")
+
+ if not self.premium:
+ found = re.search(self.WAIT_PATTERN, self.html)
+ if found:
+ wait_time = int(found.group(1)) + 1
+ self.setWait(wait_time, False)
+ else:
+ wait_time = 0
+
+ self.captcha = self.handleCaptcha(inputs)
+
+ if wait_time: self.wait()
+
+ self.errmsg = None
+ self.logDebug("getPostParameters {0}".format(i))
+ return inputs
+
+ else:
+ inputs['referer'] = self.pyfile.url
+
+ if self.premium:
+ inputs['method_premium'] = "Premium Download"
+ if 'method_free' in inputs: del inputs['method_free']
+ else:
+ inputs['method_free'] = "Free Download"
+ if 'method_premium' in inputs: del inputs['method_premium']
+
+ self.html = self.load(self.pyfile.url, post = inputs, ref = False)
+ self.errmsg = None
+
+ else: self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))
+
getInfo = create_getInfo(StreamcloudEu)
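One key fix in this patch is the `super()` target in `setup()`: `super(XFileSharingPro, self)` starts the method lookup *after* `XFileSharingPro`, so that class's own `setup()` never runs. A minimal, self-contained illustration of the difference (generic class names, not the real plugin hierarchy):

```python
class Base:
    def setup(self):
        print("Base.setup")

class Middle(Base):
    def setup(self):
        print("Middle.setup")
        super().setup()

class Leaf(Middle):
    def setup(self):
        # super(Middle, self).setup() would skip Middle.setup entirely,
        # which is what the unpatched plugin was doing to XFileSharingPro.
        super(Leaf, self).setup()

Leaf().setup()  # prints "Middle.setup" then "Base.setup"
```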
|
{"golden_diff": "diff --git a/module/plugins/hoster/StreamcloudEu.py b/module/plugins/hoster/StreamcloudEu.py\n--- a/module/plugins/hoster/StreamcloudEu.py\n+++ b/module/plugins/hoster/StreamcloudEu.py\n@@ -1,5 +1,7 @@\n # -*- coding: utf-8 -*-\n from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo\n+from module.network.HTTPRequest import HTTPRequest\n+from time import sleep\n import re\n \n class StreamcloudEu(XFileSharingPro):\n@@ -15,7 +17,7 @@\n DIRECT_LINK_PATTERN = r'file: \"(http://(stor|cdn)\\d+\\.streamcloud.eu:?\\d*/.*/video\\.mp4)\",'\n \n def setup(self):\n- super(XFileSharingPro, self).setup()\n+ super(StreamcloudEu, self).setup()\n self.multiDL = True\n \n def getDownloadLink(self):\n@@ -23,6 +25,87 @@\n if found:\n return found.group(1)\n \n- return super(XFileSharingPro, self).getDownloadLink()\n+ for i in range(5):\n+ self.logDebug(\"Getting download link: #%d\" % i)\n+ data = self.getPostParameters()\n+ httpRequest = HTTPRequest(options=self.req.options)\n+ httpRequest.cj = self.req.cj\n+ sleep(10)\n+ self.html = httpRequest.load(self.pyfile.url, post = data, referer=False, cookies=True, decode = True)\n+ self.header = httpRequest.header\n+\n+ found = re.search(\"Location\\s*:\\s*(.*)\", self.header, re.I)\n+ if found:\n+ break\n+\n+ found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)\n+ if found:\n+ break\n+\n+ else:\n+ if self.errmsg and 'captcha' in self.errmsg:\n+ self.fail(\"No valid captcha code entered\")\n+ else:\n+ self.fail(\"Download link not found\")\n+\n+ return found.group(1)\n+\n+ def getPostParameters(self):\n+ for i in range(3):\n+ if not self.errmsg: self.checkErrors()\n+\n+ if hasattr(self,\"FORM_PATTERN\"):\n+ action, inputs = self.parseHtmlForm(self.FORM_PATTERN)\n+ else:\n+ action, inputs = self.parseHtmlForm(input_names={\"op\": re.compile(\"^download\")})\n+\n+ if not inputs:\n+ action, inputs = self.parseHtmlForm('F1')\n+ if not inputs:\n+ if self.errmsg:\n+ self.retry()\n+ else:\n+ self.parseError(\"Form not found\")\n+\n+ self.logDebug(self.HOSTER_NAME, inputs)\n+\n+ if 'op' in inputs and inputs['op'] in ('download1', 'download2', 'download3'):\n+ if \"password\" in inputs:\n+ if self.passwords:\n+ inputs['password'] = self.passwords.pop(0)\n+ else:\n+ self.fail(\"No or invalid passport\")\n+\n+ if not self.premium:\n+ found = re.search(self.WAIT_PATTERN, self.html)\n+ if found:\n+ wait_time = int(found.group(1)) + 1\n+ self.setWait(wait_time, False)\n+ else:\n+ wait_time = 0\n+\n+ self.captcha = self.handleCaptcha(inputs)\n+\n+ if wait_time: self.wait()\n+\n+ self.errmsg = None\n+ self.logDebug(\"getPostParameters {0}\".format(i))\n+ return inputs\n+\n+ else:\n+ inputs['referer'] = self.pyfile.url\n+\n+ if self.premium:\n+ inputs['method_premium'] = \"Premium Download\"\n+ if 'method_free' in inputs: del inputs['method_free']\n+ else:\n+ inputs['method_free'] = \"Free Download\"\n+ if 'method_premium' in inputs: del inputs['method_premium']\n+\n+ self.html = self.load(self.pyfile.url, post = inputs, ref = False)\n+ self.errmsg = None\n+\n+ else: self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))\n+\n \n getInfo = create_getInfo(StreamcloudEu)\n", "issue": "Implemented StreamcloudEu plugin based on XFileSharingPro\nResolves #128\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo\nimport re\n\nclass StreamcloudEu(XFileSharingPro):\n __name__ = \"StreamcloudEu\"\n __type__ = \"hoster\"\n 
__pattern__ = r\"http://(www\\.)?streamcloud\\.eu/\\S+\"\n __version__ = \"0.01\"\n __description__ = \"\"\"Streamcloud.eu hoster plugin\"\"\"\n __author_name__ = (\"seoester\")\n __author_mail__ = (\"[email protected]\")\n\n HOSTER_NAME = \"streamcloud.eu\"\n DIRECT_LINK_PATTERN = r'file: \"(http://(stor|cdn)\\d+\\.streamcloud.eu:?\\d*/.*/video\\.mp4)\",'\n\n def setup(self):\n super(XFileSharingPro, self).setup()\n self.multiDL = True\n\n def getDownloadLink(self):\n found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)\n if found:\n return found.group(1)\n\n return super(XFileSharingPro, self).getDownloadLink()\n\ngetInfo = create_getInfo(StreamcloudEu)\n", "path": "module/plugins/hoster/StreamcloudEu.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo\nfrom module.network.HTTPRequest import HTTPRequest\nfrom time import sleep\nimport re\n\nclass StreamcloudEu(XFileSharingPro):\n __name__ = \"StreamcloudEu\"\n __type__ = \"hoster\"\n __pattern__ = r\"http://(www\\.)?streamcloud\\.eu/\\S+\"\n __version__ = \"0.01\"\n __description__ = \"\"\"Streamcloud.eu hoster plugin\"\"\"\n __author_name__ = (\"seoester\")\n __author_mail__ = (\"[email protected]\")\n\n HOSTER_NAME = \"streamcloud.eu\"\n DIRECT_LINK_PATTERN = r'file: \"(http://(stor|cdn)\\d+\\.streamcloud.eu:?\\d*/.*/video\\.mp4)\",'\n\n def setup(self):\n super(StreamcloudEu, self).setup()\n self.multiDL = True\n\n def getDownloadLink(self):\n found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)\n if found:\n return found.group(1)\n\n for i in range(5):\n self.logDebug(\"Getting download link: #%d\" % i)\n data = self.getPostParameters()\n httpRequest = HTTPRequest(options=self.req.options)\n httpRequest.cj = self.req.cj\n sleep(10)\n self.html = httpRequest.load(self.pyfile.url, post = data, referer=False, cookies=True, decode = True)\n self.header = httpRequest.header\n\n found = re.search(\"Location\\s*:\\s*(.*)\", self.header, re.I)\n if found:\n break\n\n found = re.search(self.DIRECT_LINK_PATTERN, self.html, re.S)\n if found:\n break\n\n else:\n if self.errmsg and 'captcha' in self.errmsg:\n self.fail(\"No valid captcha code entered\")\n else:\n self.fail(\"Download link not found\")\n\n return found.group(1)\n\n def getPostParameters(self):\n for i in range(3):\n if not self.errmsg: self.checkErrors()\n\n if hasattr(self,\"FORM_PATTERN\"):\n action, inputs = self.parseHtmlForm(self.FORM_PATTERN)\n else:\n action, inputs = self.parseHtmlForm(input_names={\"op\": re.compile(\"^download\")})\n\n if not inputs:\n action, inputs = self.parseHtmlForm('F1')\n if not inputs:\n if self.errmsg:\n self.retry()\n else:\n self.parseError(\"Form not found\")\n\n self.logDebug(self.HOSTER_NAME, inputs)\n\n if 'op' in inputs and inputs['op'] in ('download1', 'download2', 'download3'):\n if \"password\" in inputs:\n if self.passwords:\n inputs['password'] = self.passwords.pop(0)\n else:\n self.fail(\"No or invalid passport\")\n\n if not self.premium:\n found = re.search(self.WAIT_PATTERN, self.html)\n if found:\n wait_time = int(found.group(1)) + 1\n self.setWait(wait_time, False)\n else:\n wait_time = 0\n\n self.captcha = self.handleCaptcha(inputs)\n\n if wait_time: self.wait()\n\n self.errmsg = None\n self.logDebug(\"getPostParameters {0}\".format(i))\n return inputs\n\n else:\n inputs['referer'] = self.pyfile.url\n\n if self.premium:\n inputs['method_premium'] = \"Premium Download\"\n if 'method_free' in inputs: del 
inputs['method_free']\n else:\n inputs['method_free'] = \"Free Download\"\n if 'method_premium' in inputs: del inputs['method_premium']\n\n self.html = self.load(self.pyfile.url, post = inputs, ref = False)\n self.errmsg = None\n\n else: self.parseError('FORM: %s' % (inputs['op'] if 'op' in inputs else 'UNKNOWN'))\n\n\ngetInfo = create_getInfo(StreamcloudEu)\n", "path": "module/plugins/hoster/StreamcloudEu.py"}]}
| 590 | 946 |
gh_patches_debug_36759
|
rasdani/github-patches
|
git_diff
|
tensorflow__addons-206
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Generate API docs
As our repository matures, it's important to have API docs to improve the user experience. As discussed in #38, we will also be able to remove the table of contents from the main README.
Should we host on https://readthedocs.org/, or is there something else recommended, @ewilderj @dynamicwebpaige @karmel?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/docs/build_docs.py`
Content:
```
1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """ Modified from the tfdocs example api reference docs generation script.
16
17 This script generates API reference docs.
18
19 Install pre-requisites:
20 $> pip install -U git+https://github.com/tensorflow/docs
21 $> pip install artifacts/tensorflow_addons-*.whl
22
23 Generate Docs:
24 $> from the repo root run: python tools/docs/build_docs.py
25 """
26
27 from __future__ import absolute_import
28 from __future__ import division
29 from __future__ import print_function
30
31 from absl import app
32 from absl import flags
33
34 import tensorflow_addons
35 from tensorflow_docs.api_generator import generate_lib
36 from tensorflow_docs.api_generator import public_api
37
38 PROJECT_SHORT_NAME = 'tfaddons'
39 PROJECT_FULL_NAME = 'TensorFlow Addons'
40 CODE_URL_PREFIX = 'https://github.com/tensorflow/addons/tree/master/tensorflow_addons'
41
42 FLAGS = flags.FLAGS
43
44 flags.DEFINE_string(
45 'output_dir',
46 default='/addons/docs/api_docs/python/',
47 help='Where to write the resulting docs to.')
48
49
50 def main(argv):
51 if argv[1:]:
52 raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
53
54 doc_generator = generate_lib.DocGenerator(
55 root_title=PROJECT_FULL_NAME,
56 # Replace `tensorflow_docs` with your module, here.
57 py_modules=[(PROJECT_SHORT_NAME, tensorflow_addons)],
58 code_url_prefix=CODE_URL_PREFIX,
59 # This callback cleans up a lot of aliases caused by internal imports.
60 callbacks=[public_api.local_definitions_filter])
61
62 doc_generator.build(FLAGS.output_dir)
63
64 print('Output docs to: ', FLAGS.output_dir)
65
66
67 if __name__ == '__main__':
68 app.run(main)
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/docs/build_docs.py b/tools/docs/build_docs.py
--- a/tools/docs/build_docs.py
+++ b/tools/docs/build_docs.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-""" Modified from the tfdocs example api reference docs generation script.
+"""Modified from the tfdocs example api reference docs generation script.
This script generates API reference docs.
@@ -31,19 +31,30 @@
from absl import app
from absl import flags
-import tensorflow_addons
+import tensorflow_addons as tfa
+
from tensorflow_docs.api_generator import generate_lib
+from tensorflow_docs.api_generator import parser
from tensorflow_docs.api_generator import public_api
-PROJECT_SHORT_NAME = 'tfaddons'
+from tensorflow.python.util import tf_inspect
+
+# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.
+parser.tf_inspect = tf_inspect
+
+PROJECT_SHORT_NAME = 'tfa'
PROJECT_FULL_NAME = 'TensorFlow Addons'
-CODE_URL_PREFIX = 'https://github.com/tensorflow/addons/tree/master/tensorflow_addons'
FLAGS = flags.FLAGS
+flags.DEFINE_string(
+ 'git_branch',
+ default='master',
+ help='The name of the corresponding branch on github.')
+
flags.DEFINE_string(
'output_dir',
- default='/addons/docs/api_docs/python/',
+ default='docs/api_docs/python/',
help='Where to write the resulting docs to.')
@@ -51,11 +62,16 @@
if argv[1:]:
raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))
+ code_url_prefix = ('https://github.com/tensorflow/addons/tree/'
+ '{git_branch}/tensorflow_addons'.format(
+ git_branch=FLAGS.git_branch))
+
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
# Replace `tensorflow_docs` with your module, here.
- py_modules=[(PROJECT_SHORT_NAME, tensorflow_addons)],
- code_url_prefix=CODE_URL_PREFIX,
+ py_modules=[(PROJECT_SHORT_NAME, tfa)],
+ code_url_prefix=code_url_prefix,
+ private_map={'tfa': ['__version__', 'utils', 'version']},
# This callback cleans up a lot of aliases caused by internal imports.
callbacks=[public_api.local_definitions_filter])
|
{"golden_diff": "diff --git a/tools/docs/build_docs.py b/tools/docs/build_docs.py\n--- a/tools/docs/build_docs.py\n+++ b/tools/docs/build_docs.py\n@@ -12,7 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n # ==============================================================================\n-\"\"\" Modified from the tfdocs example api reference docs generation script.\n+\"\"\"Modified from the tfdocs example api reference docs generation script.\n \n This script generates API reference docs.\n \n@@ -31,19 +31,30 @@\n from absl import app\n from absl import flags\n \n-import tensorflow_addons\n+import tensorflow_addons as tfa\n+\n from tensorflow_docs.api_generator import generate_lib\n+from tensorflow_docs.api_generator import parser\n from tensorflow_docs.api_generator import public_api\n \n-PROJECT_SHORT_NAME = 'tfaddons'\n+from tensorflow.python.util import tf_inspect\n+\n+# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.\n+parser.tf_inspect = tf_inspect\n+\n+PROJECT_SHORT_NAME = 'tfa'\n PROJECT_FULL_NAME = 'TensorFlow Addons'\n-CODE_URL_PREFIX = 'https://github.com/tensorflow/addons/tree/master/tensorflow_addons'\n \n FLAGS = flags.FLAGS\n \n+flags.DEFINE_string(\n+ 'git_branch',\n+ default='master',\n+ help='The name of the corresponding branch on github.')\n+\n flags.DEFINE_string(\n 'output_dir',\n- default='/addons/docs/api_docs/python/',\n+ default='docs/api_docs/python/',\n help='Where to write the resulting docs to.')\n \n \n@@ -51,11 +62,16 @@\n if argv[1:]:\n raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))\n \n+ code_url_prefix = ('https://github.com/tensorflow/addons/tree/'\n+ '{git_branch}/tensorflow_addons'.format(\n+ git_branch=FLAGS.git_branch))\n+\n doc_generator = generate_lib.DocGenerator(\n root_title=PROJECT_FULL_NAME,\n # Replace `tensorflow_docs` with your module, here.\n- py_modules=[(PROJECT_SHORT_NAME, tensorflow_addons)],\n- code_url_prefix=CODE_URL_PREFIX,\n+ py_modules=[(PROJECT_SHORT_NAME, tfa)],\n+ code_url_prefix=code_url_prefix,\n+ private_map={'tfa': ['__version__', 'utils', 'version']},\n # This callback cleans up a lot of aliases caused by internal imports.\n callbacks=[public_api.local_definitions_filter])\n", "issue": "Generate API docs\nAs our repository matures it's important to have api docs to improve user experience. As discussed in #38 we will also be able to remove the table of contents off the main README.\r\n\r\nShould we host on https://readthedocs.org/ or is there something else recommended @ewilderj @dynamicwebpaige @karmel ?\n", "before_files": [{"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\" Modified from the tfdocs example api reference docs generation script.\n\nThis script generates API reference docs.\n\nInstall pre-requisites:\n$> pip install -U git+https://github.com/tensorflow/docs\n$> pip install artifacts/tensorflow_addons-*.whl\n\nGenerate Docs:\n$> from the repo root run: python tools/docs/build_docs.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport tensorflow_addons\nfrom tensorflow_docs.api_generator import generate_lib\nfrom tensorflow_docs.api_generator import public_api\n\nPROJECT_SHORT_NAME = 'tfaddons'\nPROJECT_FULL_NAME = 'TensorFlow Addons'\nCODE_URL_PREFIX = 'https://github.com/tensorflow/addons/tree/master/tensorflow_addons'\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'output_dir',\n default='/addons/docs/api_docs/python/',\n help='Where to write the resulting docs to.')\n\n\ndef main(argv):\n if argv[1:]:\n raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))\n\n doc_generator = generate_lib.DocGenerator(\n root_title=PROJECT_FULL_NAME,\n # Replace `tensorflow_docs` with your module, here.\n py_modules=[(PROJECT_SHORT_NAME, tensorflow_addons)],\n code_url_prefix=CODE_URL_PREFIX,\n # This callback cleans up a lot of aliases caused by internal imports.\n callbacks=[public_api.local_definitions_filter])\n\n doc_generator.build(FLAGS.output_dir)\n\n print('Output docs to: ', FLAGS.output_dir)\n\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "tools/docs/build_docs.py"}], "after_files": [{"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Modified from the tfdocs example api reference docs generation script.\n\nThis script generates API reference docs.\n\nInstall pre-requisites:\n$> pip install -U git+https://github.com/tensorflow/docs\n$> pip install artifacts/tensorflow_addons-*.whl\n\nGenerate Docs:\n$> from the repo root run: python tools/docs/build_docs.py\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport tensorflow_addons as tfa\n\nfrom tensorflow_docs.api_generator import generate_lib\nfrom tensorflow_docs.api_generator import parser\nfrom tensorflow_docs.api_generator import public_api\n\nfrom tensorflow.python.util import tf_inspect\n\n# Use tensorflow's `tf_inspect`, which is aware of `tf_decorator`.\nparser.tf_inspect = tf_inspect\n\nPROJECT_SHORT_NAME = 'tfa'\nPROJECT_FULL_NAME = 'TensorFlow Addons'\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'git_branch',\n default='master',\n help='The name of the corresponding branch on github.')\n\nflags.DEFINE_string(\n 'output_dir',\n default='docs/api_docs/python/',\n help='Where to write the resulting docs to.')\n\n\ndef main(argv):\n if argv[1:]:\n raise ValueError('Unrecognized arguments: {}'.format(argv[1:]))\n\n code_url_prefix = ('https://github.com/tensorflow/addons/tree/'\n '{git_branch}/tensorflow_addons'.format(\n git_branch=FLAGS.git_branch))\n\n doc_generator = generate_lib.DocGenerator(\n root_title=PROJECT_FULL_NAME,\n # Replace `tensorflow_docs` with your module, here.\n py_modules=[(PROJECT_SHORT_NAME, tfa)],\n code_url_prefix=code_url_prefix,\n private_map={'tfa': ['__version__', 'utils', 'version']},\n # This callback cleans up a lot of aliases caused by internal imports.\n callbacks=[public_api.local_definitions_filter])\n\n doc_generator.build(FLAGS.output_dir)\n\n print('Output docs to: ', FLAGS.output_dir)\n\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "tools/docs/build_docs.py"}]}
| 956 | 531 |
gh_patches_debug_36739
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-645
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Authentication is currently broken
When attempting to log into the web app while running it locally, we encounter this error:
```
Traceback (most recent call last):
File "python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
response = get_response(request)
File "python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
response = self.process_exception_by_middleware(e, request)
File "python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "d/jwql/jwql/website/apps/jwql/oauth.py", line 113, in authorize
request, headers={'Accept': 'application/json'}
File "python3.6/site-packages/authlib/integrations/django_client/remote_app.py", line 63, in authorize_access_token
return self.fetch_access_token(**params)
File "python3.6/site-packages/authlib/integrations/_client/remote_app.py", line 106, in fetch_access_token
token = client.fetch_token(token_endpoint, **kwargs)
File "python3.6/site-packages/authlib/oauth2/client.py", line 202, in fetch_token
headers=headers, **session_kwargs
File "python3.6/site-packages/authlib/oauth2/client.py", line 223, in _fetch_token
return self.parse_response_token(resp.json())
File "python3.6/site-packages/requests/models.py", line 898, in json
return complexjson.loads(self.text, **kwargs)
File "python3.6/json/__init__.py", line 354, in loads
return _default_decoder.decode(s)
File "python3.6/json/decoder.py", line 339, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "python3.6/json/decoder.py", line 357, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
```
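The `JSONDecodeError` at the bottom means the token endpoint handed back something that is not JSON at all (typically an HTML error page), which `authlib` then tries to parse. A standalone snippet like the following can show the raw response while debugging (the host and OAuth parameters are placeholders, not the real auth.mast values):

```python
import requests

# Placeholders: fill in the auth_mast host and OAuth values from config.json
resp = requests.post(
    "https://<auth_mast_host>/oauth/access_token",
    data={"grant_type": "authorization_code", "code": "<authorization_code>"},
    headers={"Accept": "application/json"},
)
print(resp.status_code, resp.headers.get("Content-Type"))
print(resp.text[:500])  # whatever appears here is what fetch_access_token failed to decode
```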
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/website/apps/jwql/oauth.py`
Content:
```
1 """Provides an OAuth object for authentication of the ``jwql`` web app,
2 as well as decorator functions to require user authentication in other
3 views of the web application.
4
5
6 Authors
7 -------
8
9 - Matthew Bourque
10 - Christian Mesh
11
12 Use
13 ---
14
15 This module is intended to be imported and used as such:
16 ::
17
18 from .oauth import auth_info
19 from .oauth import auth_required
20 from .oauth import JWQL_OAUTH
21
22 @auth_info
23 def some_view(request):
24 pass
25
26 @auth_required
27 def login(request):
28 pass
29
30 References
31 ----------
32 Much of this code was taken from the ``authlib`` documentation,
33 found here: ``http://docs.authlib.org/en/latest/client/django.html``
34
35 Dependencies
36 ------------
37 The user must have a configuration file named ``config.json``
38 placed in the ``jwql/utils/`` directory.
39 """
40
41 import os
42 import requests
43
44 from authlib.django.client import OAuth
45 from django.shortcuts import redirect, render
46
47 import jwql
48 from jwql.utils.constants import MONITORS
49 from jwql.utils.utils import get_base_url, get_config, check_config_for_key
50
51 PREV_PAGE = '/'
52
53
54 def register_oauth():
55 """Register the ``jwql`` application with the ``auth.mast``
56 authentication service.
57
58 Returns
59 -------
60 oauth : Object
61 An object containing methods to authenticate a user, provided
62 by the ``auth.mast`` service.
63 """
64
65 # Get configuration parameters
66 for key in ['client_id', 'client_secret', 'auth_mast']:
67 check_config_for_key(key)
68 client_id = get_config()['client_id']
69 client_secret = get_config()['client_secret']
70 auth_mast = get_config()['auth_mast']
71
72 # Register with auth.mast
73 oauth = OAuth()
74 client_kwargs = {'scope': 'mast:user:info'}
75 oauth.register(
76 'mast_auth',
77 client_id='{}'.format(client_id),
78 client_secret='{}'.format(client_secret),
79 access_token_url='https://{}/oauth/access_token?client_secret={}'.format(
80 auth_mast, client_secret
81 ),
82 access_token_params=None,
83 refresh_token_url=None,
84 authorize_url='https://{}/oauth/authorize'.format(auth_mast),
85 api_base_url='https://{}/1.1/'.format(auth_mast),
86 client_kwargs=client_kwargs)
87
88 return oauth
89
90
91 JWQL_OAUTH = register_oauth()
92
93
94 def authorize(request):
95 """Spawn the authentication process for the user
96
97 The authentication process involves retreiving an access token
98 from ``auth.mast`` and porting the data to a cookie.
99
100 Parameters
101 ----------
102 request : HttpRequest object
103 Incoming request from the webpage
104
105 Returns
106 -------
107 HttpResponse object
108 Outgoing response sent to the webpage
109 """
110
111 # Get auth.mast token
112 token = JWQL_OAUTH.mast_auth.authorize_access_token(
113 request, headers={'Accept': 'application/json'}
114 )
115
116 # Determine domain
117 base_url = get_base_url()
118 if '127' in base_url:
119 domain = '127.0.0.1'
120 else:
121 domain = base_url.split('//')[-1]
122
123 # Set secure cookie parameters
124 cookie_args = {}
125 # cookie_args['domain'] = domain # Currently broken
126 # cookie_args['secure'] = True # Currently broken
127 cookie_args['httponly'] = True
128
129 # Set the cookie
130 response = redirect(PREV_PAGE)
131 response.set_cookie("ASB-AUTH", token["access_token"], **cookie_args)
132
133 return response
134
135
136 def auth_info(fn):
137 """A decorator function that will return user credentials along
138 with what is returned by the original function.
139
140 Parameters
141 ----------
142 fn : function
143 The function to decorate
144
145 Returns
146 -------
147 user_info : function
148 The decorated function
149 """
150
151 def user_info(request, **kwargs):
152 """Store authenticated user credentials in a cookie and return
153 it. If the user is not authenticated, store no credentials in
154 the cookie.
155
156 Parameters
157 ----------
158 request : HttpRequest object
159 Incoming request from the webpage
160
161 Returns
162 -------
163 fn : function
164 The decorated function
165 """
166
167 cookie = request.COOKIES.get("ASB-AUTH")
168
169 # If user is authenticated, return user credentials
170 if cookie is not None:
171 check_config_for_key('auth_mast')
172 # Note: for now, this must be the development version
173 auth_mast = get_config()['auth_mast']
174
175 response = requests.get(
176 'https://{}/info'.format(auth_mast),
177 headers={'Accept': 'application/json',
178 'Authorization': 'token {}'.format(cookie)})
179 response = response.json()
180 response['access_token'] = cookie
181
182 # If user is not authenticated, return no credentials
183 else:
184 response = {'ezid': None, "anon": True, 'access_token': None}
185
186 return fn(request, response, **kwargs)
187
188 return user_info
189
190
191 def auth_required(fn):
192 """A decorator function that requires the given function to have
193 authentication through ``auth.mast`` set up.
194
195 Parameters
196 ----------
197 fn : function
198 The function to decorate
199
200 Returns
201 -------
202 check_auth : function
203 The decorated function
204 """
205
206 @auth_info
207 def check_auth(request, user, **kwargs):
208 """Check if the user is authenticated through ``auth.mast``.
209 If not, perform the authorization.
210
211 Parameters
212 ----------
213 request : HttpRequest object
214 Incoming request from the webpage
215 user : dict
216 A dictionary of user credentials
217
218 Returns
219 -------
220 fn : function
221 The decorated function
222 """
223
224 # If user is currently anonymous, require a login
225 if user['ezid']:
226
227 return fn(request, user, **kwargs)
228
229 else:
230 template = 'not_authenticated.html'
231 context = {'inst': ''}
232
233 return render(request, template, context)
234
235 return check_auth
236
237
238 @auth_info
239 def login(request, user):
240 """Spawn a login process for the user
241
242 The ``auth_requred`` decorator is used to require that the user
243 authenticate through ``auth.mast``, then the user is redirected
244 back to the homepage.
245
246 Parameters
247 ----------
248 request : HttpRequest object
249 Incoming request from the webpage
250 user : dict
251 A dictionary of user credentials.
252
253 Returns
254 -------
255 HttpResponse object
256 Outgoing response sent to the webpage
257 """
258
259 # Redirect to oauth login
260 global PREV_PAGE
261 PREV_PAGE = request.META.get('HTTP_REFERER')
262 redirect_uri = os.path.join(get_base_url(), 'authorize')
263
264 return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri)
265
266
267 def logout(request):
268 """Spawn a logout process for the user
269
270 Upon logout, the user's ``auth.mast`` credientials are removed and
271 the user is redirected back to the homepage.
272
273 Parameters
274 ----------
275 request : HttpRequest object
276 Incoming request from the webpage
277 user : dict
278 A dictionary of user credentials.
279
280 Returns
281 -------
282 HttpResponse object
283 Outgoing response sent to the webpage
284 """
285
286 global PREV_PAGE
287 PREV_PAGE = request.META.get('HTTP_REFERER')
288 response = redirect(PREV_PAGE)
289 response.delete_cookie("ASB-AUTH")
290
291 return response
292
```
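
Two details in `register_oauth()` above stand out: the import from the deprecated `authlib.django.client` module, and an `access_token_url` that puts the client secret into the query string. With the maintained `authlib.integrations.django_client` API the same registration would look roughly like this (the `client_id`, `client_secret` and `auth_mast` values are placeholders); it matches the reference patch further down:

```python
from authlib.integrations.django_client import OAuth

client_id, client_secret, auth_mast = "my-id", "my-secret", "auth.mast.example"  # placeholders

oauth = OAuth()
oauth.register(
    'mast_auth',
    client_id=client_id,
    client_secret=client_secret,
    access_token_url='https://{}/oauth/token'.format(auth_mast),
    authorize_url='https://{}/oauth/authorize'.format(auth_mast),
    api_base_url='https://{}/1.1/'.format(auth_mast),
    client_kwargs={
        'scope': 'mast:user:info',
        'token_endpoint_auth_method': 'client_secret_basic',  # secret travels in the Authorization header
        'token_placement': 'header',
    },
)
```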
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.23.0'
6
7 AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',
13 'git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles'
14 ]
15 REQUIRES = [
16 'asdf>=2.3.3',
17 'astropy>=3.2.1',
18 'astroquery>=0.3.9',
19 'authlib',
20 'bokeh>=1.0,<1.4',
21 'codecov',
22 'crds',
23 'django>=2.0,<3.0',
24 'flake8',
25 'inflection',
26 'ipython',
27 'jinja2',
28 'jsonschema',
29 'jwedb>=0.0.3',
30 'matplotlib',
31 'nodejs',
32 'numpy',
33 'numpydoc',
34 'pandas',
35 'psycopg2',
36 'pysiaf',
37 'pytest',
38 'pytest-cov',
39 'scipy',
40 'sphinx',
41 'sqlalchemy',
42 'stsci_rtd_theme',
43 'twine'
44 ]
45
46 setup(
47 name='jwql',
48 version=VERSION,
49 description=DESCRIPTION,
50 url='https://github.com/spacetelescope/jwql.git',
51 author=AUTHORS,
52 author_email='[email protected]',
53 license='BSD',
54 keywords=['astronomy', 'python'],
55 classifiers=['Programming Language :: Python'],
56 packages=find_packages(),
57 install_requires=REQUIRES,
58 dependency_links=DEPENDENCY_LINKS,
59 include_package_data=True,
60 include_dirs=[np.get_include()],
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jwql/website/apps/jwql/oauth.py b/jwql/website/apps/jwql/oauth.py
--- a/jwql/website/apps/jwql/oauth.py
+++ b/jwql/website/apps/jwql/oauth.py
@@ -8,6 +8,7 @@
- Matthew Bourque
- Christian Mesh
+ - Ben Falk
Use
---
@@ -41,8 +42,9 @@
import os
import requests
-from authlib.django.client import OAuth
+from authlib.integrations.django_client import OAuth
from django.shortcuts import redirect, render
+from django.urls import reverse
import jwql
from jwql.utils.constants import MONITORS
@@ -71,14 +73,15 @@
# Register with auth.mast
oauth = OAuth()
- client_kwargs = {'scope': 'mast:user:info'}
+ client_kwargs = {
+ 'scope': 'mast:user:info',
+ 'token_endpoint_auth_method': 'client_secret_basic',
+ 'token_placement': 'header'}
oauth.register(
'mast_auth',
client_id='{}'.format(client_id),
client_secret='{}'.format(client_secret),
- access_token_url='https://{}/oauth/access_token?client_secret={}'.format(
- auth_mast, client_secret
- ),
+ access_token_url='https://{}/oauth/token'.format(auth_mast),
access_token_params=None,
refresh_token_url=None,
authorize_url='https://{}/oauth/authorize'.format(auth_mast),
@@ -109,9 +112,7 @@
"""
# Get auth.mast token
- token = JWQL_OAUTH.mast_auth.authorize_access_token(
- request, headers={'Accept': 'application/json'}
- )
+ token = JWQL_OAUTH.mast_auth.authorize_access_token(request)
# Determine domain
base_url = get_base_url()
@@ -259,7 +260,7 @@
# Redirect to oauth login
global PREV_PAGE
PREV_PAGE = request.META.get('HTTP_REFERER')
- redirect_uri = os.path.join(get_base_url(), 'authorize')
+ redirect_uri = f"{get_base_url()}{reverse('jwql:authorize')}"
return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@
'bokeh>=1.0,<1.4',
'codecov',
'crds',
+ 'cryptography',
'django>=2.0,<3.0',
'flake8',
'inflection',
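
One change in the `oauth.py` hunk above is easy to miss: `redirect_uri` is now built with `reverse('jwql:authorize')` instead of `os.path.join`. Path joining is not URL joining, as this small illustration shows (`host.example` is a placeholder):

```python
import os

print(os.path.join("https://host.example", "authorize"))   # joined with a backslash on Windows
print(os.path.join("https://host.example", "/authorize"))  # absolute segment discards the base: '/authorize'
```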
|
{"golden_diff": "diff --git a/jwql/website/apps/jwql/oauth.py b/jwql/website/apps/jwql/oauth.py\n--- a/jwql/website/apps/jwql/oauth.py\n+++ b/jwql/website/apps/jwql/oauth.py\n@@ -8,6 +8,7 @@\n \n - Matthew Bourque\n - Christian Mesh\n+ - Ben Falk\n \n Use\n ---\n@@ -41,8 +42,9 @@\n import os\n import requests\n \n-from authlib.django.client import OAuth\n+from authlib.integrations.django_client import OAuth\n from django.shortcuts import redirect, render\n+from django.urls import reverse\n \n import jwql\n from jwql.utils.constants import MONITORS\n@@ -71,14 +73,15 @@\n \n # Register with auth.mast\n oauth = OAuth()\n- client_kwargs = {'scope': 'mast:user:info'}\n+ client_kwargs = {\n+ 'scope': 'mast:user:info',\n+ 'token_endpoint_auth_method': 'client_secret_basic',\n+ 'token_placement': 'header'}\n oauth.register(\n 'mast_auth',\n client_id='{}'.format(client_id),\n client_secret='{}'.format(client_secret),\n- access_token_url='https://{}/oauth/access_token?client_secret={}'.format(\n- auth_mast, client_secret\n- ),\n+ access_token_url='https://{}/oauth/token'.format(auth_mast),\n access_token_params=None,\n refresh_token_url=None,\n authorize_url='https://{}/oauth/authorize'.format(auth_mast),\n@@ -109,9 +112,7 @@\n \"\"\"\n \n # Get auth.mast token\n- token = JWQL_OAUTH.mast_auth.authorize_access_token(\n- request, headers={'Accept': 'application/json'}\n- )\n+ token = JWQL_OAUTH.mast_auth.authorize_access_token(request)\n \n # Determine domain\n base_url = get_base_url()\n@@ -259,7 +260,7 @@\n # Redirect to oauth login\n global PREV_PAGE\n PREV_PAGE = request.META.get('HTTP_REFERER')\n- redirect_uri = os.path.join(get_base_url(), 'authorize')\n+ redirect_uri = f\"{get_base_url()}{reverse('jwql:authorize')}\"\n \n return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri)\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,6 +20,7 @@\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n+ 'cryptography',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n", "issue": "Authentication is currently broken \nWhen attempting to log into the web app when running locally, we encounter this error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"python3.6/site-packages/django/core/handlers/exception.py\", line 34, in inner\r\n response = get_response(request)\r\n File \"python3.6/site-packages/django/core/handlers/base.py\", line 115, in _get_response\r\n response = self.process_exception_by_middleware(e, request)\r\n File \"python3.6/site-packages/django/core/handlers/base.py\", line 113, in _get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"d/jwql/jwql/website/apps/jwql/oauth.py\", line 113, in authorize\r\n request, headers={'Accept': 'application/json'}\r\n File \"python3.6/site-packages/authlib/integrations/django_client/remote_app.py\", line 63, in authorize_access_token\r\n return self.fetch_access_token(**params)\r\n File \"python3.6/site-packages/authlib/integrations/_client/remote_app.py\", line 106, in fetch_access_token\r\n token = client.fetch_token(token_endpoint, **kwargs)\r\n File \"python3.6/site-packages/authlib/oauth2/client.py\", line 202, in fetch_token\r\n headers=headers, **session_kwargs\r\n File \"python3.6/site-packages/authlib/oauth2/client.py\", line 223, in _fetch_token\r\n return self.parse_response_token(resp.json())\r\n File \"python3.6/site-packages/requests/models.py\", line 898, in json\r\n return complexjson.loads(self.text, **kwargs)\r\n File 
\"python3.6/json/__init__.py\", line 354, in loads\r\n return _default_decoder.decode(s)\r\n File \"python3.6/json/decoder.py\", line 339, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"python3.6/json/decoder.py\", line 357, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\n", "before_files": [{"content": "\"\"\"Provides an OAuth object for authentication of the ``jwql`` web app,\nas well as decorator functions to require user authentication in other\nviews of the web application.\n\n\nAuthors\n-------\n\n - Matthew Bourque\n - Christian Mesh\n\nUse\n---\n\n This module is intended to be imported and used as such:\n ::\n\n from .oauth import auth_info\n from .oauth import auth_required\n from .oauth import JWQL_OAUTH\n\n @auth_info\n def some_view(request):\n pass\n\n @auth_required\n def login(request):\n pass\n\nReferences\n----------\n Much of this code was taken from the ``authlib`` documentation,\n found here: ``http://docs.authlib.org/en/latest/client/django.html``\n\nDependencies\n------------\n The user must have a configuration file named ``config.json``\n placed in the ``jwql/utils/`` directory.\n\"\"\"\n\nimport os\nimport requests\n\nfrom authlib.django.client import OAuth\nfrom django.shortcuts import redirect, render\n\nimport jwql\nfrom jwql.utils.constants import MONITORS\nfrom jwql.utils.utils import get_base_url, get_config, check_config_for_key\n\nPREV_PAGE = '/'\n\n\ndef register_oauth():\n \"\"\"Register the ``jwql`` application with the ``auth.mast``\n authentication service.\n\n Returns\n -------\n oauth : Object\n An object containing methods to authenticate a user, provided\n by the ``auth.mast`` service.\n \"\"\"\n\n # Get configuration parameters\n for key in ['client_id', 'client_secret', 'auth_mast']:\n check_config_for_key(key)\n client_id = get_config()['client_id']\n client_secret = get_config()['client_secret']\n auth_mast = get_config()['auth_mast']\n\n # Register with auth.mast\n oauth = OAuth()\n client_kwargs = {'scope': 'mast:user:info'}\n oauth.register(\n 'mast_auth',\n client_id='{}'.format(client_id),\n client_secret='{}'.format(client_secret),\n access_token_url='https://{}/oauth/access_token?client_secret={}'.format(\n auth_mast, client_secret\n ),\n access_token_params=None,\n refresh_token_url=None,\n authorize_url='https://{}/oauth/authorize'.format(auth_mast),\n api_base_url='https://{}/1.1/'.format(auth_mast),\n client_kwargs=client_kwargs)\n\n return oauth\n\n\nJWQL_OAUTH = register_oauth()\n\n\ndef authorize(request):\n \"\"\"Spawn the authentication process for the user\n\n The authentication process involves retreiving an access token\n from ``auth.mast`` and porting the data to a cookie.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n HttpResponse object\n Outgoing response sent to the webpage\n \"\"\"\n\n # Get auth.mast token\n token = JWQL_OAUTH.mast_auth.authorize_access_token(\n request, headers={'Accept': 'application/json'}\n )\n\n # Determine domain\n base_url = get_base_url()\n if '127' in base_url:\n domain = '127.0.0.1'\n else:\n domain = base_url.split('//')[-1]\n\n # Set secure cookie parameters\n cookie_args = {}\n # cookie_args['domain'] = domain # Currently broken\n # cookie_args['secure'] = True # Currently broken\n cookie_args['httponly'] = True\n\n # Set the cookie\n response = redirect(PREV_PAGE)\n 
response.set_cookie(\"ASB-AUTH\", token[\"access_token\"], **cookie_args)\n\n return response\n\n\ndef auth_info(fn):\n \"\"\"A decorator function that will return user credentials along\n with what is returned by the original function.\n\n Parameters\n ----------\n fn : function\n The function to decorate\n\n Returns\n -------\n user_info : function\n The decorated function\n \"\"\"\n\n def user_info(request, **kwargs):\n \"\"\"Store authenticated user credentials in a cookie and return\n it. If the user is not authenticated, store no credentials in\n the cookie.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n fn : function\n The decorated function\n \"\"\"\n\n cookie = request.COOKIES.get(\"ASB-AUTH\")\n\n # If user is authenticated, return user credentials\n if cookie is not None:\n check_config_for_key('auth_mast')\n # Note: for now, this must be the development version\n auth_mast = get_config()['auth_mast']\n\n response = requests.get(\n 'https://{}/info'.format(auth_mast),\n headers={'Accept': 'application/json',\n 'Authorization': 'token {}'.format(cookie)})\n response = response.json()\n response['access_token'] = cookie\n\n # If user is not authenticated, return no credentials\n else:\n response = {'ezid': None, \"anon\": True, 'access_token': None}\n\n return fn(request, response, **kwargs)\n\n return user_info\n\n\ndef auth_required(fn):\n \"\"\"A decorator function that requires the given function to have\n authentication through ``auth.mast`` set up.\n\n Parameters\n ----------\n fn : function\n The function to decorate\n\n Returns\n -------\n check_auth : function\n The decorated function\n \"\"\"\n\n @auth_info\n def check_auth(request, user, **kwargs):\n \"\"\"Check if the user is authenticated through ``auth.mast``.\n If not, perform the authorization.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n user : dict\n A dictionary of user credentials\n\n Returns\n -------\n fn : function\n The decorated function\n \"\"\"\n\n # If user is currently anonymous, require a login\n if user['ezid']:\n\n return fn(request, user, **kwargs)\n\n else:\n template = 'not_authenticated.html'\n context = {'inst': ''}\n\n return render(request, template, context)\n\n return check_auth\n\n\n@auth_info\ndef login(request, user):\n \"\"\"Spawn a login process for the user\n\n The ``auth_requred`` decorator is used to require that the user\n authenticate through ``auth.mast``, then the user is redirected\n back to the homepage.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n user : dict\n A dictionary of user credentials.\n\n Returns\n -------\n HttpResponse object\n Outgoing response sent to the webpage\n \"\"\"\n\n # Redirect to oauth login\n global PREV_PAGE\n PREV_PAGE = request.META.get('HTTP_REFERER')\n redirect_uri = os.path.join(get_base_url(), 'authorize')\n\n return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri)\n\n\ndef logout(request):\n \"\"\"Spawn a logout process for the user\n\n Upon logout, the user's ``auth.mast`` credientials are removed and\n the user is redirected back to the homepage.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n user : dict\n A dictionary of user credentials.\n\n Returns\n -------\n HttpResponse object\n Outgoing response sent to the webpage\n \"\"\"\n\n global PREV_PAGE\n PREV_PAGE = request.META.get('HTTP_REFERER')\n 
response = redirect(PREV_PAGE)\n response.delete_cookie(\"ASB-AUTH\")\n\n return response\n", "path": "jwql/website/apps/jwql/oauth.py"}, {"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.23.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',\n 'git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles'\n ]\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Provides an OAuth object for authentication of the ``jwql`` web app,\nas well as decorator functions to require user authentication in other\nviews of the web application.\n\n\nAuthors\n-------\n\n - Matthew Bourque\n - Christian Mesh\n - Ben Falk\n\nUse\n---\n\n This module is intended to be imported and used as such:\n ::\n\n from .oauth import auth_info\n from .oauth import auth_required\n from .oauth import JWQL_OAUTH\n\n @auth_info\n def some_view(request):\n pass\n\n @auth_required\n def login(request):\n pass\n\nReferences\n----------\n Much of this code was taken from the ``authlib`` documentation,\n found here: ``http://docs.authlib.org/en/latest/client/django.html``\n\nDependencies\n------------\n The user must have a configuration file named ``config.json``\n placed in the ``jwql/utils/`` directory.\n\"\"\"\n\nimport os\nimport requests\n\nfrom authlib.integrations.django_client import OAuth\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\n\nimport jwql\nfrom jwql.utils.constants import MONITORS\nfrom jwql.utils.utils import get_base_url, get_config, check_config_for_key\n\nPREV_PAGE = '/'\n\n\ndef register_oauth():\n \"\"\"Register the ``jwql`` application with the ``auth.mast``\n authentication service.\n\n Returns\n -------\n oauth : Object\n An object containing methods to authenticate a user, provided\n by the ``auth.mast`` service.\n \"\"\"\n\n # Get configuration parameters\n for key in ['client_id', 'client_secret', 'auth_mast']:\n check_config_for_key(key)\n client_id = get_config()['client_id']\n client_secret = get_config()['client_secret']\n auth_mast = get_config()['auth_mast']\n\n # Register with auth.mast\n oauth = OAuth()\n client_kwargs = {\n 'scope': 'mast:user:info',\n 'token_endpoint_auth_method': 'client_secret_basic',\n 'token_placement': 'header'}\n oauth.register(\n 'mast_auth',\n 
client_id='{}'.format(client_id),\n client_secret='{}'.format(client_secret),\n access_token_url='https://{}/oauth/token'.format(auth_mast),\n access_token_params=None,\n refresh_token_url=None,\n authorize_url='https://{}/oauth/authorize'.format(auth_mast),\n api_base_url='https://{}/1.1/'.format(auth_mast),\n client_kwargs=client_kwargs)\n\n return oauth\n\n\nJWQL_OAUTH = register_oauth()\n\n\ndef authorize(request):\n \"\"\"Spawn the authentication process for the user\n\n The authentication process involves retreiving an access token\n from ``auth.mast`` and porting the data to a cookie.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n HttpResponse object\n Outgoing response sent to the webpage\n \"\"\"\n\n # Get auth.mast token\n token = JWQL_OAUTH.mast_auth.authorize_access_token(request)\n\n # Determine domain\n base_url = get_base_url()\n if '127' in base_url:\n domain = '127.0.0.1'\n else:\n domain = base_url.split('//')[-1]\n\n # Set secure cookie parameters\n cookie_args = {}\n # cookie_args['domain'] = domain # Currently broken\n # cookie_args['secure'] = True # Currently broken\n cookie_args['httponly'] = True\n\n # Set the cookie\n response = redirect(PREV_PAGE)\n response.set_cookie(\"ASB-AUTH\", token[\"access_token\"], **cookie_args)\n\n return response\n\n\ndef auth_info(fn):\n \"\"\"A decorator function that will return user credentials along\n with what is returned by the original function.\n\n Parameters\n ----------\n fn : function\n The function to decorate\n\n Returns\n -------\n user_info : function\n The decorated function\n \"\"\"\n\n def user_info(request, **kwargs):\n \"\"\"Store authenticated user credentials in a cookie and return\n it. If the user is not authenticated, store no credentials in\n the cookie.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n\n Returns\n -------\n fn : function\n The decorated function\n \"\"\"\n\n cookie = request.COOKIES.get(\"ASB-AUTH\")\n\n # If user is authenticated, return user credentials\n if cookie is not None:\n check_config_for_key('auth_mast')\n # Note: for now, this must be the development version\n auth_mast = get_config()['auth_mast']\n\n response = requests.get(\n 'https://{}/info'.format(auth_mast),\n headers={'Accept': 'application/json',\n 'Authorization': 'token {}'.format(cookie)})\n response = response.json()\n response['access_token'] = cookie\n\n # If user is not authenticated, return no credentials\n else:\n response = {'ezid': None, \"anon\": True, 'access_token': None}\n\n return fn(request, response, **kwargs)\n\n return user_info\n\n\ndef auth_required(fn):\n \"\"\"A decorator function that requires the given function to have\n authentication through ``auth.mast`` set up.\n\n Parameters\n ----------\n fn : function\n The function to decorate\n\n Returns\n -------\n check_auth : function\n The decorated function\n \"\"\"\n\n @auth_info\n def check_auth(request, user, **kwargs):\n \"\"\"Check if the user is authenticated through ``auth.mast``.\n If not, perform the authorization.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n user : dict\n A dictionary of user credentials\n\n Returns\n -------\n fn : function\n The decorated function\n \"\"\"\n\n # If user is currently anonymous, require a login\n if user['ezid']:\n\n return fn(request, user, **kwargs)\n\n else:\n template = 'not_authenticated.html'\n context = {'inst': ''}\n\n return 
render(request, template, context)\n\n return check_auth\n\n\n@auth_info\ndef login(request, user):\n \"\"\"Spawn a login process for the user\n\n The ``auth_requred`` decorator is used to require that the user\n authenticate through ``auth.mast``, then the user is redirected\n back to the homepage.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n user : dict\n A dictionary of user credentials.\n\n Returns\n -------\n HttpResponse object\n Outgoing response sent to the webpage\n \"\"\"\n\n # Redirect to oauth login\n global PREV_PAGE\n PREV_PAGE = request.META.get('HTTP_REFERER')\n redirect_uri = f\"{get_base_url()}{reverse('jwql:authorize')}\"\n\n return JWQL_OAUTH.mast_auth.authorize_redirect(request, redirect_uri)\n\n\ndef logout(request):\n \"\"\"Spawn a logout process for the user\n\n Upon logout, the user's ``auth.mast`` credientials are removed and\n the user is redirected back to the homepage.\n\n Parameters\n ----------\n request : HttpRequest object\n Incoming request from the webpage\n user : dict\n A dictionary of user credentials.\n\n Returns\n -------\n HttpResponse object\n Outgoing response sent to the webpage\n \"\"\"\n\n global PREV_PAGE\n PREV_PAGE = request.META.get('HTTP_REFERER')\n response = redirect(PREV_PAGE)\n response.delete_cookie(\"ASB-AUTH\")\n\n return response\n", "path": "jwql/website/apps/jwql/oauth.py"}, {"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.23.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Mike Engesser, Mees Fix, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Teagan King, Catherine Martlin, Maria Pena-Guerrero, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/[email protected]',\n 'git+https://github.com/spacetelescope/jwst_reffiles#egg=jwst_reffiles'\n ]\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'crds',\n 'cryptography',\n 'django>=2.0,<3.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'nodejs',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]}
| 3,803 | 607 |
gh_patches_debug_13568
|
rasdani/github-patches
|
git_diff
|
facebookresearch__xformers-40
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logo doesn't appear on documentation sub-pages
# 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
Currently, the `xFormers` logo only appears on the main docs page and the `what_is_xformers` page which is present in the same directory as it, but not on the other sub-pages. I was wondering whether setting the Sphinx option `html_logo` in the `conf.py` file would fix this.
I'd be happy to make a PR for this; let me know what you think.
--- END ISSUE ---
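
For concreteness, the Sphinx option the reporter has in mind would look like the line below; the logo path is hypothetical and must sit under one of the `html_static_path` directories. The reference patch later in this entry only corrects two URLs, so treat this as the reporter's proposal rather than the merged fix:

```python
# docs/source/conf.py (proposed addition; path is hypothetical)
html_logo = "_static/logos/xformers_logo.png"
```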
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6
7 # type: ignore
8 # Configuration file for the Sphinx documentation builder.
9 #
10 # This file only contains a selection of the most common options. For a full
11 # list see the documentation:
12 # https://www.sphinx-doc.org/en/master/usage/configuration.html
13
14 # -- Path setup --------------------------------------------------------------
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 import os
21 import sys
22 from typing import Any, List
23
24 # The theme to use for HTML and HTML Help pages. See the documentation for
25 # a list of builtin themes.
26 #
27 from recommonmark.transform import AutoStructify
28
29 sys.path.insert(0, os.path.abspath("../.."))
30
31 # -- Project information -----------------------------------------------------
32
33 project = "xFormers"
34 copyright = "2021, Facebook AI Research"
35 author = "Facebook AI Research"
36
37 # The full version, including alpha/beta/rc tags
38 release = "0.0.1"
39
40
41 # -- General configuration ---------------------------------------------------
42
43 # Add any Sphinx extension module names here, as strings. They can be
44 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
45 # ones.
46 extensions = [
47 "sphinx.ext.autodoc",
48 "sphinx.ext.autosectionlabel",
49 "sphinx.ext.napoleon", # support NumPy and Google style docstrings
50 "recommonmark",
51 "sphinx.ext.intersphinx",
52 "sphinx.ext.todo",
53 "sphinx.ext.coverage",
54 "sphinx.ext.mathjax",
55 "sphinx.ext.viewcode",
56 "sphinx.ext.githubpages",
57 "sphinx.ext.doctest",
58 "sphinx.ext.ifconfig",
59 ]
60
61 # autosectionlabel throws warnings if section names are duplicated.
62 # The following tells autosectionlabel to not throw a warning for
63 # duplicated section names that are in different documents.
64 autosectionlabel_prefix_document = True
65
66 # -- Configurations for plugins ------------
67 napoleon_google_docstring = True
68 napoleon_include_init_with_doc = True
69 napoleon_include_special_with_doc = True
70 napoleon_numpy_docstring = False
71 napoleon_use_rtype = False
72 autodoc_inherit_docstrings = False
73 autodoc_member_order = "bysource"
74
75 intersphinx_mapping = {
76 "python": ("https://docs.python.org/3.6", None),
77 "numpy": ("https://docs.scipy.org/doc/numpy/", None),
78 "torch": ("https://pytorch.org/docs/master/", None),
79 }
80 # -------------------------
81
82 # Add any paths that contain templates here, relative to this directory.
83 templates_path = ["_templates"]
84
85 # List of patterns, relative to source directory, that match files and
86 # directories to ignore when looking for source files.
87 # This pattern also affects html_static_path and html_extra_path.
88 exclude_patterns: List[Any] = []
89
90 # The suffix(es) of source filenames.
91 # You can specify multiple suffix as a list of string:
92 #
93 source_suffix = [".rst", ".md"]
94
95 # The master toctree document.
96 master_doc = "index"
97
98 # If true, `todo` and `todoList` produce output, else they produce nothing.
99 todo_include_todos = True
100
101 # -- Options for HTML output -------------------------------------------------
102
103
104 html_theme = "pytorch_sphinx_theme"
105 templates_path = ["_templates"]
106
107
108 # Add any paths that contain custom static files (such as style sheets) here,
109 # Theme options are theme-specific and customize the look and feel of a theme
110 # further. For a list of options available for each theme, see the
111 # documentation.
112 #
113 html_theme_options = {
114 "includehidden": True,
115 "canonical_url": "https://fairinternal.github.io/xformers",
116 "pytorch_project": "docs",
117 "logo_only": True, # default = False
118 }
119
120 # relative to this directory. They are copied after the builtin static files,
121 # so a file named "default.css" will overwrite the builtin "default.css".
122 html_static_path = ["_static"]
123
124 # setting custom stylesheets https://stackoverflow.com/a/34420612
125 html_context = {"css_files": ["_static/css/customize.css"]}
126
127 # -- Options for HTMLHelp output ------------------------------------------
128
129 # Output file base name for HTML help builder.
130 htmlhelp_basename = "xformersdocs"
131 github_doc_root = "https://github.com/fairinternal/xformers/blob/v0.1/"
132
133
134 # Over-ride PyTorch Sphinx css
135 def setup(app):
136 app.add_config_value(
137 "recommonmark_config",
138 {
139 "url_resolver": lambda url: github_doc_root + url,
140 "auto_toc_tree_section": "Contents",
141 "enable_math": True,
142 "enable_inline_math": True,
143 "enable_eval_rst": True,
144 "enable_auto_toc_tree": True,
145 },
146 True,
147 )
148 app.add_transform(AutoStructify)
149 app.add_css_file("css/customize.css")
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -112,7 +112,7 @@
#
html_theme_options = {
"includehidden": True,
- "canonical_url": "https://fairinternal.github.io/xformers",
+ "canonical_url": "https://facebookresearch.github.io/xformers",
"pytorch_project": "docs",
"logo_only": True, # default = False
}
@@ -128,7 +128,7 @@
# Output file base name for HTML help builder.
htmlhelp_basename = "xformersdocs"
-github_doc_root = "https://github.com/fairinternal/xformers/blob/v0.1/"
+github_doc_root = "https://github.com/facebookresearch/xformers/tree/main/docs/"
# Over-ride PyTorch Sphinx css
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -112,7 +112,7 @@\n #\n html_theme_options = {\n \"includehidden\": True,\n- \"canonical_url\": \"https://fairinternal.github.io/xformers\",\n+ \"canonical_url\": \"https://facebookresearch.github.io/xformers\",\n \"pytorch_project\": \"docs\",\n \"logo_only\": True, # default = False\n }\n@@ -128,7 +128,7 @@\n \n # Output file base name for HTML help builder.\n htmlhelp_basename = \"xformersdocs\"\n-github_doc_root = \"https://github.com/fairinternal/xformers/blob/v0.1/\"\n+github_doc_root = \"https://github.com/facebookresearch/xformers/tree/main/docs/\"\n \n \n # Over-ride PyTorch Sphinx css\n", "issue": "Logo doesn't appear on documentation sub-pages\n# \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nCurrently, the `xFormers` logo only appears on the main docs page and the `what_is_xformers` page which is present in the same directory as it, but not on the other sub-pages. I was wondering whether setting the Sphinx option `html_logo` in the `conf.py` file would fix this.\r\n\r\nWould be happy to make a PR for this, let me know what you think.\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# type: ignore\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom typing import Any, List\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nfrom recommonmark.transform import AutoStructify\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"xFormers\"\ncopyright = \"2021, Facebook AI Research\"\nauthor = \"Facebook AI Research\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"0.0.1\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.napoleon\", # support NumPy and Google style docstrings\n \"recommonmark\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.ifconfig\",\n]\n\n# autosectionlabel throws warnings if section names are duplicated.\n# The following tells autosectionlabel to not throw a warning for\n# duplicated section names that are in different documents.\nautosectionlabel_prefix_document = True\n\n# -- Configurations for plugins ------------\nnapoleon_google_docstring = True\nnapoleon_include_init_with_doc = True\nnapoleon_include_special_with_doc = True\nnapoleon_numpy_docstring = False\nnapoleon_use_rtype = False\nautodoc_inherit_docstrings = False\nautodoc_member_order = \"bysource\"\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.6\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n}\n# -------------------------\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns: List[Any] = []\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for HTML output -------------------------------------------------\n\n\nhtml_theme = \"pytorch_sphinx_theme\"\ntemplates_path = [\"_templates\"]\n\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"includehidden\": True,\n \"canonical_url\": \"https://fairinternal.github.io/xformers\",\n \"pytorch_project\": \"docs\",\n \"logo_only\": True, # default = False\n}\n\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# setting custom stylesheets https://stackoverflow.com/a/34420612\nhtml_context = {\"css_files\": [\"_static/css/customize.css\"]}\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"xformersdocs\"\ngithub_doc_root = \"https://github.com/fairinternal/xformers/blob/v0.1/\"\n\n\n# Over-ride PyTorch Sphinx css\ndef setup(app):\n app.add_config_value(\n \"recommonmark_config\",\n {\n \"url_resolver\": lambda url: github_doc_root + url,\n \"auto_toc_tree_section\": \"Contents\",\n \"enable_math\": True,\n \"enable_inline_math\": True,\n \"enable_eval_rst\": True,\n \"enable_auto_toc_tree\": True,\n },\n True,\n )\n app.add_transform(AutoStructify)\n app.add_css_file(\"css/customize.css\")\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n# type: ignore\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nfrom typing import Any, List\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nfrom recommonmark.transform import AutoStructify\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"xFormers\"\ncopyright = \"2021, Facebook AI Research\"\nauthor = \"Facebook AI Research\"\n\n# The full version, including alpha/beta/rc tags\nrelease = \"0.0.1\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.napoleon\", # support NumPy and Google style docstrings\n \"recommonmark\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.ifconfig\",\n]\n\n# autosectionlabel throws warnings if section names are duplicated.\n# The following tells autosectionlabel to not throw a warning for\n# duplicated section names that are in different documents.\nautosectionlabel_prefix_document = True\n\n# -- Configurations for plugins ------------\nnapoleon_google_docstring = True\nnapoleon_include_init_with_doc = True\nnapoleon_include_special_with_doc = True\nnapoleon_numpy_docstring = False\nnapoleon_use_rtype = False\nautodoc_inherit_docstrings = False\nautodoc_member_order = \"bysource\"\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.6\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n}\n# -------------------------\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns: List[Any] = []\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for HTML output -------------------------------------------------\n\n\nhtml_theme = \"pytorch_sphinx_theme\"\ntemplates_path = [\"_templates\"]\n\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_theme_options = {\n \"includehidden\": True,\n \"canonical_url\": \"https://facebookresearch.github.io/xformers\",\n \"pytorch_project\": \"docs\",\n \"logo_only\": True, # default = False\n}\n\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# setting custom stylesheets https://stackoverflow.com/a/34420612\nhtml_context = {\"css_files\": [\"_static/css/customize.css\"]}\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"xformersdocs\"\ngithub_doc_root = \"https://github.com/facebookresearch/xformers/tree/main/docs/\"\n\n\n# Over-ride PyTorch Sphinx css\ndef setup(app):\n app.add_config_value(\n \"recommonmark_config\",\n {\n \"url_resolver\": lambda url: github_doc_root + url,\n \"auto_toc_tree_section\": \"Contents\",\n \"enable_math\": True,\n \"enable_inline_math\": True,\n \"enable_eval_rst\": True,\n \"enable_auto_toc_tree\": True,\n },\n True,\n )\n app.add_transform(AutoStructify)\n app.add_css_file(\"css/customize.css\")\n", "path": "docs/source/conf.py"}]}
| 1,856 | 199 |
gh_patches_debug_35287
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-6529
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pkgutil.iter_modules with arbitrary path
## Description of the issue
The iter_modules patch implemented in #5959 has a bug where the path must start with the _MEIPASS or it will throw an assertion error.
The normal iter_modules function can take any valid path. Your code first calls that:
https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py#L37
and later asserts it starts with _MEIPASS
https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py#L59
which means that a path outside of the executable will throw the assertion error.
I think it was overlooked during implementation that this function could be used to look at a path outside the executable path.
### Context information (for bug reports)
* PyInstaller Version 4.8
* All OS and python versions
I will have a look into creating a pull request to fix this issue.
I think the solution is to change the assertion to an if statement to only run the code below that if it starts with _MEIPASS and thus could be bundled in the executable.
--- END ISSUE ---
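
The reporter's proposal, replacing the assertion with a guard so that non-bundled paths are simply skipped, can be sketched as a small helper; the names mirror the hook's locals, and the reference patch below generalises the idea to every entry in `path`:

```python
import os

def _bundled_pkg_prefix(path_entry, sys_prefix):
    """Return the dotted prefix for a bundled path entry, or None when the
    entry lies outside the bundle (sys_prefix is sys._MEIPASS + os.path.sep)."""
    pkg_path = os.path.normpath(path_entry)
    if not pkg_path.startswith(sys_prefix):
        return None  # not bundled, nothing to enumerate from the PYZ archive
    prefix = pkg_path[len(sys_prefix):].replace(os.path.sep, '.')
    return prefix if prefix.endswith('.') else prefix + '.'
```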
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2021, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11 #
12 # This rthook overrides pkgutil.iter_modules with custom implementation that uses PyInstaller's FrozenImporter to list
13 # sub-modules embedded in the PYZ archive. The non-embedded modules (binary extensions, or .pyc modules in noarchive
14 # build) are handled by original pkgutil iter_modules implementation (and consequently, python's FileFinder).
15 #
16 # The preferred way of adding support for iter_modules would be adding non-standard iter_modules() method to
17 # FrozenImporter itself. However, that seems to work only for path entry finders (for use with sys.path_hooks), while
18 # PyInstaller's FrozenImporter is registered as meta path finders (for use with sys.meta_path). Turning FrozenImporter
19 # into path entry finder, would seemingly require the latter to support on-filesystem resources (e.g., extension
20 # modules) in addition to PYZ-embedded ones.
21 #
22 # Therefore, we instead opt for overriding pkgutil.iter_modules with custom implementation that augments the output of
23 # original implementation with contents of PYZ archive from FrozenImporter's TOC.
24
25 import os
26 import pkgutil
27 import sys
28
29 from pyimod03_importers import FrozenImporter
30
31 _orig_pkgutil_iter_modules = pkgutil.iter_modules
32
33
34 def _pyi_pkgutil_iter_modules(path=None, prefix=''):
35 # Use original implementation to discover on-filesystem modules (binary extensions in regular builds, or both binary
36 # extensions and compiled pyc modules in noarchive debug builds).
37 yield from _orig_pkgutil_iter_modules(path, prefix)
38
39 # Find the instance of PyInstaller's FrozenImporter.
40 for importer in pkgutil.iter_importers():
41 if isinstance(importer, FrozenImporter):
42 break
43 else:
44 return
45
46 if not path:
47 # Search for all top-level packages/modules. These will have no dots in their entry names.
48 for entry in importer.toc:
49 if entry.count('.') != 0:
50 continue
51 is_pkg = importer.is_package(entry)
52 yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg)
53 else:
54 # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.
55 SYS_PREFIX = sys._MEIPASS + os.path.sep
56 SYS_PREFIXLEN = len(SYS_PREFIX)
57 # Only single path is supported, and it must start with sys._MEIPASS.
58 pkg_path = os.path.normpath(path[0])
59 assert pkg_path.startswith(SYS_PREFIX)
60 # Construct package prefix from path...
61 pkg_prefix = pkg_path[SYS_PREFIXLEN:]
62 pkg_prefix = pkg_prefix.replace(os.path.sep, '.')
63 # ... and ensure it ends with a dot (so we can directly filter out the package itself).
64 if not pkg_prefix.endswith('.'):
65 pkg_prefix += '.'
66 pkg_prefix_len = len(pkg_prefix)
67
68 for entry in importer.toc:
69 if not entry.startswith(pkg_prefix):
70 continue
71 name = entry[pkg_prefix_len:]
72 if name.count('.') != 0:
73 continue
74 is_pkg = importer.is_package(entry)
75 yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)
76
77
78 pkgutil.iter_modules = _pyi_pkgutil_iter_modules
79
```
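
A minimal reproduction of the report, assuming a frozen application whose plugins live outside the bundle (the directory is hypothetical): before the change this call tripped the `assert`; afterwards it simply falls back to the stock `FileFinder` behaviour.

```python
import pkgutil

for mod in pkgutil.iter_modules(["/opt/myapp/plugins"], prefix="plugins."):
    print(mod.name, mod.ispkg)
```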
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py
--- a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py
+++ b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py
@@ -43,7 +43,7 @@
else:
return
- if not path:
+ if path is None:
# Search for all top-level packages/modules. These will have no dots in their entry names.
for entry in importer.toc:
if entry.count('.') != 0:
@@ -54,25 +54,28 @@
# Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.
SYS_PREFIX = sys._MEIPASS + os.path.sep
SYS_PREFIXLEN = len(SYS_PREFIX)
- # Only single path is supported, and it must start with sys._MEIPASS.
- pkg_path = os.path.normpath(path[0])
- assert pkg_path.startswith(SYS_PREFIX)
- # Construct package prefix from path...
- pkg_prefix = pkg_path[SYS_PREFIXLEN:]
- pkg_prefix = pkg_prefix.replace(os.path.sep, '.')
- # ... and ensure it ends with a dot (so we can directly filter out the package itself).
- if not pkg_prefix.endswith('.'):
- pkg_prefix += '.'
- pkg_prefix_len = len(pkg_prefix)
- for entry in importer.toc:
- if not entry.startswith(pkg_prefix):
- continue
- name = entry[pkg_prefix_len:]
- if name.count('.') != 0:
+ for pkg_path in path:
+ pkg_path = os.path.normpath(pkg_path)
+ if not pkg_path.startswith(SYS_PREFIX):
+ # if the path does not start with sys._MEIPASS then it cannot be a bundled package.
continue
- is_pkg = importer.is_package(entry)
- yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)
+ # Construct package prefix from path...
+ pkg_prefix = pkg_path[SYS_PREFIXLEN:]
+ pkg_prefix = pkg_prefix.replace(os.path.sep, '.')
+ # ... and ensure it ends with a dot (so we can directly filter out the package itself).
+ if not pkg_prefix.endswith('.'):
+ pkg_prefix += '.'
+ pkg_prefix_len = len(pkg_prefix)
+
+ for entry in importer.toc:
+ if not entry.startswith(pkg_prefix):
+ continue
+ name = entry[pkg_prefix_len:]
+ if name.count('.') != 0:
+ continue
+ is_pkg = importer.is_package(entry)
+ yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)
pkgutil.iter_modules = _pyi_pkgutil_iter_modules
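
The patch also swaps `if not path:` for `if path is None:`, which matters because an empty list and `None` mean different things to stock `pkgutil`, a subtlety the override now preserves:

```python
import pkgutil

print(len(list(pkgutil.iter_modules())))  # None: scan sys.path for top-level modules
print(list(pkgutil.iter_modules([])))     # []: an empty search path yields nothing
```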
|
{"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py\n@@ -43,7 +43,7 @@\n else:\n return\n \n- if not path:\n+ if path is None:\n # Search for all top-level packages/modules. These will have no dots in their entry names.\n for entry in importer.toc:\n if entry.count('.') != 0:\n@@ -54,25 +54,28 @@\n # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.\n SYS_PREFIX = sys._MEIPASS + os.path.sep\n SYS_PREFIXLEN = len(SYS_PREFIX)\n- # Only single path is supported, and it must start with sys._MEIPASS.\n- pkg_path = os.path.normpath(path[0])\n- assert pkg_path.startswith(SYS_PREFIX)\n- # Construct package prefix from path...\n- pkg_prefix = pkg_path[SYS_PREFIXLEN:]\n- pkg_prefix = pkg_prefix.replace(os.path.sep, '.')\n- # ... and ensure it ends with a dot (so we can directly filter out the package itself).\n- if not pkg_prefix.endswith('.'):\n- pkg_prefix += '.'\n- pkg_prefix_len = len(pkg_prefix)\n \n- for entry in importer.toc:\n- if not entry.startswith(pkg_prefix):\n- continue\n- name = entry[pkg_prefix_len:]\n- if name.count('.') != 0:\n+ for pkg_path in path:\n+ pkg_path = os.path.normpath(pkg_path)\n+ if not pkg_path.startswith(SYS_PREFIX):\n+ # if the path does not start with sys._MEIPASS then it cannot be a bundled package.\n continue\n- is_pkg = importer.is_package(entry)\n- yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)\n+ # Construct package prefix from path...\n+ pkg_prefix = pkg_path[SYS_PREFIXLEN:]\n+ pkg_prefix = pkg_prefix.replace(os.path.sep, '.')\n+ # ... and ensure it ends with a dot (so we can directly filter out the package itself).\n+ if not pkg_prefix.endswith('.'):\n+ pkg_prefix += '.'\n+ pkg_prefix_len = len(pkg_prefix)\n+\n+ for entry in importer.toc:\n+ if not entry.startswith(pkg_prefix):\n+ continue\n+ name = entry[pkg_prefix_len:]\n+ if name.count('.') != 0:\n+ continue\n+ is_pkg = importer.is_package(entry)\n+ yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)\n \n \n pkgutil.iter_modules = _pyi_pkgutil_iter_modules\n", "issue": "pkgutil.iter_modules with arbitrary path\n## Description of the issue\r\nThe iter_modules patch implemented in #5959 has a bug where the path must start with the _MEIPASS or it will throw an assertion error.\r\n\r\nThe normal iter_modules function can take any valid path. 
Your code first calls that:\r\nhttps://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py#L37\r\n\r\nand later asserts it starts with _MEIPASS\r\nhttps://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py#L59\r\n\r\nwhich means that a path outside of the executable will throw the assertion error.\r\n\r\nI think when implementing it was overlooked that this function could be used to look at a path outside the executable path.\r\n\r\n### Context information (for bug reports)\r\n\r\n* PyInstaller Version 4.8\r\n* All OS and python versions\r\n\r\nI will have a look into creating a pull request to fix this issue.\r\nI think the solution is to change the assertion to an if statement to only run the code below that if it starts with _MEIPASS and thus could be bundled in the executable.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n#\n# This rthook overrides pkgutil.iter_modules with custom implementation that uses PyInstaller's FrozenImporter to list\n# sub-modules embedded in the PYZ archive. The non-embedded modules (binary extensions, or .pyc modules in noarchive\n# build) are handled by original pkgutil iter_modules implementation (and consequently, python's FileFinder).\n#\n# The preferred way of adding support for iter_modules would be adding non-standard iter_modules() method to\n# FrozenImporter itself. However, that seems to work only for path entry finders (for use with sys.path_hooks), while\n# PyInstaller's FrozenImporter is registered as meta path finders (for use with sys.meta_path). Turning FrozenImporter\n# into path entry finder, would seemingly require the latter to support on-filesystem resources (e.g., extension\n# modules) in addition to PYZ-embedded ones.\n#\n# Therefore, we instead opt for overriding pkgutil.iter_modules with custom implementation that augments the output of\n# original implementation with contents of PYZ archive from FrozenImporter's TOC.\n\nimport os\nimport pkgutil\nimport sys\n\nfrom pyimod03_importers import FrozenImporter\n\n_orig_pkgutil_iter_modules = pkgutil.iter_modules\n\n\ndef _pyi_pkgutil_iter_modules(path=None, prefix=''):\n # Use original implementation to discover on-filesystem modules (binary extensions in regular builds, or both binary\n # extensions and compiled pyc modules in noarchive debug builds).\n yield from _orig_pkgutil_iter_modules(path, prefix)\n\n # Find the instance of PyInstaller's FrozenImporter.\n for importer in pkgutil.iter_importers():\n if isinstance(importer, FrozenImporter):\n break\n else:\n return\n\n if not path:\n # Search for all top-level packages/modules. 
These will have no dots in their entry names.\n for entry in importer.toc:\n if entry.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg)\n else:\n # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.\n SYS_PREFIX = sys._MEIPASS + os.path.sep\n SYS_PREFIXLEN = len(SYS_PREFIX)\n # Only single path is supported, and it must start with sys._MEIPASS.\n pkg_path = os.path.normpath(path[0])\n assert pkg_path.startswith(SYS_PREFIX)\n # Construct package prefix from path...\n pkg_prefix = pkg_path[SYS_PREFIXLEN:]\n pkg_prefix = pkg_prefix.replace(os.path.sep, '.')\n # ... and ensure it ends with a dot (so we can directly filter out the package itself).\n if not pkg_prefix.endswith('.'):\n pkg_prefix += '.'\n pkg_prefix_len = len(pkg_prefix)\n\n for entry in importer.toc:\n if not entry.startswith(pkg_prefix):\n continue\n name = entry[pkg_prefix_len:]\n if name.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)\n\n\npkgutil.iter_modules = _pyi_pkgutil_iter_modules\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2021, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n#\n# This rthook overrides pkgutil.iter_modules with custom implementation that uses PyInstaller's FrozenImporter to list\n# sub-modules embedded in the PYZ archive. The non-embedded modules (binary extensions, or .pyc modules in noarchive\n# build) are handled by original pkgutil iter_modules implementation (and consequently, python's FileFinder).\n#\n# The preferred way of adding support for iter_modules would be adding non-standard iter_modules() method to\n# FrozenImporter itself. However, that seems to work only for path entry finders (for use with sys.path_hooks), while\n# PyInstaller's FrozenImporter is registered as meta path finders (for use with sys.meta_path). Turning FrozenImporter\n# into path entry finder, would seemingly require the latter to support on-filesystem resources (e.g., extension\n# modules) in addition to PYZ-embedded ones.\n#\n# Therefore, we instead opt for overriding pkgutil.iter_modules with custom implementation that augments the output of\n# original implementation with contents of PYZ archive from FrozenImporter's TOC.\n\nimport os\nimport pkgutil\nimport sys\n\nfrom pyimod03_importers import FrozenImporter\n\n_orig_pkgutil_iter_modules = pkgutil.iter_modules\n\n\ndef _pyi_pkgutil_iter_modules(path=None, prefix=''):\n # Use original implementation to discover on-filesystem modules (binary extensions in regular builds, or both binary\n # extensions and compiled pyc modules in noarchive debug builds).\n yield from _orig_pkgutil_iter_modules(path, prefix)\n\n # Find the instance of PyInstaller's FrozenImporter.\n for importer in pkgutil.iter_importers():\n if isinstance(importer, FrozenImporter):\n break\n else:\n return\n\n if path is None:\n # Search for all top-level packages/modules. 
These will have no dots in their entry names.\n for entry in importer.toc:\n if entry.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + entry, is_pkg)\n else:\n # Declare SYS_PREFIX locally, to avoid clash with eponymous global symbol from pyi_rth_pkgutil hook.\n SYS_PREFIX = sys._MEIPASS + os.path.sep\n SYS_PREFIXLEN = len(SYS_PREFIX)\n\n for pkg_path in path:\n pkg_path = os.path.normpath(pkg_path)\n if not pkg_path.startswith(SYS_PREFIX):\n # if the path does not start with sys._MEIPASS then it cannot be a bundled package.\n continue\n # Construct package prefix from path...\n pkg_prefix = pkg_path[SYS_PREFIXLEN:]\n pkg_prefix = pkg_prefix.replace(os.path.sep, '.')\n # ... and ensure it ends with a dot (so we can directly filter out the package itself).\n if not pkg_prefix.endswith('.'):\n pkg_prefix += '.'\n pkg_prefix_len = len(pkg_prefix)\n\n for entry in importer.toc:\n if not entry.startswith(pkg_prefix):\n continue\n name = entry[pkg_prefix_len:]\n if name.count('.') != 0:\n continue\n is_pkg = importer.is_package(entry)\n yield pkgutil.ModuleInfo(importer, prefix + name, is_pkg)\n\n\npkgutil.iter_modules = _pyi_pkgutil_iter_modules\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_pkgutil.py"}]}
| 1,429 | 620 |
gh_patches_debug_1650
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-13273
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
unravel_index
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/numpy/indexing.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.jax.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6
7
8 @to_ivy_arrays_and_back
9 def diagonal(a, offset=0, axis1=0, axis2=1):
10 return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
11
12
13 @to_ivy_arrays_and_back
14 def diag(v, k=0):
15 return ivy.diag(v, k=k)
16
17
18 @to_ivy_arrays_and_back
19 def diag_indices(n, ndim=2):
20 idx = ivy.arange(n, dtype=int)
21 return (idx,) * ndim
22
23
24 # take_along_axis
25 @to_ivy_arrays_and_back
26 def take_along_axis(arr, indices, axis, mode="fill"):
27 return ivy.take_along_axis(arr, indices, axis, mode=mode)
28
29
30 @to_ivy_arrays_and_back
31 def tril_indices(n_rows, n_cols=None, k=0):
32 return ivy.tril_indices(n_rows, n_cols, k)
33
34
35 @to_ivy_arrays_and_back
36 def triu_indices(n, k=0, m=None):
37 return ivy.triu_indices(n, m, k)
38
39
40 @to_ivy_arrays_and_back
41 def triu_indices_from(arr, k=0):
42 return ivy.triu_indices(arr.shape[-2], arr.shape[-1], k)
43
44
45 def tril_indices_from(arr, k=0):
46 return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py
--- a/ivy/functional/frontends/jax/numpy/indexing.py
+++ b/ivy/functional/frontends/jax/numpy/indexing.py
@@ -44,3 +44,10 @@
def tril_indices_from(arr, k=0):
return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)
+
+
+# unravel_index
+@to_ivy_arrays_and_back
+def unravel_index(indices, shape):
+ ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)]
+ return tuple(ret)
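A rough, untested usage sketch of the wrapper added above, importing the frontend module that the diff touches (the backend choice is an assumption):

```python
import ivy
from ivy.functional.frontends.jax.numpy import indexing

ivy.set_backend("numpy")  # assumed backend; any supported backend should behave the same

# Mirror jax.numpy.unravel_index: flat indices 1 and 5 in a 2x3 array.
rows, cols = indexing.unravel_index(ivy.array([1, 5]), (2, 3))
print(rows, cols)  # expected coordinates: (0, 1) and (1, 2)
```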
|
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/indexing.py b/ivy/functional/frontends/jax/numpy/indexing.py\n--- a/ivy/functional/frontends/jax/numpy/indexing.py\n+++ b/ivy/functional/frontends/jax/numpy/indexing.py\n@@ -44,3 +44,10 @@\n \n def tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n+\n+\n+# unravel_index\n+@to_ivy_arrays_and_back\n+def unravel_index(indices, shape):\n+ ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n+ return tuple(ret)\n", "issue": "unravel_index\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices(n, k=0, m=None):\n return ivy.triu_indices(n, m, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices_from(arr, k=0):\n return ivy.triu_indices(arr.shape[-2], arr.shape[-1], k)\n\n\ndef tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset=0, axis1=0, axis2=1):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n\n\n@to_ivy_arrays_and_back\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diag_indices(n, ndim=2):\n idx = ivy.arange(n, dtype=int)\n return (idx,) * ndim\n\n\n# take_along_axis\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis, mode=\"fill\"):\n return ivy.take_along_axis(arr, indices, axis, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n_rows, n_cols=None, k=0):\n return ivy.tril_indices(n_rows, n_cols, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices(n, k=0, m=None):\n return ivy.triu_indices(n, m, k)\n\n\n@to_ivy_arrays_and_back\ndef triu_indices_from(arr, k=0):\n return ivy.triu_indices(arr.shape[-2], arr.shape[-1], k)\n\n\ndef tril_indices_from(arr, k=0):\n return ivy.tril_indices(arr.shape[-2], arr.shape[-1], k)\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n", "path": "ivy/functional/frontends/jax/numpy/indexing.py"}]}
| 701 | 162 |
gh_patches_debug_4175
|
rasdani/github-patches
|
git_diff
|
cleanlab__cleanlab-965
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Revert #961 before release
Tensorflow version temporarily has an upper bound (`tensorflow<2.16.0`) in requirements-dev.txt.
scikit-learn version temporarily has an upper bound (`scikit-learn>=1.0,<1.4.0`) in setup.py
This needs to be reverted before releasing v2.6.0.
_Originally posted by @elisno in https://github.com/cleanlab/cleanlab/issues/961#issuecomment-1898968097_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from setuptools.command.egg_info import egg_info
3
4 # To use a consistent encoding
5 from codecs import open
6 from os import path
7
8
9 class egg_info_ex(egg_info):
10 """Includes license file into `.egg-info` folder."""
11
12 def run(self):
13 # don't duplicate license into `.egg-info` when building a distribution
14 if not self.distribution.have_run.get("install", True):
15 # `install` command is in progress, copy license
16 self.mkpath(self.egg_info)
17 self.copy_file("LICENSE", self.egg_info)
18
19 egg_info.run(self)
20
21
22 here = path.abspath(path.dirname(__file__))
23
24 # Get the long description from the README file
25 with open(path.join(here, "README.md"), encoding="utf-8") as f:
26 long_description = f.read()
27
28 # Get version number and store it in __version__
29 exec(open("cleanlab/version.py").read())
30
31 DATALAB_REQUIRE = [
32 # Mainly for Datalab's data storage class.
33 # Still some type hints that require datasets
34 "datasets>=2.7.0",
35 ]
36
37 IMAGE_REQUIRE = DATALAB_REQUIRE + ["cleanvision>=0.3.2"]
38
39 EXTRAS_REQUIRE = {
40 "datalab": DATALAB_REQUIRE,
41 "image": IMAGE_REQUIRE,
42 "all": ["matplotlib>=3.5.1"],
43 }
44 EXTRAS_REQUIRE["all"] = list(set(sum(EXTRAS_REQUIRE.values(), [])))
45
46 setup(
47 name="cleanlab",
48 version=__version__,
49 license="AGPLv3+",
50 long_description=long_description,
51 long_description_content_type="text/markdown",
52 description="The standard package for data-centric AI, machine learning with label errors, "
53 "and automatically finding and fixing dataset issues in Python.",
54 url="https://cleanlab.ai",
55 project_urls={
56 "Documentation": "https://docs.cleanlab.ai",
57 "Bug Tracker": "https://github.com/cleanlab/cleanlab/issues",
58 "Source Code": "https://github.com/cleanlab/cleanlab",
59 },
60 author="Cleanlab Inc.",
61 author_email="[email protected]",
62 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
63 classifiers=[
64 "Development Status :: 4 - Beta",
65 "Intended Audience :: Developers",
66 "Intended Audience :: Education",
67 "Intended Audience :: Science/Research",
68 "Intended Audience :: Information Technology",
69 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
70 "Natural Language :: English",
71 # We believe this package works will these versions, but we do not guarantee it!
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.7",
74 "Programming Language :: Python :: 3.8",
75 "Programming Language :: Python :: 3.9",
76 "Programming Language :: Python :: 3.10",
77 "Programming Language :: Python",
78 "Topic :: Software Development",
79 "Topic :: Scientific/Engineering",
80 "Topic :: Scientific/Engineering :: Mathematics",
81 "Topic :: Scientific/Engineering :: Artificial Intelligence",
82 "Topic :: Software Development :: Libraries",
83 "Topic :: Software Development :: Libraries :: Python Modules",
84 ],
85 python_requires=">=3.7",
86 # What does your project relate to?
87 keywords="machine_learning data_cleaning confident_learning classification weak_supervision "
88 "learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric",
89 # You can just specify the packages manually here if your project is
90 # simple. Or you can use find_packages().
91 packages=find_packages(exclude=[]),
92 # Include cleanlab license file.
93 include_package_data=True,
94 package_data={
95 "": ["LICENSE"],
96 },
97 license_files=("LICENSE",),
98 cmdclass={"egg_info": egg_info_ex},
99 # List run-time dependencies here. These will be installed by pip when
100 # your project is installed. For an analysis of "install_requires" vs pip's
101 # requirements files see:
102 # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/
103 install_requires=[
104 "numpy>=1.20.0",
105 "scikit-learn>=1.0,<1.4.0",
106 "tqdm>=4.53.0",
107 "pandas>=1.1.5",
108 "termcolor>=2.0.0,<2.4.0",
109 ],
110 extras_require=EXTRAS_REQUIRE,
111 )
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -102,7 +102,7 @@
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/
install_requires=[
"numpy>=1.20.0",
- "scikit-learn>=1.0,<1.4.0",
+ "scikit-learn>=1.0",
"tqdm>=4.53.0",
"pandas>=1.1.5",
"termcolor>=2.0.0,<2.4.0",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -102,7 +102,7 @@\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/\n install_requires=[\n \"numpy>=1.20.0\",\n- \"scikit-learn>=1.0,<1.4.0\",\n+ \"scikit-learn>=1.0\",\n \"tqdm>=4.53.0\",\n \"pandas>=1.1.5\",\n \"termcolor>=2.0.0,<2.4.0\",\n", "issue": "Revert #961 before release\nTensorflow version temporarily has an upper bound (`tensorflow<2.16.0`) in requirements-dev.txt.\r\nscikit-learn version temporarily has an upper bound (`scikit-learn>=1.0,<1.4.0`) in setup.py\r\n\r\nThis needs to be reverted before releasing v2.6.0.\r\n\r\n\r\n _Originally posted by @elisno in https://github.com/cleanlab/cleanlab/issues/961#issuecomment-1898968097_\r\n \n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom setuptools.command.egg_info import egg_info\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n\nclass egg_info_ex(egg_info):\n \"\"\"Includes license file into `.egg-info` folder.\"\"\"\n\n def run(self):\n # don't duplicate license into `.egg-info` when building a distribution\n if not self.distribution.have_run.get(\"install\", True):\n # `install` command is in progress, copy license\n self.mkpath(self.egg_info)\n self.copy_file(\"LICENSE\", self.egg_info)\n\n egg_info.run(self)\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n# Get version number and store it in __version__\nexec(open(\"cleanlab/version.py\").read())\n\nDATALAB_REQUIRE = [\n # Mainly for Datalab's data storage class.\n # Still some type hints that require datasets\n \"datasets>=2.7.0\",\n]\n\nIMAGE_REQUIRE = DATALAB_REQUIRE + [\"cleanvision>=0.3.2\"]\n\nEXTRAS_REQUIRE = {\n \"datalab\": DATALAB_REQUIRE,\n \"image\": IMAGE_REQUIRE,\n \"all\": [\"matplotlib>=3.5.1\"],\n}\nEXTRAS_REQUIRE[\"all\"] = list(set(sum(EXTRAS_REQUIRE.values(), [])))\n\nsetup(\n name=\"cleanlab\",\n version=__version__,\n license=\"AGPLv3+\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n description=\"The standard package for data-centric AI, machine learning with label errors, \"\n \"and automatically finding and fixing dataset issues in Python.\",\n url=\"https://cleanlab.ai\",\n project_urls={\n \"Documentation\": \"https://docs.cleanlab.ai\",\n \"Bug Tracker\": \"https://github.com/cleanlab/cleanlab/issues\",\n \"Source Code\": \"https://github.com/cleanlab/cleanlab\",\n },\n author=\"Cleanlab Inc.\",\n author_email=\"[email protected]\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Natural Language :: English\",\n # We believe this package works will these versions, but we do not guarantee it!\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n \"Topic :: Scientific/Engineering\",\n \"Topic 
:: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n python_requires=\">=3.7\",\n # What does your project relate to?\n keywords=\"machine_learning data_cleaning confident_learning classification weak_supervision \"\n \"learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[]),\n # Include cleanlab license file.\n include_package_data=True,\n package_data={\n \"\": [\"LICENSE\"],\n },\n license_files=(\"LICENSE\",),\n cmdclass={\"egg_info\": egg_info_ex},\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/\n install_requires=[\n \"numpy>=1.20.0\",\n \"scikit-learn>=1.0,<1.4.0\",\n \"tqdm>=4.53.0\",\n \"pandas>=1.1.5\",\n \"termcolor>=2.0.0,<2.4.0\",\n ],\n extras_require=EXTRAS_REQUIRE,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom setuptools.command.egg_info import egg_info\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n\nclass egg_info_ex(egg_info):\n \"\"\"Includes license file into `.egg-info` folder.\"\"\"\n\n def run(self):\n # don't duplicate license into `.egg-info` when building a distribution\n if not self.distribution.have_run.get(\"install\", True):\n # `install` command is in progress, copy license\n self.mkpath(self.egg_info)\n self.copy_file(\"LICENSE\", self.egg_info)\n\n egg_info.run(self)\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n# Get version number and store it in __version__\nexec(open(\"cleanlab/version.py\").read())\n\nDATALAB_REQUIRE = [\n # Mainly for Datalab's data storage class.\n # Still some type hints that require datasets\n \"datasets>=2.7.0\",\n]\n\nIMAGE_REQUIRE = DATALAB_REQUIRE + [\"cleanvision>=0.3.2\"]\n\nEXTRAS_REQUIRE = {\n \"datalab\": DATALAB_REQUIRE,\n \"image\": IMAGE_REQUIRE,\n \"all\": [\"matplotlib>=3.5.1\"],\n}\nEXTRAS_REQUIRE[\"all\"] = list(set(sum(EXTRAS_REQUIRE.values(), [])))\n\nsetup(\n name=\"cleanlab\",\n version=__version__,\n license=\"AGPLv3+\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n description=\"The standard package for data-centric AI, machine learning with label errors, \"\n \"and automatically finding and fixing dataset issues in Python.\",\n url=\"https://cleanlab.ai\",\n project_urls={\n \"Documentation\": \"https://docs.cleanlab.ai\",\n \"Bug Tracker\": \"https://github.com/cleanlab/cleanlab/issues\",\n \"Source Code\": \"https://github.com/cleanlab/cleanlab\",\n },\n author=\"Cleanlab Inc.\",\n author_email=\"[email protected]\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU Affero 
General Public License v3 or later (AGPLv3+)\",\n \"Natural Language :: English\",\n # We believe this package works will these versions, but we do not guarantee it!\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n python_requires=\">=3.7\",\n # What does your project relate to?\n keywords=\"machine_learning data_cleaning confident_learning classification weak_supervision \"\n \"learning_with_noisy_labels unsupervised_learning datacentric_ai, datacentric\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n packages=find_packages(exclude=[]),\n # Include cleanlab license file.\n include_package_data=True,\n package_data={\n \"\": [\"LICENSE\"],\n },\n license_files=(\"LICENSE\",),\n cmdclass={\"egg_info\": egg_info_ex},\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/\n install_requires=[\n \"numpy>=1.20.0\",\n \"scikit-learn>=1.0\",\n \"tqdm>=4.53.0\",\n \"pandas>=1.1.5\",\n \"termcolor>=2.0.0,<2.4.0\",\n ],\n extras_require=EXTRAS_REQUIRE,\n)\n", "path": "setup.py"}]}
| 1,592 | 139 |
gh_patches_debug_12258
|
rasdani/github-patches
|
git_diff
|
blakeblackshear__frigate-5532
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Support]: Frigate crashes on ARM64 after upgrade to beta 8
### Describe the problem you are having
On ARM64 machine, frigate crashes while trying to start go2rtc.
Looking for the "ELF load command alignment not page-aligned" error, I found this: https://github.com/numpy/numpy/issues/16677
I assume the problem is related to ARM64 and the 64K page size of the "Red Hat Enterprise Linux release 8.7 (Ootpa)" operating system.
```
$ python3 -c 'import os; print(os.sysconf("SC_PAGESIZE"))'
65536
```
### Version
0.12.0 Beta 8
### Frigate config file
```yaml
mqtt:
host: mqtt
cameras:
entrance:
ffmpeg:
inputs:
- path: rtsp://**REDACTED**
roles:
- detect
- rtmp
- record
detect:
width: 2688
height: 1520
fps: 5
mqtt:
timestamp: False
bounding_box: False
crop: True
quality: 100
height: 1520
record:
enabled: True
events:
retain:
default: 5
```
### Relevant log output
```shell
[INFO] Starting go2rtc...
2023-02-15 14:19:19.142580863 14:19:19.142 INF go2rtc version 1.1.2 linux/arm64
2023-02-15 14:19:19.142706423 14:19:19.142 INF [api] listen addr=:1984
2023-02-15 14:19:19.142896941 14:19:19.142 INF [rtsp] listen addr=:8554
2023-02-15 14:19:19.143039420 14:19:19.143 INF [srtp] listen addr=:8443
2023-02-15 14:19:19.143322578 14:19:19.143 INF [webrtc] listen addr=:8555
2023-02-15 14:19:19.555985825 Traceback (most recent call last):
2023-02-15 14:19:19.556016824 File "/usr/lib/python3.9/runpy.py", line 197, in _run_module_as_main
2023-02-15 14:19:19.556044824 return _run_code(code, main_globals, None,
2023-02-15 14:19:19.556045944 File "/usr/lib/python3.9/runpy.py", line 87, in _run_code
2023-02-15 14:19:19.556083464 exec(code, run_globals)
2023-02-15 14:19:19.556084664 File "/opt/frigate/frigate/__main__.py", line 9, in <module>
2023-02-15 14:19:19.556140984 from frigate.app import FrigateApp
2023-02-15 14:19:19.556142424 File "/opt/frigate/frigate/app.py", line 17, in <module>
2023-02-15 14:19:19.556202983 from frigate.comms.dispatcher import Communicator, Dispatcher
2023-02-15 14:19:19.556205023 File "/opt/frigate/frigate/comms/dispatcher.py", line 9, in <module>
2023-02-15 14:19:19.556227943 from frigate.config import FrigateConfig
2023-02-15 14:19:19.556268703 File "/opt/frigate/frigate/config.py", line 36, in <module>
2023-02-15 14:19:19.556269863 from frigate.detectors import (
2023-02-15 14:19:19.556270783 File "/opt/frigate/frigate/detectors/__init__.py", line 9, in <module>
2023-02-15 14:19:19.556287783 from .detector_types import DetectorTypeEnum, api_types, DetectorConfig
2023-02-15 14:19:19.556289223 File "/opt/frigate/frigate/detectors/detector_types.py", line 16, in <module>
2023-02-15 14:19:19.556353022 plugin_modules = [
2023-02-15 14:19:19.556354502 File "/opt/frigate/frigate/detectors/detector_types.py", line 17, in <listcomp>
2023-02-15 14:19:19.556355142 importlib.import_module(name)
2023-02-15 14:19:19.556356102 File "/usr/lib/python3.9/importlib/__init__.py", line 127, in import_module
2023-02-15 14:19:19.556364622 return _bootstrap._gcd_import(name[level:], package, level)
2023-02-15 14:19:19.556368702 File "/opt/frigate/frigate/detectors/plugins/openvino.py", line 3, in <module>
2023-02-15 14:19:19.556395182 import openvino.runtime as ov
2023-02-15 14:19:19.556396862 File "/usr/local/lib/python3.9/dist-packages/openvino/runtime/__init__.py", line 20, in <module>
2023-02-15 14:19:19.556431222 from openvino.pyopenvino import Dimension
2023-02-15 14:19:19.556440942 ImportError: /usr/local/lib/python3.9/dist-packages/openvino/pyopenvino.cpython-39-aarch64-linux-gnu.so: ELF load command alignment not page-aligned
Service Frigate exited with code 1 (by signal 0)
```
### FFprobe output from your camera
```shell
N/A
```
### Frigate stats
_No response_
### Operating system
Other Linux
### Install method
Docker Compose
### Coral version
CPU (no coral)
### Network connection
Wired
### Camera make and model
Hikvision
### Any other information that may be helpful
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frigate/detectors/detector_types.py`
Content:
```
1 import logging
2 import importlib
3 import pkgutil
4 from typing import Union
5 from typing_extensions import Annotated
6 from enum import Enum
7 from pydantic import Field
8
9 from . import plugins
10 from .detection_api import DetectionApi
11 from .detector_config import BaseDetectorConfig
12
13
14 logger = logging.getLogger(__name__)
15
16 plugin_modules = [
17 importlib.import_module(name)
18 for finder, name, ispkg in pkgutil.iter_modules(
19 plugins.__path__, plugins.__name__ + "."
20 )
21 ]
22
23 api_types = {det.type_key: det for det in DetectionApi.__subclasses__()}
24
25
26 class StrEnum(str, Enum):
27 pass
28
29
30 DetectorTypeEnum = StrEnum("DetectorTypeEnum", {k: k for k in api_types})
31
32 DetectorConfig = Annotated[
33 Union[tuple(BaseDetectorConfig.__subclasses__())],
34 Field(discriminator="type"),
35 ]
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frigate/detectors/detector_types.py b/frigate/detectors/detector_types.py
--- a/frigate/detectors/detector_types.py
+++ b/frigate/detectors/detector_types.py
@@ -13,12 +13,19 @@
logger = logging.getLogger(__name__)
-plugin_modules = [
- importlib.import_module(name)
- for finder, name, ispkg in pkgutil.iter_modules(
- plugins.__path__, plugins.__name__ + "."
- )
-]
+
+_included_modules = pkgutil.iter_modules(plugins.__path__, plugins.__name__ + ".")
+
+plugin_modules = []
+
+for _, name, _ in _included_modules:
+ try:
+ # currently openvino may fail when importing
+ # on an arm device with 64 KiB page size.
+ plugin_modules.append(importlib.import_module(name))
+ except ImportError as e:
+ logger.error(f"Error importing detector runtime: {e}")
+
api_types = {det.type_key: det for det in DetectionApi.__subclasses__()}
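The patch replaces the eager list comprehension with a guarded loop, so a single runtime that cannot be imported (here OpenVINO on a 64 KiB-page ARM64 host) no longer aborts startup. A standalone sketch of the same pattern; the module names are placeholders, not Frigate's actual plugin list:

```python
import importlib
import logging

logger = logging.getLogger(__name__)

candidate_plugins = ["plugins.cpu_tfl", "plugins.openvino"]  # placeholder names
loaded_plugins = []

for name in candidate_plugins:
    try:
        loaded_plugins.append(importlib.import_module(name))
    except ImportError as exc:
        # An optional runtime that fails to import is logged and skipped.
        logger.error("Error importing detector runtime %s: %s", name, exc)
```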
|
{"golden_diff": "diff --git a/frigate/detectors/detector_types.py b/frigate/detectors/detector_types.py\n--- a/frigate/detectors/detector_types.py\n+++ b/frigate/detectors/detector_types.py\n@@ -13,12 +13,19 @@\n \n logger = logging.getLogger(__name__)\n \n-plugin_modules = [\n- importlib.import_module(name)\n- for finder, name, ispkg in pkgutil.iter_modules(\n- plugins.__path__, plugins.__name__ + \".\"\n- )\n-]\n+\n+_included_modules = pkgutil.iter_modules(plugins.__path__, plugins.__name__ + \".\")\n+\n+plugin_modules = []\n+\n+for _, name, _ in _included_modules:\n+ try:\n+ # currently openvino may fail when importing\n+ # on an arm device with 64 KiB page size.\n+ plugin_modules.append(importlib.import_module(name))\n+ except ImportError as e:\n+ logger.error(f\"Error importing detector runtime: {e}\")\n+\n \n api_types = {det.type_key: det for det in DetectionApi.__subclasses__()}\n", "issue": "[Support]: Frigate crashes on ARM64 after upgrade to beta 8\n### Describe the problem you are having\r\n\r\nOn ARM64 machine, frigate crashes while trying to start go2rtc.\r\nLooking for the \"ELF load command alignment not page-aligned\" error, I found this: https://github.com/numpy/numpy/issues/16677\r\n\r\nI assume the problem is related to ARM64 and the 64K page size of the \"Red Hat Enterprise Linux release 8.7 (Ootpa)\" operating system.\r\n\r\n```\r\n$ python3 -c 'import os; print(os.sysconf(\"SC_PAGESIZE\"))'\r\n65536\r\n```\r\n\r\n### Version\r\n\r\n0.12.0 Beta 8\r\n\r\n### Frigate config file\r\n\r\n```yaml\r\nmqtt:\r\n host: mqtt\r\n\r\ncameras:\r\n entrance:\r\n ffmpeg:\r\n inputs:\r\n - path: rtsp://**REDACTED**\r\n roles:\r\n - detect\r\n - rtmp\r\n - record\r\n detect:\r\n width: 2688\r\n height: 1520\r\n fps: 5\r\n mqtt:\r\n timestamp: False\r\n bounding_box: False\r\n crop: True\r\n quality: 100\r\n height: 1520\r\n\r\nrecord:\r\n enabled: True\r\n events:\r\n retain:\r\n default: 5\r\n```\r\n\r\n\r\n### Relevant log output\r\n\r\n```shell\r\n[INFO] Starting go2rtc...\r\n2023-02-15 14:19:19.142580863 14:19:19.142 INF go2rtc version 1.1.2 linux/arm64\r\n2023-02-15 14:19:19.142706423 14:19:19.142 INF [api] listen addr=:1984\r\n2023-02-15 14:19:19.142896941 14:19:19.142 INF [rtsp] listen addr=:8554\r\n2023-02-15 14:19:19.143039420 14:19:19.143 INF [srtp] listen addr=:8443\r\n2023-02-15 14:19:19.143322578 14:19:19.143 INF [webrtc] listen addr=:8555\r\n2023-02-15 14:19:19.555985825 Traceback (most recent call last):\r\n2023-02-15 14:19:19.556016824 File \"/usr/lib/python3.9/runpy.py\", line 197, in _run_module_as_main\r\n2023-02-15 14:19:19.556044824 return _run_code(code, main_globals, None,\r\n2023-02-15 14:19:19.556045944 File \"/usr/lib/python3.9/runpy.py\", line 87, in _run_code\r\n2023-02-15 14:19:19.556083464 exec(code, run_globals)\r\n2023-02-15 14:19:19.556084664 File \"/opt/frigate/frigate/__main__.py\", line 9, in <module>\r\n2023-02-15 14:19:19.556140984 from frigate.app import FrigateApp\r\n2023-02-15 14:19:19.556142424 File \"/opt/frigate/frigate/app.py\", line 17, in <module>\r\n2023-02-15 14:19:19.556202983 from frigate.comms.dispatcher import Communicator, Dispatcher\r\n2023-02-15 14:19:19.556205023 File \"/opt/frigate/frigate/comms/dispatcher.py\", line 9, in <module>\r\n2023-02-15 14:19:19.556227943 from frigate.config import FrigateConfig\r\n2023-02-15 14:19:19.556268703 File \"/opt/frigate/frigate/config.py\", line 36, in <module>\r\n2023-02-15 14:19:19.556269863 from frigate.detectors import (\r\n2023-02-15 14:19:19.556270783 File 
\"/opt/frigate/frigate/detectors/__init__.py\", line 9, in <module>\r\n2023-02-15 14:19:19.556287783 from .detector_types import DetectorTypeEnum, api_types, DetectorConfig\r\n2023-02-15 14:19:19.556289223 File \"/opt/frigate/frigate/detectors/detector_types.py\", line 16, in <module>\r\n2023-02-15 14:19:19.556353022 plugin_modules = [\r\n2023-02-15 14:19:19.556354502 File \"/opt/frigate/frigate/detectors/detector_types.py\", line 17, in <listcomp>\r\n2023-02-15 14:19:19.556355142 importlib.import_module(name)\r\n2023-02-15 14:19:19.556356102 File \"/usr/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\n2023-02-15 14:19:19.556364622 return _bootstrap._gcd_import(name[level:], package, level)\r\n2023-02-15 14:19:19.556368702 File \"/opt/frigate/frigate/detectors/plugins/openvino.py\", line 3, in <module>\r\n2023-02-15 14:19:19.556395182 import openvino.runtime as ov\r\n2023-02-15 14:19:19.556396862 File \"/usr/local/lib/python3.9/dist-packages/openvino/runtime/__init__.py\", line 20, in <module>\r\n2023-02-15 14:19:19.556431222 from openvino.pyopenvino import Dimension\r\n2023-02-15 14:19:19.556440942 ImportError: /usr/local/lib/python3.9/dist-packages/openvino/pyopenvino.cpython-39-aarch64-linux-gnu.so: ELF load command alignment not page-aligned\r\nService Frigate exited with code 1 (by signal 0)\r\n```\r\n\r\n\r\n### FFprobe output from your camera\r\n\r\n```shell\r\nN/A\r\n```\r\n\r\n\r\n### Frigate stats\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\nOther Linux\r\n\r\n### Install method\r\n\r\nDocker Compose\r\n\r\n### Coral version\r\n\r\nCPU (no coral)\r\n\r\n### Network connection\r\n\r\nWired\r\n\r\n### Camera make and model\r\n\r\nHikvision\r\n\r\n### Any other information that may be helpful\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport importlib\nimport pkgutil\nfrom typing import Union\nfrom typing_extensions import Annotated\nfrom enum import Enum\nfrom pydantic import Field\n\nfrom . import plugins\nfrom .detection_api import DetectionApi\nfrom .detector_config import BaseDetectorConfig\n\n\nlogger = logging.getLogger(__name__)\n\nplugin_modules = [\n importlib.import_module(name)\n for finder, name, ispkg in pkgutil.iter_modules(\n plugins.__path__, plugins.__name__ + \".\"\n )\n]\n\napi_types = {det.type_key: det for det in DetectionApi.__subclasses__()}\n\n\nclass StrEnum(str, Enum):\n pass\n\n\nDetectorTypeEnum = StrEnum(\"DetectorTypeEnum\", {k: k for k in api_types})\n\nDetectorConfig = Annotated[\n Union[tuple(BaseDetectorConfig.__subclasses__())],\n Field(discriminator=\"type\"),\n]\n", "path": "frigate/detectors/detector_types.py"}], "after_files": [{"content": "import logging\nimport importlib\nimport pkgutil\nfrom typing import Union\nfrom typing_extensions import Annotated\nfrom enum import Enum\nfrom pydantic import Field\n\nfrom . 
import plugins\nfrom .detection_api import DetectionApi\nfrom .detector_config import BaseDetectorConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\n_included_modules = pkgutil.iter_modules(plugins.__path__, plugins.__name__ + \".\")\n\nplugin_modules = []\n\nfor _, name, _ in _included_modules:\n try:\n # currently openvino may fail when importing\n # on an arm device with 64 KiB page size.\n plugin_modules.append(importlib.import_module(name))\n except ImportError as e:\n logger.error(f\"Error importing detector runtime: {e}\")\n\n\napi_types = {det.type_key: det for det in DetectionApi.__subclasses__()}\n\n\nclass StrEnum(str, Enum):\n pass\n\n\nDetectorTypeEnum = StrEnum(\"DetectorTypeEnum\", {k: k for k in api_types})\n\nDetectorConfig = Annotated[\n Union[tuple(BaseDetectorConfig.__subclasses__())],\n Field(discriminator=\"type\"),\n]\n", "path": "frigate/detectors/detector_types.py"}]}
| 2,425 | 244 |
gh_patches_debug_31014
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-3608
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement `tables.delete` RPC method
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/rpc/tables.py`
Content:
```
1 from typing import Optional, TypedDict
2
3 from modernrpc.core import rpc_method, REQUEST_KEY
4 from modernrpc.auth.basic import http_basic_auth_login_required
5
6 from db.tables.operations.select import get_table_info
7 from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions
8 from mathesar.rpc.utils import connect
9
10
11 class TableInfo(TypedDict):
12 """
13 Information about a table.
14
15 Attributes:
16 oid: The `oid` of the table in the schema.
17 name: The name of the table.
18 schema: The `oid` of the schema where the table lives.
19 description: The description of the table.
20 """
21 oid: int
22 name: str
23 schema: int
24 description: Optional[str]
25
26
27 @rpc_method(name="tables.list")
28 @http_basic_auth_login_required
29 @handle_rpc_exceptions
30 def list_(*, schema_oid: int, database_id: int, **kwargs) -> list[TableInfo]:
31 """
32 List information about tables for a schema. Exposed as `list`.
33
34 Args:
35 schema_oid: Identity of the schema in the user's database.
36 database_id: The Django id of the database containing the table.
37
38 Returns:
39 A list of table details.
40 """
41 user = kwargs.get(REQUEST_KEY).user
42 with connect(database_id, user) as conn:
43 raw_table_info = get_table_info(schema_oid, conn)
44 return [
45 TableInfo(tab) for tab in raw_table_info
46 ]
47
```
Path: `db/tables/operations/drop.py`
Content:
```
1 from db.connection import execute_msar_func_with_engine
2
3
4 def drop_table(name, schema, engine, cascade=False, if_exists=False):
5 execute_msar_func_with_engine(engine, 'drop_table', schema, name, cascade, if_exists)
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/tables/operations/drop.py b/db/tables/operations/drop.py
--- a/db/tables/operations/drop.py
+++ b/db/tables/operations/drop.py
@@ -1,5 +1,21 @@
-from db.connection import execute_msar_func_with_engine
+from db.connection import execute_msar_func_with_engine, exec_msar_func
def drop_table(name, schema, engine, cascade=False, if_exists=False):
execute_msar_func_with_engine(engine, 'drop_table', schema, name, cascade, if_exists)
+
+
+def drop_table_from_database(table_oid, conn, cascade=False):
+ """
+ Drop a table.
+
+ Args:
+ table_oid: OID of the table to drop.
+ cascade: Whether to drop the dependent objects.
+
+ Returns:
+ Returns the fully qualified name of the dropped table.
+ """
+ return exec_msar_func(
+ conn, 'drop_table', table_oid, cascade
+ ).fetchone()[0]
diff --git a/mathesar/rpc/tables.py b/mathesar/rpc/tables.py
--- a/mathesar/rpc/tables.py
+++ b/mathesar/rpc/tables.py
@@ -4,6 +4,7 @@
from modernrpc.auth.basic import http_basic_auth_login_required
from db.tables.operations.select import get_table_info
+from db.tables.operations.drop import drop_table_from_database
from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions
from mathesar.rpc.utils import connect
@@ -44,3 +45,25 @@
return [
TableInfo(tab) for tab in raw_table_info
]
+
+
+@rpc_method(name="tables.delete")
+@http_basic_auth_login_required
+@handle_rpc_exceptions
+def delete(
+ *, table_oid: int, database_id: int, cascade: bool = False, **kwargs
+) -> str:
+ """
+ Delete a table from a schema.
+
+ Args:
+ table_oid: Identity of the table in the user's database.
+ database_id: The Django id of the database containing the table.
+ cascade: Whether to drop the dependent objects.
+
+ Returns:
+ The name of the dropped table.
+ """
+ user = kwargs.get(REQUEST_KEY).user
+ with connect(database_id, user) as conn:
+ return drop_table_from_database(table_oid, conn, cascade)
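For illustration only, a client-side sketch of invoking the new method over JSON-RPC. The endpoint path and credentials are assumptions, not taken from the issue or the patch; a real Mathesar deployment may expose the RPC service elsewhere:

```python
import requests

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tables.delete",
    "params": {"table_oid": 12345, "database_id": 1, "cascade": False},
}
# Hypothetical URL and basic-auth credentials -- adjust for an actual instance.
response = requests.post(
    "http://localhost:8000/api/rpc/v0/",
    json=payload,
    auth=("admin", "password"),
)
print(response.json())  # on success: the fully qualified name of the dropped table
```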
|
{"golden_diff": "diff --git a/db/tables/operations/drop.py b/db/tables/operations/drop.py\n--- a/db/tables/operations/drop.py\n+++ b/db/tables/operations/drop.py\n@@ -1,5 +1,21 @@\n-from db.connection import execute_msar_func_with_engine\n+from db.connection import execute_msar_func_with_engine, exec_msar_func\n \n \n def drop_table(name, schema, engine, cascade=False, if_exists=False):\n execute_msar_func_with_engine(engine, 'drop_table', schema, name, cascade, if_exists)\n+\n+\n+def drop_table_from_database(table_oid, conn, cascade=False):\n+ \"\"\"\n+ Drop a table.\n+\n+ Args:\n+ table_oid: OID of the table to drop.\n+ cascade: Whether to drop the dependent objects.\n+\n+ Returns:\n+ Returns the fully qualified name of the dropped table.\n+ \"\"\"\n+ return exec_msar_func(\n+ conn, 'drop_table', table_oid, cascade\n+ ).fetchone()[0]\ndiff --git a/mathesar/rpc/tables.py b/mathesar/rpc/tables.py\n--- a/mathesar/rpc/tables.py\n+++ b/mathesar/rpc/tables.py\n@@ -4,6 +4,7 @@\n from modernrpc.auth.basic import http_basic_auth_login_required\n \n from db.tables.operations.select import get_table_info\n+from db.tables.operations.drop import drop_table_from_database\n from mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\n from mathesar.rpc.utils import connect\n \n@@ -44,3 +45,25 @@\n return [\n TableInfo(tab) for tab in raw_table_info\n ]\n+\n+\n+@rpc_method(name=\"tables.delete\")\n+@http_basic_auth_login_required\n+@handle_rpc_exceptions\n+def delete(\n+ *, table_oid: int, database_id: int, cascade: bool = False, **kwargs\n+) -> str:\n+ \"\"\"\n+ Delete a table from a schema.\n+\n+ Args:\n+ table_oid: Identity of the table in the user's database.\n+ database_id: The Django id of the database containing the table.\n+ cascade: Whether to drop the dependent objects.\n+\n+ Returns:\n+ The name of the dropped table.\n+ \"\"\"\n+ user = kwargs.get(REQUEST_KEY).user\n+ with connect(database_id, user) as conn:\n+ return drop_table_from_database(table_oid, conn, cascade)\n", "issue": "Implement `tables.delete` RPC method\n\n", "before_files": [{"content": "from typing import Optional, TypedDict\n\nfrom modernrpc.core import rpc_method, REQUEST_KEY\nfrom modernrpc.auth.basic import http_basic_auth_login_required\n\nfrom db.tables.operations.select import get_table_info\nfrom mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\nfrom mathesar.rpc.utils import connect\n\n\nclass TableInfo(TypedDict):\n \"\"\"\n Information about a table.\n\n Attributes:\n oid: The `oid` of the table in the schema.\n name: The name of the table.\n schema: The `oid` of the schema where the table lives.\n description: The description of the table.\n \"\"\"\n oid: int\n name: str\n schema: int\n description: Optional[str]\n\n\n@rpc_method(name=\"tables.list\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef list_(*, schema_oid: int, database_id: int, **kwargs) -> list[TableInfo]:\n \"\"\"\n List information about tables for a schema. 
Exposed as `list`.\n\n Args:\n schema_oid: Identity of the schema in the user's database.\n database_id: The Django id of the database containing the table.\n\n Returns:\n A list of table details.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n raw_table_info = get_table_info(schema_oid, conn)\n return [\n TableInfo(tab) for tab in raw_table_info\n ]\n", "path": "mathesar/rpc/tables.py"}, {"content": "from db.connection import execute_msar_func_with_engine\n\n\ndef drop_table(name, schema, engine, cascade=False, if_exists=False):\n execute_msar_func_with_engine(engine, 'drop_table', schema, name, cascade, if_exists)\n", "path": "db/tables/operations/drop.py"}], "after_files": [{"content": "from typing import Optional, TypedDict\n\nfrom modernrpc.core import rpc_method, REQUEST_KEY\nfrom modernrpc.auth.basic import http_basic_auth_login_required\n\nfrom db.tables.operations.select import get_table_info\nfrom db.tables.operations.drop import drop_table_from_database\nfrom mathesar.rpc.exceptions.handlers import handle_rpc_exceptions\nfrom mathesar.rpc.utils import connect\n\n\nclass TableInfo(TypedDict):\n \"\"\"\n Information about a table.\n\n Attributes:\n oid: The `oid` of the table in the schema.\n name: The name of the table.\n schema: The `oid` of the schema where the table lives.\n description: The description of the table.\n \"\"\"\n oid: int\n name: str\n schema: int\n description: Optional[str]\n\n\n@rpc_method(name=\"tables.list\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef list_(*, schema_oid: int, database_id: int, **kwargs) -> list[TableInfo]:\n \"\"\"\n List information about tables for a schema. Exposed as `list`.\n\n Args:\n schema_oid: Identity of the schema in the user's database.\n database_id: The Django id of the database containing the table.\n\n Returns:\n A list of table details.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n raw_table_info = get_table_info(schema_oid, conn)\n return [\n TableInfo(tab) for tab in raw_table_info\n ]\n\n\n@rpc_method(name=\"tables.delete\")\n@http_basic_auth_login_required\n@handle_rpc_exceptions\ndef delete(\n *, table_oid: int, database_id: int, cascade: bool = False, **kwargs\n) -> str:\n \"\"\"\n Delete a table from a schema.\n\n Args:\n table_oid: Identity of the table in the user's database.\n database_id: The Django id of the database containing the table.\n cascade: Whether to drop the dependent objects.\n\n Returns:\n The name of the dropped table.\n \"\"\"\n user = kwargs.get(REQUEST_KEY).user\n with connect(database_id, user) as conn:\n return drop_table_from_database(table_oid, conn, cascade)\n", "path": "mathesar/rpc/tables.py"}, {"content": "from db.connection import execute_msar_func_with_engine, exec_msar_func\n\n\ndef drop_table(name, schema, engine, cascade=False, if_exists=False):\n execute_msar_func_with_engine(engine, 'drop_table', schema, name, cascade, if_exists)\n\n\ndef drop_table_from_database(table_oid, conn, cascade=False):\n \"\"\"\n Drop a table.\n\n Args:\n table_oid: OID of the table to drop.\n cascade: Whether to drop the dependent objects.\n\n Returns:\n Returns the fully qualified name of the dropped table.\n \"\"\"\n return exec_msar_func(\n conn, 'drop_table', table_oid, cascade\n ).fetchone()[0]\n", "path": "db/tables/operations/drop.py"}]}
| 743 | 525 |
gh_patches_debug_21122
|
rasdani/github-patches
|
git_diff
|
pypa__pipenv-2968
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pipenv install/graph fail with 2018.10.9: ImportError: No module named weakref
### Issue description
With pipenv 2018.10.9, all the pipenv commands fail (including `pipenv --support`):
```pytb
Traceback (most recent call last):
File "c:\python27\lib\runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "c:\python27\lib\runpy.py", line 72, in _run_code
exec code in run_globals
File "C:\Python27\Scripts\pipenv.exe\__main__.py", line 5, in <module>
File "c:\python27\lib\site-packages\pipenv\__init__.py", line 23, in <module>
from .cli import cli
File "c:\python27\lib\site-packages\pipenv\cli\__init__.py", line 3, in <module>
from .command import cli
File "c:\python27\lib\site-packages\pipenv\cli\command.py", line 18, in <module>
from .. import environments
File "c:\python27\lib\site-packages\pipenv\environments.py", line 4, in <module>
from .vendor.vistir.misc import fs_str
File "c:\python27\lib\site-packages\pipenv\vendor\vistir\__init__.py", line 4, in <module>
from .compat import NamedTemporaryFile, TemporaryDirectory, partialmethod
File "c:\python27\lib\site-packages\pipenv\vendor\vistir\compat.py", line 34, in <module>
from .backports.tempfile import NamedTemporaryFile
File "c:\python27\lib\site-packages\pipenv\vendor\vistir\backports\__init__.py", line 5, in <module>
from .tempfile import NamedTemporaryFile
File "c:\python27\lib\site-packages\pipenv\vendor\vistir\backports\tempfile.py", line 16, in <module>
from backports.weakref import finalize
ImportError: No module named weakref
```
### Environment
* pip version: 10.0.1 and 18.1
* Python version: 2.7
* OS: Windows
Fix import error on Python 2.7
Continue #2962
Close #2961 #2950
--- END ISSUE ---
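The traceback boils down to `backports.weakref` not being importable on Python 2.7. A hedged sketch of the general fallback pattern involved (the actual pipenv/vistir patch may differ, e.g. by vendoring the backport):

```python
try:
    # Python 3.4+ provides finalize in the standard library.
    from weakref import finalize
except ImportError:
    try:
        # Python 2.7 needs the backport, which must be installed or vendored.
        from backports.weakref import finalize
    except ImportError as exc:
        raise ImportError(
            "backports.weakref is required on Python 2.7: %s" % exc
        )
```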
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pipenv/vendor/vistir/backports/tempfile.py`
Content:
```
1 # -*- coding=utf-8 -*-
2 from __future__ import absolute_import, unicode_literals
3
4 import functools
5 import io
6 import os
7 import sys
8
9 from tempfile import _bin_openflags, _mkstemp_inner, gettempdir
10
11 import six
12
13 try:
14 from weakref import finalize
15 except ImportError:
16 from backports.weakref import finalize
17
18
19 __all__ = ["finalize", "NamedTemporaryFile"]
20
21
22 try:
23 from tempfile import _infer_return_type
24 except ImportError:
25
26 def _infer_return_type(*args):
27 _types = set()
28 for arg in args:
29 if isinstance(type(arg), six.string_types):
30 _types.add(str)
31 elif isinstance(type(arg), bytes):
32 _types.add(bytes)
33 elif arg:
34 _types.add(type(arg))
35 return _types.pop()
36
37
38 def _sanitize_params(prefix, suffix, dir):
39 """Common parameter processing for most APIs in this module."""
40 output_type = _infer_return_type(prefix, suffix, dir)
41 if suffix is None:
42 suffix = output_type()
43 if prefix is None:
44 if output_type is str:
45 prefix = "tmp"
46 else:
47 prefix = os.fsencode("tmp")
48 if dir is None:
49 if output_type is str:
50 dir = gettempdir()
51 else:
52 dir = os.fsencode(gettempdir())
53 return prefix, suffix, dir, output_type
54
55
56 class _TemporaryFileCloser:
57 """A separate object allowing proper closing of a temporary file's
58 underlying file object, without adding a __del__ method to the
59 temporary file."""
60
61 file = None # Set here since __del__ checks it
62 close_called = False
63
64 def __init__(self, file, name, delete=True):
65 self.file = file
66 self.name = name
67 self.delete = delete
68
69 # NT provides delete-on-close as a primitive, so we don't need
70 # the wrapper to do anything special. We still use it so that
71 # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
72 if os.name != "nt":
73
74 # Cache the unlinker so we don't get spurious errors at
75 # shutdown when the module-level "os" is None'd out. Note
76 # that this must be referenced as self.unlink, because the
77 # name TemporaryFileWrapper may also get None'd out before
78 # __del__ is called.
79
80 def close(self, unlink=os.unlink):
81 if not self.close_called and self.file is not None:
82 self.close_called = True
83 try:
84 self.file.close()
85 finally:
86 if self.delete:
87 unlink(self.name)
88
89 # Need to ensure the file is deleted on __del__
90
91 def __del__(self):
92 self.close()
93
94 else:
95
96 def close(self):
97 if not self.close_called:
98 self.close_called = True
99 self.file.close()
100
101
102 class _TemporaryFileWrapper:
103 """Temporary file wrapper
104 This class provides a wrapper around files opened for
105 temporary use. In particular, it seeks to automatically
106 remove the file when it is no longer needed.
107 """
108
109 def __init__(self, file, name, delete=True):
110 self.file = file
111 self.name = name
112 self.delete = delete
113 self._closer = _TemporaryFileCloser(file, name, delete)
114
115 def __getattr__(self, name):
116 # Attribute lookups are delegated to the underlying file
117 # and cached for non-numeric results
118 # (i.e. methods are cached, closed and friends are not)
119 file = self.__dict__["file"]
120 a = getattr(file, name)
121 if hasattr(a, "__call__"):
122 func = a
123
124 @functools.wraps(func)
125 def func_wrapper(*args, **kwargs):
126 return func(*args, **kwargs)
127
128 # Avoid closing the file as long as the wrapper is alive,
129 # see issue #18879.
130 func_wrapper._closer = self._closer
131 a = func_wrapper
132 if not isinstance(a, int):
133 setattr(self, name, a)
134 return a
135
136 # The underlying __enter__ method returns the wrong object
137 # (self.file) so override it to return the wrapper
138
139 def __enter__(self):
140 self.file.__enter__()
141 return self
142
143 # Need to trap __exit__ as well to ensure the file gets
144 # deleted when used in a with statement
145
146 def __exit__(self, exc, value, tb):
147 result = self.file.__exit__(exc, value, tb)
148 self.close()
149 return result
150
151 def close(self):
152 """
153 Close the temporary file, possibly deleting it.
154 """
155 self._closer.close()
156
157 # iter() doesn't use __getattr__ to find the __iter__ method
158
159 def __iter__(self):
160 # Don't return iter(self.file), but yield from it to avoid closing
161 # file as long as it's being used as iterator (see issue #23700). We
162 # can't use 'yield from' here because iter(file) returns the file
163 # object itself, which has a close method, and thus the file would get
164 # closed when the generator is finalized, due to PEP380 semantics.
165 for line in self.file:
166 yield line
167
168
169 def NamedTemporaryFile(
170 mode="w+b",
171 buffering=-1,
172 encoding=None,
173 newline=None,
174 suffix=None,
175 prefix=None,
176 dir=None,
177 delete=True,
178 ):
179 """Create and return a temporary file.
180 Arguments:
181 'prefix', 'suffix', 'dir' -- as for mkstemp.
182 'mode' -- the mode argument to io.open (default "w+b").
183 'buffering' -- the buffer size argument to io.open (default -1).
184 'encoding' -- the encoding argument to io.open (default None)
185 'newline' -- the newline argument to io.open (default None)
186 'delete' -- whether the file is deleted on close (default True).
187 The file is created as mkstemp() would do it.
188 Returns an object with a file-like interface; the name of the file
189 is accessible as its 'name' attribute. The file will be automatically
190 deleted when it is closed unless the 'delete' argument is set to False.
191 """
192 prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
193 flags = _bin_openflags
194 # Setting O_TEMPORARY in the flags causes the OS to delete
195 # the file when it is closed. This is only supported by Windows.
196 if os.name == "nt" and delete:
197 flags |= os.O_TEMPORARY
198 if sys.version_info < (3, 5):
199 (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
200 else:
201 (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
202 try:
203 file = io.open(
204 fd, mode, buffering=buffering, newline=newline, encoding=encoding
205 )
206 return _TemporaryFileWrapper(file, name, delete)
207
208 except BaseException:
209 os.unlink(name)
210 os.close(fd)
211 raise
212
```
Path: `pipenv/vendor/vistir/compat.py`
Content:
```
1 # -*- coding=utf-8 -*-
2 from __future__ import absolute_import, unicode_literals
3
4 import os
5 import sys
6 import warnings
7
8 from tempfile import mkdtemp
9
10 import six
11
12
13 __all__ = [
14 "Path",
15 "get_terminal_size",
16 "finalize",
17 "partialmethod",
18 "JSONDecodeError",
19 "ResourceWarning",
20 "FileNotFoundError",
21 "fs_str",
22 "TemporaryDirectory",
23 "NamedTemporaryFile",
24 ]
25
26 if sys.version_info >= (3, 5):
27 from pathlib import Path
28
29 else:
30 from pathlib2 import Path
31
32 if sys.version_info < (3, 3):
33 from backports.shutil_get_terminal_size import get_terminal_size
34 from .backports.tempfile import NamedTemporaryFile
35 else:
36 from tempfile import NamedTemporaryFile
37 from shutil import get_terminal_size
38
39 try:
40 from weakref import finalize
41 except ImportError:
42 from backports.weakref import finalize
43
44 try:
45 from functools import partialmethod
46 except Exception:
47 from .backports.functools import partialmethod
48
49 try:
50 from json import JSONDecodeError
51 except ImportError: # Old Pythons.
52 JSONDecodeError = ValueError
53
54 if six.PY2:
55
56 class ResourceWarning(Warning):
57 pass
58
59 class FileNotFoundError(IOError):
60 pass
61
62 else:
63 from builtins import ResourceWarning, FileNotFoundError
64
65 class ResourceWarning(ResourceWarning):
66 pass
67
68 class FileNotFoundError(FileNotFoundError):
69 pass
70
71
72 class TemporaryDirectory(object):
73 """Create and return a temporary directory. This has the same
74 behavior as mkdtemp but can be used as a context manager. For
75 example:
76
77 with TemporaryDirectory() as tmpdir:
78 ...
79
80 Upon exiting the context, the directory and everything contained
81 in it are removed.
82 """
83
84 def __init__(self, suffix="", prefix=None, dir=None):
85 if "RAM_DISK" in os.environ:
86 import uuid
87
88 name = uuid.uuid4().hex
89 dir_name = os.path.join(os.environ["RAM_DISK"].strip(), name)
90 os.mkdir(dir_name)
91 self.name = dir_name
92 else:
93 suffix = suffix if suffix else ""
94 if not prefix:
95 self.name = mkdtemp(suffix=suffix, dir=dir)
96 else:
97 self.name = mkdtemp(suffix, prefix, dir)
98 self._finalizer = finalize(
99 self,
100 self._cleanup,
101 self.name,
102 warn_message="Implicitly cleaning up {!r}".format(self),
103 )
104
105 @classmethod
106 def _cleanup(cls, name, warn_message):
107 from .path import rmtree
108 rmtree(name)
109 warnings.warn(warn_message, ResourceWarning)
110
111 def __repr__(self):
112 return "<{} {!r}>".format(self.__class__.__name__, self.name)
113
114 def __enter__(self):
115 return self
116
117 def __exit__(self, exc, value, tb):
118 self.cleanup()
119
120 def cleanup(self):
121 from .path import rmtree
122 if self._finalizer.detach():
123 rmtree(self.name)
124
125
126 def fs_str(string):
127 """Encodes a string into the proper filesystem encoding
128
129 Borrowed from pip-tools
130 """
131 if isinstance(string, str):
132 return string
133 assert not isinstance(string, bytes)
134 return string.encode(_fs_encoding)
135
136
137 _fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pipenv/vendor/vistir/backports/tempfile.py b/pipenv/vendor/vistir/backports/tempfile.py
--- a/pipenv/vendor/vistir/backports/tempfile.py
+++ b/pipenv/vendor/vistir/backports/tempfile.py
@@ -13,7 +13,7 @@
try:
from weakref import finalize
except ImportError:
- from backports.weakref import finalize
+ from pipenv.vendor.backports.weakref import finalize
__all__ = ["finalize", "NamedTemporaryFile"]
diff --git a/pipenv/vendor/vistir/compat.py b/pipenv/vendor/vistir/compat.py
--- a/pipenv/vendor/vistir/compat.py
+++ b/pipenv/vendor/vistir/compat.py
@@ -30,7 +30,7 @@
from pathlib2 import Path
if sys.version_info < (3, 3):
- from backports.shutil_get_terminal_size import get_terminal_size
+ from pipenv.vendor.backports.shutil_get_terminal_size import get_terminal_size
from .backports.tempfile import NamedTemporaryFile
else:
from tempfile import NamedTemporaryFile
@@ -39,7 +39,7 @@
try:
from weakref import finalize
except ImportError:
- from backports.weakref import finalize
+ from pipenv.vendor.backports.weakref import finalize
try:
from functools import partialmethod
|
{"golden_diff": "diff --git a/pipenv/vendor/vistir/backports/tempfile.py b/pipenv/vendor/vistir/backports/tempfile.py\n--- a/pipenv/vendor/vistir/backports/tempfile.py\n+++ b/pipenv/vendor/vistir/backports/tempfile.py\n@@ -13,7 +13,7 @@\n try:\n from weakref import finalize\n except ImportError:\n- from backports.weakref import finalize\n+ from pipenv.vendor.backports.weakref import finalize\n \n \n __all__ = [\"finalize\", \"NamedTemporaryFile\"]\ndiff --git a/pipenv/vendor/vistir/compat.py b/pipenv/vendor/vistir/compat.py\n--- a/pipenv/vendor/vistir/compat.py\n+++ b/pipenv/vendor/vistir/compat.py\n@@ -30,7 +30,7 @@\n from pathlib2 import Path\n \n if sys.version_info < (3, 3):\n- from backports.shutil_get_terminal_size import get_terminal_size\n+ from pipenv.vendor.backports.shutil_get_terminal_size import get_terminal_size\n from .backports.tempfile import NamedTemporaryFile\n else:\n from tempfile import NamedTemporaryFile\n@@ -39,7 +39,7 @@\n try:\n from weakref import finalize\n except ImportError:\n- from backports.weakref import finalize\n+ from pipenv.vendor.backports.weakref import finalize\n \n try:\n from functools import partialmethod\n", "issue": "pipenv install/graph fail with 2018.10.9: ImportError: No module named weakref\n### Issue description\r\n\r\nWith pipenv 2018.10.9, all the pipenv commands fail (including `pipenv --support`): \r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"c:\\python27\\lib\\runpy.py\", line 174, in _run_module_as_main\r\n \"__main__\", fname, loader, pkg_name)\r\n File \"c:\\python27\\lib\\runpy.py\", line 72, in _run_code\r\n exec code in run_globals\r\n File \"C:\\Python27\\Scripts\\pipenv.exe\\__main__.py\", line 5, in <module>\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\__init__.py\", line 23, in <module>\r\n from .cli import cli\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\cli\\__init__.py\", line 3, in <module>\r\n from .command import cli\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\cli\\command.py\", line 18, in <module>\r\n from .. 
import environments\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\environments.py\", line 4, in <module>\r\n from .vendor.vistir.misc import fs_str\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\vendor\\vistir\\__init__.py\", line 4, in <module>\r\n from .compat import NamedTemporaryFile, TemporaryDirectory, partialmethod\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\vendor\\vistir\\compat.py\", line 34, in <module>\r\n from .backports.tempfile import NamedTemporaryFile\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\vendor\\vistir\\backports\\__init__.py\", line 5, in <module>\r\n from .tempfile import NamedTemporaryFile\r\n File \"c:\\python27\\lib\\site-packages\\pipenv\\vendor\\vistir\\backports\\tempfile.py\", line 16, in <module>\r\n from backports.weakref import finalize\r\nImportError: No module named weakref\r\n```\r\n\r\n### Enviroment\r\n\r\n* pip version: 10.0.1 and 18.1\r\n* Python version: 2.7\r\n* OS: Windows\r\n\r\n\nFix import error on Python 2.7\nContinue #2962 \r\nClose #2961 #2950 \n", "before_files": [{"content": "# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport functools\nimport io\nimport os\nimport sys\n\nfrom tempfile import _bin_openflags, _mkstemp_inner, gettempdir\n\nimport six\n\ntry:\n from weakref import finalize\nexcept ImportError:\n from backports.weakref import finalize\n\n\n__all__ = [\"finalize\", \"NamedTemporaryFile\"]\n\n\ntry:\n from tempfile import _infer_return_type\nexcept ImportError:\n\n def _infer_return_type(*args):\n _types = set()\n for arg in args:\n if isinstance(type(arg), six.string_types):\n _types.add(str)\n elif isinstance(type(arg), bytes):\n _types.add(bytes)\n elif arg:\n _types.add(type(arg))\n return _types.pop()\n\n\ndef _sanitize_params(prefix, suffix, dir):\n \"\"\"Common parameter processing for most APIs in this module.\"\"\"\n output_type = _infer_return_type(prefix, suffix, dir)\n if suffix is None:\n suffix = output_type()\n if prefix is None:\n if output_type is str:\n prefix = \"tmp\"\n else:\n prefix = os.fsencode(\"tmp\")\n if dir is None:\n if output_type is str:\n dir = gettempdir()\n else:\n dir = os.fsencode(gettempdir())\n return prefix, suffix, dir, output_type\n\n\nclass _TemporaryFileCloser:\n \"\"\"A separate object allowing proper closing of a temporary file's\n underlying file object, without adding a __del__ method to the\n temporary file.\"\"\"\n\n file = None # Set here since __del__ checks it\n close_called = False\n\n def __init__(self, file, name, delete=True):\n self.file = file\n self.name = name\n self.delete = delete\n\n # NT provides delete-on-close as a primitive, so we don't need\n # the wrapper to do anything special. We still use it so that\n # file.name is useful (i.e. not \"(fdopen)\") with NamedTemporaryFile.\n if os.name != \"nt\":\n\n # Cache the unlinker so we don't get spurious errors at\n # shutdown when the module-level \"os\" is None'd out. 
Note\n # that this must be referenced as self.unlink, because the\n # name TemporaryFileWrapper may also get None'd out before\n # __del__ is called.\n\n def close(self, unlink=os.unlink):\n if not self.close_called and self.file is not None:\n self.close_called = True\n try:\n self.file.close()\n finally:\n if self.delete:\n unlink(self.name)\n\n # Need to ensure the file is deleted on __del__\n\n def __del__(self):\n self.close()\n\n else:\n\n def close(self):\n if not self.close_called:\n self.close_called = True\n self.file.close()\n\n\nclass _TemporaryFileWrapper:\n \"\"\"Temporary file wrapper\n This class provides a wrapper around files opened for\n temporary use. In particular, it seeks to automatically\n remove the file when it is no longer needed.\n \"\"\"\n\n def __init__(self, file, name, delete=True):\n self.file = file\n self.name = name\n self.delete = delete\n self._closer = _TemporaryFileCloser(file, name, delete)\n\n def __getattr__(self, name):\n # Attribute lookups are delegated to the underlying file\n # and cached for non-numeric results\n # (i.e. methods are cached, closed and friends are not)\n file = self.__dict__[\"file\"]\n a = getattr(file, name)\n if hasattr(a, \"__call__\"):\n func = a\n\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n # Avoid closing the file as long as the wrapper is alive,\n # see issue #18879.\n func_wrapper._closer = self._closer\n a = func_wrapper\n if not isinstance(a, int):\n setattr(self, name, a)\n return a\n\n # The underlying __enter__ method returns the wrong object\n # (self.file) so override it to return the wrapper\n\n def __enter__(self):\n self.file.__enter__()\n return self\n\n # Need to trap __exit__ as well to ensure the file gets\n # deleted when used in a with statement\n\n def __exit__(self, exc, value, tb):\n result = self.file.__exit__(exc, value, tb)\n self.close()\n return result\n\n def close(self):\n \"\"\"\n Close the temporary file, possibly deleting it.\n \"\"\"\n self._closer.close()\n\n # iter() doesn't use __getattr__ to find the __iter__ method\n\n def __iter__(self):\n # Don't return iter(self.file), but yield from it to avoid closing\n # file as long as it's being used as iterator (see issue #23700). We\n # can't use 'yield from' here because iter(file) returns the file\n # object itself, which has a close method, and thus the file would get\n # closed when the generator is finalized, due to PEP380 semantics.\n for line in self.file:\n yield line\n\n\ndef NamedTemporaryFile(\n mode=\"w+b\",\n buffering=-1,\n encoding=None,\n newline=None,\n suffix=None,\n prefix=None,\n dir=None,\n delete=True,\n):\n \"\"\"Create and return a temporary file.\n Arguments:\n 'prefix', 'suffix', 'dir' -- as for mkstemp.\n 'mode' -- the mode argument to io.open (default \"w+b\").\n 'buffering' -- the buffer size argument to io.open (default -1).\n 'encoding' -- the encoding argument to io.open (default None)\n 'newline' -- the newline argument to io.open (default None)\n 'delete' -- whether the file is deleted on close (default True).\n The file is created as mkstemp() would do it.\n Returns an object with a file-like interface; the name of the file\n is accessible as its 'name' attribute. 
The file will be automatically\n deleted when it is closed unless the 'delete' argument is set to False.\n \"\"\"\n prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)\n flags = _bin_openflags\n # Setting O_TEMPORARY in the flags causes the OS to delete\n # the file when it is closed. This is only supported by Windows.\n if os.name == \"nt\" and delete:\n flags |= os.O_TEMPORARY\n if sys.version_info < (3, 5):\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)\n else:\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\n try:\n file = io.open(\n fd, mode, buffering=buffering, newline=newline, encoding=encoding\n )\n return _TemporaryFileWrapper(file, name, delete)\n\n except BaseException:\n os.unlink(name)\n os.close(fd)\n raise\n", "path": "pipenv/vendor/vistir/backports/tempfile.py"}, {"content": "# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nimport warnings\n\nfrom tempfile import mkdtemp\n\nimport six\n\n\n__all__ = [\n \"Path\",\n \"get_terminal_size\",\n \"finalize\",\n \"partialmethod\",\n \"JSONDecodeError\",\n \"ResourceWarning\",\n \"FileNotFoundError\",\n \"fs_str\",\n \"TemporaryDirectory\",\n \"NamedTemporaryFile\",\n]\n\nif sys.version_info >= (3, 5):\n from pathlib import Path\n\nelse:\n from pathlib2 import Path\n\nif sys.version_info < (3, 3):\n from backports.shutil_get_terminal_size import get_terminal_size\n from .backports.tempfile import NamedTemporaryFile\nelse:\n from tempfile import NamedTemporaryFile\n from shutil import get_terminal_size\n\ntry:\n from weakref import finalize\nexcept ImportError:\n from backports.weakref import finalize\n\ntry:\n from functools import partialmethod\nexcept Exception:\n from .backports.functools import partialmethod\n\ntry:\n from json import JSONDecodeError\nexcept ImportError: # Old Pythons.\n JSONDecodeError = ValueError\n\nif six.PY2:\n\n class ResourceWarning(Warning):\n pass\n\n class FileNotFoundError(IOError):\n pass\n\nelse:\n from builtins import ResourceWarning, FileNotFoundError\n\n class ResourceWarning(ResourceWarning):\n pass\n\n class FileNotFoundError(FileNotFoundError):\n pass\n\n\nclass TemporaryDirectory(object):\n \"\"\"Create and return a temporary directory. This has the same\n behavior as mkdtemp but can be used as a context manager. 
For\n example:\n\n with TemporaryDirectory() as tmpdir:\n ...\n\n Upon exiting the context, the directory and everything contained\n in it are removed.\n \"\"\"\n\n def __init__(self, suffix=\"\", prefix=None, dir=None):\n if \"RAM_DISK\" in os.environ:\n import uuid\n\n name = uuid.uuid4().hex\n dir_name = os.path.join(os.environ[\"RAM_DISK\"].strip(), name)\n os.mkdir(dir_name)\n self.name = dir_name\n else:\n suffix = suffix if suffix else \"\"\n if not prefix:\n self.name = mkdtemp(suffix=suffix, dir=dir)\n else:\n self.name = mkdtemp(suffix, prefix, dir)\n self._finalizer = finalize(\n self,\n self._cleanup,\n self.name,\n warn_message=\"Implicitly cleaning up {!r}\".format(self),\n )\n\n @classmethod\n def _cleanup(cls, name, warn_message):\n from .path import rmtree\n rmtree(name)\n warnings.warn(warn_message, ResourceWarning)\n\n def __repr__(self):\n return \"<{} {!r}>\".format(self.__class__.__name__, self.name)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc, value, tb):\n self.cleanup()\n\n def cleanup(self):\n from .path import rmtree\n if self._finalizer.detach():\n rmtree(self.name)\n\n\ndef fs_str(string):\n \"\"\"Encodes a string into the proper filesystem encoding\n\n Borrowed from pip-tools\n \"\"\"\n if isinstance(string, str):\n return string\n assert not isinstance(string, bytes)\n return string.encode(_fs_encoding)\n\n\n_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n", "path": "pipenv/vendor/vistir/compat.py"}], "after_files": [{"content": "# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport functools\nimport io\nimport os\nimport sys\n\nfrom tempfile import _bin_openflags, _mkstemp_inner, gettempdir\n\nimport six\n\ntry:\n from weakref import finalize\nexcept ImportError:\n from pipenv.vendor.backports.weakref import finalize\n\n\n__all__ = [\"finalize\", \"NamedTemporaryFile\"]\n\n\ntry:\n from tempfile import _infer_return_type\nexcept ImportError:\n\n def _infer_return_type(*args):\n _types = set()\n for arg in args:\n if isinstance(type(arg), six.string_types):\n _types.add(str)\n elif isinstance(type(arg), bytes):\n _types.add(bytes)\n elif arg:\n _types.add(type(arg))\n return _types.pop()\n\n\ndef _sanitize_params(prefix, suffix, dir):\n \"\"\"Common parameter processing for most APIs in this module.\"\"\"\n output_type = _infer_return_type(prefix, suffix, dir)\n if suffix is None:\n suffix = output_type()\n if prefix is None:\n if output_type is str:\n prefix = \"tmp\"\n else:\n prefix = os.fsencode(\"tmp\")\n if dir is None:\n if output_type is str:\n dir = gettempdir()\n else:\n dir = os.fsencode(gettempdir())\n return prefix, suffix, dir, output_type\n\n\nclass _TemporaryFileCloser:\n \"\"\"A separate object allowing proper closing of a temporary file's\n underlying file object, without adding a __del__ method to the\n temporary file.\"\"\"\n\n file = None # Set here since __del__ checks it\n close_called = False\n\n def __init__(self, file, name, delete=True):\n self.file = file\n self.name = name\n self.delete = delete\n\n # NT provides delete-on-close as a primitive, so we don't need\n # the wrapper to do anything special. We still use it so that\n # file.name is useful (i.e. not \"(fdopen)\") with NamedTemporaryFile.\n if os.name != \"nt\":\n\n # Cache the unlinker so we don't get spurious errors at\n # shutdown when the module-level \"os\" is None'd out. 
Note\n # that this must be referenced as self.unlink, because the\n # name TemporaryFileWrapper may also get None'd out before\n # __del__ is called.\n\n def close(self, unlink=os.unlink):\n if not self.close_called and self.file is not None:\n self.close_called = True\n try:\n self.file.close()\n finally:\n if self.delete:\n unlink(self.name)\n\n # Need to ensure the file is deleted on __del__\n\n def __del__(self):\n self.close()\n\n else:\n\n def close(self):\n if not self.close_called:\n self.close_called = True\n self.file.close()\n\n\nclass _TemporaryFileWrapper:\n \"\"\"Temporary file wrapper\n This class provides a wrapper around files opened for\n temporary use. In particular, it seeks to automatically\n remove the file when it is no longer needed.\n \"\"\"\n\n def __init__(self, file, name, delete=True):\n self.file = file\n self.name = name\n self.delete = delete\n self._closer = _TemporaryFileCloser(file, name, delete)\n\n def __getattr__(self, name):\n # Attribute lookups are delegated to the underlying file\n # and cached for non-numeric results\n # (i.e. methods are cached, closed and friends are not)\n file = self.__dict__[\"file\"]\n a = getattr(file, name)\n if hasattr(a, \"__call__\"):\n func = a\n\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n # Avoid closing the file as long as the wrapper is alive,\n # see issue #18879.\n func_wrapper._closer = self._closer\n a = func_wrapper\n if not isinstance(a, int):\n setattr(self, name, a)\n return a\n\n # The underlying __enter__ method returns the wrong object\n # (self.file) so override it to return the wrapper\n\n def __enter__(self):\n self.file.__enter__()\n return self\n\n # Need to trap __exit__ as well to ensure the file gets\n # deleted when used in a with statement\n\n def __exit__(self, exc, value, tb):\n result = self.file.__exit__(exc, value, tb)\n self.close()\n return result\n\n def close(self):\n \"\"\"\n Close the temporary file, possibly deleting it.\n \"\"\"\n self._closer.close()\n\n # iter() doesn't use __getattr__ to find the __iter__ method\n\n def __iter__(self):\n # Don't return iter(self.file), but yield from it to avoid closing\n # file as long as it's being used as iterator (see issue #23700). We\n # can't use 'yield from' here because iter(file) returns the file\n # object itself, which has a close method, and thus the file would get\n # closed when the generator is finalized, due to PEP380 semantics.\n for line in self.file:\n yield line\n\n\ndef NamedTemporaryFile(\n mode=\"w+b\",\n buffering=-1,\n encoding=None,\n newline=None,\n suffix=None,\n prefix=None,\n dir=None,\n delete=True,\n):\n \"\"\"Create and return a temporary file.\n Arguments:\n 'prefix', 'suffix', 'dir' -- as for mkstemp.\n 'mode' -- the mode argument to io.open (default \"w+b\").\n 'buffering' -- the buffer size argument to io.open (default -1).\n 'encoding' -- the encoding argument to io.open (default None)\n 'newline' -- the newline argument to io.open (default None)\n 'delete' -- whether the file is deleted on close (default True).\n The file is created as mkstemp() would do it.\n Returns an object with a file-like interface; the name of the file\n is accessible as its 'name' attribute. 
The file will be automatically\n deleted when it is closed unless the 'delete' argument is set to False.\n \"\"\"\n prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)\n flags = _bin_openflags\n # Setting O_TEMPORARY in the flags causes the OS to delete\n # the file when it is closed. This is only supported by Windows.\n if os.name == \"nt\" and delete:\n flags |= os.O_TEMPORARY\n if sys.version_info < (3, 5):\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)\n else:\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\n try:\n file = io.open(\n fd, mode, buffering=buffering, newline=newline, encoding=encoding\n )\n return _TemporaryFileWrapper(file, name, delete)\n\n except BaseException:\n os.unlink(name)\n os.close(fd)\n raise\n", "path": "pipenv/vendor/vistir/backports/tempfile.py"}, {"content": "# -*- coding=utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nimport warnings\n\nfrom tempfile import mkdtemp\n\nimport six\n\n\n__all__ = [\n \"Path\",\n \"get_terminal_size\",\n \"finalize\",\n \"partialmethod\",\n \"JSONDecodeError\",\n \"ResourceWarning\",\n \"FileNotFoundError\",\n \"fs_str\",\n \"TemporaryDirectory\",\n \"NamedTemporaryFile\",\n]\n\nif sys.version_info >= (3, 5):\n from pathlib import Path\n\nelse:\n from pathlib2 import Path\n\nif sys.version_info < (3, 3):\n from pipenv.vendor.backports.shutil_get_terminal_size import get_terminal_size\n from .backports.tempfile import NamedTemporaryFile\nelse:\n from tempfile import NamedTemporaryFile\n from shutil import get_terminal_size\n\ntry:\n from weakref import finalize\nexcept ImportError:\n from pipenv.vendor.backports.weakref import finalize\n\ntry:\n from functools import partialmethod\nexcept Exception:\n from .backports.functools import partialmethod\n\ntry:\n from json import JSONDecodeError\nexcept ImportError: # Old Pythons.\n JSONDecodeError = ValueError\n\nif six.PY2:\n\n class ResourceWarning(Warning):\n pass\n\n class FileNotFoundError(IOError):\n pass\n\nelse:\n from builtins import ResourceWarning, FileNotFoundError\n\n class ResourceWarning(ResourceWarning):\n pass\n\n class FileNotFoundError(FileNotFoundError):\n pass\n\n\nclass TemporaryDirectory(object):\n \"\"\"Create and return a temporary directory. This has the same\n behavior as mkdtemp but can be used as a context manager. 
For\n example:\n\n with TemporaryDirectory() as tmpdir:\n ...\n\n Upon exiting the context, the directory and everything contained\n in it are removed.\n \"\"\"\n\n def __init__(self, suffix=\"\", prefix=None, dir=None):\n if \"RAM_DISK\" in os.environ:\n import uuid\n\n name = uuid.uuid4().hex\n dir_name = os.path.join(os.environ[\"RAM_DISK\"].strip(), name)\n os.mkdir(dir_name)\n self.name = dir_name\n else:\n suffix = suffix if suffix else \"\"\n if not prefix:\n self.name = mkdtemp(suffix=suffix, dir=dir)\n else:\n self.name = mkdtemp(suffix, prefix, dir)\n self._finalizer = finalize(\n self,\n self._cleanup,\n self.name,\n warn_message=\"Implicitly cleaning up {!r}\".format(self),\n )\n\n @classmethod\n def _cleanup(cls, name, warn_message):\n from .path import rmtree\n rmtree(name)\n warnings.warn(warn_message, ResourceWarning)\n\n def __repr__(self):\n return \"<{} {!r}>\".format(self.__class__.__name__, self.name)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc, value, tb):\n self.cleanup()\n\n def cleanup(self):\n from .path import rmtree\n if self._finalizer.detach():\n rmtree(self.name)\n\n\ndef fs_str(string):\n \"\"\"Encodes a string into the proper filesystem encoding\n\n Borrowed from pip-tools\n \"\"\"\n if isinstance(string, str):\n return string\n assert not isinstance(string, bytes)\n return string.encode(_fs_encoding)\n\n\n_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()\n", "path": "pipenv/vendor/vistir/compat.py"}]}
| 4,059 | 317 |
gh_patches_debug_3846
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
always_run + no `files` still crashes on `KeyError: files`
I was under the impression `files` was completely ignored for `always_run`; I guess not!
Here's a small reproduction:
```yaml
- repo: local
hooks:
- id: foo
name: foo
always_run: true
entry: bash -c 'echo hello && exit 1'
language: system
```
```
$ ./venv-pre_commit/bin/pre-commit run foo
An unexpected error has occurred: KeyError: u'files'
Check the log at ~/.pre-commit/pre-commit.log
```
```
$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: KeyError: u'files'
Traceback (most recent call last):
File "/tmp/foo/pre-commit/pre_commit/error_handler.py", line 48, in error_handler
yield
File "/tmp/foo/pre-commit/pre_commit/main.py", line 226, in main
return run(runner, args)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 235, in run
return _run_hooks(repo_hooks, args, environ)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 155, in _run_hooks
retval |= _run_single_hook(hook, repo, args, skips, cols)
File "/tmp/foo/pre-commit/pre_commit/commands/run.py", line 60, in _run_single_hook
filenames = get_filenames(args, hook['files'], hook['exclude'])
KeyError: u'files'
```
--- END ISSUE ---
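The crash happens because `_run_single_hook()` indexes `hook['files']` unconditionally, while a local `always_run` hook like the one in the reproduction never defines `files` at all. A minimal illustration of the failure and of the tolerant lookup, with the hook dict contents assumed purely for demonstration:

```python
# Illustration only: a local always_run hook carries no 'files' key.
hook = {'id': 'foo', 'name': 'foo', 'always_run': True, 'exclude': '^$'}

try:
    hook['files']  # what the crashing code does
except KeyError as exc:
    print('crashes with KeyError:', exc)

include_expr = hook.get('files', '')  # tolerant lookup; '' matches any path
print(repr(include_expr))
```

Under a regex search an empty include pattern matches every filename, which lines up with the intent of `always_run`.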
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import logging
5 import os
6 import subprocess
7 import sys
8
9 from pre_commit import color
10 from pre_commit import git
11 from pre_commit import output
12 from pre_commit.output import get_hook_message
13 from pre_commit.staged_files_only import staged_files_only
14 from pre_commit.util import cmd_output
15 from pre_commit.util import noop_context
16
17
18 logger = logging.getLogger('pre_commit')
19
20
21 def _get_skips(environ):
22 skips = environ.get('SKIP', '')
23 return {skip.strip() for skip in skips.split(',') if skip.strip()}
24
25
26 def _hook_msg_start(hook, verbose):
27 return '{}{}'.format(
28 '[{}] '.format(hook['id']) if verbose else '',
29 hook['name'],
30 )
31
32
33 def get_changed_files(new, old):
34 return cmd_output(
35 'git', 'diff', '--name-only', '{}...{}'.format(old, new),
36 )[1].splitlines()
37
38
39 def get_filenames(args, include_expr, exclude_expr):
40 if args.origin and args.source:
41 getter = git.get_files_matching(
42 lambda: get_changed_files(args.origin, args.source),
43 )
44 elif args.files:
45 getter = git.get_files_matching(lambda: args.files)
46 elif args.all_files:
47 getter = git.get_all_files_matching
48 elif git.is_in_merge_conflict():
49 getter = git.get_conflicted_files_matching
50 else:
51 getter = git.get_staged_files_matching
52 return getter(include_expr, exclude_expr)
53
54
55 SKIPPED = 'Skipped'
56 NO_FILES = '(no files to check)'
57
58
59 def _run_single_hook(hook, repo, args, skips, cols):
60 filenames = get_filenames(args, hook['files'], hook['exclude'])
61 if hook['id'] in skips:
62 output.write(get_hook_message(
63 _hook_msg_start(hook, args.verbose),
64 end_msg=SKIPPED,
65 end_color=color.YELLOW,
66 use_color=args.color,
67 cols=cols,
68 ))
69 return 0
70 elif not filenames and not hook['always_run']:
71 output.write(get_hook_message(
72 _hook_msg_start(hook, args.verbose),
73 postfix=NO_FILES,
74 end_msg=SKIPPED,
75 end_color=color.TURQUOISE,
76 use_color=args.color,
77 cols=cols,
78 ))
79 return 0
80
81 # Print the hook and the dots first in case the hook takes hella long to
82 # run.
83 output.write(get_hook_message(
84 _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,
85 ))
86 sys.stdout.flush()
87
88 diff_before = cmd_output('git', 'diff', retcode=None, encoding=None)
89 retcode, stdout, stderr = repo.run_hook(
90 hook,
91 tuple(filenames) if hook['pass_filenames'] else (),
92 )
93 diff_after = cmd_output('git', 'diff', retcode=None, encoding=None)
94
95 file_modifications = diff_before != diff_after
96
97 # If the hook makes changes, fail the commit
98 if file_modifications:
99 retcode = 1
100
101 if retcode:
102 retcode = 1
103 print_color = color.RED
104 pass_fail = 'Failed'
105 else:
106 retcode = 0
107 print_color = color.GREEN
108 pass_fail = 'Passed'
109
110 output.write_line(color.format_color(pass_fail, print_color, args.color))
111
112 if (stdout or stderr or file_modifications) and (retcode or args.verbose):
113 output.write_line('hookid: {}\n'.format(hook['id']))
114
115 # Print a message if failing due to file modifications
116 if file_modifications:
117 output.write('Files were modified by this hook.')
118
119 if stdout or stderr:
120 output.write_line(' Additional output:')
121
122 output.write_line()
123
124 for out in (stdout, stderr):
125 assert type(out) is bytes, type(out)
126 if out.strip():
127 output.write_line(out.strip(), logfile_name=hook['log_file'])
128 output.write_line()
129
130 return retcode
131
132
133 def _compute_cols(hooks, verbose):
134 """Compute the number of columns to display hook messages. The widest
135 that will be displayed is in the no files skipped case:
136
137 Hook name...(no files to check) Skipped
138
139 or in the verbose case
140
141 Hook name [hookid]...(no files to check) Skipped
142 """
143 if hooks:
144 name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)
145 else:
146 name_len = 0
147
148 cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)
149 return max(cols, 80)
150
151
152 def _run_hooks(repo_hooks, args, environ):
153 """Actually run the hooks."""
154 skips = _get_skips(environ)
155 cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)
156 retval = 0
157 for repo, hook in repo_hooks:
158 retval |= _run_single_hook(hook, repo, args, skips, cols)
159 if (
160 retval and
161 args.show_diff_on_failure and
162 subprocess.call(('git', 'diff', '--quiet')) != 0
163 ):
164 print('All changes made by hooks:')
165 subprocess.call(('git', 'diff'))
166 return retval
167
168
169 def get_repo_hooks(runner):
170 for repo in runner.repositories:
171 for _, hook in repo.hooks:
172 yield (repo, hook)
173
174
175 def _has_unmerged_paths(runner):
176 _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])
177 return bool(stdout.strip())
178
179
180 def _has_unstaged_config(runner):
181 retcode, _, _ = runner.cmd_runner.run(
182 ('git', 'diff', '--exit-code', runner.config_file_path),
183 retcode=None,
184 )
185 # be explicit, other git errors don't mean it has an unstaged config.
186 return retcode == 1
187
188
189 def run(runner, args, environ=os.environ):
190 no_stash = args.no_stash or args.all_files or bool(args.files)
191
192 # Check if we have unresolved merge conflict files and fail fast.
193 if _has_unmerged_paths(runner):
194 logger.error('Unmerged files. Resolve before committing.')
195 return 1
196 if bool(args.source) != bool(args.origin):
197 logger.error('Specify both --origin and --source.')
198 return 1
199 if _has_unstaged_config(runner) and not no_stash:
200 if args.allow_unstaged_config:
201 logger.warn(
202 'You have an unstaged config file and have specified the '
203 '--allow-unstaged-config option.\n'
204 'Note that your config will be stashed before the config is '
205 'parsed unless --no-stash is specified.',
206 )
207 else:
208 logger.error(
209 'Your .pre-commit-config.yaml is unstaged.\n'
210 '`git add .pre-commit-config.yaml` to fix this.\n'
211 'Run pre-commit with --allow-unstaged-config to silence this.'
212 )
213 return 1
214
215 if no_stash:
216 ctx = noop_context()
217 else:
218 ctx = staged_files_only(runner.cmd_runner)
219
220 with ctx:
221 repo_hooks = list(get_repo_hooks(runner))
222
223 if args.hook:
224 repo_hooks = [
225 (repo, hook) for repo, hook in repo_hooks
226 if hook['id'] == args.hook
227 ]
228 if not repo_hooks:
229 output.write_line('No hook with id `{}`'.format(args.hook))
230 return 1
231
232 # Filter hooks for stages
233 repo_hooks = [
234 (repo, hook) for repo, hook in repo_hooks
235 if not hook['stages'] or args.hook_stage in hook['stages']
236 ]
237
238 return _run_hooks(repo_hooks, args, environ)
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -57,7 +57,7 @@
def _run_single_hook(hook, repo, args, skips, cols):
- filenames = get_filenames(args, hook['files'], hook['exclude'])
+ filenames = get_filenames(args, hook.get('files', ''), hook['exclude'])
if hook['id'] in skips:
output.write(get_hook_message(
_hook_msg_start(hook, args.verbose),
|
{"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -57,7 +57,7 @@\n \n \n def _run_single_hook(hook, repo, args, skips, cols):\n- filenames = get_filenames(args, hook['files'], hook['exclude'])\n+ filenames = get_filenames(args, hook.get('files', ''), hook['exclude'])\n if hook['id'] in skips:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n", "issue": "always_run + no `files` still crashes on `KeyError: files`\nI was under the impression `files` was completely ignored for `always_run`, I guess not!\r\n\r\nHere's a small reproduction:\r\n\r\n```yaml\r\n- repo: local\r\n hooks:\r\n - id: foo\r\n name: foo\r\n always_run: true\r\n entry: bash -c 'echo hello && exit 1'\r\n language: system\r\n```\r\n\r\n```\r\n$ ./venv-pre_commit/bin/pre-commit run foo\r\nAn unexpected error has occurred: KeyError: u'files'\r\nCheck the log at ~/.pre-commit/pre-commit.log\r\n```\r\n\r\n```\r\n$ cat ~/.pre-commit/pre-commit.log \r\nAn unexpected error has occurred: KeyError: u'files'\r\nTraceback (most recent call last):\r\n File \"/tmp/foo/pre-commit/pre_commit/error_handler.py\", line 48, in error_handler\r\n yield\r\n File \"/tmp/foo/pre-commit/pre_commit/main.py\", line 226, in main\r\n return run(runner, args)\r\n File \"/tmp/foo/pre-commit/pre_commit/commands/run.py\", line 235, in run\r\n return _run_hooks(repo_hooks, args, environ)\r\n File \"/tmp/foo/pre-commit/pre_commit/commands/run.py\", line 155, in _run_hooks\r\n retval |= _run_single_hook(hook, repo, args, skips, cols)\r\n File \"/tmp/foo/pre-commit/pre_commit/commands/run.py\", line 60, in _run_single_hook\r\n filenames = get_filenames(args, hook['files'], hook['exclude'])\r\nKeyError: u'files'\r\n```\r\n\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\ndef _hook_msg_start(hook, verbose):\n return '{}{}'.format(\n '[{}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{}...{}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _run_single_hook(hook, repo, args, skips, cols):\n filenames = get_filenames(args, hook['files'], hook['exclude'])\n if hook['id'] in skips:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n 
use_color=args.color,\n cols=cols,\n ))\n return 0\n elif not filenames and not hook['always_run']:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=args.color,\n cols=cols,\n ))\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,\n ))\n sys.stdout.flush()\n\n diff_before = cmd_output('git', 'diff', retcode=None, encoding=None)\n retcode, stdout, stderr = repo.run_hook(\n hook,\n tuple(filenames) if hook['pass_filenames'] else (),\n )\n diff_after = cmd_output('git', 'diff', retcode=None, encoding=None)\n\n file_modifications = diff_before != diff_after\n\n # If the hook makes changes, fail the commit\n if file_modifications:\n retcode = 1\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n output.write_line(color.format_color(pass_fail, print_color, args.color))\n\n if (stdout or stderr or file_modifications) and (retcode or args.verbose):\n output.write_line('hookid: {}\\n'.format(hook['id']))\n\n # Print a message if failing due to file modifications\n if file_modifications:\n output.write('Files were modified by this hook.')\n\n if stdout or stderr:\n output.write_line(' Additional output:')\n\n output.write_line()\n\n for out in (stdout, stderr):\n assert type(out) is bytes, type(out)\n if out.strip():\n output.write_line(out.strip(), logfile_name=hook['log_file'])\n output.write_line()\n\n return retcode\n\n\ndef _compute_cols(hooks, verbose):\n \"\"\"Compute the number of columns to display hook messages. The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n\n or in the verbose case\n\n Hook name [hookid]...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _run_hooks(repo_hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, skips, cols)\n if (\n retval and\n args.show_diff_on_failure and\n subprocess.call(('git', 'diff', '--quiet')) != 0\n ):\n print('All changes made by hooks:')\n subprocess.call(('git', 'diff'))\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n output.write_line('No hook with id `{}`'.format(args.hook))\n return 1\n\n # Filter hooks for stages\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if not hook['stages'] or args.hook_stage in hook['stages']\n ]\n\n return _run_hooks(repo_hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\ndef _hook_msg_start(hook, verbose):\n return '{}{}'.format(\n '[{}] '.format(hook['id']) if verbose else '',\n hook['name'],\n )\n\n\ndef get_changed_files(new, old):\n return cmd_output(\n 'git', 'diff', '--name-only', '{}...{}'.format(old, new),\n )[1].splitlines()\n\n\ndef get_filenames(args, include_expr, exclude_expr):\n if args.origin and args.source:\n getter = git.get_files_matching(\n lambda: get_changed_files(args.origin, args.source),\n )\n elif args.files:\n getter = git.get_files_matching(lambda: args.files)\n elif args.all_files:\n getter = git.get_all_files_matching\n elif git.is_in_merge_conflict():\n getter = git.get_conflicted_files_matching\n else:\n getter = git.get_staged_files_matching\n return getter(include_expr, exclude_expr)\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _run_single_hook(hook, repo, args, skips, cols):\n filenames = get_filenames(args, hook.get('files', ''), hook['exclude'])\n if hook['id'] in skips:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=args.color,\n cols=cols,\n ))\n return 0\n elif not filenames and not hook['always_run']:\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose),\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=args.color,\n cols=cols,\n ))\n return 0\n\n # Print the hook and the dots first in case the hook takes hella long to\n # run.\n output.write(get_hook_message(\n _hook_msg_start(hook, args.verbose), end_len=6, cols=cols,\n ))\n sys.stdout.flush()\n\n diff_before = cmd_output('git', 'diff', 
retcode=None, encoding=None)\n retcode, stdout, stderr = repo.run_hook(\n hook,\n tuple(filenames) if hook['pass_filenames'] else (),\n )\n diff_after = cmd_output('git', 'diff', retcode=None, encoding=None)\n\n file_modifications = diff_before != diff_after\n\n # If the hook makes changes, fail the commit\n if file_modifications:\n retcode = 1\n\n if retcode:\n retcode = 1\n print_color = color.RED\n pass_fail = 'Failed'\n else:\n retcode = 0\n print_color = color.GREEN\n pass_fail = 'Passed'\n\n output.write_line(color.format_color(pass_fail, print_color, args.color))\n\n if (stdout or stderr or file_modifications) and (retcode or args.verbose):\n output.write_line('hookid: {}\\n'.format(hook['id']))\n\n # Print a message if failing due to file modifications\n if file_modifications:\n output.write('Files were modified by this hook.')\n\n if stdout or stderr:\n output.write_line(' Additional output:')\n\n output.write_line()\n\n for out in (stdout, stderr):\n assert type(out) is bytes, type(out)\n if out.strip():\n output.write_line(out.strip(), logfile_name=hook['log_file'])\n output.write_line()\n\n return retcode\n\n\ndef _compute_cols(hooks, verbose):\n \"\"\"Compute the number of columns to display hook messages. The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n\n or in the verbose case\n\n Hook name [hookid]...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(_hook_msg_start(hook, verbose)) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _run_hooks(repo_hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(hook, repo, args, skips, cols)\n if (\n retval and\n args.show_diff_on_failure and\n subprocess.call(('git', 'diff', '--quiet')) != 0\n ):\n print('All changes made by hooks:')\n subprocess.call(('git', 'diff'))\n return retval\n\n\ndef get_repo_hooks(runner):\n for repo in runner.repositories:\n for _, hook in repo.hooks:\n yield (repo, hook)\n\n\ndef _has_unmerged_paths(runner):\n _, stdout, _ = runner.cmd_runner.run(['git', 'ls-files', '--unmerged'])\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(runner):\n retcode, _, _ = runner.cmd_runner.run(\n ('git', 'diff', '--exit-code', runner.config_file_path),\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(runner, args, environ=os.environ):\n no_stash = args.no_stash or args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths(runner):\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(runner) and not no_stash:\n if args.allow_unstaged_config:\n logger.warn(\n 'You have an unstaged config file and have specified the '\n '--allow-unstaged-config option.\\n'\n 'Note that your config will be stashed before the config is '\n 'parsed unless --no-stash is specified.',\n )\n else:\n logger.error(\n 'Your .pre-commit-config.yaml is unstaged.\\n'\n '`git add .pre-commit-config.yaml` to fix this.\\n'\n 'Run pre-commit with --allow-unstaged-config to silence this.'\n )\n return 1\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(runner.cmd_runner)\n\n with ctx:\n repo_hooks = list(get_repo_hooks(runner))\n\n if args.hook:\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if hook['id'] == args.hook\n ]\n if not repo_hooks:\n output.write_line('No hook with id `{}`'.format(args.hook))\n return 1\n\n # Filter hooks for stages\n repo_hooks = [\n (repo, hook) for repo, hook in repo_hooks\n if not hook['stages'] or args.hook_stage in hook['stages']\n ]\n\n return _run_hooks(repo_hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}]}
| 2,937 | 126 |
gh_patches_debug_42601
|
rasdani/github-patches
|
git_diff
|
sopel-irc__sopel-1753
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
safety: _clean_cache() doesn't, not really
Behold, the `safety` module's cache-cleaning function:
https://github.com/sopel-irc/sopel/blob/39e3680db18c4bed9801d040f7486c655b95a9a0/sopel/modules/safety.py#L207-L220
It's called once every 24 hours, and by `url_handler()` when the cache has too many entries. It seems to remove precisely _one_ cache entry—the oldest—every time it's called. [To quote @HumorBaby](https://github.com/sopel-irc/sopel/pull/1569#discussion_r278521202), "Not really a `_clean`'ing if you ask me :stuck_out_tongue_closed_eyes:"
Ideally, it would:
* Remove any entries older than some reasonable threshold (a week?)
* If there are still too many entries, continue removing the oldest one until below the limit (presently 1024)
Improvements on this algorithm are, as always, welcome. I'm just tossing out a hastily thrown-together idea for fixing this.
_Discovered in unrelated review: https://github.com/sopel-irc/sopel/pull/1569#discussion_r278354791_
--- END ISSUE ---
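A hedged sketch of the two-step cleanup the issue proposes (age threshold first, then a size cap), assuming `bot.memory['safety_cache']` maps URLs to dicts and that each entry carries a unix timestamp; the key name `fetched` and the exact thresholds are placeholders, since the cleanup code itself is not reproduced below.

```python
# Sketch only: cache entries are assumed to look like {'fetched': <unix time>, ...}.
import time

CACHE_LIMIT = 1024          # present limit mentioned in the issue
MAX_AGE = 7 * 24 * 60 * 60  # one week, per the issue's suggestion


def _clean_cache(bot):
    cache = bot.memory['safety_cache']
    now = time.time()
    # Step 1: drop every entry older than the age threshold.
    for key in [k for k, entry in cache.items() if now - entry['fetched'] > MAX_AGE]:
        del cache[key]
    # Step 2: if the cache is still too large, evict the oldest entries.
    overflow = len(cache) - CACHE_LIMIT
    if overflow > 0:
        for key in sorted(cache, key=lambda k: cache[k]['fetched'])[:overflow]:
            del cache[key]
```

Evicting by age first keeps the common case cheap; the sort only runs when the cache actually exceeds the cap.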
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/safety.py`
Content:
```
1 # coding=utf-8
2 """
3 safety.py - Alerts about malicious URLs
4 Copyright © 2014, Elad Alfassa, <[email protected]>
5 Licensed under the Eiffel Forum License 2.
6
7 This module uses virustotal.com
8 """
9 from __future__ import unicode_literals, absolute_import, print_function, division
10
11 import logging
12 import os.path
13 import re
14 import sys
15 import time
16
17 import requests
18
19 from sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute
20 from sopel.formatting import color, bold
21 from sopel.module import OP
22 import sopel.tools
23
24 try:
25 # This is done separately from the below version if/else because JSONDecodeError
26 # didn't appear until Python 3.5, but Sopel claims support for 3.3+
27 # Redo this whole block of nonsense when dropping py2/old py3 support
28 from json import JSONDecodeError as InvalidJSONResponse
29 except ImportError:
30 InvalidJSONResponse = ValueError
31
32 if sys.version_info.major > 2:
33 unicode = str
34 from urllib.request import urlretrieve
35 from urllib.parse import urlparse
36 else:
37 from urllib import urlretrieve
38 from urlparse import urlparse
39
40
41 LOGGER = logging.getLogger(__name__)
42
43 vt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'
44 malware_domains = set()
45 known_good = []
46
47
48 class SafetySection(StaticSection):
49 enabled_by_default = ValidatedAttribute('enabled_by_default', bool, default=True)
50 """Whether to enable URL safety in all channels where it isn't explicitly disabled."""
51 known_good = ListAttribute('known_good')
52 """List of "known good" domains to ignore."""
53 vt_api_key = ValidatedAttribute('vt_api_key')
54 """Optional VirusTotal API key (improves malicious URL detection)."""
55
56
57 def configure(config):
58 """
59 | name | example | purpose |
60 | ---- | ------- | ------- |
61 | enabled\\_by\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |
62 | known\\_good | sopel.chat,dftba.net | List of "known good" domains to ignore. |
63 | vt\\_api\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |
64 """
65 config.define_section('safety', SafetySection)
66 config.safety.configure_setting(
67 'enabled_by_default',
68 "Enable URL safety in channels that don't specifically disable it?",
69 )
70 config.safety.configure_setting(
71 'known_good',
72 'Enter any domains to whitelist',
73 )
74 config.safety.configure_setting(
75 'vt_api_key',
76 "Optionally, enter a VirusTotal API key to improve malicious URL "
77 "protection.\nOtherwise, only the Malwarebytes DB will be used."
78 )
79
80
81 def setup(bot):
82 bot.config.define_section('safety', SafetySection)
83
84 if 'safety_cache' not in bot.memory:
85 bot.memory['safety_cache'] = sopel.tools.SopelMemory()
86 for item in bot.config.safety.known_good:
87 known_good.append(re.compile(item, re.I))
88
89 loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')
90 if os.path.isfile(loc):
91 if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:
92 # File exists but older than one week — update it
93 _download_malwaredomains_db(loc)
94 else:
95 _download_malwaredomains_db(loc)
96 with open(loc, 'r') as f:
97 for line in f:
98 clean_line = unicode(line).strip().lower()
99 if clean_line != '':
100 malware_domains.add(clean_line)
101
102
103 def shutdown(bot):
104 try:
105 del bot.memory['safety_cache']
106 except KeyError:
107 pass
108
109
110 def _download_malwaredomains_db(path):
111 url = 'https://mirror1.malwaredomains.com/files/justdomains'
112 LOGGER.info('Downloading malwaredomains db from %s', url)
113 urlretrieve(url, path)
114
115
116 @sopel.module.rule(r'(?u).*(https?://\S+).*')
117 @sopel.module.priority('high')
118 def url_handler(bot, trigger):
119 """Checks for malicious URLs"""
120 check = True # Enable URL checking
121 strict = False # Strict mode: kick on malicious URL
122 positives = 0 # Number of engines saying it's malicious
123 total = 0 # Number of total engines
124 use_vt = True # Use VirusTotal
125 check = bot.config.safety.enabled_by_default
126 if check is None:
127 # If not set, assume default
128 check = True
129 # DB overrides config:
130 setting = bot.db.get_channel_value(trigger.sender, 'safety')
131 if setting is not None:
132 if setting == 'off':
133 return # Not checking
134 elif setting in ['on', 'strict', 'local', 'local strict']:
135 check = True
136 if setting == 'strict' or setting == 'local strict':
137 strict = True
138 if setting == 'local' or setting == 'local strict':
139 use_vt = False
140
141 if not check:
142 return # Not overridden by DB, configured default off
143
144 try:
145 netloc = urlparse(trigger.group(1)).netloc
146 except ValueError:
147 return # Invalid IPv6 URL
148
149 if any(regex.search(netloc) for regex in known_good):
150 return # Whitelisted
151
152 apikey = bot.config.safety.vt_api_key
153 try:
154 if apikey is not None and use_vt:
155 payload = {'resource': unicode(trigger),
156 'apikey': apikey,
157 'scan': '1'}
158
159 if trigger not in bot.memory['safety_cache']:
160 r = requests.post(vt_base_api_url + 'report', data=payload)
161 r.raise_for_status()
162 result = r.json()
163 age = time.time()
164 data = {'positives': result['positives'],
165 'total': result['total'],
166 'age': age}
167 bot.memory['safety_cache'][trigger] = data
168 if len(bot.memory['safety_cache']) > 1024:
169 _clean_cache(bot)
170 else:
171 print('using cache')
172 result = bot.memory['safety_cache'][trigger]
173 positives = result['positives']
174 total = result['total']
175 except requests.exceptions.RequestException:
176 # Ignoring exceptions with VT so MalwareDomains will always work
177 LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)
178 except InvalidJSONResponse:
179 # Ignoring exceptions with VT so MalwareDomains will always work
180 LOGGER.debug('[VirusTotal] Malformed response (invalid JSON).', exc_info=True)
181
182 if unicode(netloc).lower() in malware_domains:
183 # malwaredomains is more trustworthy than some VT engines
184 # therefore it gets a weight of 10 engines when calculating confidence
185 positives += 10
186 total += 10
187
188 if positives > 1:
189 # Possibly malicious URL detected!
190 confidence = '{}%'.format(round((positives / total) * 100))
191 msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)
192 msg += '(confidence %s - %s/%s)' % (confidence, positives, total)
193 bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)
194 if strict:
195 bot.kick(trigger.nick, trigger.sender, 'Posted a malicious link')
196
197
198 @sopel.module.commands('safety')
199 def toggle_safety(bot, trigger):
200 """Set safety setting for channel"""
201 if not trigger.admin and bot.channels[trigger.sender].privileges[trigger.nick] < OP:
202 bot.reply('Only channel operators can change safety settings')
203 return
204 allowed_states = ['strict', 'on', 'off', 'local', 'local strict']
205 if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:
206 options = ' / '.join(allowed_states)
207 bot.reply('Available options: %s' % options)
208 return
209
210 channel = trigger.sender.lower()
211 bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())
212 bot.reply('Safety is now set to "%s" on this channel' % trigger.group(2))
213
214
215 # Clean the cache every day
216 # Code above also calls this if there are too many cache entries
217 @sopel.module.interval(24 * 60 * 60)
218 def _clean_cache(bot):
219 """Cleans up old entries in URL cache"""
220 # TODO: probably should use locks here, to make sure stuff doesn't explode
221 oldest_key_age = 0
222 oldest_key = ''
223 for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):
224 if data['age'] > oldest_key_age:
225 oldest_key_age = data['age']
226 oldest_key = key
227 if oldest_key in bot.memory['safety_cache']:
228 del bot.memory['safety_cache'][oldest_key]
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/modules/safety.py b/sopel/modules/safety.py
--- a/sopel/modules/safety.py
+++ b/sopel/modules/safety.py
@@ -12,6 +12,7 @@
import os.path
import re
import sys
+import threading
import time
import requests
@@ -43,6 +44,7 @@
vt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'
malware_domains = set()
known_good = []
+cache_limit = 512
class SafetySection(StaticSection):
@@ -83,6 +85,8 @@
if 'safety_cache' not in bot.memory:
bot.memory['safety_cache'] = sopel.tools.SopelMemory()
+ if 'safety_cache_lock' not in bot.memory:
+ bot.memory['safety_cache_lock'] = threading.Lock()
for item in bot.config.safety.known_good:
known_good.append(re.compile(item, re.I))
@@ -101,10 +105,8 @@
def shutdown(bot):
- try:
- del bot.memory['safety_cache']
- except KeyError:
- pass
+ bot.memory.pop('safety_cache', None)
+ bot.memory.pop('safety_cache_lock', None)
def _download_malwaredomains_db(path):
@@ -160,12 +162,12 @@
r = requests.post(vt_base_api_url + 'report', data=payload)
r.raise_for_status()
result = r.json()
- age = time.time()
+ fetched = time.time()
data = {'positives': result['positives'],
'total': result['total'],
- 'age': age}
+ 'fetched': fetched}
bot.memory['safety_cache'][trigger] = data
- if len(bot.memory['safety_cache']) > 1024:
+ if len(bot.memory['safety_cache']) >= (2 * cache_limit):
_clean_cache(bot)
else:
print('using cache')
@@ -216,13 +218,34 @@
# Code above also calls this if there are too many cache entries
@sopel.module.interval(24 * 60 * 60)
def _clean_cache(bot):
- """Cleans up old entries in URL cache"""
- # TODO: probably should use locks here, to make sure stuff doesn't explode
- oldest_key_age = 0
- oldest_key = ''
- for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):
- if data['age'] > oldest_key_age:
- oldest_key_age = data['age']
- oldest_key = key
- if oldest_key in bot.memory['safety_cache']:
- del bot.memory['safety_cache'][oldest_key]
+ """Cleans up old entries in URL safety cache."""
+ if bot.memory['safety_cache_lock'].acquire(False):
+ LOGGER.info('Starting safety cache cleanup...')
+ try:
+ # clean up by age first
+ cutoff = time.time() - (7 * 24 * 60 * 60) # 7 days ago
+ old_keys = []
+ for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):
+ if data['fetched'] <= cutoff:
+ old_keys.append(key)
+ for key in old_keys:
+ bot.memory['safety_cache'].pop(key, None)
+
+ # clean up more values if the cache is still too big
+ overage = bot.memory['safety_cache'] - cache_limit
+ if overage > 0:
+ extra_keys = sorted(
+ (data.fetched, key)
+ for (key, data)
+ in bot.memory['safety_cache'].items())[:overage]
+ for (_, key) in extra_keys:
+ bot.memory['safety_cache'].pop(key, None)
+ finally:
+ # No matter what errors happen (or not), release the lock
+ bot.memory['safety_cache_lock'].release()
+
+ LOGGER.info('Safety cache cleanup finished.')
+ else:
+ LOGGER.info(
+ 'Skipping safety cache cleanup: Cache is locked, '
+ 'cleanup already running.')
|
{"golden_diff": "diff --git a/sopel/modules/safety.py b/sopel/modules/safety.py\n--- a/sopel/modules/safety.py\n+++ b/sopel/modules/safety.py\n@@ -12,6 +12,7 @@\n import os.path\n import re\n import sys\n+import threading\n import time\n \n import requests\n@@ -43,6 +44,7 @@\n vt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'\n malware_domains = set()\n known_good = []\n+cache_limit = 512\n \n \n class SafetySection(StaticSection):\n@@ -83,6 +85,8 @@\n \n if 'safety_cache' not in bot.memory:\n bot.memory['safety_cache'] = sopel.tools.SopelMemory()\n+ if 'safety_cache_lock' not in bot.memory:\n+ bot.memory['safety_cache_lock'] = threading.Lock()\n for item in bot.config.safety.known_good:\n known_good.append(re.compile(item, re.I))\n \n@@ -101,10 +105,8 @@\n \n \n def shutdown(bot):\n- try:\n- del bot.memory['safety_cache']\n- except KeyError:\n- pass\n+ bot.memory.pop('safety_cache', None)\n+ bot.memory.pop('safety_cache_lock', None)\n \n \n def _download_malwaredomains_db(path):\n@@ -160,12 +162,12 @@\n r = requests.post(vt_base_api_url + 'report', data=payload)\n r.raise_for_status()\n result = r.json()\n- age = time.time()\n+ fetched = time.time()\n data = {'positives': result['positives'],\n 'total': result['total'],\n- 'age': age}\n+ 'fetched': fetched}\n bot.memory['safety_cache'][trigger] = data\n- if len(bot.memory['safety_cache']) > 1024:\n+ if len(bot.memory['safety_cache']) >= (2 * cache_limit):\n _clean_cache(bot)\n else:\n print('using cache')\n@@ -216,13 +218,34 @@\n # Code above also calls this if there are too many cache entries\n @sopel.module.interval(24 * 60 * 60)\n def _clean_cache(bot):\n- \"\"\"Cleans up old entries in URL cache\"\"\"\n- # TODO: probably should use locks here, to make sure stuff doesn't explode\n- oldest_key_age = 0\n- oldest_key = ''\n- for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):\n- if data['age'] > oldest_key_age:\n- oldest_key_age = data['age']\n- oldest_key = key\n- if oldest_key in bot.memory['safety_cache']:\n- del bot.memory['safety_cache'][oldest_key]\n+ \"\"\"Cleans up old entries in URL safety cache.\"\"\"\n+ if bot.memory['safety_cache_lock'].acquire(False):\n+ LOGGER.info('Starting safety cache cleanup...')\n+ try:\n+ # clean up by age first\n+ cutoff = time.time() - (7 * 24 * 60 * 60) # 7 days ago\n+ old_keys = []\n+ for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):\n+ if data['fetched'] <= cutoff:\n+ old_keys.append(key)\n+ for key in old_keys:\n+ bot.memory['safety_cache'].pop(key, None)\n+\n+ # clean up more values if the cache is still too big\n+ overage = bot.memory['safety_cache'] - cache_limit\n+ if overage > 0:\n+ extra_keys = sorted(\n+ (data.fetched, key)\n+ for (key, data)\n+ in bot.memory['safety_cache'].items())[:overage]\n+ for (_, key) in extra_keys:\n+ bot.memory['safety_cache'].pop(key, None)\n+ finally:\n+ # No matter what errors happen (or not), release the lock\n+ bot.memory['safety_cache_lock'].release()\n+\n+ LOGGER.info('Safety cache cleanup finished.')\n+ else:\n+ LOGGER.info(\n+ 'Skipping safety cache cleanup: Cache is locked, '\n+ 'cleanup already running.')\n", "issue": "safety: _clean_cache() doesn't, not really\nBehold, the `safety` module's cache-cleaning function:\r\n\r\nhttps://github.com/sopel-irc/sopel/blob/39e3680db18c4bed9801d040f7486c655b95a9a0/sopel/modules/safety.py#L207-L220\r\n\r\nIt's called once every 24 hours, and by `url_handler()` when the cache has too many entries. 
It seems to remove precisely _one_ cache entry\u2014the oldest\u2014every time it's called. [To quote @HumorBaby](https://github.com/sopel-irc/sopel/pull/1569#discussion_r278521202), \"Not really a `_clean`'ing if you ask me :stuck_out_tongue_closed_eyes:\"\r\n\r\nIdeally, it would:\r\n\r\n * Remove any entries older than some reasonable threshold (a week?)\r\n * If there are still too many entries, continue removing the oldest one until below the limit (presently 1024)\r\n\r\nImprovements on this algorithm are, as always, welcome. I'm just tossing out a hastily thrown-together idea for fixing this.\r\n\r\n_Discovered in unrelated review: https://github.com/sopel-irc/sopel/pull/1569#discussion_r278354791_\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nsafety.py - Alerts about malicious URLs\nCopyright \u00a9 2014, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nThis module uses virustotal.com\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport logging\nimport os.path\nimport re\nimport sys\nimport time\n\nimport requests\n\nfrom sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute\nfrom sopel.formatting import color, bold\nfrom sopel.module import OP\nimport sopel.tools\n\ntry:\n # This is done separately from the below version if/else because JSONDecodeError\n # didn't appear until Python 3.5, but Sopel claims support for 3.3+\n # Redo this whole block of nonsense when dropping py2/old py3 support\n from json import JSONDecodeError as InvalidJSONResponse\nexcept ImportError:\n InvalidJSONResponse = ValueError\n\nif sys.version_info.major > 2:\n unicode = str\n from urllib.request import urlretrieve\n from urllib.parse import urlparse\nelse:\n from urllib import urlretrieve\n from urlparse import urlparse\n\n\nLOGGER = logging.getLogger(__name__)\n\nvt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'\nmalware_domains = set()\nknown_good = []\n\n\nclass SafetySection(StaticSection):\n enabled_by_default = ValidatedAttribute('enabled_by_default', bool, default=True)\n \"\"\"Whether to enable URL safety in all channels where it isn't explicitly disabled.\"\"\"\n known_good = ListAttribute('known_good')\n \"\"\"List of \"known good\" domains to ignore.\"\"\"\n vt_api_key = ValidatedAttribute('vt_api_key')\n \"\"\"Optional VirusTotal API key (improves malicious URL detection).\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | enabled\\\\_by\\\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |\n | known\\\\_good | sopel.chat,dftba.net | List of \"known good\" domains to ignore. 
|\n | vt\\\\_api\\\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |\n \"\"\"\n config.define_section('safety', SafetySection)\n config.safety.configure_setting(\n 'enabled_by_default',\n \"Enable URL safety in channels that don't specifically disable it?\",\n )\n config.safety.configure_setting(\n 'known_good',\n 'Enter any domains to whitelist',\n )\n config.safety.configure_setting(\n 'vt_api_key',\n \"Optionally, enter a VirusTotal API key to improve malicious URL \"\n \"protection.\\nOtherwise, only the Malwarebytes DB will be used.\"\n )\n\n\ndef setup(bot):\n bot.config.define_section('safety', SafetySection)\n\n if 'safety_cache' not in bot.memory:\n bot.memory['safety_cache'] = sopel.tools.SopelMemory()\n for item in bot.config.safety.known_good:\n known_good.append(re.compile(item, re.I))\n\n loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')\n if os.path.isfile(loc):\n if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:\n # File exists but older than one week \u2014 update it\n _download_malwaredomains_db(loc)\n else:\n _download_malwaredomains_db(loc)\n with open(loc, 'r') as f:\n for line in f:\n clean_line = unicode(line).strip().lower()\n if clean_line != '':\n malware_domains.add(clean_line)\n\n\ndef shutdown(bot):\n try:\n del bot.memory['safety_cache']\n except KeyError:\n pass\n\n\ndef _download_malwaredomains_db(path):\n url = 'https://mirror1.malwaredomains.com/files/justdomains'\n LOGGER.info('Downloading malwaredomains db from %s', url)\n urlretrieve(url, path)\n\n\[email protected](r'(?u).*(https?://\\S+).*')\[email protected]('high')\ndef url_handler(bot, trigger):\n \"\"\"Checks for malicious URLs\"\"\"\n check = True # Enable URL checking\n strict = False # Strict mode: kick on malicious URL\n positives = 0 # Number of engines saying it's malicious\n total = 0 # Number of total engines\n use_vt = True # Use VirusTotal\n check = bot.config.safety.enabled_by_default\n if check is None:\n # If not set, assume default\n check = True\n # DB overrides config:\n setting = bot.db.get_channel_value(trigger.sender, 'safety')\n if setting is not None:\n if setting == 'off':\n return # Not checking\n elif setting in ['on', 'strict', 'local', 'local strict']:\n check = True\n if setting == 'strict' or setting == 'local strict':\n strict = True\n if setting == 'local' or setting == 'local strict':\n use_vt = False\n\n if not check:\n return # Not overridden by DB, configured default off\n\n try:\n netloc = urlparse(trigger.group(1)).netloc\n except ValueError:\n return # Invalid IPv6 URL\n\n if any(regex.search(netloc) for regex in known_good):\n return # Whitelisted\n\n apikey = bot.config.safety.vt_api_key\n try:\n if apikey is not None and use_vt:\n payload = {'resource': unicode(trigger),\n 'apikey': apikey,\n 'scan': '1'}\n\n if trigger not in bot.memory['safety_cache']:\n r = requests.post(vt_base_api_url + 'report', data=payload)\n r.raise_for_status()\n result = r.json()\n age = time.time()\n data = {'positives': result['positives'],\n 'total': result['total'],\n 'age': age}\n bot.memory['safety_cache'][trigger] = data\n if len(bot.memory['safety_cache']) > 1024:\n _clean_cache(bot)\n else:\n print('using cache')\n result = bot.memory['safety_cache'][trigger]\n positives = result['positives']\n total = result['total']\n except requests.exceptions.RequestException:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Error 
obtaining response.', exc_info=True)\n except InvalidJSONResponse:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Malformed response (invalid JSON).', exc_info=True)\n\n if unicode(netloc).lower() in malware_domains:\n # malwaredomains is more trustworthy than some VT engines\n # therefore it gets a weight of 10 engines when calculating confidence\n positives += 10\n total += 10\n\n if positives > 1:\n # Possibly malicious URL detected!\n confidence = '{}%'.format(round((positives / total) * 100))\n msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)\n msg += '(confidence %s - %s/%s)' % (confidence, positives, total)\n bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)\n if strict:\n bot.kick(trigger.nick, trigger.sender, 'Posted a malicious link')\n\n\[email protected]('safety')\ndef toggle_safety(bot, trigger):\n \"\"\"Set safety setting for channel\"\"\"\n if not trigger.admin and bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n bot.reply('Only channel operators can change safety settings')\n return\n allowed_states = ['strict', 'on', 'off', 'local', 'local strict']\n if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:\n options = ' / '.join(allowed_states)\n bot.reply('Available options: %s' % options)\n return\n\n channel = trigger.sender.lower()\n bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())\n bot.reply('Safety is now set to \"%s\" on this channel' % trigger.group(2))\n\n\n# Clean the cache every day\n# Code above also calls this if there are too many cache entries\[email protected](24 * 60 * 60)\ndef _clean_cache(bot):\n \"\"\"Cleans up old entries in URL cache\"\"\"\n # TODO: probably should use locks here, to make sure stuff doesn't explode\n oldest_key_age = 0\n oldest_key = ''\n for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):\n if data['age'] > oldest_key_age:\n oldest_key_age = data['age']\n oldest_key = key\n if oldest_key in bot.memory['safety_cache']:\n del bot.memory['safety_cache'][oldest_key]\n", "path": "sopel/modules/safety.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nsafety.py - Alerts about malicious URLs\nCopyright \u00a9 2014, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nThis module uses virustotal.com\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport logging\nimport os.path\nimport re\nimport sys\nimport threading\nimport time\n\nimport requests\n\nfrom sopel.config.types import StaticSection, ValidatedAttribute, ListAttribute\nfrom sopel.formatting import color, bold\nfrom sopel.module import OP\nimport sopel.tools\n\ntry:\n # This is done separately from the below version if/else because JSONDecodeError\n # didn't appear until Python 3.5, but Sopel claims support for 3.3+\n # Redo this whole block of nonsense when dropping py2/old py3 support\n from json import JSONDecodeError as InvalidJSONResponse\nexcept ImportError:\n InvalidJSONResponse = ValueError\n\nif sys.version_info.major > 2:\n unicode = str\n from urllib.request import urlretrieve\n from urllib.parse import urlparse\nelse:\n from urllib import urlretrieve\n from urlparse import urlparse\n\n\nLOGGER = logging.getLogger(__name__)\n\nvt_base_api_url = 'https://www.virustotal.com/vtapi/v2/url/'\nmalware_domains = set()\nknown_good = []\ncache_limit = 512\n\n\nclass SafetySection(StaticSection):\n enabled_by_default = 
ValidatedAttribute('enabled_by_default', bool, default=True)\n \"\"\"Whether to enable URL safety in all channels where it isn't explicitly disabled.\"\"\"\n known_good = ListAttribute('known_good')\n \"\"\"List of \"known good\" domains to ignore.\"\"\"\n vt_api_key = ValidatedAttribute('vt_api_key')\n \"\"\"Optional VirusTotal API key (improves malicious URL detection).\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | enabled\\\\_by\\\\_default | True | Enable URL safety in all channels where it isn't explicitly disabled. |\n | known\\\\_good | sopel.chat,dftba.net | List of \"known good\" domains to ignore. |\n | vt\\\\_api\\\\_key | 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef | Optional VirusTotal API key to improve malicious URL detection |\n \"\"\"\n config.define_section('safety', SafetySection)\n config.safety.configure_setting(\n 'enabled_by_default',\n \"Enable URL safety in channels that don't specifically disable it?\",\n )\n config.safety.configure_setting(\n 'known_good',\n 'Enter any domains to whitelist',\n )\n config.safety.configure_setting(\n 'vt_api_key',\n \"Optionally, enter a VirusTotal API key to improve malicious URL \"\n \"protection.\\nOtherwise, only the Malwarebytes DB will be used.\"\n )\n\n\ndef setup(bot):\n bot.config.define_section('safety', SafetySection)\n\n if 'safety_cache' not in bot.memory:\n bot.memory['safety_cache'] = sopel.tools.SopelMemory()\n if 'safety_cache_lock' not in bot.memory:\n bot.memory['safety_cache_lock'] = threading.Lock()\n for item in bot.config.safety.known_good:\n known_good.append(re.compile(item, re.I))\n\n loc = os.path.join(bot.config.homedir, 'malwaredomains.txt')\n if os.path.isfile(loc):\n if os.path.getmtime(loc) < time.time() - 24 * 60 * 60 * 7:\n # File exists but older than one week \u2014 update it\n _download_malwaredomains_db(loc)\n else:\n _download_malwaredomains_db(loc)\n with open(loc, 'r') as f:\n for line in f:\n clean_line = unicode(line).strip().lower()\n if clean_line != '':\n malware_domains.add(clean_line)\n\n\ndef shutdown(bot):\n bot.memory.pop('safety_cache', None)\n bot.memory.pop('safety_cache_lock', None)\n\n\ndef _download_malwaredomains_db(path):\n url = 'https://mirror1.malwaredomains.com/files/justdomains'\n LOGGER.info('Downloading malwaredomains db from %s', url)\n urlretrieve(url, path)\n\n\[email protected](r'(?u).*(https?://\\S+).*')\[email protected]('high')\ndef url_handler(bot, trigger):\n \"\"\"Checks for malicious URLs\"\"\"\n check = True # Enable URL checking\n strict = False # Strict mode: kick on malicious URL\n positives = 0 # Number of engines saying it's malicious\n total = 0 # Number of total engines\n use_vt = True # Use VirusTotal\n check = bot.config.safety.enabled_by_default\n if check is None:\n # If not set, assume default\n check = True\n # DB overrides config:\n setting = bot.db.get_channel_value(trigger.sender, 'safety')\n if setting is not None:\n if setting == 'off':\n return # Not checking\n elif setting in ['on', 'strict', 'local', 'local strict']:\n check = True\n if setting == 'strict' or setting == 'local strict':\n strict = True\n if setting == 'local' or setting == 'local strict':\n use_vt = False\n\n if not check:\n return # Not overridden by DB, configured default off\n\n try:\n netloc = urlparse(trigger.group(1)).netloc\n except ValueError:\n return # Invalid IPv6 URL\n\n if any(regex.search(netloc) for regex in known_good):\n return # Whitelisted\n\n apikey = 
bot.config.safety.vt_api_key\n try:\n if apikey is not None and use_vt:\n payload = {'resource': unicode(trigger),\n 'apikey': apikey,\n 'scan': '1'}\n\n if trigger not in bot.memory['safety_cache']:\n r = requests.post(vt_base_api_url + 'report', data=payload)\n r.raise_for_status()\n result = r.json()\n fetched = time.time()\n data = {'positives': result['positives'],\n 'total': result['total'],\n 'fetched': fetched}\n bot.memory['safety_cache'][trigger] = data\n if len(bot.memory['safety_cache']) >= (2 * cache_limit):\n _clean_cache(bot)\n else:\n print('using cache')\n result = bot.memory['safety_cache'][trigger]\n positives = result['positives']\n total = result['total']\n except requests.exceptions.RequestException:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Error obtaining response.', exc_info=True)\n except InvalidJSONResponse:\n # Ignoring exceptions with VT so MalwareDomains will always work\n LOGGER.debug('[VirusTotal] Malformed response (invalid JSON).', exc_info=True)\n\n if unicode(netloc).lower() in malware_domains:\n # malwaredomains is more trustworthy than some VT engines\n # therefore it gets a weight of 10 engines when calculating confidence\n positives += 10\n total += 10\n\n if positives > 1:\n # Possibly malicious URL detected!\n confidence = '{}%'.format(round((positives / total) * 100))\n msg = 'link posted by %s is possibly malicious ' % bold(trigger.nick)\n msg += '(confidence %s - %s/%s)' % (confidence, positives, total)\n bot.say('[' + bold(color('WARNING', 'red')) + '] ' + msg)\n if strict:\n bot.kick(trigger.nick, trigger.sender, 'Posted a malicious link')\n\n\[email protected]('safety')\ndef toggle_safety(bot, trigger):\n \"\"\"Set safety setting for channel\"\"\"\n if not trigger.admin and bot.channels[trigger.sender].privileges[trigger.nick] < OP:\n bot.reply('Only channel operators can change safety settings')\n return\n allowed_states = ['strict', 'on', 'off', 'local', 'local strict']\n if not trigger.group(2) or trigger.group(2).lower() not in allowed_states:\n options = ' / '.join(allowed_states)\n bot.reply('Available options: %s' % options)\n return\n\n channel = trigger.sender.lower()\n bot.db.set_channel_value(channel, 'safety', trigger.group(2).lower())\n bot.reply('Safety is now set to \"%s\" on this channel' % trigger.group(2))\n\n\n# Clean the cache every day\n# Code above also calls this if there are too many cache entries\[email protected](24 * 60 * 60)\ndef _clean_cache(bot):\n \"\"\"Cleans up old entries in URL safety cache.\"\"\"\n if bot.memory['safety_cache_lock'].acquire(False):\n LOGGER.info('Starting safety cache cleanup...')\n try:\n # clean up by age first\n cutoff = time.time() - (7 * 24 * 60 * 60) # 7 days ago\n old_keys = []\n for key, data in sopel.tools.iteritems(bot.memory['safety_cache']):\n if data['fetched'] <= cutoff:\n old_keys.append(key)\n for key in old_keys:\n bot.memory['safety_cache'].pop(key, None)\n\n # clean up more values if the cache is still too big\n overage = bot.memory['safety_cache'] - cache_limit\n if overage > 0:\n extra_keys = sorted(\n (data.fetched, key)\n for (key, data)\n in bot.memory['safety_cache'].items())[:overage]\n for (_, key) in extra_keys:\n bot.memory['safety_cache'].pop(key, None)\n finally:\n # No matter what errors happen (or not), release the lock\n bot.memory['safety_cache_lock'].release()\n\n LOGGER.info('Safety cache cleanup finished.')\n else:\n LOGGER.info(\n 'Skipping safety cache cleanup: Cache is locked, '\n 'cleanup already 
running.')\n", "path": "sopel/modules/safety.py"}]}
| 3,209 | 961 |
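The diff in this record also guards the cleanup with a non-blocking lock acquire, so an overlapping run is skipped rather than queued. A generic sketch of that pattern, independent of Sopel's `bot.memory` structure (which is only assumed here for context), might look like:

```python
import threading

_cleanup_lock = threading.Lock()

def run_exclusive(task):
    """Run task() only if no other run holds the lock; otherwise skip this cycle."""
    if not _cleanup_lock.acquire(blocking=False):
        return False  # cleanup already in progress somewhere else
    try:
        task()
    finally:
        _cleanup_lock.release()
    return True
```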
gh_patches_debug_4566
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-775
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ZVA-SEK has been added. Thanks, great job. But there is a problem.
First of all: Thanks to 5ila5 for doing the job. That's awesome.
But all entries are one day ahead.
I created the entries using the "static" configuration, and compared to the import there is a one-day "shift".
Maybe because there is an alarm in the ICS file?
Or am I doing something wrong?

Greetings, Holger.
PS.: @5ila5: whenever you are in the area of Kassel, send me a note, and I will show you the finest brewery in the area (beers on me).
sources:
- name: static
calendar_title: Papier
args:
type: Altpapier
frequency: WEEKLY
interval: 4
start: '2023-01-16'
until: '2023-12-18'
excludes:
- '2023-04-10'
dates:
- '2023-04-15'
- '2023-03-01'
- name: static
calendar_title: Bio
args:
type: Biomuell
frequency: WEEKLY
interval: 2
start: '2023-01-06'
until: '2023-12-22'
excludes:
- '2023-09-29'
dates:
- '2023-09-23'
- name: static
calendar_title: Rest
args:
type: Restmuell
frequency: WEEKLY
interval: 3
start: '2023-01-17'
until: '2023-12-19'
excludes:
- '2023-09-29'
dates:
- '2023-09-23'
- name: static
calendar_title: Gelb
args:
type: Gelbe
frequency: WEEKLY
interval: 4
start: '2023-01-18'
until: '2023-12-20'
- name: zva_sek_de
calendar_title: "ZVA"
args:
bezirk: "Felsberg"
ortsteil: "Felsberg"
--- END ISSUE ---
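One way to check the "one day ahead" theory is to inspect the raw ICS the servlet returns and print each event's DTSTART, whether it is a date or a datetime, and whether a VALARM is attached. A hypothetical diagnostic using the `icalendar` package (not part of the integration itself) could look like:

```python
from icalendar import Calendar

def dump_ics(ics_text):
    """Print summary, start value, start type, and alarm count for every event."""
    cal = Calendar.from_ical(ics_text)
    for event in cal.walk("VEVENT"):
        start = event.get("dtstart").dt  # date for all-day events, datetime otherwise
        alarms = [c for c in event.subcomponents if c.name == "VALARM"]
        print(event.get("summary"), start, type(start).__name__, f"alarms={len(alarms)}")
```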
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py`
Content:
```
1 import re
2 import requests
3 from datetime import datetime
4
5 from bs4 import BeautifulSoup
6
7 from waste_collection_schedule import Collection # type: ignore[attr-defined]
8 from waste_collection_schedule.service.ICS import ICS
9
10 TITLE = "Zweckverband Abfallwirtschaft Schwalm-Eder-Kreis"
11 DESCRIPTION = "Source for ZVA (Zweckverband Abfallwirtschaft Schwalm-Eder-Kreis)."
12 URL = "https://www.zva-sek.de"
13 TEST_CASES = {
14 "Fritzlar": {
15 "bezirk": "Fritzlar",
16 "ortsteil": "Fritzlar-kernstadt",
17 "strasse": "Ahornweg",
18 },
19 "Ottrau": {
20 "bezirk": "Ottrau",
21 "ortsteil": "immichenhain",
22 "strasse": "",
23 },
24 "Knüllwald": {
25 "bezirk": "Knüllwald",
26 "ortsteil": "Hergetsfeld",
27 },
28 }
29 SERVLET = (
30 "https://www.zva-sek.de/module/abfallkalender/generate_ical.php"
31 )
32 MAIN_URL = "https://www.zva-sek.de/online-dienste/abfallkalender-{year}/{file}"
33 API_URL = "https://www.zva-sek.de/module/abfallkalender/{file}"
34
35
36 class Source:
37 def __init__(
38 self, bezirk: str, ortsteil: str, strasse: str = None
39 ):
40 self._bezirk = bezirk
41 self._ortsteil = ortsteil
42 self._street = strasse if strasse != "" else None
43 self._ics = ICS()
44
45 def fetch(self):
46 session = requests.session()
47 year = datetime.now().year
48
49 bezirk_id = None
50 ortsteil_id = None
51
52 # get bezirke id
53 r = session.get(MAIN_URL.format(
54 year=year, file=f"abfallkalender-{year}.html"))
55 if (r.status_code == 404): # try last year URL if this year is not available
56 r = session.get(MAIN_URL.format(
57 year=year, file=f"abfallkalender-{year-1}.html"))
58 r.raise_for_status()
59
60 soup = BeautifulSoup(r.text, features="html.parser")
61 for option in soup.find("select", {"name": "ak_bezirk"}).find_all("option"):
62 if option.text.lower() == self._bezirk.lower():
63 self._bezirk = option.get("value")
64 bezirk_id = option.get("value")
65 break
66
67 if not bezirk_id:
68 raise Exception(f"bezirk not found")
69
70 # get ortsteil id
71 r = session.get(API_URL.format(
72 file="get_ortsteile.php"), params={"bez_id": bezirk_id})
73 r.raise_for_status()
74 last_orts_id = None
75 for part in r.text.split(";")[2:-1]:
76 # part is "f.ak_ortsteil.options[5].text = 'Alte Kasseler Straße'" or "ak_ortsteil.options[6].value = '2'"
77 if ("length" in part):
78 continue
79 if part.split(" = ")[1][1:-1].lower() == self._ortsteil.lower():
80 ortsteil_id = last_orts_id
81 break
82 last_orts_id = part.split(" = ")[1][1:-1]
83
84 if not ortsteil_id:
85 raise Exception(f"ortsteil not found")
86
87 street_id = None
88
89 # get street id if steet given
90 if self._street is not None:
91 r = session.get(API_URL.format(
92 file="get_strassen.php"), params={"ot_id": ortsteil_id.split("-")[0]})
93 r.raise_for_status()
94 last_street_id = None
95 for part in r.text.split(";")[2:-1]:
96 # part is "f.ak_strasse.options[5].text = 'Alte Kasseler Straße'" or "ak_strasse.options[6].value = '2'"
97 if ("length" in part):
98 continue
99 if part.split(" = ")[1][1:-1].lower() == self._street.lower():
100 street_id = last_street_id
101 break
102 last_street_id = part.split(" = ")[1][1:-1]
103
104 if not street_id:
105 raise Exception(f"street not found")
106
107 args = {
108 "year": str(year),
109 "ak_bezirk": bezirk_id,
110 "ak_ortsteil": ortsteil_id,
111 "alle_arten": "",
112 }
113 if self._street is not None:
114 args["ak_strasse"] = street_id
115
116 r = session.post(
117 SERVLET,
118 data=args,
119 )
120
121 r.raise_for_status()
122 dates = self._ics.convert(r.text)
123
124 entries = []
125 for d in dates:
126 entries.append(Collection(d[0], re.sub(
127 "[ ]*am [0-9]+.[0-9]+.[0-9]+[ ]*", "", d[1])))
128 return entries
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py
@@ -109,6 +109,8 @@
"ak_bezirk": bezirk_id,
"ak_ortsteil": ortsteil_id,
"alle_arten": "",
+ "iCalEnde": 6,
+ "iCalBeginn": 17,
}
if self._street is not None:
args["ak_strasse"] = street_id
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py\n@@ -109,6 +109,8 @@\n \"ak_bezirk\": bezirk_id,\n \"ak_ortsteil\": ortsteil_id,\n \"alle_arten\": \"\",\n+ \"iCalEnde\": 6,\n+ \"iCalBeginn\": 17,\n }\n if self._street is not None:\n args[\"ak_strasse\"] = street_id\n", "issue": "ZVA-SEK has been added. Thanks, great job. But there is a problem.\nFirst of all: Thanks to 5ila5 for doing the job. Thats awesome.\r\n\r\nBut all entries are one day ahead.\r\nI created the entries using the \"static\" configuration, and compared to the import there is one day \"shift\".\r\nMaybe because there is an alarm in the ICS-file?\r\nOr am i doing something wrong?\r\n\r\n\r\n\r\nGreetings, Holger.\r\n\r\nPS.: @5ila5: whenever you are in the area of Kassel, send me a note, and i will show you the finest brewery in the area (beers on me).\r\n\r\n sources:\r\n - name: static\r\n calendar_title: Papier\r\n args:\r\n type: Altpapier\r\n frequency: WEEKLY\r\n interval: 4\r\n start: '2023-01-16'\r\n until: '2023-12-18'\r\n excludes:\r\n - '2023-04-10'\r\n dates:\r\n - '2023-04-15'\r\n - '2023-03-01'\r\n - name: static\r\n calendar_title: Bio\r\n args:\r\n type: Biomuell\r\n frequency: WEEKLY\r\n interval: 2\r\n start: '2023-01-06'\r\n until: '2023-12-22'\r\n excludes:\r\n - '2023-09-29'\r\n dates:\r\n - '2023-09-23'\r\n - name: static\r\n calendar_title: Rest\r\n args:\r\n type: Restmuell\r\n frequency: WEEKLY\r\n interval: 3\r\n start: '2023-01-17'\r\n until: '2023-12-19'\r\n excludes:\r\n - '2023-09-29'\r\n dates:\r\n - '2023-09-23'\r\n - name: static\r\n calendar_title: Gelb\r\n args:\r\n type: Gelbe\r\n frequency: WEEKLY\r\n interval: 4\r\n start: '2023-01-18'\r\n until: '2023-12-20'\r\n - name: zva_sek_de\r\n calendar_title: \"ZVA\"\r\n args:\r\n bezirk: \"Felsberg\"\r\n ortsteil: \"Felsberg\"\r\n\n", "before_files": [{"content": "import re\nimport requests\nfrom datetime import datetime\n\nfrom bs4 import BeautifulSoup\n\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Zweckverband Abfallwirtschaft Schwalm-Eder-Kreis\"\nDESCRIPTION = \"Source for ZVA (Zweckverband Abfallwirtschaft Schwalm-Eder-Kreis).\"\nURL = \"https://www.zva-sek.de\"\nTEST_CASES = {\n \"Fritzlar\": {\n \"bezirk\": \"Fritzlar\",\n \"ortsteil\": \"Fritzlar-kernstadt\",\n \"strasse\": \"Ahornweg\",\n },\n \"Ottrau\": {\n \"bezirk\": \"Ottrau\",\n \"ortsteil\": \"immichenhain\",\n \"strasse\": \"\",\n },\n \"Kn\u00fcllwald\": {\n \"bezirk\": \"Kn\u00fcllwald\",\n \"ortsteil\": \"Hergetsfeld\",\n },\n}\nSERVLET = (\n \"https://www.zva-sek.de/module/abfallkalender/generate_ical.php\"\n)\nMAIN_URL = \"https://www.zva-sek.de/online-dienste/abfallkalender-{year}/{file}\"\nAPI_URL = \"https://www.zva-sek.de/module/abfallkalender/{file}\"\n\n\nclass Source:\n def __init__(\n self, bezirk: str, ortsteil: str, strasse: str = None\n ):\n self._bezirk = bezirk\n self._ortsteil = ortsteil\n self._street = strasse if strasse != \"\" else None\n self._ics = ICS()\n\n def fetch(self):\n session = requests.session()\n year = datetime.now().year\n\n bezirk_id = None\n ortsteil_id = None\n\n # get bezirke 
id\n r = session.get(MAIN_URL.format(\n year=year, file=f\"abfallkalender-{year}.html\"))\n if (r.status_code == 404): # try last year URL if this year is not available\n r = session.get(MAIN_URL.format(\n year=year, file=f\"abfallkalender-{year-1}.html\"))\n r.raise_for_status()\n \n soup = BeautifulSoup(r.text, features=\"html.parser\")\n for option in soup.find(\"select\", {\"name\": \"ak_bezirk\"}).find_all(\"option\"):\n if option.text.lower() == self._bezirk.lower():\n self._bezirk = option.get(\"value\")\n bezirk_id = option.get(\"value\")\n break\n\n if not bezirk_id:\n raise Exception(f\"bezirk not found\")\n\n # get ortsteil id\n r = session.get(API_URL.format(\n file=\"get_ortsteile.php\"), params={\"bez_id\": bezirk_id})\n r.raise_for_status()\n last_orts_id = None\n for part in r.text.split(\";\")[2:-1]:\n # part is \"f.ak_ortsteil.options[5].text = 'Alte Kasseler Stra\u00dfe'\" or \"ak_ortsteil.options[6].value = '2'\"\n if (\"length\" in part):\n continue\n if part.split(\" = \")[1][1:-1].lower() == self._ortsteil.lower():\n ortsteil_id = last_orts_id\n break\n last_orts_id = part.split(\" = \")[1][1:-1]\n\n if not ortsteil_id:\n raise Exception(f\"ortsteil not found\")\n\n street_id = None\n\n # get street id if steet given\n if self._street is not None:\n r = session.get(API_URL.format(\n file=\"get_strassen.php\"), params={\"ot_id\": ortsteil_id.split(\"-\")[0]})\n r.raise_for_status()\n last_street_id = None\n for part in r.text.split(\";\")[2:-1]: \n # part is \"f.ak_strasse.options[5].text = 'Alte Kasseler Stra\u00dfe'\" or \"ak_strasse.options[6].value = '2'\"\n if (\"length\" in part):\n continue\n if part.split(\" = \")[1][1:-1].lower() == self._street.lower():\n street_id = last_street_id\n break\n last_street_id = part.split(\" = \")[1][1:-1]\n\n if not street_id:\n raise Exception(f\"street not found\")\n\n args = {\n \"year\": str(year),\n \"ak_bezirk\": bezirk_id,\n \"ak_ortsteil\": ortsteil_id,\n \"alle_arten\": \"\",\n }\n if self._street is not None:\n args[\"ak_strasse\"] = street_id\n\n r = session.post(\n SERVLET,\n data=args,\n )\n\n r.raise_for_status()\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], re.sub(\n \"[ ]*am [0-9]+.[0-9]+.[0-9]+[ ]*\", \"\", d[1])))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py"}], "after_files": [{"content": "import re\nimport requests\nfrom datetime import datetime\n\nfrom bs4 import BeautifulSoup\n\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Zweckverband Abfallwirtschaft Schwalm-Eder-Kreis\"\nDESCRIPTION = \"Source for ZVA (Zweckverband Abfallwirtschaft Schwalm-Eder-Kreis).\"\nURL = \"https://www.zva-sek.de\"\nTEST_CASES = {\n \"Fritzlar\": {\n \"bezirk\": \"Fritzlar\",\n \"ortsteil\": \"Fritzlar-kernstadt\",\n \"strasse\": \"Ahornweg\",\n },\n \"Ottrau\": {\n \"bezirk\": \"Ottrau\",\n \"ortsteil\": \"immichenhain\",\n \"strasse\": \"\",\n },\n \"Kn\u00fcllwald\": {\n \"bezirk\": \"Kn\u00fcllwald\",\n \"ortsteil\": \"Hergetsfeld\",\n },\n}\nSERVLET = (\n \"https://www.zva-sek.de/module/abfallkalender/generate_ical.php\"\n)\nMAIN_URL = \"https://www.zva-sek.de/online-dienste/abfallkalender-{year}/{file}\"\nAPI_URL = \"https://www.zva-sek.de/module/abfallkalender/{file}\"\n\n\nclass Source:\n def __init__(\n self, bezirk: str, ortsteil: str, strasse: str = None\n ):\n self._bezirk = bezirk\n 
self._ortsteil = ortsteil\n self._street = strasse if strasse != \"\" else None\n self._ics = ICS()\n\n def fetch(self):\n session = requests.session()\n year = datetime.now().year\n\n bezirk_id = None\n ortsteil_id = None\n\n # get bezirke id\n r = session.get(MAIN_URL.format(\n year=year, file=f\"abfallkalender-{year}.html\"))\n if (r.status_code == 404): # try last year URL if this year is not available\n r = session.get(MAIN_URL.format(\n year=year, file=f\"abfallkalender-{year-1}.html\"))\n r.raise_for_status()\n \n soup = BeautifulSoup(r.text, features=\"html.parser\")\n for option in soup.find(\"select\", {\"name\": \"ak_bezirk\"}).find_all(\"option\"):\n if option.text.lower() == self._bezirk.lower():\n self._bezirk = option.get(\"value\")\n bezirk_id = option.get(\"value\")\n break\n\n if not bezirk_id:\n raise Exception(f\"bezirk not found\")\n\n # get ortsteil id\n r = session.get(API_URL.format(\n file=\"get_ortsteile.php\"), params={\"bez_id\": bezirk_id})\n r.raise_for_status()\n last_orts_id = None\n for part in r.text.split(\";\")[2:-1]:\n # part is \"f.ak_ortsteil.options[5].text = 'Alte Kasseler Stra\u00dfe'\" or \"ak_ortsteil.options[6].value = '2'\"\n if (\"length\" in part):\n continue\n if part.split(\" = \")[1][1:-1].lower() == self._ortsteil.lower():\n ortsteil_id = last_orts_id\n break\n last_orts_id = part.split(\" = \")[1][1:-1]\n\n if not ortsteil_id:\n raise Exception(f\"ortsteil not found\")\n\n street_id = None\n\n # get street id if steet given\n if self._street is not None:\n r = session.get(API_URL.format(\n file=\"get_strassen.php\"), params={\"ot_id\": ortsteil_id.split(\"-\")[0]})\n r.raise_for_status()\n last_street_id = None\n for part in r.text.split(\";\")[2:-1]: \n # part is \"f.ak_strasse.options[5].text = 'Alte Kasseler Stra\u00dfe'\" or \"ak_strasse.options[6].value = '2'\"\n if (\"length\" in part):\n continue\n if part.split(\" = \")[1][1:-1].lower() == self._street.lower():\n street_id = last_street_id\n break\n last_street_id = part.split(\" = \")[1][1:-1]\n\n if not street_id:\n raise Exception(f\"street not found\")\n\n args = {\n \"year\": str(year),\n \"ak_bezirk\": bezirk_id,\n \"ak_ortsteil\": ortsteil_id,\n \"alle_arten\": \"\",\n \"iCalEnde\": 6,\n \"iCalBeginn\": 17,\n }\n if self._street is not None:\n args[\"ak_strasse\"] = street_id\n\n r = session.post(\n SERVLET,\n data=args,\n )\n\n r.raise_for_status()\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], re.sub(\n \"[ ]*am [0-9]+.[0-9]+.[0-9]+[ ]*\", \"\", d[1])))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/zva_sek_de.py"}]}
| 2,306 | 170 |
gh_patches_debug_2811
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-4707
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rules/participate in project
As you can see in the test, the participate_project rule behaves a bit weird for project group members. I think they should also be allowed to participate. The question is what it is used for.
Cool! The participate_project rule is a bit unexpected, so we should check that out. Like where it is used and what for. But anyway, will merge for now and add an issue.
_Originally posted by @fuzzylogic2000 in https://github.com/liqd/a4-meinberlin/pull/4077#pullrequestreview-837466549_
--- END ISSUE ---
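For reference, django-rules predicates compose with `|` and `&`, so widening a permission to another role is a one-predicate change. A generic, hypothetical sketch outside the adhocracy4 codebase, showing how such a composed rule can be exercised directly:

```python
import rules

# Toy predicates standing in for is_project_member / is_prj_group_member.
is_member = rules.predicate(lambda user, obj: getattr(user, "is_member", False))
is_group_member = rules.predicate(lambda user, obj: getattr(user, "is_group_member", False))

rules.add_perm("demo.participate", is_member | is_group_member)

class User:
    def __init__(self, **flags):
        self.__dict__.update(flags)

# A group member should now pass the composed permission check.
assert rules.has_perm("demo.participate", User(is_group_member=True), object())
```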
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/projects/rules.py`
Content:
```
1 import rules
2 from rules.predicates import is_superuser
3
4 from adhocracy4.organisations.predicates import is_initiator
5 from adhocracy4.projects.predicates import is_live
6 from adhocracy4.projects.predicates import is_moderator
7 from adhocracy4.projects.predicates import is_prj_group_member
8 from adhocracy4.projects.predicates import is_project_member
9 from adhocracy4.projects.predicates import is_public
10 from adhocracy4.projects.predicates import is_semipublic
11
12 rules.remove_perm('a4projects.view_project')
13 rules.add_perm('a4projects.view_project',
14 is_superuser | is_initiator |
15 is_moderator | is_prj_group_member |
16 ((is_public | is_semipublic | is_project_member)
17 & is_live))
18
19 rules.set_perm('a4projects.participate_in_project',
20 is_superuser | is_initiator | is_moderator |
21 ((is_public | is_project_member) & is_live))
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/projects/rules.py b/meinberlin/apps/projects/rules.py
--- a/meinberlin/apps/projects/rules.py
+++ b/meinberlin/apps/projects/rules.py
@@ -17,5 +17,6 @@
& is_live))
rules.set_perm('a4projects.participate_in_project',
- is_superuser | is_initiator | is_moderator |
+ is_superuser | is_initiator |
+ is_moderator | is_prj_group_member |
((is_public | is_project_member) & is_live))
|
{"golden_diff": "diff --git a/meinberlin/apps/projects/rules.py b/meinberlin/apps/projects/rules.py\n--- a/meinberlin/apps/projects/rules.py\n+++ b/meinberlin/apps/projects/rules.py\n@@ -17,5 +17,6 @@\n & is_live))\n \n rules.set_perm('a4projects.participate_in_project',\n- is_superuser | is_initiator | is_moderator |\n+ is_superuser | is_initiator |\n+ is_moderator | is_prj_group_member |\n ((is_public | is_project_member) & is_live))\n", "issue": "rules/participate in project\nAs you can see in the test, the paricipate_project rule behaves a bit weird for project group members. I think, they should also be allowed to participate. The question is what it is used for.\r\n\r\nCool! The participate_project rule is a bit unexpected, so we should check that out. Like where it is used and what for. But anyway, will merge for now and add an issue.\r\n\r\n_Originally posted by @fuzzylogic2000 in https://github.com/liqd/a4-meinberlin/pull/4077#pullrequestreview-837466549_\n", "before_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.organisations.predicates import is_initiator\nfrom adhocracy4.projects.predicates import is_live\nfrom adhocracy4.projects.predicates import is_moderator\nfrom adhocracy4.projects.predicates import is_prj_group_member\nfrom adhocracy4.projects.predicates import is_project_member\nfrom adhocracy4.projects.predicates import is_public\nfrom adhocracy4.projects.predicates import is_semipublic\n\nrules.remove_perm('a4projects.view_project')\nrules.add_perm('a4projects.view_project',\n is_superuser | is_initiator |\n is_moderator | is_prj_group_member |\n ((is_public | is_semipublic | is_project_member)\n & is_live))\n\nrules.set_perm('a4projects.participate_in_project',\n is_superuser | is_initiator | is_moderator |\n ((is_public | is_project_member) & is_live))\n", "path": "meinberlin/apps/projects/rules.py"}], "after_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.organisations.predicates import is_initiator\nfrom adhocracy4.projects.predicates import is_live\nfrom adhocracy4.projects.predicates import is_moderator\nfrom adhocracy4.projects.predicates import is_prj_group_member\nfrom adhocracy4.projects.predicates import is_project_member\nfrom adhocracy4.projects.predicates import is_public\nfrom adhocracy4.projects.predicates import is_semipublic\n\nrules.remove_perm('a4projects.view_project')\nrules.add_perm('a4projects.view_project',\n is_superuser | is_initiator |\n is_moderator | is_prj_group_member |\n ((is_public | is_semipublic | is_project_member)\n & is_live))\n\nrules.set_perm('a4projects.participate_in_project',\n is_superuser | is_initiator |\n is_moderator | is_prj_group_member |\n ((is_public | is_project_member) & is_live))\n", "path": "meinberlin/apps/projects/rules.py"}]}
| 639 | 124 |
gh_patches_debug_28703
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement InMemoryMetricExporter
See [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk_exporters/in-memory.md). This will be great for testing.
IMO this should be a "pull exporter" (metric reader atm) that has a method `get_metrics()` or similar to return metrics from the SDK.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import os
17 from abc import ABC, abstractmethod
18 from enum import Enum
19 from os import environ, linesep
20 from sys import stdout
21 from threading import Event, Thread
22 from typing import IO, Callable, Iterable, Optional, Sequence
23
24 from opentelemetry.context import (
25 _SUPPRESS_INSTRUMENTATION_KEY,
26 attach,
27 detach,
28 set_value,
29 )
30 from opentelemetry.sdk._metrics.metric_reader import MetricReader
31 from opentelemetry.sdk._metrics.point import AggregationTemporality, Metric
32 from opentelemetry.util._once import Once
33
34 _logger = logging.getLogger(__name__)
35
36
37 class MetricExportResult(Enum):
38 SUCCESS = 0
39 FAILURE = 1
40
41
42 class MetricExporter(ABC):
43 """Interface for exporting metrics.
44
45 Interface to be implemented by services that want to export metrics received
46 in their own format.
47 """
48
49 @property
50 def preferred_temporality(self) -> AggregationTemporality:
51 return AggregationTemporality.CUMULATIVE
52
53 @abstractmethod
54 def export(self, metrics: Sequence[Metric]) -> "MetricExportResult":
55 """Exports a batch of telemetry data.
56
57 Args:
58 metrics: The list of `opentelemetry.sdk._metrics.data.MetricData` objects to be exported
59
60 Returns:
61 The result of the export
62 """
63
64 @abstractmethod
65 def shutdown(self) -> None:
66 """Shuts down the exporter.
67
68 Called when the SDK is shut down.
69 """
70
71
72 class ConsoleMetricExporter(MetricExporter):
73 """Implementation of :class:`MetricExporter` that prints metrics to the
74 console.
75
76 This class can be used for diagnostic purposes. It prints the exported
77 metrics to the console STDOUT.
78 """
79
80 def __init__(
81 self,
82 out: IO = stdout,
83 formatter: Callable[[Metric], str] = lambda metric: metric.to_json()
84 + linesep,
85 ):
86 self.out = out
87 self.formatter = formatter
88
89 def export(self, metrics: Sequence[Metric]) -> MetricExportResult:
90 for metric in metrics:
91 self.out.write(self.formatter(metric))
92 self.out.flush()
93 return MetricExportResult.SUCCESS
94
95 def shutdown(self) -> None:
96 pass
97
98
99 class PeriodicExportingMetricReader(MetricReader):
100 """`PeriodicExportingMetricReader` is an implementation of `MetricReader`
101 that collects metrics based on a user-configurable time interval, and passes the
102 metrics to the configured exporter.
103 """
104
105 def __init__(
106 self,
107 exporter: MetricExporter,
108 export_interval_millis: Optional[float] = None,
109 export_timeout_millis: Optional[float] = None,
110 ) -> None:
111 super().__init__(preferred_temporality=exporter.preferred_temporality)
112 self._exporter = exporter
113 if export_interval_millis is None:
114 try:
115 export_interval_millis = float(
116 environ.get("OTEL_METRIC_EXPORT_INTERVAL", 60000)
117 )
118 except ValueError:
119 _logger.warning(
120 "Found invalid value for export interval, using default"
121 )
122 export_interval_millis = 60000
123 if export_timeout_millis is None:
124 try:
125 export_timeout_millis = float(
126 environ.get("OTEL_METRIC_EXPORT_TIMEOUT", 30000)
127 )
128 except ValueError:
129 _logger.warning(
130 "Found invalid value for export timeout, using default"
131 )
132 export_timeout_millis = 30000
133 self._export_interval_millis = export_interval_millis
134 self._export_timeout_millis = export_timeout_millis
135 self._shutdown = False
136 self._shutdown_event = Event()
137 self._shutdown_once = Once()
138 self._daemon_thread = Thread(target=self._ticker, daemon=True)
139 self._daemon_thread.start()
140 if hasattr(os, "register_at_fork"):
141 os.register_at_fork(
142 after_in_child=self._at_fork_reinit
143 ) # pylint: disable=protected-access
144
145 def _at_fork_reinit(self):
146 self._daemon_thread = Thread(target=self._ticker, daemon=True)
147 self._daemon_thread.start()
148
149 def _ticker(self) -> None:
150 interval_secs = self._export_interval_millis / 1e3
151 while not self._shutdown_event.wait(interval_secs):
152 self.collect()
153 # one last collection below before shutting down completely
154 self.collect()
155
156 def _receive_metrics(self, metrics: Iterable[Metric]) -> None:
157 if metrics is None:
158 return
159 token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
160 try:
161 self._exporter.export(metrics)
162 except Exception as e: # pylint: disable=broad-except,invalid-name
163 _logger.exception("Exception while exporting metrics %s", str(e))
164 detach(token)
165
166 def shutdown(self) -> bool:
167 def _shutdown():
168 self._shutdown = True
169
170 did_set = self._shutdown_once.do_once(_shutdown)
171 if not did_set:
172 _logger.warning("Can't shutdown multiple times")
173 return False
174
175 self._shutdown_event.set()
176 self._daemon_thread.join()
177 self._exporter.shutdown()
178 return True
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py
@@ -18,8 +18,8 @@
from enum import Enum
from os import environ, linesep
from sys import stdout
-from threading import Event, Thread
-from typing import IO, Callable, Iterable, Optional, Sequence
+from threading import Event, RLock, Thread
+from typing import IO, Callable, Iterable, List, Optional, Sequence
from opentelemetry.context import (
_SUPPRESS_INSTRUMENTATION_KEY,
@@ -96,6 +96,36 @@
pass
+class InMemoryMetricReader(MetricReader):
+ """Implementation of :class:`MetricReader` that returns its metrics from :func:`metrics`.
+
+ This is useful for e.g. unit tests.
+ """
+
+ def __init__(
+ self,
+ preferred_temporality: AggregationTemporality = AggregationTemporality.CUMULATIVE,
+ ) -> None:
+ super().__init__(preferred_temporality=preferred_temporality)
+ self._lock = RLock()
+ self._metrics: List[Metric] = []
+
+ def get_metrics(self) -> List[Metric]:
+ """Reads and returns current metrics from the SDK"""
+ with self._lock:
+ self.collect()
+ metrics = self._metrics
+ self._metrics = []
+ return metrics
+
+ def _receive_metrics(self, metrics: Iterable[Metric]):
+ with self._lock:
+ self._metrics = list(metrics)
+
+ def shutdown(self) -> bool:
+ return True
+
+
class PeriodicExportingMetricReader(MetricReader):
"""`PeriodicExportingMetricReader` is an implementation of `MetricReader`
that collects metrics based on a user-configurable time interval, and passes the
|
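The patch above adds an `InMemoryMetricReader` intended for unit tests. A minimal sketch of how such a reader might be wired up under the experimental `opentelemetry.sdk._metrics` package of that era follows; the import paths and the `metric_readers` keyword argument are assumptions that changed in later SDK releases, so treat this as illustrative rather than a verified recipe.

```python
# Hedged sketch: exercising an in-memory reader like the one added in the patch above.
# Assumes the experimental `opentelemetry.sdk._metrics` package (pre-1.12 SDK);
# module paths and the `metric_readers` kwarg may differ in other versions.
from opentelemetry.sdk._metrics import MeterProvider
from opentelemetry.sdk._metrics.export import InMemoryMetricReader

reader = InMemoryMetricReader()
provider = MeterProvider(metric_readers=[reader])
meter = provider.get_meter("inmemory-example")

counter = meter.create_counter("requests")
counter.add(1, {"route": "/health"})

# get_metrics() triggers a collection and drains the stored points,
# so a test can assert on exactly what was recorded.
for metric in reader.get_metrics():
    print(metric.to_json())
```

Note that, per the patch, `get_metrics()` empties the internal buffer on each call, so a test would normally capture the returned list once and assert on it rather than printing.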
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py\n@@ -18,8 +18,8 @@\n from enum import Enum\n from os import environ, linesep\n from sys import stdout\n-from threading import Event, Thread\n-from typing import IO, Callable, Iterable, Optional, Sequence\n+from threading import Event, RLock, Thread\n+from typing import IO, Callable, Iterable, List, Optional, Sequence\n \n from opentelemetry.context import (\n _SUPPRESS_INSTRUMENTATION_KEY,\n@@ -96,6 +96,36 @@\n pass\n \n \n+class InMemoryMetricReader(MetricReader):\n+ \"\"\"Implementation of :class:`MetricReader` that returns its metrics from :func:`metrics`.\n+\n+ This is useful for e.g. unit tests.\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ preferred_temporality: AggregationTemporality = AggregationTemporality.CUMULATIVE,\n+ ) -> None:\n+ super().__init__(preferred_temporality=preferred_temporality)\n+ self._lock = RLock()\n+ self._metrics: List[Metric] = []\n+\n+ def get_metrics(self) -> List[Metric]:\n+ \"\"\"Reads and returns current metrics from the SDK\"\"\"\n+ with self._lock:\n+ self.collect()\n+ metrics = self._metrics\n+ self._metrics = []\n+ return metrics\n+\n+ def _receive_metrics(self, metrics: Iterable[Metric]):\n+ with self._lock:\n+ self._metrics = list(metrics)\n+\n+ def shutdown(self) -> bool:\n+ return True\n+\n+\n class PeriodicExportingMetricReader(MetricReader):\n \"\"\"`PeriodicExportingMetricReader` is an implementation of `MetricReader`\n that collects metrics based on a user-configurable time interval, and passes the\n", "issue": "Implement InMemoryMetricExporter\nSee [spec](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk_exporters/in-memory.md). 
This will be great for testing.\r\n\r\nIMO this should be a \"pull exporter\" (metric reader atm) that has a method `get_metrics()` or similar to return metrics from the SDK.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom os import environ, linesep\nfrom sys import stdout\nfrom threading import Event, Thread\nfrom typing import IO, Callable, Iterable, Optional, Sequence\n\nfrom opentelemetry.context import (\n _SUPPRESS_INSTRUMENTATION_KEY,\n attach,\n detach,\n set_value,\n)\nfrom opentelemetry.sdk._metrics.metric_reader import MetricReader\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.util._once import Once\n\n_logger = logging.getLogger(__name__)\n\n\nclass MetricExportResult(Enum):\n SUCCESS = 0\n FAILURE = 1\n\n\nclass MetricExporter(ABC):\n \"\"\"Interface for exporting metrics.\n\n Interface to be implemented by services that want to export metrics received\n in their own format.\n \"\"\"\n\n @property\n def preferred_temporality(self) -> AggregationTemporality:\n return AggregationTemporality.CUMULATIVE\n\n @abstractmethod\n def export(self, metrics: Sequence[Metric]) -> \"MetricExportResult\":\n \"\"\"Exports a batch of telemetry data.\n\n Args:\n metrics: The list of `opentelemetry.sdk._metrics.data.MetricData` objects to be exported\n\n Returns:\n The result of the export\n \"\"\"\n\n @abstractmethod\n def shutdown(self) -> None:\n \"\"\"Shuts down the exporter.\n\n Called when the SDK is shut down.\n \"\"\"\n\n\nclass ConsoleMetricExporter(MetricExporter):\n \"\"\"Implementation of :class:`MetricExporter` that prints metrics to the\n console.\n\n This class can be used for diagnostic purposes. 
It prints the exported\n metrics to the console STDOUT.\n \"\"\"\n\n def __init__(\n self,\n out: IO = stdout,\n formatter: Callable[[Metric], str] = lambda metric: metric.to_json()\n + linesep,\n ):\n self.out = out\n self.formatter = formatter\n\n def export(self, metrics: Sequence[Metric]) -> MetricExportResult:\n for metric in metrics:\n self.out.write(self.formatter(metric))\n self.out.flush()\n return MetricExportResult.SUCCESS\n\n def shutdown(self) -> None:\n pass\n\n\nclass PeriodicExportingMetricReader(MetricReader):\n \"\"\"`PeriodicExportingMetricReader` is an implementation of `MetricReader`\n that collects metrics based on a user-configurable time interval, and passes the\n metrics to the configured exporter.\n \"\"\"\n\n def __init__(\n self,\n exporter: MetricExporter,\n export_interval_millis: Optional[float] = None,\n export_timeout_millis: Optional[float] = None,\n ) -> None:\n super().__init__(preferred_temporality=exporter.preferred_temporality)\n self._exporter = exporter\n if export_interval_millis is None:\n try:\n export_interval_millis = float(\n environ.get(\"OTEL_METRIC_EXPORT_INTERVAL\", 60000)\n )\n except ValueError:\n _logger.warning(\n \"Found invalid value for export interval, using default\"\n )\n export_interval_millis = 60000\n if export_timeout_millis is None:\n try:\n export_timeout_millis = float(\n environ.get(\"OTEL_METRIC_EXPORT_TIMEOUT\", 30000)\n )\n except ValueError:\n _logger.warning(\n \"Found invalid value for export timeout, using default\"\n )\n export_timeout_millis = 30000\n self._export_interval_millis = export_interval_millis\n self._export_timeout_millis = export_timeout_millis\n self._shutdown = False\n self._shutdown_event = Event()\n self._shutdown_once = Once()\n self._daemon_thread = Thread(target=self._ticker, daemon=True)\n self._daemon_thread.start()\n if hasattr(os, \"register_at_fork\"):\n os.register_at_fork(\n after_in_child=self._at_fork_reinit\n ) # pylint: disable=protected-access\n\n def _at_fork_reinit(self):\n self._daemon_thread = Thread(target=self._ticker, daemon=True)\n self._daemon_thread.start()\n\n def _ticker(self) -> None:\n interval_secs = self._export_interval_millis / 1e3\n while not self._shutdown_event.wait(interval_secs):\n self.collect()\n # one last collection below before shutting down completely\n self.collect()\n\n def _receive_metrics(self, metrics: Iterable[Metric]) -> None:\n if metrics is None:\n return\n token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))\n try:\n self._exporter.export(metrics)\n except Exception as e: # pylint: disable=broad-except,invalid-name\n _logger.exception(\"Exception while exporting metrics %s\", str(e))\n detach(token)\n\n def shutdown(self) -> bool:\n def _shutdown():\n self._shutdown = True\n\n did_set = self._shutdown_once.do_once(_shutdown)\n if not did_set:\n _logger.warning(\"Can't shutdown multiple times\")\n return False\n\n self._shutdown_event.set()\n self._daemon_thread.join()\n self._exporter.shutdown()\n return True\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\nfrom enum import Enum\nfrom os import environ, linesep\nfrom sys import stdout\nfrom threading import Event, RLock, Thread\nfrom typing import IO, Callable, Iterable, List, Optional, Sequence\n\nfrom opentelemetry.context import (\n _SUPPRESS_INSTRUMENTATION_KEY,\n attach,\n detach,\n set_value,\n)\nfrom opentelemetry.sdk._metrics.metric_reader import MetricReader\nfrom opentelemetry.sdk._metrics.point import AggregationTemporality, Metric\nfrom opentelemetry.util._once import Once\n\n_logger = logging.getLogger(__name__)\n\n\nclass MetricExportResult(Enum):\n SUCCESS = 0\n FAILURE = 1\n\n\nclass MetricExporter(ABC):\n \"\"\"Interface for exporting metrics.\n\n Interface to be implemented by services that want to export metrics received\n in their own format.\n \"\"\"\n\n @property\n def preferred_temporality(self) -> AggregationTemporality:\n return AggregationTemporality.CUMULATIVE\n\n @abstractmethod\n def export(self, metrics: Sequence[Metric]) -> \"MetricExportResult\":\n \"\"\"Exports a batch of telemetry data.\n\n Args:\n metrics: The list of `opentelemetry.sdk._metrics.data.MetricData` objects to be exported\n\n Returns:\n The result of the export\n \"\"\"\n\n @abstractmethod\n def shutdown(self) -> None:\n \"\"\"Shuts down the exporter.\n\n Called when the SDK is shut down.\n \"\"\"\n\n\nclass ConsoleMetricExporter(MetricExporter):\n \"\"\"Implementation of :class:`MetricExporter` that prints metrics to the\n console.\n\n This class can be used for diagnostic purposes. It prints the exported\n metrics to the console STDOUT.\n \"\"\"\n\n def __init__(\n self,\n out: IO = stdout,\n formatter: Callable[[Metric], str] = lambda metric: metric.to_json()\n + linesep,\n ):\n self.out = out\n self.formatter = formatter\n\n def export(self, metrics: Sequence[Metric]) -> MetricExportResult:\n for metric in metrics:\n self.out.write(self.formatter(metric))\n self.out.flush()\n return MetricExportResult.SUCCESS\n\n def shutdown(self) -> None:\n pass\n\n\nclass InMemoryMetricReader(MetricReader):\n \"\"\"Implementation of :class:`MetricReader` that returns its metrics from :func:`metrics`.\n\n This is useful for e.g. 
unit tests.\n \"\"\"\n\n def __init__(\n self,\n preferred_temporality: AggregationTemporality = AggregationTemporality.CUMULATIVE,\n ) -> None:\n super().__init__(preferred_temporality=preferred_temporality)\n self._lock = RLock()\n self._metrics: List[Metric] = []\n\n def get_metrics(self) -> List[Metric]:\n \"\"\"Reads and returns current metrics from the SDK\"\"\"\n with self._lock:\n self.collect()\n metrics = self._metrics\n self._metrics = []\n return metrics\n\n def _receive_metrics(self, metrics: Iterable[Metric]):\n with self._lock:\n self._metrics = list(metrics)\n\n def shutdown(self) -> bool:\n return True\n\n\nclass PeriodicExportingMetricReader(MetricReader):\n \"\"\"`PeriodicExportingMetricReader` is an implementation of `MetricReader`\n that collects metrics based on a user-configurable time interval, and passes the\n metrics to the configured exporter.\n \"\"\"\n\n def __init__(\n self,\n exporter: MetricExporter,\n export_interval_millis: Optional[float] = None,\n export_timeout_millis: Optional[float] = None,\n ) -> None:\n super().__init__(preferred_temporality=exporter.preferred_temporality)\n self._exporter = exporter\n if export_interval_millis is None:\n try:\n export_interval_millis = float(\n environ.get(\"OTEL_METRIC_EXPORT_INTERVAL\", 60000)\n )\n except ValueError:\n _logger.warning(\n \"Found invalid value for export interval, using default\"\n )\n export_interval_millis = 60000\n if export_timeout_millis is None:\n try:\n export_timeout_millis = float(\n environ.get(\"OTEL_METRIC_EXPORT_TIMEOUT\", 30000)\n )\n except ValueError:\n _logger.warning(\n \"Found invalid value for export timeout, using default\"\n )\n export_timeout_millis = 30000\n self._export_interval_millis = export_interval_millis\n self._export_timeout_millis = export_timeout_millis\n self._shutdown = False\n self._shutdown_event = Event()\n self._shutdown_once = Once()\n self._daemon_thread = Thread(target=self._ticker, daemon=True)\n self._daemon_thread.start()\n if hasattr(os, \"register_at_fork\"):\n os.register_at_fork(\n after_in_child=self._at_fork_reinit\n ) # pylint: disable=protected-access\n\n def _at_fork_reinit(self):\n self._daemon_thread = Thread(target=self._ticker, daemon=True)\n self._daemon_thread.start()\n\n def _ticker(self) -> None:\n interval_secs = self._export_interval_millis / 1e3\n while not self._shutdown_event.wait(interval_secs):\n self.collect()\n # one last collection below before shutting down completely\n self.collect()\n\n def _receive_metrics(self, metrics: Iterable[Metric]) -> None:\n if metrics is None:\n return\n token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))\n try:\n self._exporter.export(metrics)\n except Exception as e: # pylint: disable=broad-except,invalid-name\n _logger.exception(\"Exception while exporting metrics %s\", str(e))\n detach(token)\n\n def shutdown(self) -> bool:\n def _shutdown():\n self._shutdown = True\n\n did_set = self._shutdown_once.do_once(_shutdown)\n if not did_set:\n _logger.warning(\"Can't shutdown multiple times\")\n return False\n\n self._shutdown_event.set()\n self._daemon_thread.join()\n self._exporter.shutdown()\n return True\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/_metrics/export/__init__.py"}]}
| 2,042 | 463 |
gh_patches_debug_23064
|
rasdani/github-patches
|
git_diff
|
modoboa__modoboa-515
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
handle_mailbox_operations command not working
Hello,
This is a new Modoboa 1.1.0 installation. When I try to run:
```
python /opt/modoboa_admin/manage.py handle_mailbox_operations
```
I get the following error:
```
NotDefined: Application 'admin' and/or parameter 'HANDLE_MAILBOXES' not defined
```
According to the [documentation](http://modoboa.readthedocs.org/en/1.1.0/getting_started/configuration.html#admin-params) there should be an option in Modoboa->Parameters->General to activate this HANDLE_MAILBOXES. But I don't see it anywhere.
I tried to outsmart the system by inserting the value in the lib_parameter table but no luck. I guess something else is required.
```
insert into lib_parameter (name, value) values ('admin.HANDLE_MAILBOXES', 'yes')
```
Am I missing something? Here is a screenshot of my admin interface, logged in as the default admin user:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modoboa/extensions/admin/app_settings.py`
Content:
```
1 from django import forms
2 from django.utils.translation import ugettext_lazy
3 from modoboa.lib.formutils import YesNoField, SeparatorField
4 from modoboa.lib.sysutils import exec_cmd
5 from modoboa.lib import parameters
6
7
8 class AdminParametersForm(parameters.AdminParametersForm):
9 app = "admin"
10
11 mbsep = SeparatorField(label=ugettext_lazy("Mailboxes"))
12
13 handle_mailboxes = YesNoField(
14 label=ugettext_lazy("Handle mailboxes on filesystem"),
15 initial="no",
16 help_text=ugettext_lazy("Rename or remove mailboxes on the filesystem when they get renamed or removed within Modoboa")
17 )
18
19 mailboxes_owner = forms.CharField(
20 label=ugettext_lazy("Mailboxes ower"),
21 initial="vmail",
22 help_text=ugettext_lazy("The UNIX account who owns mailboxes on the filesystem")
23 )
24
25 default_domain_quota = forms.IntegerField(
26 label=ugettext_lazy("Default domain quota"),
27 initial=0,
28 help_text=ugettext_lazy(
29 "Default quota (in MB) applied to freshly created domains with no "
30 "value specified. A value of 0 means no quota."
31 ),
32 widget=forms.TextInput(attrs={'class': 'span2'})
33 )
34
35 auto_account_removal = YesNoField(
36 label=ugettext_lazy("Automatic account removal"),
37 initial="no",
38 help_text=ugettext_lazy("When a mailbox is removed, also remove the associated account")
39 )
40
41 # Visibility rules
42 visibility_rules = {
43 "mailboxes_owner": "handle_mailboxes=yes",
44 }
45
46 def __init__(self, *args, **kwargs):
47 super(AdminParametersForm, self).__init__(*args, **kwargs)
48 hide_fields = False
49 code, output = exec_cmd("which dovecot")
50 if not code:
51 dpath = output.strip()
52 try:
53 code, version = exec_cmd("%s --version" % dpath)
54 except OSError:
55 hide_fields = True
56 else:
57 if code or not version.strip().startswith("2"):
58 hide_fields = True
59 else:
60 hide_fields = True
61 if hide_fields:
62 del self.fields["handle_mailboxes"]
63 del self.fields["mailboxes_owner"]
64
65 def clean_default_domain_quota(self):
66 """Ensure quota is a positive integer."""
67 if self.cleaned_data['default_domain_quota'] < 0:
68 raise forms.ValidationError(
69 ugettext_lazy('Must be a positive integer')
70 )
71 return self.cleaned_data['default_domain_quota']
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modoboa/extensions/admin/app_settings.py b/modoboa/extensions/admin/app_settings.py
--- a/modoboa/extensions/admin/app_settings.py
+++ b/modoboa/extensions/admin/app_settings.py
@@ -3,6 +3,7 @@
from modoboa.lib.formutils import YesNoField, SeparatorField
from modoboa.lib.sysutils import exec_cmd
from modoboa.lib import parameters
+import os
class AdminParametersForm(parameters.AdminParametersForm):
@@ -46,9 +47,16 @@
def __init__(self, *args, **kwargs):
super(AdminParametersForm, self).__init__(*args, **kwargs)
hide_fields = False
+ dpath = None
code, output = exec_cmd("which dovecot")
+ known_paths = ("/usr/sbin/dovecot", "/usr/local/sbin/dovecot")
if not code:
dpath = output.strip()
+ else:
+ for fpath in known_paths:
+ if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
+ dpath = fpath
+ if dpath:
try:
code, version = exec_cmd("%s --version" % dpath)
except OSError:
|
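A short aside on the fix above: the essential idea is to fall back to a list of well-known filesystem locations when `which dovecot` returns nothing. The standalone helper below sketches that detection idiom using only the standard library; the function name and candidate paths are illustrative and not part of Modoboa.

```python
import os
import shutil
from typing import Iterable, Optional


def find_executable(name: str, fallback_paths: Iterable[str] = ()) -> Optional[str]:
    """Locate an executable, trying PATH first and then known locations."""
    found = shutil.which(name)
    if found:
        return found
    for candidate in fallback_paths:
        # Accept a fallback only if it exists and is executable.
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None


if __name__ == "__main__":
    dovecot = find_executable(
        "dovecot", ("/usr/sbin/dovecot", "/usr/local/sbin/dovecot")
    )
    print(dovecot or "dovecot not found")
```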
{"golden_diff": "diff --git a/modoboa/extensions/admin/app_settings.py b/modoboa/extensions/admin/app_settings.py\n--- a/modoboa/extensions/admin/app_settings.py\n+++ b/modoboa/extensions/admin/app_settings.py\n@@ -3,6 +3,7 @@\n from modoboa.lib.formutils import YesNoField, SeparatorField\n from modoboa.lib.sysutils import exec_cmd\n from modoboa.lib import parameters\n+import os\n \n \n class AdminParametersForm(parameters.AdminParametersForm):\n@@ -46,9 +47,16 @@\n def __init__(self, *args, **kwargs):\n super(AdminParametersForm, self).__init__(*args, **kwargs)\n hide_fields = False\n+ dpath = None\n code, output = exec_cmd(\"which dovecot\")\n+ known_paths = (\"/usr/sbin/dovecot\", \"/usr/local/sbin/dovecot\")\n if not code:\n dpath = output.strip()\n+ else:\n+ for fpath in known_paths:\n+ if os.path.isfile(fpath) and os.access(fpath, os.X_OK):\n+ dpath = fpath\n+ if dpath:\n try:\n code, version = exec_cmd(\"%s --version\" % dpath)\n except OSError:\n", "issue": "handle_mailbox_operations command not working\nHello,\n\nThis is a new Modoboa 1.1.0 installation. When I try to run:\n\n```\npython /opt/modoboa_admin/manage.py handle_mailbox_operations\n```\n\nI get the following error:\n\n```\nNotDefined: Application 'admin' and/or parameter 'HANDLE_MAILBOXES' not defined\n```\n\nAccording to the [documentation](http://modoboa.readthedocs.org/en/1.1.0/getting_started/configuration.html#admin-params) there should be an option in Modoboa->Parameters->General to activate this HANDLE_MAILBOXES. But I don't see it anywhere.\n\nI tried to outsmart the system by inserting the value in the lib_parameter table but no luck. I guess something else is required.\n\n```\ninsert into lib_parameter (name, value) values ('admin.HANDLE_MAILBOXES', 'yes')\n```\n\nAm I missing something ? Here is the screenshot of my admin interface, logged as the default admin user:\n\n\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy\nfrom modoboa.lib.formutils import YesNoField, SeparatorField\nfrom modoboa.lib.sysutils import exec_cmd\nfrom modoboa.lib import parameters\n\n\nclass AdminParametersForm(parameters.AdminParametersForm):\n app = \"admin\"\n\n mbsep = SeparatorField(label=ugettext_lazy(\"Mailboxes\"))\n\n handle_mailboxes = YesNoField(\n label=ugettext_lazy(\"Handle mailboxes on filesystem\"),\n initial=\"no\",\n help_text=ugettext_lazy(\"Rename or remove mailboxes on the filesystem when they get renamed or removed within Modoboa\")\n )\n\n mailboxes_owner = forms.CharField(\n label=ugettext_lazy(\"Mailboxes ower\"),\n initial=\"vmail\",\n help_text=ugettext_lazy(\"The UNIX account who owns mailboxes on the filesystem\")\n )\n\n default_domain_quota = forms.IntegerField(\n label=ugettext_lazy(\"Default domain quota\"),\n initial=0,\n help_text=ugettext_lazy(\n \"Default quota (in MB) applied to freshly created domains with no \"\n \"value specified. 
A value of 0 means no quota.\"\n ),\n widget=forms.TextInput(attrs={'class': 'span2'})\n )\n\n auto_account_removal = YesNoField(\n label=ugettext_lazy(\"Automatic account removal\"),\n initial=\"no\",\n help_text=ugettext_lazy(\"When a mailbox is removed, also remove the associated account\")\n )\n\n # Visibility rules\n visibility_rules = {\n \"mailboxes_owner\": \"handle_mailboxes=yes\",\n }\n\n def __init__(self, *args, **kwargs):\n super(AdminParametersForm, self).__init__(*args, **kwargs)\n hide_fields = False\n code, output = exec_cmd(\"which dovecot\")\n if not code:\n dpath = output.strip()\n try:\n code, version = exec_cmd(\"%s --version\" % dpath)\n except OSError:\n hide_fields = True\n else:\n if code or not version.strip().startswith(\"2\"):\n hide_fields = True\n else:\n hide_fields = True\n if hide_fields:\n del self.fields[\"handle_mailboxes\"]\n del self.fields[\"mailboxes_owner\"]\n\n def clean_default_domain_quota(self):\n \"\"\"Ensure quota is a positive integer.\"\"\"\n if self.cleaned_data['default_domain_quota'] < 0:\n raise forms.ValidationError(\n ugettext_lazy('Must be a positive integer')\n )\n return self.cleaned_data['default_domain_quota']\n", "path": "modoboa/extensions/admin/app_settings.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import ugettext_lazy\nfrom modoboa.lib.formutils import YesNoField, SeparatorField\nfrom modoboa.lib.sysutils import exec_cmd\nfrom modoboa.lib import parameters\nimport os\n\n\nclass AdminParametersForm(parameters.AdminParametersForm):\n app = \"admin\"\n\n mbsep = SeparatorField(label=ugettext_lazy(\"Mailboxes\"))\n\n handle_mailboxes = YesNoField(\n label=ugettext_lazy(\"Handle mailboxes on filesystem\"),\n initial=\"no\",\n help_text=ugettext_lazy(\"Rename or remove mailboxes on the filesystem when they get renamed or removed within Modoboa\")\n )\n\n mailboxes_owner = forms.CharField(\n label=ugettext_lazy(\"Mailboxes ower\"),\n initial=\"vmail\",\n help_text=ugettext_lazy(\"The UNIX account who owns mailboxes on the filesystem\")\n )\n\n default_domain_quota = forms.IntegerField(\n label=ugettext_lazy(\"Default domain quota\"),\n initial=0,\n help_text=ugettext_lazy(\n \"Default quota (in MB) applied to freshly created domains with no \"\n \"value specified. 
A value of 0 means no quota.\"\n ),\n widget=forms.TextInput(attrs={'class': 'span2'})\n )\n\n auto_account_removal = YesNoField(\n label=ugettext_lazy(\"Automatic account removal\"),\n initial=\"no\",\n help_text=ugettext_lazy(\"When a mailbox is removed, also remove the associated account\")\n )\n\n # Visibility rules\n visibility_rules = {\n \"mailboxes_owner\": \"handle_mailboxes=yes\",\n }\n\n def __init__(self, *args, **kwargs):\n super(AdminParametersForm, self).__init__(*args, **kwargs)\n hide_fields = False\n dpath = None\n code, output = exec_cmd(\"which dovecot\")\n known_paths = (\"/usr/sbin/dovecot\", \"/usr/local/sbin/dovecot\")\n if not code:\n dpath = output.strip()\n else:\n for fpath in known_paths:\n if os.path.isfile(fpath) and os.access(fpath, os.X_OK):\n dpath = fpath\n if dpath:\n try:\n code, version = exec_cmd(\"%s --version\" % dpath)\n except OSError:\n hide_fields = True\n else:\n if code or not version.strip().startswith(\"2\"):\n hide_fields = True\n else:\n hide_fields = True\n if hide_fields:\n del self.fields[\"handle_mailboxes\"]\n del self.fields[\"mailboxes_owner\"]\n\n def clean_default_domain_quota(self):\n \"\"\"Ensure quota is a positive integer.\"\"\"\n if self.cleaned_data['default_domain_quota'] < 0:\n raise forms.ValidationError(\n ugettext_lazy('Must be a positive integer')\n )\n return self.cleaned_data['default_domain_quota']\n", "path": "modoboa/extensions/admin/app_settings.py"}]}
| 1,206 | 271 |
gh_patches_debug_22129
|
rasdani/github-patches
|
git_diff
|
nonebot__nonebot2-679
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: In some cases, plugins that failed to load still show up among the currently loaded plugins
**Describe the problem:**
Fetching all currently loaded plugins via nonebot.get_loaded_plugins() also returns plugins whose import failed
**How to reproduce?**
1. Copy the weather.py code from the [registering event responders](https://61d3d9dbcadf413fd3238e89--nonebot2.netlify.app/guide/creating-a-matcher.html) section of the 2.0.0 a16 documentation as a plugin and import it under 2.0.0 beta1 (obviously, this raises an error)
2. Call nonebot.get_loaded_plugins() to get the loaded plugins; the plugin that failed to load appears among them
**Expected result**
Plugins that failed to load should not appear among the loaded plugins
**Environment information:**
- OS: [Windows]
- Python Version: [3.10]
- Nonebot Version: [2.0.0 b1]
**Protocol client information:**
- Protocol client: [go-cqhttp]
- Protocol client version: [1.0.0]
**Screenshots or logs**
```
[ERROR] nonebot | Failed to import "kirami.plugins.weather"
ValueError: Unknown parameter state for function <function handle_first_receive at 0x000001924056AB90> with type typing.Dict[typing.Any, typing.Any]
{Plugin(name='weather', module=<module 'kirami.plugins.weather' from 'D:\\Users\\Documents\\gitee\\KiramiBot\\kirami\\plugins\\weather\\__init__.py'>, module_name='kirami.plugins.weather', manager=<nonebot.plugin.manager.PluginManager object at 0x000002140ECEF130>, export={}, matcher=set(), parent_plugin=None, sub_plugins=set())}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/plugin/plugin.py`
Content:
```
1 from types import ModuleType
2 from dataclasses import field, dataclass
3 from typing import TYPE_CHECKING, Set, Dict, Type, Optional
4
5 from .export import Export
6 from nonebot.matcher import Matcher
7
8 if TYPE_CHECKING:
9 from .manager import PluginManager
10
11 plugins: Dict[str, "Plugin"] = {}
12 """
13 :类型: ``Dict[str, Plugin]``
14 :说明: 已加载的插件
15 """
16
17
18 @dataclass(eq=False)
19 class Plugin(object):
20 """存储插件信息"""
21
22 name: str
23 """
24 - **类型**: ``str``
25 - **说明**: 插件名称,使用 文件/文件夹 名称作为插件名
26 """
27 module: ModuleType
28 """
29 - **类型**: ``ModuleType``
30 - **说明**: 插件模块对象
31 """
32 module_name: str
33 """
34 - **类型**: ``str``
35 - **说明**: 点分割模块路径
36 """
37 manager: "PluginManager"
38 """
39 - **类型**: ``PluginManager``
40 - **说明**: 导入该插件的插件管理器
41 """
42 export: Export = field(default_factory=Export)
43 """
44 - **类型**: ``Export``
45 - **说明**: 插件内定义的导出内容
46 """
47 matcher: Set[Type[Matcher]] = field(default_factory=set)
48 """
49 - **类型**: ``Set[Type[Matcher]]``
50 - **说明**: 插件内定义的 ``Matcher``
51 """
52 parent_plugin: Optional["Plugin"] = None
53 """
54 - **类型**: ``Optional[Plugin]``
55 - **说明**: 父插件
56 """
57 sub_plugins: Set["Plugin"] = field(default_factory=set)
58 """
59 - **类型**: ``Set[Plugin]``
60 - **说明**: 子插件集合
61 """
62
63
64 def get_plugin(name: str) -> Optional[Plugin]:
65 """
66 :说明:
67
68 获取当前导入的某个插件。
69
70 :参数:
71
72 * ``name: str``: 插件名,与 ``load_plugin`` 参数一致。如果为 ``load_plugins`` 导入的插件,则为文件(夹)名。
73
74 :返回:
75
76 - ``Optional[Plugin]``
77 """
78 return plugins.get(name)
79
80
81 def get_loaded_plugins() -> Set[Plugin]:
82 """
83 :说明:
84
85 获取当前已导入的所有插件。
86
87 :返回:
88
89 - ``Set[Plugin]``
90 """
91 return set(plugins.values())
92
93
94 def _new_plugin(fullname: str, module: ModuleType, manager: "PluginManager") -> Plugin:
95 name = fullname.rsplit(".", 1)[-1] if "." in fullname else fullname
96 if name in plugins:
97 raise RuntimeError("Plugin already exists! Check your plugin name.")
98 plugin = Plugin(name, module, fullname, manager)
99 plugins[name] = plugin
100 return plugin
101
```
Path: `nonebot/plugin/manager.py`
Content:
```
1 import sys
2 import pkgutil
3 import importlib
4 from pathlib import Path
5 from itertools import chain
6 from types import ModuleType
7 from importlib.abc import MetaPathFinder
8 from importlib.machinery import PathFinder, SourceFileLoader
9 from typing import Set, Dict, List, Union, Iterable, Optional, Sequence
10
11 from nonebot.log import logger
12 from nonebot.utils import escape_tag
13 from .plugin import Plugin, _new_plugin
14 from . import _managers, _current_plugin
15
16
17 class PluginManager:
18 def __init__(
19 self,
20 plugins: Optional[Iterable[str]] = None,
21 search_path: Optional[Iterable[str]] = None,
22 ):
23
24 # simple plugin not in search path
25 self.plugins: Set[str] = set(plugins or [])
26 self.search_path: Set[str] = set(search_path or [])
27 # cache plugins
28 self.searched_plugins: Dict[str, Path] = {}
29 self.list_plugins()
30
31 def _path_to_module_name(self, path: Path) -> str:
32 rel_path = path.resolve().relative_to(Path(".").resolve())
33 if rel_path.stem == "__init__":
34 return ".".join(rel_path.parts[:-1])
35 else:
36 return ".".join(rel_path.parts[:-1] + (rel_path.stem,))
37
38 def _previous_plugins(self) -> List[str]:
39 _pre_managers: List[PluginManager]
40 if self in _managers:
41 _pre_managers = _managers[: _managers.index(self)]
42 else:
43 _pre_managers = _managers[:]
44
45 return [
46 *chain.from_iterable(
47 [*manager.plugins, *manager.searched_plugins.keys()]
48 for manager in _pre_managers
49 )
50 ]
51
52 def list_plugins(self) -> Set[str]:
53 # get all previous ready to load plugins
54 previous_plugins = self._previous_plugins()
55 searched_plugins: Dict[str, Path] = {}
56 third_party_plugins: Set[str] = set()
57
58 for plugin in self.plugins:
59 name = plugin.rsplit(".", 1)[-1] if "." in plugin else plugin
60 if name in third_party_plugins or name in previous_plugins:
61 raise RuntimeError(
62 f"Plugin already exists: {name}! Check your plugin name"
63 )
64 third_party_plugins.add(plugin)
65
66 for module_info in pkgutil.iter_modules(self.search_path):
67 if module_info.name.startswith("_"):
68 continue
69 if (
70 module_info.name in searched_plugins.keys()
71 or module_info.name in previous_plugins
72 or module_info.name in third_party_plugins
73 ):
74 raise RuntimeError(
75 f"Plugin already exists: {module_info.name}! Check your plugin name"
76 )
77 module_spec = module_info.module_finder.find_spec(module_info.name, None)
78 if not module_spec:
79 continue
80 module_path = module_spec.origin
81 if not module_path:
82 continue
83 searched_plugins[module_info.name] = Path(module_path).resolve()
84
85 self.searched_plugins = searched_plugins
86
87 return third_party_plugins | set(self.searched_plugins.keys())
88
89 def load_plugin(self, name) -> Optional[Plugin]:
90 try:
91 if name in self.plugins:
92 module = importlib.import_module(name)
93 elif name not in self.searched_plugins:
94 raise RuntimeError(f"Plugin not found: {name}! Check your plugin name")
95 else:
96 module = importlib.import_module(
97 self._path_to_module_name(self.searched_plugins[name])
98 )
99
100 logger.opt(colors=True).success(
101 f'Succeeded to import "<y>{escape_tag(name)}</y>"'
102 )
103 return getattr(module, "__plugin__", None)
104 except Exception as e:
105 logger.opt(colors=True, exception=e).error(
106 f'<r><bg #f8bbd0>Failed to import "{escape_tag(name)}"</bg #f8bbd0></r>'
107 )
108
109 def load_all_plugins(self) -> Set[Plugin]:
110 return set(
111 filter(None, (self.load_plugin(name) for name in self.list_plugins()))
112 )
113
114
115 class PluginFinder(MetaPathFinder):
116 def find_spec(
117 self,
118 fullname: str,
119 path: Optional[Sequence[Union[bytes, str]]],
120 target: Optional[ModuleType] = None,
121 ):
122 if _managers:
123 index = -1
124 module_spec = PathFinder.find_spec(fullname, path, target)
125 if not module_spec:
126 return
127 module_origin = module_spec.origin
128 if not module_origin:
129 return
130 module_path = Path(module_origin).resolve()
131
132 while -index <= len(_managers):
133 manager = _managers[index]
134
135 if (
136 fullname in manager.plugins
137 or module_path in manager.searched_plugins.values()
138 ):
139 module_spec.loader = PluginLoader(manager, fullname, module_origin)
140 return module_spec
141
142 index -= 1
143 return
144
145
146 class PluginLoader(SourceFileLoader):
147 def __init__(self, manager: PluginManager, fullname: str, path) -> None:
148 self.manager = manager
149 self.loaded = False
150 super().__init__(fullname, path)
151
152 def create_module(self, spec) -> Optional[ModuleType]:
153 if self.name in sys.modules:
154 self.loaded = True
155 return sys.modules[self.name]
156 # return None to use default module creation
157 return super().create_module(spec)
158
159 def exec_module(self, module: ModuleType) -> None:
160 if self.loaded:
161 return
162
163 plugin = _new_plugin(self.name, module, self.manager)
164 parent_plugin = _current_plugin.get()
165 if parent_plugin and _managers.index(parent_plugin.manager) < _managers.index(
166 self.manager
167 ):
168 plugin.parent_plugin = parent_plugin
169 parent_plugin.sub_plugins.add(plugin)
170
171 _plugin_token = _current_plugin.set(plugin)
172
173 setattr(module, "__plugin__", plugin)
174
175 # try:
176 # super().exec_module(module)
177 # except Exception as e:
178 # raise ImportError(
179 # f"Error when executing module {module_name} from {module.__file__}."
180 # ) from e
181 super().exec_module(module)
182
183 _current_plugin.reset(_plugin_token)
184 return
185
186
187 sys.meta_path.insert(0, PluginFinder())
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nonebot/plugin/manager.py b/nonebot/plugin/manager.py
--- a/nonebot/plugin/manager.py
+++ b/nonebot/plugin/manager.py
@@ -10,8 +10,8 @@
from nonebot.log import logger
from nonebot.utils import escape_tag
-from .plugin import Plugin, _new_plugin
from . import _managers, _current_plugin
+from .plugin import Plugin, _new_plugin, _confirm_plugin
class PluginManager:
@@ -180,6 +180,8 @@
# ) from e
super().exec_module(module)
+ _confirm_plugin(plugin)
+
_current_plugin.reset(_plugin_token)
return
diff --git a/nonebot/plugin/plugin.py b/nonebot/plugin/plugin.py
--- a/nonebot/plugin/plugin.py
+++ b/nonebot/plugin/plugin.py
@@ -96,5 +96,10 @@
if name in plugins:
raise RuntimeError("Plugin already exists! Check your plugin name.")
plugin = Plugin(name, module, fullname, manager)
- plugins[name] = plugin
return plugin
+
+
+def _confirm_plugin(plugin: Plugin) -> None:
+ if plugin.name in plugins:
+ raise RuntimeError("Plugin already exists! Check your plugin name.")
+ plugins[plugin.name] = plugin
|
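The patch above defers registration until the module body has executed without raising, which is what keeps failed imports out of `get_loaded_plugins()`. The snippet below sketches that commit-on-success pattern in isolation; the names are illustrative and are not NoneBot APIs.

```python
from types import ModuleType
from typing import Callable, Dict

_registry: Dict[str, ModuleType] = {}


def load_and_register(name: str, loader: Callable[[], ModuleType]) -> ModuleType:
    """Execute the loader first; commit to the registry only if nothing raised."""
    if name in _registry:
        raise RuntimeError(f"Plugin already exists: {name}")
    module = loader()          # may raise; nothing has been registered yet
    _registry[name] = module   # the commit happens only after success
    return module


def broken_loader() -> ModuleType:
    raise ValueError("import failed")


try:
    load_and_register("weather", broken_loader)
except ValueError:
    pass

assert "weather" not in _registry  # a failed load leaves no trace
```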
{"golden_diff": "diff --git a/nonebot/plugin/manager.py b/nonebot/plugin/manager.py\n--- a/nonebot/plugin/manager.py\n+++ b/nonebot/plugin/manager.py\n@@ -10,8 +10,8 @@\n \n from nonebot.log import logger\n from nonebot.utils import escape_tag\n-from .plugin import Plugin, _new_plugin\n from . import _managers, _current_plugin\n+from .plugin import Plugin, _new_plugin, _confirm_plugin\n \n \n class PluginManager:\n@@ -180,6 +180,8 @@\n # ) from e\n super().exec_module(module)\n \n+ _confirm_plugin(plugin)\n+\n _current_plugin.reset(_plugin_token)\n return\n \ndiff --git a/nonebot/plugin/plugin.py b/nonebot/plugin/plugin.py\n--- a/nonebot/plugin/plugin.py\n+++ b/nonebot/plugin/plugin.py\n@@ -96,5 +96,10 @@\n if name in plugins:\n raise RuntimeError(\"Plugin already exists! Check your plugin name.\")\n plugin = Plugin(name, module, fullname, manager)\n- plugins[name] = plugin\n return plugin\n+\n+\n+def _confirm_plugin(plugin: Plugin) -> None:\n+ if plugin.name in plugins:\n+ raise RuntimeError(\"Plugin already exists! Check your plugin name.\")\n+ plugins[plugin.name] = plugin\n", "issue": "Bug: \u90e8\u5206\u60c5\u51b5\u4e0b\uff0c\u52a0\u8f7d\u5931\u8d25\u7684\u63d2\u4ef6\u4f9d\u7136\u4f1a\u51fa\u73b0\u5728\u5f53\u524d\u5df2\u5bfc\u5165\u7684\u6240\u6709\u63d2\u4ef6\u4e2d\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u901a\u8fc7 nonebot.get_loaded_plugins() \u83b7\u53d6\u5f53\u524d\u5df2\u5bfc\u5165\u7684\u6240\u6709\u63d2\u4ef6\uff0c\u52a0\u8f7d\u5931\u8d25\u7684\u63d2\u4ef6\u4e5f\u51fa\u73b0\u5728\u5176\u4e2d\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n1. \u590d\u5236 2.0.0 a16 \u7248\u672c\u6587\u6863\u7684[\u6ce8\u518c\u4e8b\u4ef6\u54cd\u5e94\u5668](https://61d3d9dbcadf413fd3238e89--nonebot2.netlify.app/guide/creating-a-matcher.html)\u4e00\u8282\u7684 weather.py \u7684\u4ee3\u7801\u4f5c\u4e3a\u63d2\u4ef6\uff0c\u5e76\u5728 2.0.0 beta1 \u7248\u672c\u5bfc\u5165\u5b83\uff08\u5f88\u663e\u7136\uff0c\u8fd9\u4f1a\u53d1\u751f\u9519\u8bef\uff09\r\n2. 
\u901a\u8fc7 nonebot.get_loaded_plugins() \u83b7\u53d6\u5df2\u5bfc\u5165\u7684\u63d2\u4ef6\uff0c\u53ef\u4ee5\u53d1\u73b0\u8fd9\u4e2a\u52a0\u8f7d\u5931\u8d25\u7684\u63d2\u4ef6\u51fa\u73b0\u5728\u4e86\u5df2\u5bfc\u5165\u7684\u63d2\u4ef6\u4e2d\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n\u52a0\u8f7d\u5931\u8d25\u7684\u63d2\u4ef6\u4e0d\u51fa\u73b0\u5728\u5df2\u5bfc\u5165\u7684\u6240\u6709\u63d2\u4ef6\u4e2d\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: [Windows]\r\n - Python Version: [3.10]\r\n - Nonebot Version: [2.0.0 b1]\r\n\r\n**\u534f\u8bae\u7aef\u4fe1\u606f\uff1a**\r\n\r\n - \u534f\u8bae\u7aef: [go-cqhttp]\r\n - \u534f\u8bae\u7aef\u7248\u672c: [1.0.0]\r\n\r\n**\u622a\u56fe\u6216\u65e5\u5fd7**\r\n\r\n```\r\n[ERROR] nonebot | Failed to import \"kirami.plugins.weather\"\r\nValueError: Unknown parameter state for function <function handle_first_receive at 0x000001924056AB90> with type typing.Dict[typing.Any, typing.Any]\r\n\r\n{Plugin(name='weather', module=<module 'kirami.plugins.weather' from 'D:\\\\Users\\\\Documents\\\\gitee\\\\KiramiBot\\\\kirami\\\\plugins\\\\weather\\\\__init__.py'>, module_name='kirami.plugins.weather', manager=<nonebot.plugin.manager.PluginManager object at 0x000002140ECEF130>, export={}, matcher=set(), parent_plugin=None, sub_plugins=set())}\r\n```\n", "before_files": [{"content": "from types import ModuleType\nfrom dataclasses import field, dataclass\nfrom typing import TYPE_CHECKING, Set, Dict, Type, Optional\n\nfrom .export import Export\nfrom nonebot.matcher import Matcher\n\nif TYPE_CHECKING:\n from .manager import PluginManager\n\nplugins: Dict[str, \"Plugin\"] = {}\n\"\"\"\n:\u7c7b\u578b: ``Dict[str, Plugin]``\n:\u8bf4\u660e: \u5df2\u52a0\u8f7d\u7684\u63d2\u4ef6\n\"\"\"\n\n\n@dataclass(eq=False)\nclass Plugin(object):\n \"\"\"\u5b58\u50a8\u63d2\u4ef6\u4fe1\u606f\"\"\"\n\n name: str\n \"\"\"\n - **\u7c7b\u578b**: ``str``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u540d\u79f0\uff0c\u4f7f\u7528 \u6587\u4ef6/\u6587\u4ef6\u5939 \u540d\u79f0\u4f5c\u4e3a\u63d2\u4ef6\u540d\n \"\"\"\n module: ModuleType\n \"\"\"\n - **\u7c7b\u578b**: ``ModuleType``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u6a21\u5757\u5bf9\u8c61\n \"\"\"\n module_name: str\n \"\"\"\n - **\u7c7b\u578b**: ``str``\n - **\u8bf4\u660e**: \u70b9\u5206\u5272\u6a21\u5757\u8def\u5f84\n \"\"\"\n manager: \"PluginManager\"\n \"\"\"\n - **\u7c7b\u578b**: ``PluginManager``\n - **\u8bf4\u660e**: \u5bfc\u5165\u8be5\u63d2\u4ef6\u7684\u63d2\u4ef6\u7ba1\u7406\u5668\n \"\"\"\n export: Export = field(default_factory=Export)\n \"\"\"\n - **\u7c7b\u578b**: ``Export``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u5185\u5b9a\u4e49\u7684\u5bfc\u51fa\u5185\u5bb9\n \"\"\"\n matcher: Set[Type[Matcher]] = field(default_factory=set)\n \"\"\"\n - **\u7c7b\u578b**: ``Set[Type[Matcher]]``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u5185\u5b9a\u4e49\u7684 ``Matcher``\n \"\"\"\n parent_plugin: Optional[\"Plugin\"] = None\n \"\"\"\n - **\u7c7b\u578b**: ``Optional[Plugin]``\n - **\u8bf4\u660e**: \u7236\u63d2\u4ef6\n \"\"\"\n sub_plugins: Set[\"Plugin\"] = field(default_factory=set)\n \"\"\"\n - **\u7c7b\u578b**: ``Set[Plugin]``\n - **\u8bf4\u660e**: \u5b50\u63d2\u4ef6\u96c6\u5408\n \"\"\"\n\n\ndef get_plugin(name: str) -> Optional[Plugin]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u83b7\u53d6\u5f53\u524d\u5bfc\u5165\u7684\u67d0\u4e2a\u63d2\u4ef6\u3002\n\n :\u53c2\u6570:\n\n * ``name: str``: \u63d2\u4ef6\u540d\uff0c\u4e0e ``load_plugin`` \u53c2\u6570\u4e00\u81f4\u3002\u5982\u679c\u4e3a ``load_plugins`` 
\u5bfc\u5165\u7684\u63d2\u4ef6\uff0c\u5219\u4e3a\u6587\u4ef6(\u5939)\u540d\u3002\n\n :\u8fd4\u56de:\n\n - ``Optional[Plugin]``\n \"\"\"\n return plugins.get(name)\n\n\ndef get_loaded_plugins() -> Set[Plugin]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u83b7\u53d6\u5f53\u524d\u5df2\u5bfc\u5165\u7684\u6240\u6709\u63d2\u4ef6\u3002\n\n :\u8fd4\u56de:\n\n - ``Set[Plugin]``\n \"\"\"\n return set(plugins.values())\n\n\ndef _new_plugin(fullname: str, module: ModuleType, manager: \"PluginManager\") -> Plugin:\n name = fullname.rsplit(\".\", 1)[-1] if \".\" in fullname else fullname\n if name in plugins:\n raise RuntimeError(\"Plugin already exists! Check your plugin name.\")\n plugin = Plugin(name, module, fullname, manager)\n plugins[name] = plugin\n return plugin\n", "path": "nonebot/plugin/plugin.py"}, {"content": "import sys\nimport pkgutil\nimport importlib\nfrom pathlib import Path\nfrom itertools import chain\nfrom types import ModuleType\nfrom importlib.abc import MetaPathFinder\nfrom importlib.machinery import PathFinder, SourceFileLoader\nfrom typing import Set, Dict, List, Union, Iterable, Optional, Sequence\n\nfrom nonebot.log import logger\nfrom nonebot.utils import escape_tag\nfrom .plugin import Plugin, _new_plugin\nfrom . import _managers, _current_plugin\n\n\nclass PluginManager:\n def __init__(\n self,\n plugins: Optional[Iterable[str]] = None,\n search_path: Optional[Iterable[str]] = None,\n ):\n\n # simple plugin not in search path\n self.plugins: Set[str] = set(plugins or [])\n self.search_path: Set[str] = set(search_path or [])\n # cache plugins\n self.searched_plugins: Dict[str, Path] = {}\n self.list_plugins()\n\n def _path_to_module_name(self, path: Path) -> str:\n rel_path = path.resolve().relative_to(Path(\".\").resolve())\n if rel_path.stem == \"__init__\":\n return \".\".join(rel_path.parts[:-1])\n else:\n return \".\".join(rel_path.parts[:-1] + (rel_path.stem,))\n\n def _previous_plugins(self) -> List[str]:\n _pre_managers: List[PluginManager]\n if self in _managers:\n _pre_managers = _managers[: _managers.index(self)]\n else:\n _pre_managers = _managers[:]\n\n return [\n *chain.from_iterable(\n [*manager.plugins, *manager.searched_plugins.keys()]\n for manager in _pre_managers\n )\n ]\n\n def list_plugins(self) -> Set[str]:\n # get all previous ready to load plugins\n previous_plugins = self._previous_plugins()\n searched_plugins: Dict[str, Path] = {}\n third_party_plugins: Set[str] = set()\n\n for plugin in self.plugins:\n name = plugin.rsplit(\".\", 1)[-1] if \".\" in plugin else plugin\n if name in third_party_plugins or name in previous_plugins:\n raise RuntimeError(\n f\"Plugin already exists: {name}! Check your plugin name\"\n )\n third_party_plugins.add(plugin)\n\n for module_info in pkgutil.iter_modules(self.search_path):\n if module_info.name.startswith(\"_\"):\n continue\n if (\n module_info.name in searched_plugins.keys()\n or module_info.name in previous_plugins\n or module_info.name in third_party_plugins\n ):\n raise RuntimeError(\n f\"Plugin already exists: {module_info.name}! 
Check your plugin name\"\n )\n module_spec = module_info.module_finder.find_spec(module_info.name, None)\n if not module_spec:\n continue\n module_path = module_spec.origin\n if not module_path:\n continue\n searched_plugins[module_info.name] = Path(module_path).resolve()\n\n self.searched_plugins = searched_plugins\n\n return third_party_plugins | set(self.searched_plugins.keys())\n\n def load_plugin(self, name) -> Optional[Plugin]:\n try:\n if name in self.plugins:\n module = importlib.import_module(name)\n elif name not in self.searched_plugins:\n raise RuntimeError(f\"Plugin not found: {name}! Check your plugin name\")\n else:\n module = importlib.import_module(\n self._path_to_module_name(self.searched_plugins[name])\n )\n\n logger.opt(colors=True).success(\n f'Succeeded to import \"<y>{escape_tag(name)}</y>\"'\n )\n return getattr(module, \"__plugin__\", None)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f'<r><bg #f8bbd0>Failed to import \"{escape_tag(name)}\"</bg #f8bbd0></r>'\n )\n\n def load_all_plugins(self) -> Set[Plugin]:\n return set(\n filter(None, (self.load_plugin(name) for name in self.list_plugins()))\n )\n\n\nclass PluginFinder(MetaPathFinder):\n def find_spec(\n self,\n fullname: str,\n path: Optional[Sequence[Union[bytes, str]]],\n target: Optional[ModuleType] = None,\n ):\n if _managers:\n index = -1\n module_spec = PathFinder.find_spec(fullname, path, target)\n if not module_spec:\n return\n module_origin = module_spec.origin\n if not module_origin:\n return\n module_path = Path(module_origin).resolve()\n\n while -index <= len(_managers):\n manager = _managers[index]\n\n if (\n fullname in manager.plugins\n or module_path in manager.searched_plugins.values()\n ):\n module_spec.loader = PluginLoader(manager, fullname, module_origin)\n return module_spec\n\n index -= 1\n return\n\n\nclass PluginLoader(SourceFileLoader):\n def __init__(self, manager: PluginManager, fullname: str, path) -> None:\n self.manager = manager\n self.loaded = False\n super().__init__(fullname, path)\n\n def create_module(self, spec) -> Optional[ModuleType]:\n if self.name in sys.modules:\n self.loaded = True\n return sys.modules[self.name]\n # return None to use default module creation\n return super().create_module(spec)\n\n def exec_module(self, module: ModuleType) -> None:\n if self.loaded:\n return\n\n plugin = _new_plugin(self.name, module, self.manager)\n parent_plugin = _current_plugin.get()\n if parent_plugin and _managers.index(parent_plugin.manager) < _managers.index(\n self.manager\n ):\n plugin.parent_plugin = parent_plugin\n parent_plugin.sub_plugins.add(plugin)\n\n _plugin_token = _current_plugin.set(plugin)\n\n setattr(module, \"__plugin__\", plugin)\n\n # try:\n # super().exec_module(module)\n # except Exception as e:\n # raise ImportError(\n # f\"Error when executing module {module_name} from {module.__file__}.\"\n # ) from e\n super().exec_module(module)\n\n _current_plugin.reset(_plugin_token)\n return\n\n\nsys.meta_path.insert(0, PluginFinder())\n", "path": "nonebot/plugin/manager.py"}], "after_files": [{"content": "from types import ModuleType\nfrom dataclasses import field, dataclass\nfrom typing import TYPE_CHECKING, Set, Dict, Type, Optional\n\nfrom .export import Export\nfrom nonebot.matcher import Matcher\n\nif TYPE_CHECKING:\n from .manager import PluginManager\n\nplugins: Dict[str, \"Plugin\"] = {}\n\"\"\"\n:\u7c7b\u578b: ``Dict[str, Plugin]``\n:\u8bf4\u660e: \u5df2\u52a0\u8f7d\u7684\u63d2\u4ef6\n\"\"\"\n\n\n@dataclass(eq=False)\nclass 
Plugin(object):\n \"\"\"\u5b58\u50a8\u63d2\u4ef6\u4fe1\u606f\"\"\"\n\n name: str\n \"\"\"\n - **\u7c7b\u578b**: ``str``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u540d\u79f0\uff0c\u4f7f\u7528 \u6587\u4ef6/\u6587\u4ef6\u5939 \u540d\u79f0\u4f5c\u4e3a\u63d2\u4ef6\u540d\n \"\"\"\n module: ModuleType\n \"\"\"\n - **\u7c7b\u578b**: ``ModuleType``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u6a21\u5757\u5bf9\u8c61\n \"\"\"\n module_name: str\n \"\"\"\n - **\u7c7b\u578b**: ``str``\n - **\u8bf4\u660e**: \u70b9\u5206\u5272\u6a21\u5757\u8def\u5f84\n \"\"\"\n manager: \"PluginManager\"\n \"\"\"\n - **\u7c7b\u578b**: ``PluginManager``\n - **\u8bf4\u660e**: \u5bfc\u5165\u8be5\u63d2\u4ef6\u7684\u63d2\u4ef6\u7ba1\u7406\u5668\n \"\"\"\n export: Export = field(default_factory=Export)\n \"\"\"\n - **\u7c7b\u578b**: ``Export``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u5185\u5b9a\u4e49\u7684\u5bfc\u51fa\u5185\u5bb9\n \"\"\"\n matcher: Set[Type[Matcher]] = field(default_factory=set)\n \"\"\"\n - **\u7c7b\u578b**: ``Set[Type[Matcher]]``\n - **\u8bf4\u660e**: \u63d2\u4ef6\u5185\u5b9a\u4e49\u7684 ``Matcher``\n \"\"\"\n parent_plugin: Optional[\"Plugin\"] = None\n \"\"\"\n - **\u7c7b\u578b**: ``Optional[Plugin]``\n - **\u8bf4\u660e**: \u7236\u63d2\u4ef6\n \"\"\"\n sub_plugins: Set[\"Plugin\"] = field(default_factory=set)\n \"\"\"\n - **\u7c7b\u578b**: ``Set[Plugin]``\n - **\u8bf4\u660e**: \u5b50\u63d2\u4ef6\u96c6\u5408\n \"\"\"\n\n\ndef get_plugin(name: str) -> Optional[Plugin]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u83b7\u53d6\u5f53\u524d\u5bfc\u5165\u7684\u67d0\u4e2a\u63d2\u4ef6\u3002\n\n :\u53c2\u6570:\n\n * ``name: str``: \u63d2\u4ef6\u540d\uff0c\u4e0e ``load_plugin`` \u53c2\u6570\u4e00\u81f4\u3002\u5982\u679c\u4e3a ``load_plugins`` \u5bfc\u5165\u7684\u63d2\u4ef6\uff0c\u5219\u4e3a\u6587\u4ef6(\u5939)\u540d\u3002\n\n :\u8fd4\u56de:\n\n - ``Optional[Plugin]``\n \"\"\"\n return plugins.get(name)\n\n\ndef get_loaded_plugins() -> Set[Plugin]:\n \"\"\"\n :\u8bf4\u660e:\n\n \u83b7\u53d6\u5f53\u524d\u5df2\u5bfc\u5165\u7684\u6240\u6709\u63d2\u4ef6\u3002\n\n :\u8fd4\u56de:\n\n - ``Set[Plugin]``\n \"\"\"\n return set(plugins.values())\n\n\ndef _new_plugin(fullname: str, module: ModuleType, manager: \"PluginManager\") -> Plugin:\n name = fullname.rsplit(\".\", 1)[-1] if \".\" in fullname else fullname\n if name in plugins:\n raise RuntimeError(\"Plugin already exists! Check your plugin name.\")\n plugin = Plugin(name, module, fullname, manager)\n return plugin\n\n\ndef _confirm_plugin(plugin: Plugin) -> None:\n if plugin.name in plugins:\n raise RuntimeError(\"Plugin already exists! Check your plugin name.\")\n plugins[plugin.name] = plugin\n", "path": "nonebot/plugin/plugin.py"}, {"content": "import sys\nimport pkgutil\nimport importlib\nfrom pathlib import Path\nfrom itertools import chain\nfrom types import ModuleType\nfrom importlib.abc import MetaPathFinder\nfrom importlib.machinery import PathFinder, SourceFileLoader\nfrom typing import Set, Dict, List, Union, Iterable, Optional, Sequence\n\nfrom nonebot.log import logger\nfrom nonebot.utils import escape_tag\nfrom . 
import _managers, _current_plugin\nfrom .plugin import Plugin, _new_plugin, _confirm_plugin\n\n\nclass PluginManager:\n def __init__(\n self,\n plugins: Optional[Iterable[str]] = None,\n search_path: Optional[Iterable[str]] = None,\n ):\n\n # simple plugin not in search path\n self.plugins: Set[str] = set(plugins or [])\n self.search_path: Set[str] = set(search_path or [])\n # cache plugins\n self.searched_plugins: Dict[str, Path] = {}\n self.list_plugins()\n\n def _path_to_module_name(self, path: Path) -> str:\n rel_path = path.resolve().relative_to(Path(\".\").resolve())\n if rel_path.stem == \"__init__\":\n return \".\".join(rel_path.parts[:-1])\n else:\n return \".\".join(rel_path.parts[:-1] + (rel_path.stem,))\n\n def _previous_plugins(self) -> List[str]:\n _pre_managers: List[PluginManager]\n if self in _managers:\n _pre_managers = _managers[: _managers.index(self)]\n else:\n _pre_managers = _managers[:]\n\n return [\n *chain.from_iterable(\n [*manager.plugins, *manager.searched_plugins.keys()]\n for manager in _pre_managers\n )\n ]\n\n def list_plugins(self) -> Set[str]:\n # get all previous ready to load plugins\n previous_plugins = self._previous_plugins()\n searched_plugins: Dict[str, Path] = {}\n third_party_plugins: Set[str] = set()\n\n for plugin in self.plugins:\n name = plugin.rsplit(\".\", 1)[-1] if \".\" in plugin else plugin\n if name in third_party_plugins or name in previous_plugins:\n raise RuntimeError(\n f\"Plugin already exists: {name}! Check your plugin name\"\n )\n third_party_plugins.add(plugin)\n\n for module_info in pkgutil.iter_modules(self.search_path):\n if module_info.name.startswith(\"_\"):\n continue\n if (\n module_info.name in searched_plugins.keys()\n or module_info.name in previous_plugins\n or module_info.name in third_party_plugins\n ):\n raise RuntimeError(\n f\"Plugin already exists: {module_info.name}! Check your plugin name\"\n )\n module_spec = module_info.module_finder.find_spec(module_info.name, None)\n if not module_spec:\n continue\n module_path = module_spec.origin\n if not module_path:\n continue\n searched_plugins[module_info.name] = Path(module_path).resolve()\n\n self.searched_plugins = searched_plugins\n\n return third_party_plugins | set(self.searched_plugins.keys())\n\n def load_plugin(self, name) -> Optional[Plugin]:\n try:\n if name in self.plugins:\n module = importlib.import_module(name)\n elif name not in self.searched_plugins:\n raise RuntimeError(f\"Plugin not found: {name}! 
Check your plugin name\")\n else:\n module = importlib.import_module(\n self._path_to_module_name(self.searched_plugins[name])\n )\n\n logger.opt(colors=True).success(\n f'Succeeded to import \"<y>{escape_tag(name)}</y>\"'\n )\n return getattr(module, \"__plugin__\", None)\n except Exception as e:\n logger.opt(colors=True, exception=e).error(\n f'<r><bg #f8bbd0>Failed to import \"{escape_tag(name)}\"</bg #f8bbd0></r>'\n )\n\n def load_all_plugins(self) -> Set[Plugin]:\n return set(\n filter(None, (self.load_plugin(name) for name in self.list_plugins()))\n )\n\n\nclass PluginFinder(MetaPathFinder):\n def find_spec(\n self,\n fullname: str,\n path: Optional[Sequence[Union[bytes, str]]],\n target: Optional[ModuleType] = None,\n ):\n if _managers:\n index = -1\n module_spec = PathFinder.find_spec(fullname, path, target)\n if not module_spec:\n return\n module_origin = module_spec.origin\n if not module_origin:\n return\n module_path = Path(module_origin).resolve()\n\n while -index <= len(_managers):\n manager = _managers[index]\n\n if (\n fullname in manager.plugins\n or module_path in manager.searched_plugins.values()\n ):\n module_spec.loader = PluginLoader(manager, fullname, module_origin)\n return module_spec\n\n index -= 1\n return\n\n\nclass PluginLoader(SourceFileLoader):\n def __init__(self, manager: PluginManager, fullname: str, path) -> None:\n self.manager = manager\n self.loaded = False\n super().__init__(fullname, path)\n\n def create_module(self, spec) -> Optional[ModuleType]:\n if self.name in sys.modules:\n self.loaded = True\n return sys.modules[self.name]\n # return None to use default module creation\n return super().create_module(spec)\n\n def exec_module(self, module: ModuleType) -> None:\n if self.loaded:\n return\n\n plugin = _new_plugin(self.name, module, self.manager)\n parent_plugin = _current_plugin.get()\n if parent_plugin and _managers.index(parent_plugin.manager) < _managers.index(\n self.manager\n ):\n plugin.parent_plugin = parent_plugin\n parent_plugin.sub_plugins.add(plugin)\n\n _plugin_token = _current_plugin.set(plugin)\n\n setattr(module, \"__plugin__\", plugin)\n\n # try:\n # super().exec_module(module)\n # except Exception as e:\n # raise ImportError(\n # f\"Error when executing module {module_name} from {module.__file__}.\"\n # ) from e\n super().exec_module(module)\n\n _confirm_plugin(plugin)\n\n _current_plugin.reset(_plugin_token)\n return\n\n\nsys.meta_path.insert(0, PluginFinder())\n", "path": "nonebot/plugin/manager.py"}]}
| 3,365 | 296 |
gh_patches_debug_33043
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-702
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in CalibrationError metric docs
I just noticed typos in the CE metrics - the max calibration error and RMS calibration error labels should be switched, and there's a square root sign missing from the root mean squared error (whoops). That's my mistake, I'll submit a PR to fix.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchmetrics/classification/calibration_error.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, List, Optional
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.functional.classification.calibration_error import _ce_compute, _ce_update
20 from torchmetrics.metric import Metric
21 from torchmetrics.utilities.data import dim_zero_cat
22
23
24 class CalibrationError(Metric):
25 r"""
26
27 `Computes the Top-label Calibration Error`_
28 Three different norms are implemented, each corresponding to variations on the calibration error metric.
29
30 L1 norm (Expected Calibration Error)
31
32 .. math::
33 \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\|
34
35 Infinity norm (Maximum Calibration Error)
36
37 .. math::
38 \text{RMSCE} = \max_{i} (p_i - c_i)
39
40 L2 norm (Root Mean Square Calibration Error)
41
42 .. math::
43 \text{MCE} = \frac{1}{N}\sum_i^N (p_i - c_i)^2
44
45 Where :math:`p_i` is the top-1 prediction accuracy in bin i
46 and :math:`c_i` is the average confidence of predictions in bin i.
47
48 .. note::
49 L2-norm debiasing is not yet supported.
50
51 Args:
52 n_bins: Number of bins to use when computing probabilites and accuracies.
53 norm: Norm used to compare empirical and expected probability bins.
54 Defaults to "l1", or Expected Calibration Error.
55 debias: Applies debiasing term, only implemented for l2 norm. Defaults to True.
56 compute_on_step: Forward only calls ``update()`` and return None if this is set to False.
57 dist_sync_on_step: Synchronize metric state across processes at each ``forward()``
58 before returning the value at the step
59 process_group: Specify the process group on which synchronization is called.
60 """
61 DISTANCES = {"l1", "l2", "max"}
62 higher_is_better = False
63 confidences: List[Tensor]
64 accuracies: List[Tensor]
65
66 def __init__(
67 self,
68 n_bins: int = 15,
69 norm: str = "l1",
70 compute_on_step: bool = False,
71 dist_sync_on_step: bool = False,
72 process_group: Optional[Any] = None,
73 ):
74
75 super().__init__(
76 compute_on_step=compute_on_step,
77 dist_sync_on_step=dist_sync_on_step,
78 process_group=process_group,
79 dist_sync_fn=None,
80 )
81
82 if norm not in self.DISTANCES:
83 raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
84
85 if not isinstance(n_bins, int) or n_bins <= 0:
86 raise ValueError(f"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}")
87 self.n_bins = n_bins
88 self.register_buffer("bin_boundaries", torch.linspace(0, 1, n_bins + 1))
89 self.norm = norm
90
91 self.add_state("confidences", [], dist_reduce_fx="cat")
92 self.add_state("accuracies", [], dist_reduce_fx="cat")
93
94 def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore
95 """Computes top-level confidences and accuracies for the input probabilites and appends them to internal
96 state.
97
98 Args:
99 preds (Tensor): Model output probabilities.
100 target (Tensor): Ground-truth target class labels.
101 """
102 confidences, accuracies = _ce_update(preds, target)
103
104 self.confidences.append(confidences)
105 self.accuracies.append(accuracies)
106
107 def compute(self) -> Tensor:
108 """Computes calibration error across all confidences and accuracies.
109
110 Returns:
111 Tensor: Calibration error across previously collected examples.
112 """
113 confidences = dim_zero_cat(self.confidences)
114 accuracies = dim_zero_cat(self.accuracies)
115 return _ce_compute(confidences, accuracies, self.bin_boundaries, norm=self.norm)
116
```
Path: `torchmetrics/functional/classification/calibration_error.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import FloatTensor, Tensor
18
19 from torchmetrics.utilities.checks import _input_format_classification
20 from torchmetrics.utilities.enums import DataType
21
22
23 def _ce_compute(
24 confidences: FloatTensor,
25 accuracies: FloatTensor,
26 bin_boundaries: FloatTensor,
27 norm: str = "l1",
28 debias: bool = False,
29 ) -> Tensor:
30 """Computes the calibration error given the provided bin boundaries and norm.
31
32 Args:
33 confidences (FloatTensor): The confidence (i.e. predicted prob) of the top1 prediction.
34 accuracies (FloatTensor): 1.0 if the top-1 prediction was correct, 0.0 otherwise.
35 bin_boundaries (FloatTensor): Bin boundaries separating the linspace from 0 to 1.
36 norm (str, optional): Norm function to use when computing calibration error. Defaults to "l1".
37 debias (bool, optional): Apply debiasing to L2 norm computation as in
38 `Verified Uncertainty Calibration`_. Defaults to False.
39
40 Raises:
41 ValueError: If an unsupported norm function is provided.
42
43 Returns:
44 Tensor: Calibration error scalar.
45 """
46 if norm not in {"l1", "l2", "max"}:
47 raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
48
49 conf_bin = torch.zeros_like(bin_boundaries)
50 acc_bin = torch.zeros_like(bin_boundaries)
51 prop_bin = torch.zeros_like(bin_boundaries)
52 for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])):
53 # Calculated confidence and accuracy in each bin
54 in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
55 prop_in_bin = in_bin.float().mean()
56 if prop_in_bin.item() > 0:
57 acc_bin[i] = accuracies[in_bin].float().mean()
58 conf_bin[i] = confidences[in_bin].mean()
59 prop_bin[i] = prop_in_bin
60
61 if norm == "l1":
62 ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)
63 elif norm == "max":
64 ce = torch.max(torch.abs(acc_bin - conf_bin))
65 elif norm == "l2":
66 ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)
67 # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.
68 if debias:
69 # the order here (acc_bin - 1 ) vs (1 - acc_bin) is flipped from
70 # the equation in Verified Uncertainty Prediction (Kumar et al 2019)/
71 debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)
72 ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin
73 ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)
74 return ce
75
76
77 def _ce_update(preds: Tensor, target: Tensor) -> Tuple[FloatTensor, FloatTensor]:
78 """Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their
79 correctness.
80
81 Args:
82 preds (Tensor): Input softmaxed predictions.
83 target (Tensor): Labels.
84
85 Raises:
86 ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.
87
88 Returns:
89 Tuple[FloatTensor, FloatTensor]: [description]
90 """
91 _, _, mode = _input_format_classification(preds, target)
92
93 if mode == DataType.BINARY:
94 confidences, accuracies = preds, target
95 elif mode == DataType.MULTICLASS:
96 confidences, predictions = preds.max(dim=1)
97 accuracies = predictions.eq(target)
98 elif mode == DataType.MULTIDIM_MULTICLASS:
99 # reshape tensors
100 # for preds, move the class dimension to the final axis and flatten the rest
101 confidences, predictions = torch.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)
102 # for targets, just flatten the target
103 accuracies = predictions.eq(target.flatten())
104 else:
105 raise ValueError(
106 f"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}."
107 )
108 # must be cast to float for ddp allgather to work
109 return confidences.float(), accuracies.float()
110
111
112 def calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = "l1") -> Tensor:
113 r"""
114 `Computes the Top-label Calibration Error`_
115
116 Three different norms are implemented, each corresponding to variations on the calibration error metric.
117
118 L1 norm (Expected Calibration Error)
119
120 .. math::
121 \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\|
122
123 Infinity norm (Maximum Calibration Error)
124
125 .. math::
126 \text{RMSCE} = \max_{i} (p_i - c_i)
127
128 L2 norm (Root Mean Square Calibration Error)
129
130 .. math::
131 \text{MCE} = \frac{1}{N}\sum_i^N (p_i - c_i)^2
132
133 Where :math:`p_i` is the top-1 prediction accuracy in
134 bin i and :math:`c_i` is the average confidence of predictions in bin i.
135
136 .. note:
137 L2-norm debiasing is not yet supported.
138
139 Args:
140 preds (Tensor): Model output probabilities.
141 target (Tensor): Ground-truth target class labels.
142 n_bins (int, optional): Number of bins to use when computing t. Defaults to 15.
143 norm (str, optional): Norm used to compare empirical and expected probability bins.
144 Defaults to "l1", or Expected Calibration Error.
145 """
146 if norm not in ("l1", "l2", "max"):
147 raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
148
149 if not isinstance(n_bins, int) or n_bins <= 0:
150 raise ValueError(f"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}")
151
152 confidences, accuracies = _ce_update(preds, target)
153
154 bin_boundaries = torch.linspace(0, 1, n_bins + 1, dtype=torch.float, device=preds.device)
155
156 return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchmetrics/classification/calibration_error.py b/torchmetrics/classification/calibration_error.py
--- a/torchmetrics/classification/calibration_error.py
+++ b/torchmetrics/classification/calibration_error.py
@@ -30,20 +30,21 @@
L1 norm (Expected Calibration Error)
.. math::
- \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\|
+ \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|
Infinity norm (Maximum Calibration Error)
.. math::
- \text{RMSCE} = \max_{i} (p_i - c_i)
+ \text{MCE} = \max_{i} (p_i - c_i)
L2 norm (Root Mean Square Calibration Error)
.. math::
- \text{MCE} = \frac{1}{N}\sum_i^N (p_i - c_i)^2
+ \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}
- Where :math:`p_i` is the top-1 prediction accuracy in bin i
- and :math:`c_i` is the average confidence of predictions in bin i.
+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,
+ :math:`c_i` is the average confidence of predictions in bin :math:`i`, and
+ :math:`b_i` is the fraction of data points in bin :math:`i`.
.. note::
L2-norm debiasing is not yet supported.
diff --git a/torchmetrics/functional/classification/calibration_error.py b/torchmetrics/functional/classification/calibration_error.py
--- a/torchmetrics/functional/classification/calibration_error.py
+++ b/torchmetrics/functional/classification/calibration_error.py
@@ -118,20 +118,21 @@
L1 norm (Expected Calibration Error)
.. math::
- \text{ECE} = \frac{1}{N}\sum_i^N \|(p_i - c_i)\|
+ \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|
Infinity norm (Maximum Calibration Error)
.. math::
- \text{RMSCE} = \max_{i} (p_i - c_i)
+ \text{MCE} = \max_{i} (p_i - c_i)
L2 norm (Root Mean Square Calibration Error)
.. math::
- \text{MCE} = \frac{1}{N}\sum_i^N (p_i - c_i)^2
+ \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}
- Where :math:`p_i` is the top-1 prediction accuracy in
- bin i and :math:`c_i` is the average confidence of predictions in bin i.
+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,
+ :math:`c_i` is the average confidence of predictions in bin :math:`i`, and
+ :math:`b_i` is the fraction of data points in bin :math:`i`.
.. note:
L2-norm debiasing is not yet supported.
|
{"golden_diff": "diff --git a/torchmetrics/classification/calibration_error.py b/torchmetrics/classification/calibration_error.py\n--- a/torchmetrics/classification/calibration_error.py\n+++ b/torchmetrics/classification/calibration_error.py\n@@ -30,20 +30,21 @@\n L1 norm (Expected Calibration Error)\n \n .. math::\n- \\text{ECE} = \\frac{1}{N}\\sum_i^N \\|(p_i - c_i)\\|\n+ \\text{ECE} = \\sum_i^N b_i \\|(p_i - c_i)\\|\n \n Infinity norm (Maximum Calibration Error)\n \n .. math::\n- \\text{RMSCE} = \\max_{i} (p_i - c_i)\n+ \\text{MCE} = \\max_{i} (p_i - c_i)\n \n L2 norm (Root Mean Square Calibration Error)\n \n .. math::\n- \\text{MCE} = \\frac{1}{N}\\sum_i^N (p_i - c_i)^2\n+ \\text{RMSCE} = \\sqrt{\\sum_i^N b_i(p_i - c_i)^2}\n \n- Where :math:`p_i` is the top-1 prediction accuracy in bin i\n- and :math:`c_i` is the average confidence of predictions in bin i.\n+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,\n+ :math:`c_i` is the average confidence of predictions in bin :math:`i`, and\n+ :math:`b_i` is the fraction of data points in bin :math:`i`.\n \n .. note::\n L2-norm debiasing is not yet supported.\ndiff --git a/torchmetrics/functional/classification/calibration_error.py b/torchmetrics/functional/classification/calibration_error.py\n--- a/torchmetrics/functional/classification/calibration_error.py\n+++ b/torchmetrics/functional/classification/calibration_error.py\n@@ -118,20 +118,21 @@\n L1 norm (Expected Calibration Error)\n \n .. math::\n- \\text{ECE} = \\frac{1}{N}\\sum_i^N \\|(p_i - c_i)\\|\n+ \\text{ECE} = \\sum_i^N b_i \\|(p_i - c_i)\\|\n \n Infinity norm (Maximum Calibration Error)\n \n .. math::\n- \\text{RMSCE} = \\max_{i} (p_i - c_i)\n+ \\text{MCE} = \\max_{i} (p_i - c_i)\n \n L2 norm (Root Mean Square Calibration Error)\n \n .. math::\n- \\text{MCE} = \\frac{1}{N}\\sum_i^N (p_i - c_i)^2\n+ \\text{RMSCE} = \\sqrt{\\sum_i^N b_i(p_i - c_i)^2}\n \n- Where :math:`p_i` is the top-1 prediction accuracy in\n- bin i and :math:`c_i` is the average confidence of predictions in bin i.\n+ Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,\n+ :math:`c_i` is the average confidence of predictions in bin :math:`i`, and\n+ :math:`b_i` is the fraction of data points in bin :math:`i`.\n \n .. note:\n L2-norm debiasing is not yet supported.\n", "issue": "Typo in CalibrationError metric docs\nI just noticed typos in the CE metrics - the max calibration error and RMS calibration error labels should be switched, and there's a square root sign missing from the root mean squared error (whoops). That's my mistake, I'll submit a PR to fix. 
\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.calibration_error import _ce_compute, _ce_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.data import dim_zero_cat\n\n\nclass CalibrationError(Metric):\n r\"\"\"\n\n `Computes the Top-label Calibration Error`_\n Three different norms are implemented, each corresponding to variations on the calibration error metric.\n\n L1 norm (Expected Calibration Error)\n\n .. math::\n \\text{ECE} = \\frac{1}{N}\\sum_i^N \\|(p_i - c_i)\\|\n\n Infinity norm (Maximum Calibration Error)\n\n .. math::\n \\text{RMSCE} = \\max_{i} (p_i - c_i)\n\n L2 norm (Root Mean Square Calibration Error)\n\n .. math::\n \\text{MCE} = \\frac{1}{N}\\sum_i^N (p_i - c_i)^2\n\n Where :math:`p_i` is the top-1 prediction accuracy in bin i\n and :math:`c_i` is the average confidence of predictions in bin i.\n\n .. note::\n L2-norm debiasing is not yet supported.\n\n Args:\n n_bins: Number of bins to use when computing probabilites and accuracies.\n norm: Norm used to compare empirical and expected probability bins.\n Defaults to \"l1\", or Expected Calibration Error.\n debias: Applies debiasing term, only implemented for l2 norm. Defaults to True.\n compute_on_step: Forward only calls ``update()`` and return None if this is set to False.\n dist_sync_on_step: Synchronize metric state across processes at each ``forward()``\n before returning the value at the step\n process_group: Specify the process group on which synchronization is called.\n \"\"\"\n DISTANCES = {\"l1\", \"l2\", \"max\"}\n higher_is_better = False\n confidences: List[Tensor]\n accuracies: List[Tensor]\n\n def __init__(\n self,\n n_bins: int = 15,\n norm: str = \"l1\",\n compute_on_step: bool = False,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=None,\n )\n\n if norm not in self.DISTANCES:\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. 
\")\n\n if not isinstance(n_bins, int) or n_bins <= 0:\n raise ValueError(f\"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}\")\n self.n_bins = n_bins\n self.register_buffer(\"bin_boundaries\", torch.linspace(0, 1, n_bins + 1))\n self.norm = norm\n\n self.add_state(\"confidences\", [], dist_reduce_fx=\"cat\")\n self.add_state(\"accuracies\", [], dist_reduce_fx=\"cat\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Computes top-level confidences and accuracies for the input probabilites and appends them to internal\n state.\n\n Args:\n preds (Tensor): Model output probabilities.\n target (Tensor): Ground-truth target class labels.\n \"\"\"\n confidences, accuracies = _ce_update(preds, target)\n\n self.confidences.append(confidences)\n self.accuracies.append(accuracies)\n\n def compute(self) -> Tensor:\n \"\"\"Computes calibration error across all confidences and accuracies.\n\n Returns:\n Tensor: Calibration error across previously collected examples.\n \"\"\"\n confidences = dim_zero_cat(self.confidences)\n accuracies = dim_zero_cat(self.accuracies)\n return _ce_compute(confidences, accuracies, self.bin_boundaries, norm=self.norm)\n", "path": "torchmetrics/classification/calibration_error.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import FloatTensor, Tensor\n\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import DataType\n\n\ndef _ce_compute(\n confidences: FloatTensor,\n accuracies: FloatTensor,\n bin_boundaries: FloatTensor,\n norm: str = \"l1\",\n debias: bool = False,\n) -> Tensor:\n \"\"\"Computes the calibration error given the provided bin boundaries and norm.\n\n Args:\n confidences (FloatTensor): The confidence (i.e. predicted prob) of the top1 prediction.\n accuracies (FloatTensor): 1.0 if the top-1 prediction was correct, 0.0 otherwise.\n bin_boundaries (FloatTensor): Bin boundaries separating the linspace from 0 to 1.\n norm (str, optional): Norm function to use when computing calibration error. Defaults to \"l1\".\n debias (bool, optional): Apply debiasing to L2 norm computation as in\n `Verified Uncertainty Calibration`_. Defaults to False.\n\n Raises:\n ValueError: If an unsupported norm function is provided.\n\n Returns:\n Tensor: Calibration error scalar.\n \"\"\"\n if norm not in {\"l1\", \"l2\", \"max\"}:\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. 
\")\n\n conf_bin = torch.zeros_like(bin_boundaries)\n acc_bin = torch.zeros_like(bin_boundaries)\n prop_bin = torch.zeros_like(bin_boundaries)\n for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])):\n # Calculated confidence and accuracy in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n acc_bin[i] = accuracies[in_bin].float().mean()\n conf_bin[i] = confidences[in_bin].mean()\n prop_bin[i] = prop_in_bin\n\n if norm == \"l1\":\n ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)\n elif norm == \"max\":\n ce = torch.max(torch.abs(acc_bin - conf_bin))\n elif norm == \"l2\":\n ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)\n # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.\n if debias:\n # the order here (acc_bin - 1 ) vs (1 - acc_bin) is flipped from\n # the equation in Verified Uncertainty Prediction (Kumar et al 2019)/\n debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)\n ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin\n ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)\n return ce\n\n\ndef _ce_update(preds: Tensor, target: Tensor) -> Tuple[FloatTensor, FloatTensor]:\n \"\"\"Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their\n correctness.\n\n Args:\n preds (Tensor): Input softmaxed predictions.\n target (Tensor): Labels.\n\n Raises:\n ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.\n\n Returns:\n Tuple[FloatTensor, FloatTensor]: [description]\n \"\"\"\n _, _, mode = _input_format_classification(preds, target)\n\n if mode == DataType.BINARY:\n confidences, accuracies = preds, target\n elif mode == DataType.MULTICLASS:\n confidences, predictions = preds.max(dim=1)\n accuracies = predictions.eq(target)\n elif mode == DataType.MULTIDIM_MULTICLASS:\n # reshape tensors\n # for preds, move the class dimension to the final axis and flatten the rest\n confidences, predictions = torch.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)\n # for targets, just flatten the target\n accuracies = predictions.eq(target.flatten())\n else:\n raise ValueError(\n f\"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}.\"\n )\n # must be cast to float for ddp allgather to work\n return confidences.float(), accuracies.float()\n\n\ndef calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = \"l1\") -> Tensor:\n r\"\"\"\n `Computes the Top-label Calibration Error`_\n\n Three different norms are implemented, each corresponding to variations on the calibration error metric.\n\n L1 norm (Expected Calibration Error)\n\n .. math::\n \\text{ECE} = \\frac{1}{N}\\sum_i^N \\|(p_i - c_i)\\|\n\n Infinity norm (Maximum Calibration Error)\n\n .. math::\n \\text{RMSCE} = \\max_{i} (p_i - c_i)\n\n L2 norm (Root Mean Square Calibration Error)\n\n .. math::\n \\text{MCE} = \\frac{1}{N}\\sum_i^N (p_i - c_i)^2\n\n Where :math:`p_i` is the top-1 prediction accuracy in\n bin i and :math:`c_i` is the average confidence of predictions in bin i.\n\n .. 
note:\n L2-norm debiasing is not yet supported.\n\n Args:\n preds (Tensor): Model output probabilities.\n target (Tensor): Ground-truth target class labels.\n n_bins (int, optional): Number of bins to use when computing t. Defaults to 15.\n norm (str, optional): Norm used to compare empirical and expected probability bins.\n Defaults to \"l1\", or Expected Calibration Error.\n \"\"\"\n if norm not in (\"l1\", \"l2\", \"max\"):\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. \")\n\n if not isinstance(n_bins, int) or n_bins <= 0:\n raise ValueError(f\"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}\")\n\n confidences, accuracies = _ce_update(preds, target)\n\n bin_boundaries = torch.linspace(0, 1, n_bins + 1, dtype=torch.float, device=preds.device)\n\n return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm)\n", "path": "torchmetrics/functional/classification/calibration_error.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, List, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.classification.calibration_error import _ce_compute, _ce_update\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.data import dim_zero_cat\n\n\nclass CalibrationError(Metric):\n r\"\"\"\n\n `Computes the Top-label Calibration Error`_\n Three different norms are implemented, each corresponding to variations on the calibration error metric.\n\n L1 norm (Expected Calibration Error)\n\n .. math::\n \\text{ECE} = \\sum_i^N b_i \\|(p_i - c_i)\\|\n\n Infinity norm (Maximum Calibration Error)\n\n .. math::\n \\text{MCE} = \\max_{i} (p_i - c_i)\n\n L2 norm (Root Mean Square Calibration Error)\n\n .. math::\n \\text{RMSCE} = \\sqrt{\\sum_i^N b_i(p_i - c_i)^2}\n\n Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,\n :math:`c_i` is the average confidence of predictions in bin :math:`i`, and\n :math:`b_i` is the fraction of data points in bin :math:`i`.\n\n .. note::\n L2-norm debiasing is not yet supported.\n\n Args:\n n_bins: Number of bins to use when computing probabilites and accuracies.\n norm: Norm used to compare empirical and expected probability bins.\n Defaults to \"l1\", or Expected Calibration Error.\n debias: Applies debiasing term, only implemented for l2 norm. 
Defaults to True.\n compute_on_step: Forward only calls ``update()`` and return None if this is set to False.\n dist_sync_on_step: Synchronize metric state across processes at each ``forward()``\n before returning the value at the step\n process_group: Specify the process group on which synchronization is called.\n \"\"\"\n DISTANCES = {\"l1\", \"l2\", \"max\"}\n higher_is_better = False\n confidences: List[Tensor]\n accuracies: List[Tensor]\n\n def __init__(\n self,\n n_bins: int = 15,\n norm: str = \"l1\",\n compute_on_step: bool = False,\n dist_sync_on_step: bool = False,\n process_group: Optional[Any] = None,\n ):\n\n super().__init__(\n compute_on_step=compute_on_step,\n dist_sync_on_step=dist_sync_on_step,\n process_group=process_group,\n dist_sync_fn=None,\n )\n\n if norm not in self.DISTANCES:\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. \")\n\n if not isinstance(n_bins, int) or n_bins <= 0:\n raise ValueError(f\"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}\")\n self.n_bins = n_bins\n self.register_buffer(\"bin_boundaries\", torch.linspace(0, 1, n_bins + 1))\n self.norm = norm\n\n self.add_state(\"confidences\", [], dist_reduce_fx=\"cat\")\n self.add_state(\"accuracies\", [], dist_reduce_fx=\"cat\")\n\n def update(self, preds: Tensor, target: Tensor) -> None: # type: ignore\n \"\"\"Computes top-level confidences and accuracies for the input probabilites and appends them to internal\n state.\n\n Args:\n preds (Tensor): Model output probabilities.\n target (Tensor): Ground-truth target class labels.\n \"\"\"\n confidences, accuracies = _ce_update(preds, target)\n\n self.confidences.append(confidences)\n self.accuracies.append(accuracies)\n\n def compute(self) -> Tensor:\n \"\"\"Computes calibration error across all confidences and accuracies.\n\n Returns:\n Tensor: Calibration error across previously collected examples.\n \"\"\"\n confidences = dim_zero_cat(self.confidences)\n accuracies = dim_zero_cat(self.accuracies)\n return _ce_compute(confidences, accuracies, self.bin_boundaries, norm=self.norm)\n", "path": "torchmetrics/classification/calibration_error.py"}, {"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import FloatTensor, Tensor\n\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import DataType\n\n\ndef _ce_compute(\n confidences: FloatTensor,\n accuracies: FloatTensor,\n bin_boundaries: FloatTensor,\n norm: str = \"l1\",\n debias: bool = False,\n) -> Tensor:\n \"\"\"Computes the calibration error given the provided bin boundaries and norm.\n\n Args:\n confidences (FloatTensor): The confidence (i.e. 
predicted prob) of the top1 prediction.\n accuracies (FloatTensor): 1.0 if the top-1 prediction was correct, 0.0 otherwise.\n bin_boundaries (FloatTensor): Bin boundaries separating the linspace from 0 to 1.\n norm (str, optional): Norm function to use when computing calibration error. Defaults to \"l1\".\n debias (bool, optional): Apply debiasing to L2 norm computation as in\n `Verified Uncertainty Calibration`_. Defaults to False.\n\n Raises:\n ValueError: If an unsupported norm function is provided.\n\n Returns:\n Tensor: Calibration error scalar.\n \"\"\"\n if norm not in {\"l1\", \"l2\", \"max\"}:\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. \")\n\n conf_bin = torch.zeros_like(bin_boundaries)\n acc_bin = torch.zeros_like(bin_boundaries)\n prop_bin = torch.zeros_like(bin_boundaries)\n for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])):\n # Calculated confidence and accuracy in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n acc_bin[i] = accuracies[in_bin].float().mean()\n conf_bin[i] = confidences[in_bin].mean()\n prop_bin[i] = prop_in_bin\n\n if norm == \"l1\":\n ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)\n elif norm == \"max\":\n ce = torch.max(torch.abs(acc_bin - conf_bin))\n elif norm == \"l2\":\n ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)\n # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.\n if debias:\n # the order here (acc_bin - 1 ) vs (1 - acc_bin) is flipped from\n # the equation in Verified Uncertainty Prediction (Kumar et al 2019)/\n debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)\n ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin\n ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)\n return ce\n\n\ndef _ce_update(preds: Tensor, target: Tensor) -> Tuple[FloatTensor, FloatTensor]:\n \"\"\"Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their\n correctness.\n\n Args:\n preds (Tensor): Input softmaxed predictions.\n target (Tensor): Labels.\n\n Raises:\n ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.\n\n Returns:\n Tuple[FloatTensor, FloatTensor]: [description]\n \"\"\"\n _, _, mode = _input_format_classification(preds, target)\n\n if mode == DataType.BINARY:\n confidences, accuracies = preds, target\n elif mode == DataType.MULTICLASS:\n confidences, predictions = preds.max(dim=1)\n accuracies = predictions.eq(target)\n elif mode == DataType.MULTIDIM_MULTICLASS:\n # reshape tensors\n # for preds, move the class dimension to the final axis and flatten the rest\n confidences, predictions = torch.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)\n # for targets, just flatten the target\n accuracies = predictions.eq(target.flatten())\n else:\n raise ValueError(\n f\"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}.\"\n )\n # must be cast to float for ddp allgather to work\n return confidences.float(), accuracies.float()\n\n\ndef calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = \"l1\") -> Tensor:\n r\"\"\"\n `Computes the Top-label Calibration Error`_\n\n Three different norms are implemented, each corresponding to variations on the 
calibration error metric.\n\n L1 norm (Expected Calibration Error)\n\n .. math::\n \\text{ECE} = \\sum_i^N b_i \\|(p_i - c_i)\\|\n\n Infinity norm (Maximum Calibration Error)\n\n .. math::\n \\text{MCE} = \\max_{i} (p_i - c_i)\n\n L2 norm (Root Mean Square Calibration Error)\n\n .. math::\n \\text{RMSCE} = \\sqrt{\\sum_i^N b_i(p_i - c_i)^2}\n\n Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,\n :math:`c_i` is the average confidence of predictions in bin :math:`i`, and\n :math:`b_i` is the fraction of data points in bin :math:`i`.\n\n .. note:\n L2-norm debiasing is not yet supported.\n\n Args:\n preds (Tensor): Model output probabilities.\n target (Tensor): Ground-truth target class labels.\n n_bins (int, optional): Number of bins to use when computing t. Defaults to 15.\n norm (str, optional): Norm used to compare empirical and expected probability bins.\n Defaults to \"l1\", or Expected Calibration Error.\n \"\"\"\n if norm not in (\"l1\", \"l2\", \"max\"):\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. \")\n\n if not isinstance(n_bins, int) or n_bins <= 0:\n raise ValueError(f\"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}\")\n\n confidences, accuracies = _ce_update(preds, target)\n\n bin_boundaries = torch.linspace(0, 1, n_bins + 1, dtype=torch.float, device=preds.device)\n\n return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm)\n", "path": "torchmetrics/functional/classification/calibration_error.py"}]}
| 3,572 | 784 |
gh_patches_debug_28041
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-5393
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Trigger build on default branch when saving a project
Currently when we save a project form (admin panel),
it triggers a build to latest
https://github.com/rtfd/readthedocs.org/blob/9874b866fea9696fa8495d7b3699f1bf1a3f923d/readthedocs/projects/forms.py#L69-L74
https://github.com/rtfd/readthedocs.org/blob/9874b866fea9696fa8495d7b3699f1bf1a3f923d/readthedocs/core/utils/__init__.py#L97-L98
Even if latest is deactivated, we should trigger a build to the default branch instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/core/utils/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Common utilty functions."""
4
5 from __future__ import absolute_import
6
7 import errno
8 import getpass
9 import logging
10 import os
11 import re
12
13 from django.conf import settings
14 from django.utils.functional import keep_lazy
15 from django.utils.safestring import SafeText, mark_safe
16 from django.utils.text import slugify as slugify_base
17 from celery import group, chord
18
19 from readthedocs.builds.constants import LATEST, BUILD_STATE_TRIGGERED
20 from readthedocs.doc_builder.constants import DOCKER_LIMITS
21
22 log = logging.getLogger(__name__)
23
24 SYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())
25
26
27 def broadcast(type, task, args, kwargs=None, callback=None): # pylint: disable=redefined-builtin
28 """
29 Run a broadcast across our servers.
30
31 Returns a task group that can be checked for results.
32
33 `callback` should be a task signature that will be run once,
34 after all of the broadcast tasks have finished running.
35 """
36 assert type in ['web', 'app', 'build']
37 if kwargs is None:
38 kwargs = {}
39 default_queue = getattr(settings, 'CELERY_DEFAULT_QUEUE', 'celery')
40 if type in ['web', 'app']:
41 servers = getattr(settings, 'MULTIPLE_APP_SERVERS', [default_queue])
42 elif type in ['build']:
43 servers = getattr(settings, 'MULTIPLE_BUILD_SERVERS', [default_queue])
44
45 tasks = []
46 for server in servers:
47 task_sig = task.s(*args, **kwargs).set(queue=server)
48 tasks.append(task_sig)
49 if callback:
50 task_promise = chord(tasks, callback).apply_async()
51 else:
52 # Celery's Group class does some special handling when an iterable with
53 # len() == 1 is passed in. This will be hit if there is only one server
54 # defined in the above queue lists
55 if len(tasks) > 1:
56 task_promise = group(*tasks).apply_async()
57 else:
58 task_promise = group(tasks).apply_async()
59 return task_promise
60
61
62 def prepare_build(
63 project,
64 version=None,
65 record=True,
66 force=False,
67 immutable=True,
68 ):
69 """
70 Prepare a build in a Celery task for project and version.
71
72 If project has a ``build_queue``, execute the task on this build queue. If
73 project has ``skip=True``, the build is not triggered.
74
75 :param project: project's documentation to be built
76 :param version: version of the project to be built. Default: ``latest``
77 :param record: whether or not record the build in a new Build object
78 :param force: build the HTML documentation even if the files haven't changed
79 :param immutable: whether or not create an immutable Celery signature
80 :returns: Celery signature of update_docs_task and Build instance
81 :rtype: tuple
82 """
83 # Avoid circular import
84 from readthedocs.builds.models import Build
85 from readthedocs.projects.models import Project
86 from readthedocs.projects.tasks import update_docs_task
87
88 build = None
89
90 if not Project.objects.is_active(project):
91 log.warning(
92 'Build not triggered because Project is not active: project=%s',
93 project.slug,
94 )
95 return (None, None)
96
97 if not version:
98 version = project.versions.get(slug=LATEST)
99
100 kwargs = {
101 'version_pk': version.pk,
102 'record': record,
103 'force': force,
104 }
105
106 if record:
107 build = Build.objects.create(
108 project=project,
109 version=version,
110 type='html',
111 state=BUILD_STATE_TRIGGERED,
112 success=True,
113 )
114 kwargs['build_pk'] = build.pk
115
116 options = {}
117 if project.build_queue:
118 options['queue'] = project.build_queue
119
120 # Set per-task time limit
121 time_limit = DOCKER_LIMITS['time']
122 try:
123 if project.container_time_limit:
124 time_limit = int(project.container_time_limit)
125 except ValueError:
126 log.warning('Invalid time_limit for project: %s', project.slug)
127
128 # Add 20% overhead to task, to ensure the build can timeout and the task
129 # will cleanly finish.
130 options['soft_time_limit'] = time_limit
131 options['time_limit'] = int(time_limit * 1.2)
132
133 return (
134 update_docs_task.signature(
135 args=(project.pk,),
136 kwargs=kwargs,
137 options=options,
138 immutable=True,
139 ),
140 build,
141 )
142
143
144 def trigger_build(project, version=None, record=True, force=False):
145 """
146 Trigger a Build.
147
148 Helper that calls ``prepare_build`` and just effectively trigger the Celery
149 task to be executed by a worker.
150
151 :param project: project's documentation to be built
152 :param version: version of the project to be built. Default: ``latest``
153 :param record: whether or not record the build in a new Build object
154 :param force: build the HTML documentation even if the files haven't changed
155 :returns: Celery AsyncResult promise and Build instance
156 :rtype: tuple
157 """
158 update_docs_task, build = prepare_build(
159 project,
160 version,
161 record,
162 force,
163 immutable=True,
164 )
165
166 if (update_docs_task, build) == (None, None):
167 # Build was skipped
168 return (None, None)
169
170 return (update_docs_task.apply_async(), build)
171
172
173 def send_email(
174 recipient, subject, template, template_html, context=None, request=None,
175 from_email=None, **kwargs
176 ): # pylint: disable=unused-argument
177 """
178 Alter context passed in and call email send task.
179
180 .. seealso::
181
182 Task :py:func:`readthedocs.core.tasks.send_email_task`
183 Task that handles templating and sending email message
184 """
185 from ..tasks import send_email_task
186
187 if context is None:
188 context = {}
189 context['uri'] = '{scheme}://{host}'.format(
190 scheme='https',
191 host=settings.PRODUCTION_DOMAIN,
192 )
193 send_email_task.delay(
194 recipient=recipient, subject=subject, template=template,
195 template_html=template_html, context=context, from_email=from_email,
196 **kwargs
197 )
198
199
200 @keep_lazy(str, SafeText)
201 def slugify(value, *args, **kwargs):
202 """
203 Add a DNS safe option to slugify.
204
205 :param dns_safe: Remove underscores from slug as well
206 """
207 dns_safe = kwargs.pop('dns_safe', True)
208 value = slugify_base(value, *args, **kwargs)
209 if dns_safe:
210 value = mark_safe(re.sub('[-_]+', '-', value))
211 return value
212
213
214 def safe_makedirs(directory_name):
215 """
216 Safely create a directory.
217
218 Makedirs has an issue where it has a race condition around checking for a
219 directory and then creating it. This catches the exception in the case where
220 the dir already exists.
221 """
222 try:
223 os.makedirs(directory_name)
224 except OSError as e:
225 if e.errno != errno.EEXIST: # 17, FileExistsError
226 raise
227
228
229 def safe_unlink(path):
230 """
231 Unlink ``path`` symlink using ``os.unlink``.
232
233 This helper handles the exception ``FileNotFoundError`` to avoid logging in
234 cases where the symlink does not exist already and there is nothing to
235 unlink.
236
237 :param path: symlink path to unlink
238 :type path: str
239 """
240 try:
241 os.unlink(path)
242 except FileNotFoundError:
243 log.warning('Unlink failed. Path %s does not exists', path)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/core/utils/__init__.py b/readthedocs/core/utils/__init__.py
--- a/readthedocs/core/utils/__init__.py
+++ b/readthedocs/core/utils/__init__.py
@@ -16,7 +16,7 @@
from django.utils.text import slugify as slugify_base
from celery import group, chord
-from readthedocs.builds.constants import LATEST, BUILD_STATE_TRIGGERED
+from readthedocs.builds.constants import BUILD_STATE_TRIGGERED
from readthedocs.doc_builder.constants import DOCKER_LIMITS
log = logging.getLogger(__name__)
@@ -73,7 +73,7 @@
project has ``skip=True``, the build is not triggered.
:param project: project's documentation to be built
- :param version: version of the project to be built. Default: ``latest``
+ :param version: version of the project to be built. Default: ``project.get_default_version()``
:param record: whether or not record the build in a new Build object
:param force: build the HTML documentation even if the files haven't changed
:param immutable: whether or not create an immutable Celery signature
@@ -95,7 +95,8 @@
return (None, None)
if not version:
- version = project.versions.get(slug=LATEST)
+ default_version = project.get_default_version()
+ version = project.versions.get(slug=default_version)
kwargs = {
'version_pk': version.pk,
|
{"golden_diff": "diff --git a/readthedocs/core/utils/__init__.py b/readthedocs/core/utils/__init__.py\n--- a/readthedocs/core/utils/__init__.py\n+++ b/readthedocs/core/utils/__init__.py\n@@ -16,7 +16,7 @@\n from django.utils.text import slugify as slugify_base\n from celery import group, chord\n \n-from readthedocs.builds.constants import LATEST, BUILD_STATE_TRIGGERED\n+from readthedocs.builds.constants import BUILD_STATE_TRIGGERED\n from readthedocs.doc_builder.constants import DOCKER_LIMITS\n \n log = logging.getLogger(__name__)\n@@ -73,7 +73,7 @@\n project has ``skip=True``, the build is not triggered.\n \n :param project: project's documentation to be built\n- :param version: version of the project to be built. Default: ``latest``\n+ :param version: version of the project to be built. Default: ``project.get_default_version()``\n :param record: whether or not record the build in a new Build object\n :param force: build the HTML documentation even if the files haven't changed\n :param immutable: whether or not create an immutable Celery signature\n@@ -95,7 +95,8 @@\n return (None, None)\n \n if not version:\n- version = project.versions.get(slug=LATEST)\n+ default_version = project.get_default_version()\n+ version = project.versions.get(slug=default_version)\n \n kwargs = {\n 'version_pk': version.pk,\n", "issue": "Trigger build on default branch when saving a project\nCurrently when we save a project form (admin panel),\r\nit triggers a build to latest\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/9874b866fea9696fa8495d7b3699f1bf1a3f923d/readthedocs/projects/forms.py#L69-L74\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/9874b866fea9696fa8495d7b3699f1bf1a3f923d/readthedocs/core/utils/__init__.py#L97-L98\r\n\r\nEven if latest is deactivated, we should trigger a build to the default branch instead.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Common utilty functions.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport errno\nimport getpass\nimport logging\nimport os\nimport re\n\nfrom django.conf import settings\nfrom django.utils.functional import keep_lazy\nfrom django.utils.safestring import SafeText, mark_safe\nfrom django.utils.text import slugify as slugify_base\nfrom celery import group, chord\n\nfrom readthedocs.builds.constants import LATEST, BUILD_STATE_TRIGGERED\nfrom readthedocs.doc_builder.constants import DOCKER_LIMITS\n\nlog = logging.getLogger(__name__)\n\nSYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())\n\n\ndef broadcast(type, task, args, kwargs=None, callback=None): # pylint: disable=redefined-builtin\n \"\"\"\n Run a broadcast across our servers.\n\n Returns a task group that can be checked for results.\n\n `callback` should be a task signature that will be run once,\n after all of the broadcast tasks have finished running.\n \"\"\"\n assert type in ['web', 'app', 'build']\n if kwargs is None:\n kwargs = {}\n default_queue = getattr(settings, 'CELERY_DEFAULT_QUEUE', 'celery')\n if type in ['web', 'app']:\n servers = getattr(settings, 'MULTIPLE_APP_SERVERS', [default_queue])\n elif type in ['build']:\n servers = getattr(settings, 'MULTIPLE_BUILD_SERVERS', [default_queue])\n\n tasks = []\n for server in servers:\n task_sig = task.s(*args, **kwargs).set(queue=server)\n tasks.append(task_sig)\n if callback:\n task_promise = chord(tasks, callback).apply_async()\n else:\n # Celery's Group class does some special handling when an iterable with\n # len() == 1 is passed in. 
This will be hit if there is only one server\n # defined in the above queue lists\n if len(tasks) > 1:\n task_promise = group(*tasks).apply_async()\n else:\n task_promise = group(tasks).apply_async()\n return task_promise\n\n\ndef prepare_build(\n project,\n version=None,\n record=True,\n force=False,\n immutable=True,\n):\n \"\"\"\n Prepare a build in a Celery task for project and version.\n\n If project has a ``build_queue``, execute the task on this build queue. If\n project has ``skip=True``, the build is not triggered.\n\n :param project: project's documentation to be built\n :param version: version of the project to be built. Default: ``latest``\n :param record: whether or not record the build in a new Build object\n :param force: build the HTML documentation even if the files haven't changed\n :param immutable: whether or not create an immutable Celery signature\n :returns: Celery signature of update_docs_task and Build instance\n :rtype: tuple\n \"\"\"\n # Avoid circular import\n from readthedocs.builds.models import Build\n from readthedocs.projects.models import Project\n from readthedocs.projects.tasks import update_docs_task\n\n build = None\n\n if not Project.objects.is_active(project):\n log.warning(\n 'Build not triggered because Project is not active: project=%s',\n project.slug,\n )\n return (None, None)\n\n if not version:\n version = project.versions.get(slug=LATEST)\n\n kwargs = {\n 'version_pk': version.pk,\n 'record': record,\n 'force': force,\n }\n\n if record:\n build = Build.objects.create(\n project=project,\n version=version,\n type='html',\n state=BUILD_STATE_TRIGGERED,\n success=True,\n )\n kwargs['build_pk'] = build.pk\n\n options = {}\n if project.build_queue:\n options['queue'] = project.build_queue\n\n # Set per-task time limit\n time_limit = DOCKER_LIMITS['time']\n try:\n if project.container_time_limit:\n time_limit = int(project.container_time_limit)\n except ValueError:\n log.warning('Invalid time_limit for project: %s', project.slug)\n\n # Add 20% overhead to task, to ensure the build can timeout and the task\n # will cleanly finish.\n options['soft_time_limit'] = time_limit\n options['time_limit'] = int(time_limit * 1.2)\n\n return (\n update_docs_task.signature(\n args=(project.pk,),\n kwargs=kwargs,\n options=options,\n immutable=True,\n ),\n build,\n )\n\n\ndef trigger_build(project, version=None, record=True, force=False):\n \"\"\"\n Trigger a Build.\n\n Helper that calls ``prepare_build`` and just effectively trigger the Celery\n task to be executed by a worker.\n\n :param project: project's documentation to be built\n :param version: version of the project to be built. Default: ``latest``\n :param record: whether or not record the build in a new Build object\n :param force: build the HTML documentation even if the files haven't changed\n :returns: Celery AsyncResult promise and Build instance\n :rtype: tuple\n \"\"\"\n update_docs_task, build = prepare_build(\n project,\n version,\n record,\n force,\n immutable=True,\n )\n\n if (update_docs_task, build) == (None, None):\n # Build was skipped\n return (None, None)\n\n return (update_docs_task.apply_async(), build)\n\n\ndef send_email(\n recipient, subject, template, template_html, context=None, request=None,\n from_email=None, **kwargs\n): # pylint: disable=unused-argument\n \"\"\"\n Alter context passed in and call email send task.\n\n .. 
seealso::\n\n Task :py:func:`readthedocs.core.tasks.send_email_task`\n Task that handles templating and sending email message\n \"\"\"\n from ..tasks import send_email_task\n\n if context is None:\n context = {}\n context['uri'] = '{scheme}://{host}'.format(\n scheme='https',\n host=settings.PRODUCTION_DOMAIN,\n )\n send_email_task.delay(\n recipient=recipient, subject=subject, template=template,\n template_html=template_html, context=context, from_email=from_email,\n **kwargs\n )\n\n\n@keep_lazy(str, SafeText)\ndef slugify(value, *args, **kwargs):\n \"\"\"\n Add a DNS safe option to slugify.\n\n :param dns_safe: Remove underscores from slug as well\n \"\"\"\n dns_safe = kwargs.pop('dns_safe', True)\n value = slugify_base(value, *args, **kwargs)\n if dns_safe:\n value = mark_safe(re.sub('[-_]+', '-', value))\n return value\n\n\ndef safe_makedirs(directory_name):\n \"\"\"\n Safely create a directory.\n\n Makedirs has an issue where it has a race condition around checking for a\n directory and then creating it. This catches the exception in the case where\n the dir already exists.\n \"\"\"\n try:\n os.makedirs(directory_name)\n except OSError as e:\n if e.errno != errno.EEXIST: # 17, FileExistsError\n raise\n\n\ndef safe_unlink(path):\n \"\"\"\n Unlink ``path`` symlink using ``os.unlink``.\n\n This helper handles the exception ``FileNotFoundError`` to avoid logging in\n cases where the symlink does not exist already and there is nothing to\n unlink.\n\n :param path: symlink path to unlink\n :type path: str\n \"\"\"\n try:\n os.unlink(path)\n except FileNotFoundError:\n log.warning('Unlink failed. Path %s does not exists', path)\n", "path": "readthedocs/core/utils/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Common utilty functions.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport errno\nimport getpass\nimport logging\nimport os\nimport re\n\nfrom django.conf import settings\nfrom django.utils.functional import keep_lazy\nfrom django.utils.safestring import SafeText, mark_safe\nfrom django.utils.text import slugify as slugify_base\nfrom celery import group, chord\n\nfrom readthedocs.builds.constants import BUILD_STATE_TRIGGERED\nfrom readthedocs.doc_builder.constants import DOCKER_LIMITS\n\nlog = logging.getLogger(__name__)\n\nSYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())\n\n\ndef broadcast(type, task, args, kwargs=None, callback=None): # pylint: disable=redefined-builtin\n \"\"\"\n Run a broadcast across our servers.\n\n Returns a task group that can be checked for results.\n\n `callback` should be a task signature that will be run once,\n after all of the broadcast tasks have finished running.\n \"\"\"\n assert type in ['web', 'app', 'build']\n if kwargs is None:\n kwargs = {}\n default_queue = getattr(settings, 'CELERY_DEFAULT_QUEUE', 'celery')\n if type in ['web', 'app']:\n servers = getattr(settings, 'MULTIPLE_APP_SERVERS', [default_queue])\n elif type in ['build']:\n servers = getattr(settings, 'MULTIPLE_BUILD_SERVERS', [default_queue])\n\n tasks = []\n for server in servers:\n task_sig = task.s(*args, **kwargs).set(queue=server)\n tasks.append(task_sig)\n if callback:\n task_promise = chord(tasks, callback).apply_async()\n else:\n # Celery's Group class does some special handling when an iterable with\n # len() == 1 is passed in. 
This will be hit if there is only one server\n # defined in the above queue lists\n if len(tasks) > 1:\n task_promise = group(*tasks).apply_async()\n else:\n task_promise = group(tasks).apply_async()\n return task_promise\n\n\ndef prepare_build(\n project,\n version=None,\n record=True,\n force=False,\n immutable=True,\n):\n \"\"\"\n Prepare a build in a Celery task for project and version.\n\n If project has a ``build_queue``, execute the task on this build queue. If\n project has ``skip=True``, the build is not triggered.\n\n :param project: project's documentation to be built\n :param version: version of the project to be built. Default: ``project.get_default_version()``\n :param record: whether or not record the build in a new Build object\n :param force: build the HTML documentation even if the files haven't changed\n :param immutable: whether or not create an immutable Celery signature\n :returns: Celery signature of update_docs_task and Build instance\n :rtype: tuple\n \"\"\"\n # Avoid circular import\n from readthedocs.builds.models import Build\n from readthedocs.projects.models import Project\n from readthedocs.projects.tasks import update_docs_task\n\n build = None\n\n if not Project.objects.is_active(project):\n log.warning(\n 'Build not triggered because Project is not active: project=%s',\n project.slug,\n )\n return (None, None)\n\n if not version:\n default_version = project.get_default_version()\n version = project.versions.get(slug=default_version)\n\n kwargs = {\n 'version_pk': version.pk,\n 'record': record,\n 'force': force,\n }\n\n if record:\n build = Build.objects.create(\n project=project,\n version=version,\n type='html',\n state=BUILD_STATE_TRIGGERED,\n success=True,\n )\n kwargs['build_pk'] = build.pk\n\n options = {}\n if project.build_queue:\n options['queue'] = project.build_queue\n\n # Set per-task time limit\n time_limit = DOCKER_LIMITS['time']\n try:\n if project.container_time_limit:\n time_limit = int(project.container_time_limit)\n except ValueError:\n log.warning('Invalid time_limit for project: %s', project.slug)\n\n # Add 20% overhead to task, to ensure the build can timeout and the task\n # will cleanly finish.\n options['soft_time_limit'] = time_limit\n options['time_limit'] = int(time_limit * 1.2)\n\n return (\n update_docs_task.signature(\n args=(project.pk,),\n kwargs=kwargs,\n options=options,\n immutable=True,\n ),\n build,\n )\n\n\ndef trigger_build(project, version=None, record=True, force=False):\n \"\"\"\n Trigger a Build.\n\n Helper that calls ``prepare_build`` and just effectively trigger the Celery\n task to be executed by a worker.\n\n :param project: project's documentation to be built\n :param version: version of the project to be built. Default: ``latest``\n :param record: whether or not record the build in a new Build object\n :param force: build the HTML documentation even if the files haven't changed\n :returns: Celery AsyncResult promise and Build instance\n :rtype: tuple\n \"\"\"\n update_docs_task, build = prepare_build(\n project,\n version,\n record,\n force,\n immutable=True,\n )\n\n if (update_docs_task, build) == (None, None):\n # Build was skipped\n return (None, None)\n\n return (update_docs_task.apply_async(), build)\n\n\ndef send_email(\n recipient, subject, template, template_html, context=None, request=None,\n from_email=None, **kwargs\n): # pylint: disable=unused-argument\n \"\"\"\n Alter context passed in and call email send task.\n\n .. 
seealso::\n\n Task :py:func:`readthedocs.core.tasks.send_email_task`\n Task that handles templating and sending email message\n \"\"\"\n from ..tasks import send_email_task\n\n if context is None:\n context = {}\n context['uri'] = '{scheme}://{host}'.format(\n scheme='https',\n host=settings.PRODUCTION_DOMAIN,\n )\n send_email_task.delay(\n recipient=recipient, subject=subject, template=template,\n template_html=template_html, context=context, from_email=from_email,\n **kwargs\n )\n\n\n@keep_lazy(str, SafeText)\ndef slugify(value, *args, **kwargs):\n \"\"\"\n Add a DNS safe option to slugify.\n\n :param dns_safe: Remove underscores from slug as well\n \"\"\"\n dns_safe = kwargs.pop('dns_safe', True)\n value = slugify_base(value, *args, **kwargs)\n if dns_safe:\n value = mark_safe(re.sub('[-_]+', '-', value))\n return value\n\n\ndef safe_makedirs(directory_name):\n \"\"\"\n Safely create a directory.\n\n Makedirs has an issue where it has a race condition around checking for a\n directory and then creating it. This catches the exception in the case where\n the dir already exists.\n \"\"\"\n try:\n os.makedirs(directory_name)\n except OSError as e:\n if e.errno != errno.EEXIST: # 17, FileExistsError\n raise\n\n\ndef safe_unlink(path):\n \"\"\"\n Unlink ``path`` symlink using ``os.unlink``.\n\n This helper handles the exception ``FileNotFoundError`` to avoid logging in\n cases where the symlink does not exist already and there is nothing to\n unlink.\n\n :param path: symlink path to unlink\n :type path: str\n \"\"\"\n try:\n os.unlink(path)\n except FileNotFoundError:\n log.warning('Unlink failed. Path %s does not exists', path)\n", "path": "readthedocs/core/utils/__init__.py"}]}
| 2,732 | 330 |
gh_patches_debug_50240 | rasdani/github-patches | git_diff | sopel-irc__sopel-1437 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
config: default_timezone has no default
I need to check for side-effects, but there shouldn't be any reason that `core.default_timezone` can't default to `'UTC'`. It currently defaults to `None`, which caused errors when I was testing #1162 just now.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/config/core_section.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals, absolute_import, print_function, division
4
5 import os.path
6
7 from sopel.config.types import (
8 StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,
9 FilenameAttribute, NO_DEFAULT
10 )
11 from sopel.tools import Identifier
12
13
14 def _find_certs():
15 """
16 Find the TLS root CA store.
17
18 :returns: str (path to file)
19 """
20 # check if the root CA store is at a known location
21 locations = [
22 '/etc/pki/tls/cert.pem', # best first guess
23 '/etc/ssl/certs/ca-certificates.crt', # Debian
24 '/etc/ssl/cert.pem', # FreeBSD base OpenSSL
25 '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL
26 '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora
27 '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS
28 '/etc/pki/tls/cacert.pem', # OpenELEC
29 '/etc/ssl/ca-bundle.pem', # OpenSUSE
30 ]
31 for certs in locations:
32 if os.path.isfile(certs):
33 return certs
34 return None
35
36
37 def configure(config):
38 config.core.configure_setting('nick', 'Enter the nickname for your bot.')
39 config.core.configure_setting('host', 'Enter the server to connect to.')
40 config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')
41 if config.core.use_ssl:
42 default_port = 6697
43 else:
44 default_port = 6667
45 config.core.configure_setting('port', 'Enter the port to connect on.',
46 default=default_port)
47 config.core.configure_setting(
48 'owner', "Enter your own IRC name (or that of the bot's owner)")
49 config.core.configure_setting(
50 'channels',
51 'Enter the channels to connect to at startup, separated by commas.'
52 )
53
54
55 class CoreSection(StaticSection):
56 """The config section used for configuring the bot itself."""
57 admins = ListAttribute('admins')
58 """The list of people (other than the owner) who can administer the bot"""
59
60 admin_accounts = ListAttribute('admin_accounts')
61 """The list of accounts (other than the owner's) who can administer the bot.
62
63 This should not be set for networks that do not support IRCv3 account
64 capabilities."""
65
66 alias_nicks = ListAttribute('alias_nicks')
67 """List of alternate names recognized as the bot's nick for $nick and
68 $nickname regex substitutions"""
69
70 auth_method = ChoiceAttribute('auth_method', choices=[
71 'nickserv', 'authserv', 'Q', 'sasl', 'server', 'userserv'])
72 """The method to use to authenticate with the server.
73
74 Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server`` or ``userserv``."""
75
76 auth_password = ValidatedAttribute('auth_password')
77 """The password to use to authenticate with the server."""
78
79 auth_target = ValidatedAttribute('auth_target')
80 """The user to use for nickserv authentication, or the SASL mechanism.
81
82 May not apply, depending on ``auth_method``. Defaults to NickServ for
83 nickserv auth, and PLAIN for SASL auth."""
84
85 auth_username = ValidatedAttribute('auth_username')
86 """The username/account to use to authenticate with the server.
87
88 May not apply, depending on ``auth_method``."""
89
90 bind_host = ValidatedAttribute('bind_host')
91 """Bind the connection to a specific IP"""
92
93 ca_certs = FilenameAttribute('ca_certs', default=_find_certs())
94 """The path of the CA certs pem file"""
95
96 channels = ListAttribute('channels')
97 """List of channels for the bot to join when it connects"""
98
99 db_filename = ValidatedAttribute('db_filename')
100 """The filename for Sopel's database."""
101
102 default_time_format = ValidatedAttribute('default_time_format',
103 default='%Y-%m-%d - %T%Z')
104 """The default format to use for time in messages."""
105
106 default_timezone = ValidatedAttribute('default_timezone')
107 """The default timezone to use for time in messages."""
108
109 enable = ListAttribute('enable')
110 """A whitelist of the only modules you want to enable."""
111
112 exclude = ListAttribute('exclude')
113 """A list of modules which should not be loaded."""
114
115 extra = ListAttribute('extra')
116 """A list of other directories you'd like to include modules from."""
117
118 help_prefix = ValidatedAttribute('help_prefix', default='.')
119 """The prefix to use in help"""
120
121 @property
122 def homedir(self):
123 """The directory in which various files are stored at runtime.
124
125 By default, this is the same directory as the config. It can not be
126 changed at runtime.
127 """
128 return self._parent.homedir
129
130 host = ValidatedAttribute('host', default='irc.dftba.net')
131 """The server to connect to."""
132
133 host_blocks = ListAttribute('host_blocks')
134 """A list of hostmasks which Sopel should ignore.
135
136 Regular expression syntax is used"""
137
138 log_raw = ValidatedAttribute('log_raw', bool, default=False)
139 """Whether a log of raw lines as sent and received should be kept."""
140
141 logdir = FilenameAttribute('logdir', directory=True, default='logs')
142 """Directory in which to place logs."""
143
144 logging_channel = ValidatedAttribute('logging_channel', Identifier)
145 """The channel to send logging messages to."""
146
147 logging_level = ChoiceAttribute('logging_level',
148 ['CRITICAL', 'ERROR', 'WARNING', 'INFO',
149 'DEBUG'],
150 'WARNING')
151 """The lowest severity of logs to display."""
152
153 modes = ValidatedAttribute('modes', default='B')
154 """User modes to be set on connection."""
155
156 name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')
157 """The "real name" of your bot for WHOIS responses."""
158
159 nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))
160 """The nickname for the bot"""
161
162 nick_blocks = ListAttribute('nick_blocks')
163 """A list of nicks which Sopel should ignore.
164
165 Regular expression syntax is used."""
166
167 not_configured = ValidatedAttribute('not_configured', bool, default=False)
168 """For package maintainers. Not used in normal configurations.
169
170 This allows software packages to install a default config file, with this
171 set to true, so that the bot will not run until it has been properly
172 configured."""
173
174 owner = ValidatedAttribute('owner', default=NO_DEFAULT)
175 """The IRC name of the owner of the bot."""
176
177 owner_account = ValidatedAttribute('owner_account')
178 """The services account name of the owner of the bot.
179
180 This should only be set on networks which support IRCv3 account
181 capabilities.
182 """
183
184 pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')
185 """The directory in which to put the file Sopel uses to track its process ID.
186
187 You probably do not need to change this unless you're managing Sopel with
188 systemd or similar."""
189
190 port = ValidatedAttribute('port', int, default=6667)
191 """The port to connect on."""
192
193 prefix = ValidatedAttribute('prefix', default='\\.')
194 """The prefix to add to the beginning of commands.
195
196 It is a regular expression (so the default, ``\\.``, means commands start
197 with a period), though using capturing groups will create problems."""
198
199 reply_errors = ValidatedAttribute('reply_errors', bool, default=True)
200 """Whether to message the sender of a message that triggered an error with the exception."""
201
202 throttle_join = ValidatedAttribute('throttle_join', int)
203 """Slow down the initial join of channels to prevent getting kicked.
204
205 Sopel will only join this many channels at a time, sleeping for a second
206 between each batch. This is unnecessary on most networks."""
207
208 timeout = ValidatedAttribute('timeout', int, default=120)
209 """The amount of time acceptable between pings before timing out."""
210
211 use_ssl = ValidatedAttribute('use_ssl', bool, default=False)
212 """Whether to use a SSL secured connection."""
213
214 user = ValidatedAttribute('user', default='sopel')
215 """The "user" for your bot (the part before the @ in the hostname)."""
216
217 verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)
218 """Whether to require a trusted SSL certificate for SSL connections."""
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sopel/config/core_section.py b/sopel/config/core_section.py
--- a/sopel/config/core_section.py
+++ b/sopel/config/core_section.py
@@ -103,7 +103,7 @@
default='%Y-%m-%d - %T%Z')
"""The default format to use for time in messages."""
- default_timezone = ValidatedAttribute('default_timezone')
+ default_timezone = ValidatedAttribute('default_timezone', default='UTC')
"""The default timezone to use for time in messages."""
enable = ListAttribute('enable')
|
{"golden_diff": "diff --git a/sopel/config/core_section.py b/sopel/config/core_section.py\n--- a/sopel/config/core_section.py\n+++ b/sopel/config/core_section.py\n@@ -103,7 +103,7 @@\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n \n- default_timezone = ValidatedAttribute('default_timezone')\n+ default_timezone = ValidatedAttribute('default_timezone', default='UTC')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n \n enable = ListAttribute('enable')\n", "issue": "config: default_timezone has no default\nI need to check for side-effects, but there shouldn't be any reason that `core.default_timezone` can't default to `'UTC'`. It currently defaults to `None`, which caused errors when I was testing #1162 just now.\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,\n FilenameAttribute, NO_DEFAULT\n)\nfrom sopel.tools import Identifier\n\n\ndef _find_certs():\n \"\"\"\n Find the TLS root CA store.\n\n :returns: str (path to file)\n \"\"\"\n # check if the root CA store is at a known location\n locations = [\n '/etc/pki/tls/cert.pem', # best first guess\n '/etc/ssl/certs/ca-certificates.crt', # Debian\n '/etc/ssl/cert.pem', # FreeBSD base OpenSSL\n '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL\n '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora\n '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS\n '/etc/pki/tls/cacert.pem', # OpenELEC\n '/etc/ssl/ca-bundle.pem', # OpenSUSE\n ]\n for certs in locations:\n if os.path.isfile(certs):\n return certs\n return None\n\n\ndef configure(config):\n config.core.configure_setting('nick', 'Enter the nickname for your bot.')\n config.core.configure_setting('host', 'Enter the server to connect to.')\n config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')\n if config.core.use_ssl:\n default_port = 6697\n else:\n default_port = 6667\n config.core.configure_setting('port', 'Enter the port to connect on.',\n default=default_port)\n config.core.configure_setting(\n 'owner', \"Enter your own IRC name (or that of the bot's owner)\")\n config.core.configure_setting(\n 'channels',\n 'Enter the channels to connect to at startup, separated by commas.'\n )\n\n\nclass CoreSection(StaticSection):\n \"\"\"The config section used for configuring the bot itself.\"\"\"\n admins = ListAttribute('admins')\n \"\"\"The list of people (other than the owner) who can administer the bot\"\"\"\n\n admin_accounts = ListAttribute('admin_accounts')\n \"\"\"The list of accounts (other than the owner's) who can administer the bot.\n\n This should not be set for networks that do not support IRCv3 account\n capabilities.\"\"\"\n\n alias_nicks = ListAttribute('alias_nicks')\n \"\"\"List of alternate names recognized as the bot's nick for $nick and\n $nickname regex substitutions\"\"\"\n\n auth_method = ChoiceAttribute('auth_method', choices=[\n 'nickserv', 'authserv', 'Q', 'sasl', 'server', 'userserv'])\n \"\"\"The method to use to authenticate with the server.\n\n Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server`` or ``userserv``.\"\"\"\n\n auth_password = ValidatedAttribute('auth_password')\n \"\"\"The password to use to authenticate with the server.\"\"\"\n\n auth_target = ValidatedAttribute('auth_target')\n \"\"\"The user to use for nickserv 
authentication, or the SASL mechanism.\n\n May not apply, depending on ``auth_method``. Defaults to NickServ for\n nickserv auth, and PLAIN for SASL auth.\"\"\"\n\n auth_username = ValidatedAttribute('auth_username')\n \"\"\"The username/account to use to authenticate with the server.\n\n May not apply, depending on ``auth_method``.\"\"\"\n\n bind_host = ValidatedAttribute('bind_host')\n \"\"\"Bind the connection to a specific IP\"\"\"\n\n ca_certs = FilenameAttribute('ca_certs', default=_find_certs())\n \"\"\"The path of the CA certs pem file\"\"\"\n\n channels = ListAttribute('channels')\n \"\"\"List of channels for the bot to join when it connects\"\"\"\n\n db_filename = ValidatedAttribute('db_filename')\n \"\"\"The filename for Sopel's database.\"\"\"\n\n default_time_format = ValidatedAttribute('default_time_format',\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n\n default_timezone = ValidatedAttribute('default_timezone')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n\n enable = ListAttribute('enable')\n \"\"\"A whitelist of the only modules you want to enable.\"\"\"\n\n exclude = ListAttribute('exclude')\n \"\"\"A list of modules which should not be loaded.\"\"\"\n\n extra = ListAttribute('extra')\n \"\"\"A list of other directories you'd like to include modules from.\"\"\"\n\n help_prefix = ValidatedAttribute('help_prefix', default='.')\n \"\"\"The prefix to use in help\"\"\"\n\n @property\n def homedir(self):\n \"\"\"The directory in which various files are stored at runtime.\n\n By default, this is the same directory as the config. It can not be\n changed at runtime.\n \"\"\"\n return self._parent.homedir\n\n host = ValidatedAttribute('host', default='irc.dftba.net')\n \"\"\"The server to connect to.\"\"\"\n\n host_blocks = ListAttribute('host_blocks')\n \"\"\"A list of hostmasks which Sopel should ignore.\n\n Regular expression syntax is used\"\"\"\n\n log_raw = ValidatedAttribute('log_raw', bool, default=False)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n\n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n \"\"\"Directory in which to place logs.\"\"\"\n\n logging_channel = ValidatedAttribute('logging_channel', Identifier)\n \"\"\"The channel to send logging messages to.\"\"\"\n\n logging_level = ChoiceAttribute('logging_level',\n ['CRITICAL', 'ERROR', 'WARNING', 'INFO',\n 'DEBUG'],\n 'WARNING')\n \"\"\"The lowest severity of logs to display.\"\"\"\n\n modes = ValidatedAttribute('modes', default='B')\n \"\"\"User modes to be set on connection.\"\"\"\n\n name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')\n \"\"\"The \"real name\" of your bot for WHOIS responses.\"\"\"\n\n nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))\n \"\"\"The nickname for the bot\"\"\"\n\n nick_blocks = ListAttribute('nick_blocks')\n \"\"\"A list of nicks which Sopel should ignore.\n\n Regular expression syntax is used.\"\"\"\n\n not_configured = ValidatedAttribute('not_configured', bool, default=False)\n \"\"\"For package maintainers. 
Not used in normal configurations.\n\n This allows software packages to install a default config file, with this\n set to true, so that the bot will not run until it has been properly\n configured.\"\"\"\n\n owner = ValidatedAttribute('owner', default=NO_DEFAULT)\n \"\"\"The IRC name of the owner of the bot.\"\"\"\n\n owner_account = ValidatedAttribute('owner_account')\n \"\"\"The services account name of the owner of the bot.\n\n This should only be set on networks which support IRCv3 account\n capabilities.\n \"\"\"\n\n pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')\n \"\"\"The directory in which to put the file Sopel uses to track its process ID.\n\n You probably do not need to change this unless you're managing Sopel with\n systemd or similar.\"\"\"\n\n port = ValidatedAttribute('port', int, default=6667)\n \"\"\"The port to connect on.\"\"\"\n\n prefix = ValidatedAttribute('prefix', default='\\\\.')\n \"\"\"The prefix to add to the beginning of commands.\n\n It is a regular expression (so the default, ``\\\\.``, means commands start\n with a period), though using capturing groups will create problems.\"\"\"\n\n reply_errors = ValidatedAttribute('reply_errors', bool, default=True)\n \"\"\"Whether to message the sender of a message that triggered an error with the exception.\"\"\"\n\n throttle_join = ValidatedAttribute('throttle_join', int)\n \"\"\"Slow down the initial join of channels to prevent getting kicked.\n\n Sopel will only join this many channels at a time, sleeping for a second\n between each batch. This is unnecessary on most networks.\"\"\"\n\n timeout = ValidatedAttribute('timeout', int, default=120)\n \"\"\"The amount of time acceptable between pings before timing out.\"\"\"\n\n use_ssl = ValidatedAttribute('use_ssl', bool, default=False)\n \"\"\"Whether to use a SSL secured connection.\"\"\"\n\n user = ValidatedAttribute('user', default='sopel')\n \"\"\"The \"user\" for your bot (the part before the @ in the hostname).\"\"\"\n\n verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)\n \"\"\"Whether to require a trusted SSL certificate for SSL connections.\"\"\"\n", "path": "sopel/config/core_section.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport os.path\n\nfrom sopel.config.types import (\n StaticSection, ValidatedAttribute, ListAttribute, ChoiceAttribute,\n FilenameAttribute, NO_DEFAULT\n)\nfrom sopel.tools import Identifier\n\n\ndef _find_certs():\n \"\"\"\n Find the TLS root CA store.\n\n :returns: str (path to file)\n \"\"\"\n # check if the root CA store is at a known location\n locations = [\n '/etc/pki/tls/cert.pem', # best first guess\n '/etc/ssl/certs/ca-certificates.crt', # Debian\n '/etc/ssl/cert.pem', # FreeBSD base OpenSSL\n '/usr/local/openssl/cert.pem', # FreeBSD userland OpenSSL\n '/etc/pki/tls/certs/ca-bundle.crt', # RHEL 6 / Fedora\n '/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', # RHEL 7 / CentOS\n '/etc/pki/tls/cacert.pem', # OpenELEC\n '/etc/ssl/ca-bundle.pem', # OpenSUSE\n ]\n for certs in locations:\n if os.path.isfile(certs):\n return certs\n return None\n\n\ndef configure(config):\n config.core.configure_setting('nick', 'Enter the nickname for your bot.')\n config.core.configure_setting('host', 'Enter the server to connect to.')\n config.core.configure_setting('use_ssl', 'Should the bot connect with SSL?')\n if config.core.use_ssl:\n default_port = 6697\n else:\n default_port = 6667\n 
config.core.configure_setting('port', 'Enter the port to connect on.',\n default=default_port)\n config.core.configure_setting(\n 'owner', \"Enter your own IRC name (or that of the bot's owner)\")\n config.core.configure_setting(\n 'channels',\n 'Enter the channels to connect to at startup, separated by commas.'\n )\n\n\nclass CoreSection(StaticSection):\n \"\"\"The config section used for configuring the bot itself.\"\"\"\n admins = ListAttribute('admins')\n \"\"\"The list of people (other than the owner) who can administer the bot\"\"\"\n\n admin_accounts = ListAttribute('admin_accounts')\n \"\"\"The list of accounts (other than the owner's) who can administer the bot.\n\n This should not be set for networks that do not support IRCv3 account\n capabilities.\"\"\"\n\n alias_nicks = ListAttribute('alias_nicks')\n \"\"\"List of alternate names recognized as the bot's nick for $nick and\n $nickname regex substitutions\"\"\"\n\n auth_method = ChoiceAttribute('auth_method', choices=[\n 'nickserv', 'authserv', 'Q', 'sasl', 'server', 'userserv'])\n \"\"\"The method to use to authenticate with the server.\n\n Can be ``nickserv``, ``authserv``, ``Q``, ``sasl``, or ``server`` or ``userserv``.\"\"\"\n\n auth_password = ValidatedAttribute('auth_password')\n \"\"\"The password to use to authenticate with the server.\"\"\"\n\n auth_target = ValidatedAttribute('auth_target')\n \"\"\"The user to use for nickserv authentication, or the SASL mechanism.\n\n May not apply, depending on ``auth_method``. Defaults to NickServ for\n nickserv auth, and PLAIN for SASL auth.\"\"\"\n\n auth_username = ValidatedAttribute('auth_username')\n \"\"\"The username/account to use to authenticate with the server.\n\n May not apply, depending on ``auth_method``.\"\"\"\n\n bind_host = ValidatedAttribute('bind_host')\n \"\"\"Bind the connection to a specific IP\"\"\"\n\n ca_certs = FilenameAttribute('ca_certs', default=_find_certs())\n \"\"\"The path of the CA certs pem file\"\"\"\n\n channels = ListAttribute('channels')\n \"\"\"List of channels for the bot to join when it connects\"\"\"\n\n db_filename = ValidatedAttribute('db_filename')\n \"\"\"The filename for Sopel's database.\"\"\"\n\n default_time_format = ValidatedAttribute('default_time_format',\n default='%Y-%m-%d - %T%Z')\n \"\"\"The default format to use for time in messages.\"\"\"\n\n default_timezone = ValidatedAttribute('default_timezone', default='UTC')\n \"\"\"The default timezone to use for time in messages.\"\"\"\n\n enable = ListAttribute('enable')\n \"\"\"A whitelist of the only modules you want to enable.\"\"\"\n\n exclude = ListAttribute('exclude')\n \"\"\"A list of modules which should not be loaded.\"\"\"\n\n extra = ListAttribute('extra')\n \"\"\"A list of other directories you'd like to include modules from.\"\"\"\n\n help_prefix = ValidatedAttribute('help_prefix', default='.')\n \"\"\"The prefix to use in help\"\"\"\n\n @property\n def homedir(self):\n \"\"\"The directory in which various files are stored at runtime.\n\n By default, this is the same directory as the config. 
It can not be\n changed at runtime.\n \"\"\"\n return self._parent.homedir\n\n host = ValidatedAttribute('host', default='irc.dftba.net')\n \"\"\"The server to connect to.\"\"\"\n\n host_blocks = ListAttribute('host_blocks')\n \"\"\"A list of hostmasks which Sopel should ignore.\n\n Regular expression syntax is used\"\"\"\n\n log_raw = ValidatedAttribute('log_raw', bool, default=False)\n \"\"\"Whether a log of raw lines as sent and received should be kept.\"\"\"\n\n logdir = FilenameAttribute('logdir', directory=True, default='logs')\n \"\"\"Directory in which to place logs.\"\"\"\n\n logging_channel = ValidatedAttribute('logging_channel', Identifier)\n \"\"\"The channel to send logging messages to.\"\"\"\n\n logging_level = ChoiceAttribute('logging_level',\n ['CRITICAL', 'ERROR', 'WARNING', 'INFO',\n 'DEBUG'],\n 'WARNING')\n \"\"\"The lowest severity of logs to display.\"\"\"\n\n modes = ValidatedAttribute('modes', default='B')\n \"\"\"User modes to be set on connection.\"\"\"\n\n name = ValidatedAttribute('name', default='Sopel: https://sopel.chat')\n \"\"\"The \"real name\" of your bot for WHOIS responses.\"\"\"\n\n nick = ValidatedAttribute('nick', Identifier, default=Identifier('Sopel'))\n \"\"\"The nickname for the bot\"\"\"\n\n nick_blocks = ListAttribute('nick_blocks')\n \"\"\"A list of nicks which Sopel should ignore.\n\n Regular expression syntax is used.\"\"\"\n\n not_configured = ValidatedAttribute('not_configured', bool, default=False)\n \"\"\"For package maintainers. Not used in normal configurations.\n\n This allows software packages to install a default config file, with this\n set to true, so that the bot will not run until it has been properly\n configured.\"\"\"\n\n owner = ValidatedAttribute('owner', default=NO_DEFAULT)\n \"\"\"The IRC name of the owner of the bot.\"\"\"\n\n owner_account = ValidatedAttribute('owner_account')\n \"\"\"The services account name of the owner of the bot.\n\n This should only be set on networks which support IRCv3 account\n capabilities.\n \"\"\"\n\n pid_dir = FilenameAttribute('pid_dir', directory=True, default='.')\n \"\"\"The directory in which to put the file Sopel uses to track its process ID.\n\n You probably do not need to change this unless you're managing Sopel with\n systemd or similar.\"\"\"\n\n port = ValidatedAttribute('port', int, default=6667)\n \"\"\"The port to connect on.\"\"\"\n\n prefix = ValidatedAttribute('prefix', default='\\\\.')\n \"\"\"The prefix to add to the beginning of commands.\n\n It is a regular expression (so the default, ``\\\\.``, means commands start\n with a period), though using capturing groups will create problems.\"\"\"\n\n reply_errors = ValidatedAttribute('reply_errors', bool, default=True)\n \"\"\"Whether to message the sender of a message that triggered an error with the exception.\"\"\"\n\n throttle_join = ValidatedAttribute('throttle_join', int)\n \"\"\"Slow down the initial join of channels to prevent getting kicked.\n\n Sopel will only join this many channels at a time, sleeping for a second\n between each batch. 
This is unnecessary on most networks.\"\"\"\n\n timeout = ValidatedAttribute('timeout', int, default=120)\n \"\"\"The amount of time acceptable between pings before timing out.\"\"\"\n\n use_ssl = ValidatedAttribute('use_ssl', bool, default=False)\n \"\"\"Whether to use a SSL secured connection.\"\"\"\n\n user = ValidatedAttribute('user', default='sopel')\n \"\"\"The \"user\" for your bot (the part before the @ in the hostname).\"\"\"\n\n verify_ssl = ValidatedAttribute('verify_ssl', bool, default=True)\n \"\"\"Whether to require a trusted SSL certificate for SSL connections.\"\"\"\n", "path": "sopel/config/core_section.py"}]}
| 2,788 | 130 |
gh_patches_debug_25467 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-175 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IntegrityError in language tree
I just found a bug causing an `IntegrityError` in the language tree. The error can be reproduced reliably in the current state of the develop branch.
Steps to reproduce:
- In the network admin view:
- Create a new region
  - Create at least two languages (in the following steps, we assume the two languages to be German and English; works with any other languages as well)
- In the region view (in the region we just created):
- Create a new language node for the base language (German in this example)
- **Bug occurs in the next steps, therefore I provide a more precise description of the following steps:** in the language tree view, click on "create language tree node"
- Choose "English" as language, "German" as source language, check the checkbox for language activation
- click on "save", a success message should show up
- click on "save" again without changing any form fields
- now the form fields should have the following contents:
- language: "English"
- source language: "German"
- activate language: is checked (`true`)
- change language field to "German", as all languages can be chosen again
- now the form fields should have the following contents:
- language: "German"
- source language: "German"
- activate language: is checked (`true`)
- click on "save" again
- `IntegrityError` occurs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cms/views/language_tree/language_tree_node.py`
Content:
```
1 """
2
3 Returns:
4 [type]: [description]
5 """
6
7 from django.contrib import messages
8 from django.contrib.auth.decorators import login_required
9 from django.contrib.auth.mixins import PermissionRequiredMixin
10 from django.utils.translation import ugettext as _
11 from django.utils.decorators import method_decorator
12 from django.views.generic import TemplateView
13 from django.shortcuts import render, redirect
14
15 from .language_tree_node_form import LanguageTreeNodeForm
16 from ...models import Language, LanguageTreeNode, Site
17 from ...decorators import region_permission_required
18
19
20 @method_decorator(login_required, name='dispatch')
21 @method_decorator(region_permission_required, name='dispatch')
22 class LanguageTreeNodeView(PermissionRequiredMixin, TemplateView):
23 permission_required = 'cms.manage_language_tree'
24 raise_exception = True
25
26 template_name = 'language_tree/tree_node.html'
27 base_context = {'current_menu_item': 'language_tree'}
28
29 def get(self, request, *args, **kwargs):
30 language_tree_node_id = self.kwargs.get('language_tree_node_id')
31 # limit possible parents to nodes of current region
32 parent_queryset = Site.get_current_site(request).language_tree_nodes
33 # limit possible languages to those which are not yet included in the tree
34 language_queryset = Language.objects.exclude(
35 language_tree_nodes__in=parent_queryset.exclude(id=language_tree_node_id)
36 )
37 if language_tree_node_id:
38 language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)
39 children = language_tree_node.get_descendants(include_self=True)
40 parent_queryset = parent_queryset.difference(children)
41 form = LanguageTreeNodeForm(initial={
42 'language': language_tree_node.language,
43 'parent': language_tree_node.parent,
44 'active': language_tree_node.active,
45 })
46 else:
47 form = LanguageTreeNodeForm()
48 form.fields['parent'].queryset = parent_queryset
49 form.fields['language'].queryset = language_queryset
50 return render(request, self.template_name, {
51 'form': form, **self.base_context})
52
53 def post(self, request, site_slug, language_tree_node_id=None):
54 # TODO: error handling
55 form = LanguageTreeNodeForm(data=request.POST, site_slug=site_slug)
56 if form.is_valid():
57 if language_tree_node_id:
58 form.save_language_node(
59 language_tree_node_id=language_tree_node_id,
60 )
61 messages.success(request, _('Language tree node was saved successfully.'))
62 else:
63 language_tree_node = form.save_language_node()
64 messages.success(request, _('Language tree node was created successfully.'))
65 return redirect('edit_language_tree_node', **{
66 'language_tree_node_id': language_tree_node.id,
67 'site_slug': site_slug,
68 })
69 # TODO: improve messages
70 else:
71 messages.error(request, _('Errors have occurred.'))
72
73 return render(request, self.template_name, {
74 'form': form, **self.base_context})
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/cms/views/language_tree/language_tree_node.py b/backend/cms/views/language_tree/language_tree_node.py
--- a/backend/cms/views/language_tree/language_tree_node.py
+++ b/backend/cms/views/language_tree/language_tree_node.py
@@ -55,17 +55,17 @@
form = LanguageTreeNodeForm(data=request.POST, site_slug=site_slug)
if form.is_valid():
if language_tree_node_id:
- form.save_language_node(
+ language_tree_node = form.save_language_node(
language_tree_node_id=language_tree_node_id,
)
messages.success(request, _('Language tree node was saved successfully.'))
else:
language_tree_node = form.save_language_node()
messages.success(request, _('Language tree node was created successfully.'))
- return redirect('edit_language_tree_node', **{
- 'language_tree_node_id': language_tree_node.id,
- 'site_slug': site_slug,
- })
+ return redirect('edit_language_tree_node', **{
+ 'language_tree_node_id': language_tree_node.id,
+ 'site_slug': site_slug,
+ })
# TODO: improve messages
else:
messages.error(request, _('Errors have occurred.'))
|
{"golden_diff": "diff --git a/backend/cms/views/language_tree/language_tree_node.py b/backend/cms/views/language_tree/language_tree_node.py\n--- a/backend/cms/views/language_tree/language_tree_node.py\n+++ b/backend/cms/views/language_tree/language_tree_node.py\n@@ -55,17 +55,17 @@\n form = LanguageTreeNodeForm(data=request.POST, site_slug=site_slug)\n if form.is_valid():\n if language_tree_node_id:\n- form.save_language_node(\n+ language_tree_node = form.save_language_node(\n language_tree_node_id=language_tree_node_id,\n )\n messages.success(request, _('Language tree node was saved successfully.'))\n else:\n language_tree_node = form.save_language_node()\n messages.success(request, _('Language tree node was created successfully.'))\n- return redirect('edit_language_tree_node', **{\n- 'language_tree_node_id': language_tree_node.id,\n- 'site_slug': site_slug,\n- })\n+ return redirect('edit_language_tree_node', **{\n+ 'language_tree_node_id': language_tree_node.id,\n+ 'site_slug': site_slug,\n+ })\n # TODO: improve messages\n else:\n messages.error(request, _('Errors have occurred.'))\n", "issue": "IntegrityError in language tree\nI just found a bug causing an `IntegrityError` in the language tree. The error can be reproduced reliably in the current state of the develop branch.\r\n\r\nSteps to reproduce:\r\n- In the network admin view:\r\n - Create a new region\r\n - Create at least two languages (in the following steps, we assume the two languages to be German and Englisch, works with any other languages as well)\r\n- In the region view (in the region we just created):\r\n - Create a new language node for the base language (German in this example)\r\n - **Bug occurs in the next steps, therefore I provide a more precise description of the following steps:** in the language tree view, click on \"create language tree node\"\r\n - Choose \"English\" as language, \"German\" as source language, check the checkbox for language activation\r\n - click on \"save\", a success message should show up\r\n - click on \"save\" again without changing any form fields\r\n - now the form fields should have the following contents:\r\n - language: \"English\"\r\n - source language: \"German\"\r\n - activate language: is checked (`true`)\r\n - change language field to \"German\", as all languages can be chosen again\r\n - now the form fields should have the following contents:\r\n - language: \"German\"\r\n - source language: \"German\"\r\n - activate language: is checked (`true`)\r\n - click on \"save\" again\r\n - `IntegrityError` occurs\n", "before_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.utils.translation import ugettext as _\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import render, redirect\n\nfrom .language_tree_node_form import LanguageTreeNodeForm\nfrom ...models import Language, LanguageTreeNode, Site\nfrom ...decorators import region_permission_required\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass LanguageTreeNodeView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.manage_language_tree'\n raise_exception = True\n\n template_name = 'language_tree/tree_node.html'\n base_context = {'current_menu_item': 
'language_tree'}\n\n def get(self, request, *args, **kwargs):\n language_tree_node_id = self.kwargs.get('language_tree_node_id')\n # limit possible parents to nodes of current region\n parent_queryset = Site.get_current_site(request).language_tree_nodes\n # limit possible languages to those which are not yet included in the tree\n language_queryset = Language.objects.exclude(\n language_tree_nodes__in=parent_queryset.exclude(id=language_tree_node_id)\n )\n if language_tree_node_id:\n language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)\n children = language_tree_node.get_descendants(include_self=True)\n parent_queryset = parent_queryset.difference(children)\n form = LanguageTreeNodeForm(initial={\n 'language': language_tree_node.language,\n 'parent': language_tree_node.parent,\n 'active': language_tree_node.active,\n })\n else:\n form = LanguageTreeNodeForm()\n form.fields['parent'].queryset = parent_queryset\n form.fields['language'].queryset = language_queryset\n return render(request, self.template_name, {\n 'form': form, **self.base_context})\n\n def post(self, request, site_slug, language_tree_node_id=None):\n # TODO: error handling\n form = LanguageTreeNodeForm(data=request.POST, site_slug=site_slug)\n if form.is_valid():\n if language_tree_node_id:\n form.save_language_node(\n language_tree_node_id=language_tree_node_id,\n )\n messages.success(request, _('Language tree node was saved successfully.'))\n else:\n language_tree_node = form.save_language_node()\n messages.success(request, _('Language tree node was created successfully.'))\n return redirect('edit_language_tree_node', **{\n 'language_tree_node_id': language_tree_node.id,\n 'site_slug': site_slug,\n })\n # TODO: improve messages\n else:\n messages.error(request, _('Errors have occurred.'))\n\n return render(request, self.template_name, {\n 'form': form, **self.base_context})\n", "path": "backend/cms/views/language_tree/language_tree_node.py"}], "after_files": [{"content": "\"\"\"\n\nReturns:\n [type]: [description]\n\"\"\"\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.utils.translation import ugettext as _\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import render, redirect\n\nfrom .language_tree_node_form import LanguageTreeNodeForm\nfrom ...models import Language, LanguageTreeNode, Site\nfrom ...decorators import region_permission_required\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass LanguageTreeNodeView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.manage_language_tree'\n raise_exception = True\n\n template_name = 'language_tree/tree_node.html'\n base_context = {'current_menu_item': 'language_tree'}\n\n def get(self, request, *args, **kwargs):\n language_tree_node_id = self.kwargs.get('language_tree_node_id')\n # limit possible parents to nodes of current region\n parent_queryset = Site.get_current_site(request).language_tree_nodes\n # limit possible languages to those which are not yet included in the tree\n language_queryset = Language.objects.exclude(\n language_tree_nodes__in=parent_queryset.exclude(id=language_tree_node_id)\n )\n if language_tree_node_id:\n language_tree_node = LanguageTreeNode.objects.get(id=language_tree_node_id)\n children = 
language_tree_node.get_descendants(include_self=True)\n parent_queryset = parent_queryset.difference(children)\n form = LanguageTreeNodeForm(initial={\n 'language': language_tree_node.language,\n 'parent': language_tree_node.parent,\n 'active': language_tree_node.active,\n })\n else:\n form = LanguageTreeNodeForm()\n form.fields['parent'].queryset = parent_queryset\n form.fields['language'].queryset = language_queryset\n return render(request, self.template_name, {\n 'form': form, **self.base_context})\n\n def post(self, request, site_slug, language_tree_node_id=None):\n # TODO: error handling\n form = LanguageTreeNodeForm(data=request.POST, site_slug=site_slug)\n if form.is_valid():\n if language_tree_node_id:\n language_tree_node = form.save_language_node(\n language_tree_node_id=language_tree_node_id,\n )\n messages.success(request, _('Language tree node was saved successfully.'))\n else:\n language_tree_node = form.save_language_node()\n messages.success(request, _('Language tree node was created successfully.'))\n return redirect('edit_language_tree_node', **{\n 'language_tree_node_id': language_tree_node.id,\n 'site_slug': site_slug,\n })\n # TODO: improve messages\n else:\n messages.error(request, _('Errors have occurred.'))\n\n return render(request, self.template_name, {\n 'form': form, **self.base_context})\n", "path": "backend/cms/views/language_tree/language_tree_node.py"}]}
| 1,323 | 258 |
gh_patches_debug_6605 | rasdani/github-patches | git_diff | encode__uvicorn-996 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
websocket.disconnect events are not sent to applications when clients don't close connections properly (wsproto only)
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
When WebSocket connections are not properly closed by clients, uvicorn + wsproto leaves one zombie asyncio task running per connection and does not emit disconnect events to applications.
### To reproduce
1. Start any application with uvicorn using wsproto;
2. Create a websocket connection;
3. Do not close the websocket properly;
4. Observe that no `websocket.disconnect` events have been sent to your application;
5. Check all asyncio tasks (`asyncio.all_tasks()`): there should be a zombie `run_asgi` task for each connection that was not closed properly (see the sketch below).
I've set up this repo with instructions to make it easier to reproduce this issue: https://github.com/sephioh/uvicorn-wsproto-issue
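A minimal sketch of how step 5 can be checked from inside the running process (editor's illustration, not part of the original report; only the coroutine name `run_asgi` comes from uvicorn's wsproto implementation, the rest is assumed):
```python
import asyncio

async def dump_zombie_tasks() -> None:
    # Must run inside the app's event loop, e.g. from a debug endpoint.
    # Lists run_asgi tasks; one lingering task per improperly closed
    # connection indicates the leak described above.
    for task in asyncio.all_tasks():
        coro = task.get_coro()
        if getattr(coro, "__name__", "") == "run_asgi" and not task.done():
            print("zombie task:", task)
```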
### Expected behavior
A [disconnect](https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event-ws) event should be sent to applications.
### Actual behavior
Disconnect events are not being sent to apps when the client socket is lost.
### Environment
Running uvicorn 0.13.4 with CPython 3.9.0 on Linux
Must be executed with wsproto (`--ws wsproto`).
### Additional context
https://docs.python.org/3/library/asyncio-protocol.html#asyncio.BaseProtocol.connection_lost
No problems are found when running with the websockets impl (`--ws websockets`).
I've opened a PR with a possible fix: https://github.com/encode/uvicorn/pull/996
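A minimal sketch of one way the leak could be addressed in `WSProtocol.connection_lost`: always enqueue a `websocket.disconnect` event so `run_asgi` can finish. This is an illustration of the idea only, not necessarily what the linked PR does:
```python
def connection_lost(self, exc):
    # Signal the ASGI app even when the transport is lost without a
    # close handshake; otherwise receive() blocks forever and the
    # run_asgi task leaks.
    code = 1005 if exc is None else 1006
    self.queue.put_nowait({"type": "websocket.disconnect", "code": code})
    self.connections.remove(self)
    if exc is None:
        self.transport.close()
```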
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/protocols/websockets/wsproto_impl.py`
Content:
```
1 import asyncio
2 import logging
3 from urllib.parse import unquote
4
5 import h11
6 import wsproto
7 from wsproto import ConnectionType, events
8 from wsproto.connection import ConnectionState
9 from wsproto.extensions import PerMessageDeflate
10 from wsproto.utilities import RemoteProtocolError
11
12 from uvicorn.logging import TRACE_LOG_LEVEL
13 from uvicorn.protocols.utils import (
14 get_local_addr,
15 get_path_with_query_string,
16 get_remote_addr,
17 is_ssl,
18 )
19
20
21 class WSProtocol(asyncio.Protocol):
22 def __init__(self, config, server_state, _loop=None):
23 if not config.loaded:
24 config.load()
25
26 self.config = config
27 self.app = config.loaded_app
28 self.loop = _loop or asyncio.get_event_loop()
29 self.logger = logging.getLogger("uvicorn.error")
30 self.root_path = config.root_path
31
32 # Shared server state
33 self.connections = server_state.connections
34 self.tasks = server_state.tasks
35 self.default_headers = server_state.default_headers
36
37 # Connection state
38 self.transport = None
39 self.server = None
40 self.client = None
41 self.scheme = None
42
43 # WebSocket state
44 self.connect_event = None
45 self.queue = asyncio.Queue()
46 self.handshake_complete = False
47 self.close_sent = False
48
49 self.conn = wsproto.WSConnection(connection_type=ConnectionType.SERVER)
50
51 self.read_paused = False
52 self.writable = asyncio.Event()
53 self.writable.set()
54
55 # Buffers
56 self.bytes = b""
57 self.text = ""
58
59 # Protocol interface
60
61 def connection_made(self, transport):
62 self.connections.add(self)
63 self.transport = transport
64 self.server = get_local_addr(transport)
65 self.client = get_remote_addr(transport)
66 self.scheme = "wss" if is_ssl(transport) else "ws"
67
68 if self.logger.level <= TRACE_LOG_LEVEL:
69 prefix = "%s:%d - " % tuple(self.client) if self.client else ""
70 self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix)
71
72 def connection_lost(self, exc):
73 if exc is not None:
74 self.queue.put_nowait({"type": "websocket.disconnect"})
75 self.connections.remove(self)
76
77 if self.logger.level <= TRACE_LOG_LEVEL:
78 prefix = "%s:%d - " % tuple(self.client) if self.client else ""
79 self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection lost", prefix)
80
81 if exc is None:
82 self.transport.close()
83
84 def eof_received(self):
85 pass
86
87 def data_received(self, data):
88 try:
89 self.conn.receive_data(data)
90 except RemoteProtocolError as err:
91 if err.event_hint is not None:
92 self.transport.write(self.conn.send(err.event_hint))
93 self.transport.close()
94 else:
95 self.handle_no_connect(events.CloseConnection())
96 else:
97 self.handle_events()
98
99 def handle_events(self):
100 for event in self.conn.events():
101 if isinstance(event, events.Request):
102 self.handle_connect(event)
103 elif isinstance(event, events.TextMessage):
104 self.handle_text(event)
105 elif isinstance(event, events.BytesMessage):
106 self.handle_bytes(event)
107 elif isinstance(event, events.RejectConnection):
108 self.handle_no_connect(event)
109 elif isinstance(event, events.RejectData):
110 self.handle_no_connect(event)
111 elif isinstance(event, events.CloseConnection):
112 self.handle_close(event)
113 elif isinstance(event, events.Ping):
114 self.handle_ping(event)
115
116 def pause_writing(self):
117 """
118 Called by the transport when the write buffer exceeds the high water mark.
119 """
120 self.writable.clear()
121
122 def resume_writing(self):
123 """
124 Called by the transport when the write buffer drops below the low water mark.
125 """
126 self.writable.set()
127
128 def shutdown(self):
129 self.queue.put_nowait({"type": "websocket.disconnect", "code": 1012})
130 output = self.conn.send(wsproto.events.CloseConnection(code=1012))
131 self.transport.write(output)
132 self.transport.close()
133
134 def on_task_complete(self, task):
135 self.tasks.discard(task)
136
137 # Event handlers
138
139 def handle_connect(self, event):
140 self.connect_event = event
141 headers = [(b"host", event.host.encode())]
142 headers += [(key.lower(), value) for key, value in event.extra_headers]
143 raw_path, _, query_string = event.target.partition("?")
144 self.scope = {
145 "type": "websocket",
146 "asgi": {"version": self.config.asgi_version, "spec_version": "2.3"},
147 "http_version": "1.1",
148 "scheme": self.scheme,
149 "server": self.server,
150 "client": self.client,
151 "root_path": self.root_path,
152 "path": unquote(raw_path),
153 "raw_path": raw_path.encode("ascii"),
154 "query_string": query_string.encode("ascii"),
155 "headers": headers,
156 "subprotocols": event.subprotocols,
157 }
158 self.queue.put_nowait({"type": "websocket.connect"})
159 task = self.loop.create_task(self.run_asgi())
160 task.add_done_callback(self.on_task_complete)
161 self.tasks.add(task)
162
163 def handle_no_connect(self, event):
164 headers = [
165 (b"content-type", b"text/plain; charset=utf-8"),
166 (b"connection", b"close"),
167 ]
168 msg = h11.Response(status_code=400, headers=headers, reason="Bad Request")
169 output = self.conn.send(msg)
170 msg = h11.Data(data=event.reason.encode("utf-8"))
171 output += self.conn.send(msg)
172 msg = h11.EndOfMessage()
173 output += self.conn.send(msg)
174 self.transport.write(output)
175 self.transport.close()
176
177 def handle_text(self, event):
178 self.text += event.data
179 if event.message_finished:
180 self.queue.put_nowait({"type": "websocket.receive", "text": self.text})
181 self.text = ""
182 if not self.read_paused:
183 self.read_paused = True
184 self.transport.pause_reading()
185
186 def handle_bytes(self, event):
187 self.bytes += event.data
188 # todo: we may want to guard the size of self.bytes and self.text
189 if event.message_finished:
190 self.queue.put_nowait({"type": "websocket.receive", "bytes": self.bytes})
191 self.bytes = b""
192 if not self.read_paused:
193 self.read_paused = True
194 self.transport.pause_reading()
195
196 def handle_close(self, event):
197 if self.conn.state == ConnectionState.REMOTE_CLOSING:
198 self.transport.write(self.conn.send(event.response()))
199 self.queue.put_nowait({"type": "websocket.disconnect", "code": event.code})
200 self.transport.close()
201
202 def handle_ping(self, event):
203 self.transport.write(self.conn.send(event.response()))
204
205 def send_500_response(self):
206 headers = [
207 (b"content-type", b"text/plain; charset=utf-8"),
208 (b"connection", b"close"),
209 ]
210 if self.conn.connection is None:
211 output = self.conn.send(wsproto.events.RejectConnection(status_code=500))
212 else:
213 msg = h11.Response(
214 status_code=500, headers=headers, reason="Internal Server Error"
215 )
216 output = self.conn.send(msg)
217 msg = h11.Data(data=b"Internal Server Error")
218 output += self.conn.send(msg)
219 msg = h11.EndOfMessage()
220 output += self.conn.send(msg)
221 self.transport.write(output)
222
223 async def run_asgi(self):
224 try:
225 result = await self.app(self.scope, self.receive, self.send)
226 except BaseException as exc:
227 msg = "Exception in ASGI application\n"
228 self.logger.error(msg, exc_info=exc)
229 if not self.handshake_complete:
230 self.send_500_response()
231 self.transport.close()
232 else:
233 if not self.handshake_complete:
234 msg = "ASGI callable returned without completing handshake."
235 self.logger.error(msg)
236 self.send_500_response()
237 self.transport.close()
238 elif result is not None:
239 msg = "ASGI callable should return None, but returned '%s'."
240 self.logger.error(msg, result)
241 self.transport.close()
242
243 async def send(self, message):
244 await self.writable.wait()
245
246 message_type = message["type"]
247
248 if not self.handshake_complete:
249 if message_type == "websocket.accept":
250 self.logger.info(
251 '%s - "WebSocket %s" [accepted]',
252 self.scope["client"],
253 get_path_with_query_string(self.scope),
254 )
255 self.handshake_complete = True
256 subprotocol = message.get("subprotocol")
257 extra_headers = self.default_headers + list(message.get("headers", []))
258 extensions = []
259 if self.config.ws_per_message_deflate:
260 extensions.append(PerMessageDeflate())
261 output = self.conn.send(
262 wsproto.events.AcceptConnection(
263 subprotocol=subprotocol,
264 extensions=extensions,
265 extra_headers=extra_headers,
266 )
267 )
268 self.transport.write(output)
269
270 elif message_type == "websocket.close":
271 self.queue.put_nowait({"type": "websocket.disconnect", "code": None})
272 self.logger.info(
273 '%s - "WebSocket %s" 403',
274 self.scope["client"],
275 get_path_with_query_string(self.scope),
276 )
277 self.handshake_complete = True
278 self.close_sent = True
279 msg = events.RejectConnection(status_code=403, headers=[])
280 output = self.conn.send(msg)
281 self.transport.write(output)
282 self.transport.close()
283
284 else:
285 msg = (
286 "Expected ASGI message 'websocket.accept' or 'websocket.close', "
287 "but got '%s'."
288 )
289 raise RuntimeError(msg % message_type)
290
291 elif not self.close_sent:
292 if message_type == "websocket.send":
293 bytes_data = message.get("bytes")
294 text_data = message.get("text")
295 data = text_data if bytes_data is None else bytes_data
296 output = self.conn.send(wsproto.events.Message(data=data))
297 if not self.transport.is_closing():
298 self.transport.write(output)
299
300 elif message_type == "websocket.close":
301 self.close_sent = True
302 code = message.get("code", 1000)
303 reason = message.get("reason", "") or ""
304 self.queue.put_nowait({"type": "websocket.disconnect", "code": code})
305 output = self.conn.send(
306 wsproto.events.CloseConnection(code=code, reason=reason)
307 )
308 if not self.transport.is_closing():
309 self.transport.write(output)
310 self.transport.close()
311
312 else:
313 msg = (
314 "Expected ASGI message 'websocket.send' or 'websocket.close',"
315 " but got '%s'."
316 )
317 raise RuntimeError(msg % message_type)
318
319 else:
320 msg = "Unexpected ASGI message '%s', after sending 'websocket.close'."
321 raise RuntimeError(msg % message_type)
322
323 async def receive(self):
324 message = await self.queue.get()
325 if self.read_paused and self.queue.empty():
326 self.read_paused = False
327 self.transport.resume_reading()
328 return message
329
```
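For context on the fix this entry is about: the `connection_lost()` handler in the listing above only queues a `websocket.disconnect` event when `exc is not None`, and the patch shown later in this entry removes that condition so the event is always delivered. The sketch below is an illustrative ASGI WebSocket app (not part of the repository files) that depends on receiving that event to finish its task:

```python
# Illustrative ASGI WebSocket app; it relies on the server delivering a
# "websocket.disconnect" event even when the client disappears without a
# proper close handshake.
async def app(scope, receive, send):
    assert scope["type"] == "websocket"

    message = await receive()            # expect "websocket.connect"
    assert message["type"] == "websocket.connect"
    await send({"type": "websocket.accept"})

    while True:
        message = await receive()
        if message["type"] == "websocket.disconnect":
            break                        # without this event the task never exits
        if message.get("text") is not None:
            await send({"type": "websocket.send", "text": message["text"]})
```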
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/protocols/websockets/wsproto_impl.py b/uvicorn/protocols/websockets/wsproto_impl.py
--- a/uvicorn/protocols/websockets/wsproto_impl.py
+++ b/uvicorn/protocols/websockets/wsproto_impl.py
@@ -70,8 +70,7 @@
self.logger.log(TRACE_LOG_LEVEL, "%sWebSocket connection made", prefix)
def connection_lost(self, exc):
- if exc is not None:
- self.queue.put_nowait({"type": "websocket.disconnect"})
+ self.queue.put_nowait({"type": "websocket.disconnect"})
self.connections.remove(self)
if self.logger.level <= TRACE_LOG_LEVEL:
|
{"golden_diff": "diff --git a/uvicorn/protocols/websockets/wsproto_impl.py b/uvicorn/protocols/websockets/wsproto_impl.py\n--- a/uvicorn/protocols/websockets/wsproto_impl.py\n+++ b/uvicorn/protocols/websockets/wsproto_impl.py\n@@ -70,8 +70,7 @@\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection made\", prefix)\n \n def connection_lost(self, exc):\n- if exc is not None:\n- self.queue.put_nowait({\"type\": \"websocket.disconnect\"})\n+ self.queue.put_nowait({\"type\": \"websocket.disconnect\"})\n self.connections.remove(self)\n \n if self.logger.level <= TRACE_LOG_LEVEL:\n", "issue": "websocket.disconnect events are not sent to applications when clients don\u2019t close connections properly (wsproto only)\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nWhen WebSocket connections are not properly closed by clients, uvicorn + wsproto leaves one zombie asyncio tasks running per connection and do not emit disconnect events to applications.\r\n\r\n### To reproduce\r\n1. Start any application with uvicorn using wsproto;\r\n2. Create a websocket connection;\r\n3. Do not close the websocket properly;\r\n4. Observe that no `websocket.disconnect` events have been sent to your application;\r\n5. Check for all asyncio tasks (`asyncio.all_tasks()`): there should be a zombie `run_asgi` task for each non properly closed connection.\r\n\r\nI\u2019ve setup this repo with instructions to make it easier to reproduce this issue: https://github.com/sephioh/uvicorn-wsproto-issue\r\n\r\n### Expected behavior\r\n\r\nA [disconnect](https://asgi.readthedocs.io/en/latest/specs/www.html#disconnect-receive-event-ws) event should be sent to applications. \r\n\r\n### Actual behavior\r\n\r\nDisconnect event are not being sent to apps when client socket loss happens. 
\r\n\r\n### Environment\r\n\r\nRunning uvicorn 0.13.4 with CPython 3.9.0 on Linux\r\nMust be executed with wsproto (\u2014ws wsproto).\r\n\r\n### Additional context\r\n\r\nhttps://docs.python.org/3/library/asyncio-protocol.html#asyncio.BaseProtocol.connection_lost\r\n\r\nNo problems are found when running with websockets impl (\u2014ws websockets).\r\n\r\nI\u2019ve opened a PR with a possible fix: https://github.com/encode/uvicorn/pull/996\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom urllib.parse import unquote\n\nimport h11\nimport wsproto\nfrom wsproto import ConnectionType, events\nfrom wsproto.connection import ConnectionState\nfrom wsproto.extensions import PerMessageDeflate\nfrom wsproto.utilities import RemoteProtocolError\n\nfrom uvicorn.logging import TRACE_LOG_LEVEL\nfrom uvicorn.protocols.utils import (\n get_local_addr,\n get_path_with_query_string,\n get_remote_addr,\n is_ssl,\n)\n\n\nclass WSProtocol(asyncio.Protocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n self.default_headers = server_state.default_headers\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # WebSocket state\n self.connect_event = None\n self.queue = asyncio.Queue()\n self.handshake_complete = False\n self.close_sent = False\n\n self.conn = wsproto.WSConnection(connection_type=ConnectionType.SERVER)\n\n self.read_paused = False\n self.writable = asyncio.Event()\n self.writable.set()\n\n # Buffers\n self.bytes = b\"\"\n self.text = \"\"\n\n # Protocol interface\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n\n if self.logger.level <= TRACE_LOG_LEVEL:\n prefix = \"%s:%d - \" % tuple(self.client) if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection made\", prefix)\n\n def connection_lost(self, exc):\n if exc is not None:\n self.queue.put_nowait({\"type\": \"websocket.disconnect\"})\n self.connections.remove(self)\n\n if self.logger.level <= TRACE_LOG_LEVEL:\n prefix = \"%s:%d - \" % tuple(self.client) if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection lost\", prefix)\n\n if exc is None:\n self.transport.close()\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n try:\n self.conn.receive_data(data)\n except RemoteProtocolError as err:\n if err.event_hint is not None:\n self.transport.write(self.conn.send(err.event_hint))\n self.transport.close()\n else:\n self.handle_no_connect(events.CloseConnection())\n else:\n self.handle_events()\n\n def handle_events(self):\n for event in self.conn.events():\n if isinstance(event, events.Request):\n self.handle_connect(event)\n elif isinstance(event, events.TextMessage):\n self.handle_text(event)\n elif isinstance(event, events.BytesMessage):\n self.handle_bytes(event)\n elif isinstance(event, events.RejectConnection):\n self.handle_no_connect(event)\n elif isinstance(event, events.RejectData):\n self.handle_no_connect(event)\n elif isinstance(event, 
events.CloseConnection):\n self.handle_close(event)\n elif isinstance(event, events.Ping):\n self.handle_ping(event)\n\n def pause_writing(self):\n \"\"\"\n Called by the transport when the write buffer exceeds the high water mark.\n \"\"\"\n self.writable.clear()\n\n def resume_writing(self):\n \"\"\"\n Called by the transport when the write buffer drops below the low water mark.\n \"\"\"\n self.writable.set()\n\n def shutdown(self):\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": 1012})\n output = self.conn.send(wsproto.events.CloseConnection(code=1012))\n self.transport.write(output)\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n # Event handlers\n\n def handle_connect(self, event):\n self.connect_event = event\n headers = [(b\"host\", event.host.encode())]\n headers += [(key.lower(), value) for key, value in event.extra_headers]\n raw_path, _, query_string = event.target.partition(\"?\")\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.3\"},\n \"http_version\": \"1.1\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(raw_path),\n \"raw_path\": raw_path.encode(\"ascii\"),\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": headers,\n \"subprotocols\": event.subprotocols,\n }\n self.queue.put_nowait({\"type\": \"websocket.connect\"})\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n\n def handle_no_connect(self, event):\n headers = [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ]\n msg = h11.Response(status_code=400, headers=headers, reason=\"Bad Request\")\n output = self.conn.send(msg)\n msg = h11.Data(data=event.reason.encode(\"utf-8\"))\n output += self.conn.send(msg)\n msg = h11.EndOfMessage()\n output += self.conn.send(msg)\n self.transport.write(output)\n self.transport.close()\n\n def handle_text(self, event):\n self.text += event.data\n if event.message_finished:\n self.queue.put_nowait({\"type\": \"websocket.receive\", \"text\": self.text})\n self.text = \"\"\n if not self.read_paused:\n self.read_paused = True\n self.transport.pause_reading()\n\n def handle_bytes(self, event):\n self.bytes += event.data\n # todo: we may want to guard the size of self.bytes and self.text\n if event.message_finished:\n self.queue.put_nowait({\"type\": \"websocket.receive\", \"bytes\": self.bytes})\n self.bytes = b\"\"\n if not self.read_paused:\n self.read_paused = True\n self.transport.pause_reading()\n\n def handle_close(self, event):\n if self.conn.state == ConnectionState.REMOTE_CLOSING:\n self.transport.write(self.conn.send(event.response()))\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": event.code})\n self.transport.close()\n\n def handle_ping(self, event):\n self.transport.write(self.conn.send(event.response()))\n\n def send_500_response(self):\n headers = [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ]\n if self.conn.connection is None:\n output = self.conn.send(wsproto.events.RejectConnection(status_code=500))\n else:\n msg = h11.Response(\n status_code=500, headers=headers, reason=\"Internal Server Error\"\n )\n output = self.conn.send(msg)\n msg = h11.Data(data=b\"Internal Server Error\")\n output += self.conn.send(msg)\n msg = h11.EndOfMessage()\n output += 
self.conn.send(msg)\n self.transport.write(output)\n\n async def run_asgi(self):\n try:\n result = await self.app(self.scope, self.receive, self.send)\n except BaseException as exc:\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_complete:\n self.send_500_response()\n self.transport.close()\n else:\n if not self.handshake_complete:\n msg = \"ASGI callable returned without completing handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n self.transport.close()\n\n async def send(self, message):\n await self.writable.wait()\n\n message_type = message[\"type\"]\n\n if not self.handshake_complete:\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.handshake_complete = True\n subprotocol = message.get(\"subprotocol\")\n extra_headers = self.default_headers + list(message.get(\"headers\", []))\n extensions = []\n if self.config.ws_per_message_deflate:\n extensions.append(PerMessageDeflate())\n output = self.conn.send(\n wsproto.events.AcceptConnection(\n subprotocol=subprotocol,\n extensions=extensions,\n extra_headers=extra_headers,\n )\n )\n self.transport.write(output)\n\n elif message_type == \"websocket.close\":\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": None})\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.handshake_complete = True\n self.close_sent = True\n msg = events.RejectConnection(status_code=403, headers=[])\n output = self.conn.send(msg)\n self.transport.write(output)\n self.transport.close()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.close_sent:\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n output = self.conn.send(wsproto.events.Message(data=data))\n if not self.transport.is_closing():\n self.transport.write(output)\n\n elif message_type == \"websocket.close\":\n self.close_sent = True\n code = message.get(\"code\", 1000)\n reason = message.get(\"reason\", \"\") or \"\"\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": code})\n output = self.conn.send(\n wsproto.events.CloseConnection(code=code, reason=reason)\n )\n if not self.transport.is_closing():\n self.transport.write(output)\n self.transport.close()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def receive(self):\n message = await self.queue.get()\n if self.read_paused and self.queue.empty():\n self.read_paused = False\n self.transport.resume_reading()\n return message\n", "path": "uvicorn/protocols/websockets/wsproto_impl.py"}], "after_files": [{"content": "import asyncio\nimport logging\nfrom urllib.parse import unquote\n\nimport h11\nimport wsproto\nfrom wsproto import ConnectionType, events\nfrom wsproto.connection import ConnectionState\nfrom 
wsproto.extensions import PerMessageDeflate\nfrom wsproto.utilities import RemoteProtocolError\n\nfrom uvicorn.logging import TRACE_LOG_LEVEL\nfrom uvicorn.protocols.utils import (\n get_local_addr,\n get_path_with_query_string,\n get_remote_addr,\n is_ssl,\n)\n\n\nclass WSProtocol(asyncio.Protocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n self.default_headers = server_state.default_headers\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # WebSocket state\n self.connect_event = None\n self.queue = asyncio.Queue()\n self.handshake_complete = False\n self.close_sent = False\n\n self.conn = wsproto.WSConnection(connection_type=ConnectionType.SERVER)\n\n self.read_paused = False\n self.writable = asyncio.Event()\n self.writable.set()\n\n # Buffers\n self.bytes = b\"\"\n self.text = \"\"\n\n # Protocol interface\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n\n if self.logger.level <= TRACE_LOG_LEVEL:\n prefix = \"%s:%d - \" % tuple(self.client) if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection made\", prefix)\n\n def connection_lost(self, exc):\n self.queue.put_nowait({\"type\": \"websocket.disconnect\"})\n self.connections.remove(self)\n\n if self.logger.level <= TRACE_LOG_LEVEL:\n prefix = \"%s:%d - \" % tuple(self.client) if self.client else \"\"\n self.logger.log(TRACE_LOG_LEVEL, \"%sWebSocket connection lost\", prefix)\n\n if exc is None:\n self.transport.close()\n\n def eof_received(self):\n pass\n\n def data_received(self, data):\n try:\n self.conn.receive_data(data)\n except RemoteProtocolError as err:\n if err.event_hint is not None:\n self.transport.write(self.conn.send(err.event_hint))\n self.transport.close()\n else:\n self.handle_no_connect(events.CloseConnection())\n else:\n self.handle_events()\n\n def handle_events(self):\n for event in self.conn.events():\n if isinstance(event, events.Request):\n self.handle_connect(event)\n elif isinstance(event, events.TextMessage):\n self.handle_text(event)\n elif isinstance(event, events.BytesMessage):\n self.handle_bytes(event)\n elif isinstance(event, events.RejectConnection):\n self.handle_no_connect(event)\n elif isinstance(event, events.RejectData):\n self.handle_no_connect(event)\n elif isinstance(event, events.CloseConnection):\n self.handle_close(event)\n elif isinstance(event, events.Ping):\n self.handle_ping(event)\n\n def pause_writing(self):\n \"\"\"\n Called by the transport when the write buffer exceeds the high water mark.\n \"\"\"\n self.writable.clear()\n\n def resume_writing(self):\n \"\"\"\n Called by the transport when the write buffer drops below the low water mark.\n \"\"\"\n self.writable.set()\n\n def shutdown(self):\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": 1012})\n output = self.conn.send(wsproto.events.CloseConnection(code=1012))\n self.transport.write(output)\n self.transport.close()\n\n def on_task_complete(self, task):\n 
self.tasks.discard(task)\n\n # Event handlers\n\n def handle_connect(self, event):\n self.connect_event = event\n headers = [(b\"host\", event.host.encode())]\n headers += [(key.lower(), value) for key, value in event.extra_headers]\n raw_path, _, query_string = event.target.partition(\"?\")\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.3\"},\n \"http_version\": \"1.1\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(raw_path),\n \"raw_path\": raw_path.encode(\"ascii\"),\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": headers,\n \"subprotocols\": event.subprotocols,\n }\n self.queue.put_nowait({\"type\": \"websocket.connect\"})\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n\n def handle_no_connect(self, event):\n headers = [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ]\n msg = h11.Response(status_code=400, headers=headers, reason=\"Bad Request\")\n output = self.conn.send(msg)\n msg = h11.Data(data=event.reason.encode(\"utf-8\"))\n output += self.conn.send(msg)\n msg = h11.EndOfMessage()\n output += self.conn.send(msg)\n self.transport.write(output)\n self.transport.close()\n\n def handle_text(self, event):\n self.text += event.data\n if event.message_finished:\n self.queue.put_nowait({\"type\": \"websocket.receive\", \"text\": self.text})\n self.text = \"\"\n if not self.read_paused:\n self.read_paused = True\n self.transport.pause_reading()\n\n def handle_bytes(self, event):\n self.bytes += event.data\n # todo: we may want to guard the size of self.bytes and self.text\n if event.message_finished:\n self.queue.put_nowait({\"type\": \"websocket.receive\", \"bytes\": self.bytes})\n self.bytes = b\"\"\n if not self.read_paused:\n self.read_paused = True\n self.transport.pause_reading()\n\n def handle_close(self, event):\n if self.conn.state == ConnectionState.REMOTE_CLOSING:\n self.transport.write(self.conn.send(event.response()))\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": event.code})\n self.transport.close()\n\n def handle_ping(self, event):\n self.transport.write(self.conn.send(event.response()))\n\n def send_500_response(self):\n headers = [\n (b\"content-type\", b\"text/plain; charset=utf-8\"),\n (b\"connection\", b\"close\"),\n ]\n if self.conn.connection is None:\n output = self.conn.send(wsproto.events.RejectConnection(status_code=500))\n else:\n msg = h11.Response(\n status_code=500, headers=headers, reason=\"Internal Server Error\"\n )\n output = self.conn.send(msg)\n msg = h11.Data(data=b\"Internal Server Error\")\n output += self.conn.send(msg)\n msg = h11.EndOfMessage()\n output += self.conn.send(msg)\n self.transport.write(output)\n\n async def run_asgi(self):\n try:\n result = await self.app(self.scope, self.receive, self.send)\n except BaseException as exc:\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_complete:\n self.send_500_response()\n self.transport.close()\n else:\n if not self.handshake_complete:\n msg = \"ASGI callable returned without completing handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n self.transport.close()\n\n 
async def send(self, message):\n await self.writable.wait()\n\n message_type = message[\"type\"]\n\n if not self.handshake_complete:\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.handshake_complete = True\n subprotocol = message.get(\"subprotocol\")\n extra_headers = self.default_headers + list(message.get(\"headers\", []))\n extensions = []\n if self.config.ws_per_message_deflate:\n extensions.append(PerMessageDeflate())\n output = self.conn.send(\n wsproto.events.AcceptConnection(\n subprotocol=subprotocol,\n extensions=extensions,\n extra_headers=extra_headers,\n )\n )\n self.transport.write(output)\n\n elif message_type == \"websocket.close\":\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": None})\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n get_path_with_query_string(self.scope),\n )\n self.handshake_complete = True\n self.close_sent = True\n msg = events.RejectConnection(status_code=403, headers=[])\n output = self.conn.send(msg)\n self.transport.write(output)\n self.transport.close()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.close_sent:\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n output = self.conn.send(wsproto.events.Message(data=data))\n if not self.transport.is_closing():\n self.transport.write(output)\n\n elif message_type == \"websocket.close\":\n self.close_sent = True\n code = message.get(\"code\", 1000)\n reason = message.get(\"reason\", \"\") or \"\"\n self.queue.put_nowait({\"type\": \"websocket.disconnect\", \"code\": code})\n output = self.conn.send(\n wsproto.events.CloseConnection(code=code, reason=reason)\n )\n if not self.transport.is_closing():\n self.transport.write(output)\n self.transport.close()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def receive(self):\n message = await self.queue.get()\n if self.read_paused and self.queue.empty():\n self.read_paused = False\n self.transport.resume_reading()\n return message\n", "path": "uvicorn/protocols/websockets/wsproto_impl.py"}]}
| 3,947 | 147 |
gh_patches_debug_8589
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-987
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Basis UNet
On what paper is your UNet based?
The original 2D seems to differ quite a lot from the 3D U-Net by Özgün Çiçek which I'd like to use.
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `monai/networks/nets/unet.py`
Content:
```
1 # Copyright 2020 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 from typing import Sequence, Union
13
14 import torch
15 import torch.nn as nn
16
17 from monai.networks.blocks.convolutions import Convolution, ResidualUnit
18 from monai.networks.layers.factories import Act, Norm
19 from monai.networks.layers.simplelayers import SkipConnection
20 from monai.utils import alias, export
21
22
23 @export("monai.networks.nets")
24 @alias("Unet")
25 class UNet(nn.Module):
26 def __init__(
27 self,
28 dimensions: int,
29 in_channels: int,
30 out_channels: int,
31 channels: Sequence[int],
32 strides: Sequence[int],
33 kernel_size: Union[Sequence[int], int] = 3,
34 up_kernel_size: Union[Sequence[int], int] = 3,
35 num_res_units: int = 0,
36 act=Act.PRELU,
37 norm=Norm.INSTANCE,
38 dropout=0,
39 ) -> None:
40 """
41 Args:
42 dimensions: number of spatial dimensions.
43 in_channels: number of input channels.
44 out_channels: number of output channels.
45 channels: sequence of channels. Top block first.
46 strides: convolution stride.
47 kernel_size: convolution kernel size. Defaults to 3.
48 up_kernel_size: upsampling convolution kernel size. Defaults to 3.
49 num_res_units: number of residual units. Defaults to 0.
50 act: activation type and arguments. Defaults to PReLU.
51 norm: feature normalization type and arguments. Defaults to instance norm.
52 dropout: dropout ratio. Defaults to no dropout.
53 """
54 super().__init__()
55
56 self.dimensions = dimensions
57 self.in_channels = in_channels
58 self.out_channels = out_channels
59 self.channels = channels
60 self.strides = strides
61 self.kernel_size = kernel_size
62 self.up_kernel_size = up_kernel_size
63 self.num_res_units = num_res_units
64 self.act = act
65 self.norm = norm
66 self.dropout = dropout
67
68 def _create_block(
69 inc: int, outc: int, channels: Sequence[int], strides: Sequence[int], is_top: bool
70 ) -> nn.Sequential:
71 """
72 Builds the UNet structure from the bottom up by recursing down to the bottom block, then creating sequential
73 blocks containing the downsample path, a skip connection around the previous block, and the upsample path.
74
75 Args:
76 inc: number of input channels.
77 outc: number of output channels.
78 channels: sequence of channels. Top block first.
79 strides: convolution stride.
80 is_top: True if this is the top block.
81 """
82 c = channels[0]
83 s = strides[0]
84
85 subblock: Union[nn.Sequential, ResidualUnit, Convolution]
86
87 if len(channels) > 2:
88 subblock = _create_block(c, c, channels[1:], strides[1:], False) # continue recursion down
89 upc = c * 2
90 else:
91 # the next layer is the bottom so stop recursion, create the bottom layer as the sublock for this layer
92 subblock = self._get_bottom_layer(c, channels[1])
93 upc = c + channels[1]
94
95 down = self._get_down_layer(inc, c, s, is_top) # create layer in downsampling path
96 up = self._get_up_layer(upc, outc, s, is_top) # create layer in upsampling path
97
98 return nn.Sequential(down, SkipConnection(subblock), up)
99
100 self.model = _create_block(in_channels, out_channels, self.channels, self.strides, True)
101
102 def _get_down_layer(
103 self, in_channels: int, out_channels: int, strides: int, is_top: bool
104 ) -> Union[ResidualUnit, Convolution]:
105 """
106 Args:
107 in_channels: number of input channels.
108 out_channels: number of output channels.
109 strides: convolution stride.
110 is_top: True if this is the top block.
111 """
112 if self.num_res_units > 0:
113 return ResidualUnit(
114 self.dimensions,
115 in_channels,
116 out_channels,
117 strides=strides,
118 kernel_size=self.kernel_size,
119 subunits=self.num_res_units,
120 act=self.act,
121 norm=self.norm,
122 dropout=self.dropout,
123 )
124 else:
125 return Convolution(
126 self.dimensions,
127 in_channels,
128 out_channels,
129 strides=strides,
130 kernel_size=self.kernel_size,
131 act=self.act,
132 norm=self.norm,
133 dropout=self.dropout,
134 )
135
136 def _get_bottom_layer(self, in_channels: int, out_channels: int) -> Union[ResidualUnit, Convolution]:
137 """
138 Args:
139 in_channels: number of input channels.
140 out_channels: number of output channels.
141 """
142 return self._get_down_layer(in_channels, out_channels, 1, False)
143
144 def _get_up_layer(
145 self, in_channels: int, out_channels: int, strides: int, is_top: bool
146 ) -> Union[Convolution, nn.Sequential]:
147 """
148 Args:
149 in_channels: number of input channels.
150 out_channels: number of output channels.
151 strides: convolution stride.
152 is_top: True if this is the top block.
153 """
154 conv: Union[Convolution, nn.Sequential]
155
156 conv = Convolution(
157 self.dimensions,
158 in_channels,
159 out_channels,
160 strides=strides,
161 kernel_size=self.up_kernel_size,
162 act=self.act,
163 norm=self.norm,
164 dropout=self.dropout,
165 conv_only=is_top and self.num_res_units == 0,
166 is_transposed=True,
167 )
168
169 if self.num_res_units > 0:
170 ru = ResidualUnit(
171 self.dimensions,
172 out_channels,
173 out_channels,
174 strides=1,
175 kernel_size=self.kernel_size,
176 subunits=1,
177 act=self.act,
178 norm=self.norm,
179 dropout=self.dropout,
180 last_conv_only=is_top,
181 )
182 conv = nn.Sequential(conv, ru)
183
184 return conv
185
186 def forward(self, x: torch.Tensor) -> torch.Tensor:
187 x = self.model(x)
188 return x
189
190
191 Unet = unet = UNet
192
```
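The `_create_block` docstring above describes how the network is assembled recursively: a down-layer, a skip connection wrapping the sub-block, then an up-layer. A short usage sketch of the constructor exactly as defined in this listing; the argument values are illustrative only and are not taken from the issue:

```python
# Example instantiation of the UNet defined above (argument values are illustrative).
import torch
from monai.networks.nets import UNet

net = UNet(
    dimensions=3,           # 3D variant
    in_channels=1,
    out_channels=2,
    channels=(16, 32, 64),  # one entry per level, top block first
    strides=(2, 2),         # one entry per downsampling step
    num_res_units=2,        # residual units per block
)

x = torch.randn(1, 1, 64, 64, 64)
y = net(x)                  # expected shape: (1, 2, 64, 64, 64)
```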
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py
--- a/monai/networks/nets/unet.py
+++ b/monai/networks/nets/unet.py
@@ -38,6 +38,11 @@
dropout=0,
) -> None:
"""
+ Enhanced version of UNet which has residual units implemented with the ResidualUnit class.
+ The residual part uses a convolution to change the input dimensions to match the output dimensions
+ if this is necessary but will use nn.Identity if not.
+ Refer to: https://link.springer.com/chapter/10.1007/978-3-030-12029-0_40.
+
Args:
dimensions: number of spatial dimensions.
in_channels: number of input channels.
|
{"golden_diff": "diff --git a/monai/networks/nets/unet.py b/monai/networks/nets/unet.py\n--- a/monai/networks/nets/unet.py\n+++ b/monai/networks/nets/unet.py\n@@ -38,6 +38,11 @@\n dropout=0,\n ) -> None:\n \"\"\"\n+ Enhanced version of UNet which has residual units implemented with the ResidualUnit class.\n+ The residual part uses a convolution to change the input dimensions to match the output dimensions\n+ if this is necessary but will use nn.Identity if not.\n+ Refer to: https://link.springer.com/chapter/10.1007/978-3-030-12029-0_40.\n+\n Args:\n dimensions: number of spatial dimensions.\n in_channels: number of input channels.\n", "issue": "Basis UNet\nOn what paper is your UNet based?\r\nThe original 2D seems to differ quite a lot from the 3D U-Net by \u00d6zg\u00fcn \u00c7i\u00e7ek which I'd like to use. \r\n\r\nThanks.\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Sequence, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks.convolutions import Convolution, ResidualUnit\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.layers.simplelayers import SkipConnection\nfrom monai.utils import alias, export\n\n\n@export(\"monai.networks.nets\")\n@alias(\"Unet\")\nclass UNet(nn.Module):\n def __init__(\n self,\n dimensions: int,\n in_channels: int,\n out_channels: int,\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Union[Sequence[int], int] = 3,\n up_kernel_size: Union[Sequence[int], int] = 3,\n num_res_units: int = 0,\n act=Act.PRELU,\n norm=Norm.INSTANCE,\n dropout=0,\n ) -> None:\n \"\"\"\n Args:\n dimensions: number of spatial dimensions.\n in_channels: number of input channels.\n out_channels: number of output channels.\n channels: sequence of channels. Top block first.\n strides: convolution stride.\n kernel_size: convolution kernel size. Defaults to 3.\n up_kernel_size: upsampling convolution kernel size. Defaults to 3.\n num_res_units: number of residual units. Defaults to 0.\n act: activation type and arguments. Defaults to PReLU.\n norm: feature normalization type and arguments. Defaults to instance norm.\n dropout: dropout ratio. 
Defaults to no dropout.\n \"\"\"\n super().__init__()\n\n self.dimensions = dimensions\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.channels = channels\n self.strides = strides\n self.kernel_size = kernel_size\n self.up_kernel_size = up_kernel_size\n self.num_res_units = num_res_units\n self.act = act\n self.norm = norm\n self.dropout = dropout\n\n def _create_block(\n inc: int, outc: int, channels: Sequence[int], strides: Sequence[int], is_top: bool\n ) -> nn.Sequential:\n \"\"\"\n Builds the UNet structure from the bottom up by recursing down to the bottom block, then creating sequential\n blocks containing the downsample path, a skip connection around the previous block, and the upsample path.\n\n Args:\n inc: number of input channels.\n outc: number of output channels.\n channels: sequence of channels. Top block first.\n strides: convolution stride.\n is_top: True if this is the top block.\n \"\"\"\n c = channels[0]\n s = strides[0]\n\n subblock: Union[nn.Sequential, ResidualUnit, Convolution]\n\n if len(channels) > 2:\n subblock = _create_block(c, c, channels[1:], strides[1:], False) # continue recursion down\n upc = c * 2\n else:\n # the next layer is the bottom so stop recursion, create the bottom layer as the sublock for this layer\n subblock = self._get_bottom_layer(c, channels[1])\n upc = c + channels[1]\n\n down = self._get_down_layer(inc, c, s, is_top) # create layer in downsampling path\n up = self._get_up_layer(upc, outc, s, is_top) # create layer in upsampling path\n\n return nn.Sequential(down, SkipConnection(subblock), up)\n\n self.model = _create_block(in_channels, out_channels, self.channels, self.strides, True)\n\n def _get_down_layer(\n self, in_channels: int, out_channels: int, strides: int, is_top: bool\n ) -> Union[ResidualUnit, Convolution]:\n \"\"\"\n Args:\n in_channels: number of input channels.\n out_channels: number of output channels.\n strides: convolution stride.\n is_top: True if this is the top block.\n \"\"\"\n if self.num_res_units > 0:\n return ResidualUnit(\n self.dimensions,\n in_channels,\n out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n subunits=self.num_res_units,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n )\n else:\n return Convolution(\n self.dimensions,\n in_channels,\n out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n )\n\n def _get_bottom_layer(self, in_channels: int, out_channels: int) -> Union[ResidualUnit, Convolution]:\n \"\"\"\n Args:\n in_channels: number of input channels.\n out_channels: number of output channels.\n \"\"\"\n return self._get_down_layer(in_channels, out_channels, 1, False)\n\n def _get_up_layer(\n self, in_channels: int, out_channels: int, strides: int, is_top: bool\n ) -> Union[Convolution, nn.Sequential]:\n \"\"\"\n Args:\n in_channels: number of input channels.\n out_channels: number of output channels.\n strides: convolution stride.\n is_top: True if this is the top block.\n \"\"\"\n conv: Union[Convolution, nn.Sequential]\n\n conv = Convolution(\n self.dimensions,\n in_channels,\n out_channels,\n strides=strides,\n kernel_size=self.up_kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n conv_only=is_top and self.num_res_units == 0,\n is_transposed=True,\n )\n\n if self.num_res_units > 0:\n ru = ResidualUnit(\n self.dimensions,\n out_channels,\n out_channels,\n strides=1,\n kernel_size=self.kernel_size,\n subunits=1,\n act=self.act,\n norm=self.norm,\n 
dropout=self.dropout,\n last_conv_only=is_top,\n )\n conv = nn.Sequential(conv, ru)\n\n return conv\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.model(x)\n return x\n\n\nUnet = unet = UNet\n", "path": "monai/networks/nets/unet.py"}], "after_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Sequence, Union\n\nimport torch\nimport torch.nn as nn\n\nfrom monai.networks.blocks.convolutions import Convolution, ResidualUnit\nfrom monai.networks.layers.factories import Act, Norm\nfrom monai.networks.layers.simplelayers import SkipConnection\nfrom monai.utils import alias, export\n\n\n@export(\"monai.networks.nets\")\n@alias(\"Unet\")\nclass UNet(nn.Module):\n def __init__(\n self,\n dimensions: int,\n in_channels: int,\n out_channels: int,\n channels: Sequence[int],\n strides: Sequence[int],\n kernel_size: Union[Sequence[int], int] = 3,\n up_kernel_size: Union[Sequence[int], int] = 3,\n num_res_units: int = 0,\n act=Act.PRELU,\n norm=Norm.INSTANCE,\n dropout=0,\n ) -> None:\n \"\"\"\n Enhanced version of UNet which has residual units implemented with the ResidualUnit class.\n The residual part uses a convolution to change the input dimensions to match the output dimensions\n if this is necessary but will use nn.Identity if not.\n Refer to: https://link.springer.com/chapter/10.1007/978-3-030-12029-0_40.\n\n Args:\n dimensions: number of spatial dimensions.\n in_channels: number of input channels.\n out_channels: number of output channels.\n channels: sequence of channels. Top block first.\n strides: convolution stride.\n kernel_size: convolution kernel size. Defaults to 3.\n up_kernel_size: upsampling convolution kernel size. Defaults to 3.\n num_res_units: number of residual units. Defaults to 0.\n act: activation type and arguments. Defaults to PReLU.\n norm: feature normalization type and arguments. Defaults to instance norm.\n dropout: dropout ratio. Defaults to no dropout.\n \"\"\"\n super().__init__()\n\n self.dimensions = dimensions\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.channels = channels\n self.strides = strides\n self.kernel_size = kernel_size\n self.up_kernel_size = up_kernel_size\n self.num_res_units = num_res_units\n self.act = act\n self.norm = norm\n self.dropout = dropout\n\n def _create_block(\n inc: int, outc: int, channels: Sequence[int], strides: Sequence[int], is_top: bool\n ) -> nn.Sequential:\n \"\"\"\n Builds the UNet structure from the bottom up by recursing down to the bottom block, then creating sequential\n blocks containing the downsample path, a skip connection around the previous block, and the upsample path.\n\n Args:\n inc: number of input channels.\n outc: number of output channels.\n channels: sequence of channels. 
Top block first.\n strides: convolution stride.\n is_top: True if this is the top block.\n \"\"\"\n c = channels[0]\n s = strides[0]\n\n subblock: Union[nn.Sequential, ResidualUnit, Convolution]\n\n if len(channels) > 2:\n subblock = _create_block(c, c, channels[1:], strides[1:], False) # continue recursion down\n upc = c * 2\n else:\n # the next layer is the bottom so stop recursion, create the bottom layer as the sublock for this layer\n subblock = self._get_bottom_layer(c, channels[1])\n upc = c + channels[1]\n\n down = self._get_down_layer(inc, c, s, is_top) # create layer in downsampling path\n up = self._get_up_layer(upc, outc, s, is_top) # create layer in upsampling path\n\n return nn.Sequential(down, SkipConnection(subblock), up)\n\n self.model = _create_block(in_channels, out_channels, self.channels, self.strides, True)\n\n def _get_down_layer(\n self, in_channels: int, out_channels: int, strides: int, is_top: bool\n ) -> Union[ResidualUnit, Convolution]:\n \"\"\"\n Args:\n in_channels: number of input channels.\n out_channels: number of output channels.\n strides: convolution stride.\n is_top: True if this is the top block.\n \"\"\"\n if self.num_res_units > 0:\n return ResidualUnit(\n self.dimensions,\n in_channels,\n out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n subunits=self.num_res_units,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n )\n else:\n return Convolution(\n self.dimensions,\n in_channels,\n out_channels,\n strides=strides,\n kernel_size=self.kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n )\n\n def _get_bottom_layer(self, in_channels: int, out_channels: int) -> Union[ResidualUnit, Convolution]:\n \"\"\"\n Args:\n in_channels: number of input channels.\n out_channels: number of output channels.\n \"\"\"\n return self._get_down_layer(in_channels, out_channels, 1, False)\n\n def _get_up_layer(\n self, in_channels: int, out_channels: int, strides: int, is_top: bool\n ) -> Union[Convolution, nn.Sequential]:\n \"\"\"\n Args:\n in_channels: number of input channels.\n out_channels: number of output channels.\n strides: convolution stride.\n is_top: True if this is the top block.\n \"\"\"\n conv: Union[Convolution, nn.Sequential]\n\n conv = Convolution(\n self.dimensions,\n in_channels,\n out_channels,\n strides=strides,\n kernel_size=self.up_kernel_size,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n conv_only=is_top and self.num_res_units == 0,\n is_transposed=True,\n )\n\n if self.num_res_units > 0:\n ru = ResidualUnit(\n self.dimensions,\n out_channels,\n out_channels,\n strides=1,\n kernel_size=self.kernel_size,\n subunits=1,\n act=self.act,\n norm=self.norm,\n dropout=self.dropout,\n last_conv_only=is_top,\n )\n conv = nn.Sequential(conv, ru)\n\n return conv\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.model(x)\n return x\n\n\nUnet = unet = UNet\n", "path": "monai/networks/nets/unet.py"}]}
| 2,241 | 193 |
gh_patches_debug_43552
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-4202
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.ard_mediathek: rewrite plugin
Resolves #4191
One issue I couldn't fix is the text encoding of the metadata which gets messed up by `validate.parse_html()`. See the VOD title down below...
https://github.com/streamlink/streamlink/blob/175d4748561c7154bb80c5a47dae22039e45d4ce/src/streamlink/utils/parse.py#L54-L55
Some VODs also have a second title, eg. if it's a TV show, but I couldn't be bothered to implement this. Not important.
----
Das Erste - Live:
```
$ streamlink -l debug --title '{author} - {title}' 'https://www.ardmediathek.de/daserste/live/Y3JpZDovL2Rhc2Vyc3RlLmRlL0xpdmVzdHJlYW0tRGFzRXJzdGU/' best
[cli.output][debug] Opening subprocess: mpv "--force-media-title=Das Erste - Das Erste" -
```
WDR - Live:
```
$ streamlink -l debug --title '{author} - {title}' 'https://www.ardmediathek.de/live/Y3JpZDovL3dkci5kZS9CZWl0cmFnLTNkYTY2NGRlLTE4YzItNDY1MC1hNGZmLTRmNjQxNDcyMDcyYg/' best
[cli.output][debug] Opening subprocess: mpv "--force-media-title=WDR - WDR Fernsehen im Livestream" -
```
VOD
```
$ streamlink -l debug --title '{author} - {title}' 'https://www.ardmediathek.de/video/dokus-im-ersten/wirecard-die-milliarden-luege/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3JlcG9ydGFnZSBfIGRva3VtZW50YXRpb24gaW0gZXJzdGVuL2NlMjQ0OWM4LTQ4YTUtNGIyNC1iMTdlLWNhOTNjMDQ5OTc4Zg/' best
[cli.output][debug] Opening subprocess: mpv "--force-media-title=Das Erste - Wirecard - Die Milliarden-Lüge" -
```
--- END ISSUE ---
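The garbled metadata mentioned in the issue is consistent with the decode-before-parse pitfall, where text is decoded with the wrong codec before the HTML parser ever sees it. Whether that is the exact root cause here is not established in this entry; the sketch below only illustrates the general effect with lxml and is not streamlink's actual code path:

```python
# Generic illustration of the decode-before-parse pitfall (not streamlink code).
from lxml import etree

raw = (
    "<!DOCTYPE html><html><head><meta charset='utf-8'/></head>"
    "<body><h1>Die Milliarden-Lüge</h1></body></html>"
).encode("utf-8")

ok = etree.HTML(raw)                      # parser reads the bytes and honours the charset
bad = etree.HTML(raw.decode("latin-1"))   # damage done before parsing: mojibake survives

print(ok.findtext(".//h1"))   # Die Milliarden-Lüge
print(bad.findtext(".//h1"))  # Die Milliarden-LÃ¼ge
```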
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/ard_mediathek.py`
Content:
```
1 import logging
2 import re
3
4 from streamlink.plugin import Plugin, pluginmatcher
5 from streamlink.plugin.api import validate
6 from streamlink.stream.hls import HLSStream
7
8
9 log = logging.getLogger(__name__)
10
11
12 @pluginmatcher(re.compile(
13 r"https?://(?:(\w+\.)?ardmediathek\.de/|mediathek\.daserste\.de/)"
14 ))
15 class ARDMediathek(Plugin):
16 def _get_streams(self):
17 data_json = self.session.http.get(self.url, schema=validate.Schema(
18 validate.parse_html(),
19 validate.xml_findtext(".//script[@id='fetchedContextValue'][@type='application/json']"),
20 validate.any(None, validate.all(
21 validate.parse_json(),
22 {str: dict},
23 validate.transform(lambda obj: list(obj.items())),
24 validate.filter(lambda item: item[0].startswith("https://api.ardmediathek.de/page-gateway/pages/")),
25 validate.any(validate.get((0, 1)), [])
26 ))
27 ))
28 if not data_json:
29 return
30
31 schema_data = validate.Schema({
32 "id": str,
33 "widgets": validate.all(
34 [dict],
35 validate.filter(lambda item: item.get("mediaCollection")),
36 validate.get(0),
37 {
38 "geoblocked": bool,
39 "publicationService": {
40 "name": str,
41 },
42 "title": str,
43 "mediaCollection": {
44 "embedded": {
45 "_mediaArray": [{
46 "_mediaStreamArray": [{
47 "_quality": validate.any(str, int),
48 "_stream": validate.url()
49 }]
50 }]
51 }
52 }
53 }
54 )
55 })
56 data = schema_data.validate(data_json)
57
58 log.debug(f"Found media id: {data['id']}")
59 data_media = data["widgets"]
60
61 if data_media["geoblocked"]:
62 log.info("The content is not available in your region")
63 return
64
65 self.author = data_media["publicationService"]["name"]
66 self.title = data_media["title"]
67
68 for media in data_media["mediaCollection"]["embedded"]["_mediaArray"]:
69 for stream in media["_mediaStreamArray"]:
70 if stream["_quality"] != "auto" or ".m3u8" not in stream["_stream"]:
71 continue
72 return HLSStream.parse_variant_playlist(self.session, stream["_stream"])
73
74
75 __plugin__ = ARDMediathek
76
```
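The plugin above returns the result of `HLSStream.parse_variant_playlist()` directly from `_get_streams()`. That helper fetches the master playlist and returns a mapping of quality name to `HLSStream`, which is the shape Streamlink expects from a plugin. A minimal sketch with a placeholder URL (the call performs an HTTP fetch, so this is illustrative only):

```python
# Sketch of the helper the plugin above relies on (placeholder URL).
from streamlink import Streamlink
from streamlink.stream.hls import HLSStream

session = Streamlink()
variants = HLSStream.parse_variant_playlist(session, "https://example.invalid/master.m3u8")

# parse_variant_playlist returns a dict of quality name -> HLSStream,
# which is why _get_streams() above can return its result directly.
for name, stream in variants.items():
    print(name, stream)
```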
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/ard_mediathek.py b/src/streamlink/plugins/ard_mediathek.py
--- a/src/streamlink/plugins/ard_mediathek.py
+++ b/src/streamlink/plugins/ard_mediathek.py
@@ -4,6 +4,7 @@
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
+from streamlink.stream.http import HTTPStream
log = logging.getLogger(__name__)
@@ -13,6 +14,14 @@
r"https?://(?:(\w+\.)?ardmediathek\.de/|mediathek\.daserste\.de/)"
))
class ARDMediathek(Plugin):
+ _QUALITY_MAP = {
+ 4: "1080p",
+ 3: "720p",
+ 2: "540p",
+ 1: "360p",
+ 0: "270p"
+ }
+
def _get_streams(self):
data_json = self.session.http.get(self.url, schema=validate.Schema(
validate.parse_html(),
@@ -34,42 +43,64 @@
[dict],
validate.filter(lambda item: item.get("mediaCollection")),
validate.get(0),
- {
- "geoblocked": bool,
- "publicationService": {
- "name": str,
+ validate.any(None, validate.all(
+ {
+ "geoblocked": bool,
+ "publicationService": {
+ "name": str,
+ },
+ "show": validate.any(None, validate.all(
+ {"title": str},
+ validate.get("title")
+ )),
+ "title": str,
+ "mediaCollection": {
+ "embedded": {
+ "_mediaArray": [validate.all(
+ {
+ "_mediaStreamArray": [validate.all(
+ {
+ "_quality": validate.any(str, int),
+ "_stream": validate.url(),
+ },
+ validate.union_get("_quality", "_stream")
+ )]
+ },
+ validate.get("_mediaStreamArray"),
+ validate.transform(dict)
+ )]
+ }
+ },
},
- "title": str,
- "mediaCollection": {
- "embedded": {
- "_mediaArray": [{
- "_mediaStreamArray": [{
- "_quality": validate.any(str, int),
- "_stream": validate.url()
- }]
- }]
- }
- }
- }
+ validate.union_get(
+ "geoblocked",
+ ("mediaCollection", "embedded", "_mediaArray", 0),
+ ("publicationService", "name"),
+ "title",
+ "show",
+ )
+ ))
)
})
data = schema_data.validate(data_json)
log.debug(f"Found media id: {data['id']}")
- data_media = data["widgets"]
+ if not data["widgets"]:
+ log.info("The content is unavailable")
+ return
- if data_media["geoblocked"]:
+ geoblocked, media, self.author, self.title, show = data["widgets"]
+ if geoblocked:
log.info("The content is not available in your region")
return
+ if show:
+ self.title = f"{show}: {self.title}"
- self.author = data_media["publicationService"]["name"]
- self.title = data_media["title"]
-
- for media in data_media["mediaCollection"]["embedded"]["_mediaArray"]:
- for stream in media["_mediaStreamArray"]:
- if stream["_quality"] != "auto" or ".m3u8" not in stream["_stream"]:
- continue
- return HLSStream.parse_variant_playlist(self.session, stream["_stream"])
+ if media.get("auto"):
+ yield from HLSStream.parse_variant_playlist(self.session, media.get("auto")).items()
+ else:
+ for quality, stream in media.items():
+ yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)
__plugin__ = ARDMediathek
|
{"golden_diff": "diff --git a/src/streamlink/plugins/ard_mediathek.py b/src/streamlink/plugins/ard_mediathek.py\n--- a/src/streamlink/plugins/ard_mediathek.py\n+++ b/src/streamlink/plugins/ard_mediathek.py\n@@ -4,6 +4,7 @@\n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n from streamlink.stream.hls import HLSStream\n+from streamlink.stream.http import HTTPStream\n \n \n log = logging.getLogger(__name__)\n@@ -13,6 +14,14 @@\n r\"https?://(?:(\\w+\\.)?ardmediathek\\.de/|mediathek\\.daserste\\.de/)\"\n ))\n class ARDMediathek(Plugin):\n+ _QUALITY_MAP = {\n+ 4: \"1080p\",\n+ 3: \"720p\",\n+ 2: \"540p\",\n+ 1: \"360p\",\n+ 0: \"270p\"\n+ }\n+\n def _get_streams(self):\n data_json = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n@@ -34,42 +43,64 @@\n [dict],\n validate.filter(lambda item: item.get(\"mediaCollection\")),\n validate.get(0),\n- {\n- \"geoblocked\": bool,\n- \"publicationService\": {\n- \"name\": str,\n+ validate.any(None, validate.all(\n+ {\n+ \"geoblocked\": bool,\n+ \"publicationService\": {\n+ \"name\": str,\n+ },\n+ \"show\": validate.any(None, validate.all(\n+ {\"title\": str},\n+ validate.get(\"title\")\n+ )),\n+ \"title\": str,\n+ \"mediaCollection\": {\n+ \"embedded\": {\n+ \"_mediaArray\": [validate.all(\n+ {\n+ \"_mediaStreamArray\": [validate.all(\n+ {\n+ \"_quality\": validate.any(str, int),\n+ \"_stream\": validate.url(),\n+ },\n+ validate.union_get(\"_quality\", \"_stream\")\n+ )]\n+ },\n+ validate.get(\"_mediaStreamArray\"),\n+ validate.transform(dict)\n+ )]\n+ }\n+ },\n },\n- \"title\": str,\n- \"mediaCollection\": {\n- \"embedded\": {\n- \"_mediaArray\": [{\n- \"_mediaStreamArray\": [{\n- \"_quality\": validate.any(str, int),\n- \"_stream\": validate.url()\n- }]\n- }]\n- }\n- }\n- }\n+ validate.union_get(\n+ \"geoblocked\",\n+ (\"mediaCollection\", \"embedded\", \"_mediaArray\", 0),\n+ (\"publicationService\", \"name\"),\n+ \"title\",\n+ \"show\",\n+ )\n+ ))\n )\n })\n data = schema_data.validate(data_json)\n \n log.debug(f\"Found media id: {data['id']}\")\n- data_media = data[\"widgets\"]\n+ if not data[\"widgets\"]:\n+ log.info(\"The content is unavailable\")\n+ return\n \n- if data_media[\"geoblocked\"]:\n+ geoblocked, media, self.author, self.title, show = data[\"widgets\"]\n+ if geoblocked:\n log.info(\"The content is not available in your region\")\n return\n+ if show:\n+ self.title = f\"{show}: {self.title}\"\n \n- self.author = data_media[\"publicationService\"][\"name\"]\n- self.title = data_media[\"title\"]\n-\n- for media in data_media[\"mediaCollection\"][\"embedded\"][\"_mediaArray\"]:\n- for stream in media[\"_mediaStreamArray\"]:\n- if stream[\"_quality\"] != \"auto\" or \".m3u8\" not in stream[\"_stream\"]:\n- continue\n- return HLSStream.parse_variant_playlist(self.session, stream[\"_stream\"])\n+ if media.get(\"auto\"):\n+ yield from HLSStream.parse_variant_playlist(self.session, media.get(\"auto\")).items()\n+ else:\n+ for quality, stream in media.items():\n+ yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)\n \n \n __plugin__ = ARDMediathek\n", "issue": "plugins.ard_mediathek: rewrite plugin\nResolves #4191 \r\n\r\nOne issue I couldn't fix is the text encoding of the metadata which gets messed up by `validate.parse_html()`. See the VOD title down below...\r\nhttps://github.com/streamlink/streamlink/blob/175d4748561c7154bb80c5a47dae22039e45d4ce/src/streamlink/utils/parse.py#L54-L55\r\n\r\nSome VODs also have a second title, eg. 
if it's a TV show, but I couldn't be bothered to implement this. Not important.\r\n\r\n----\r\n\r\nDas Erste - Live:\r\n```\r\n$ streamlink -l debug --title '{author} - {title}' 'https://www.ardmediathek.de/daserste/live/Y3JpZDovL2Rhc2Vyc3RlLmRlL0xpdmVzdHJlYW0tRGFzRXJzdGU/' best\r\n[cli.output][debug] Opening subprocess: mpv \"--force-media-title=Das Erste - Das Erste\" -\r\n```\r\n\r\nWDR - Live:\r\n```\r\n$ streamlink -l debug --title '{author} - {title}' 'https://www.ardmediathek.de/live/Y3JpZDovL3dkci5kZS9CZWl0cmFnLTNkYTY2NGRlLTE4YzItNDY1MC1hNGZmLTRmNjQxNDcyMDcyYg/' best\r\n[cli.output][debug] Opening subprocess: mpv \"--force-media-title=WDR - WDR Fernsehen im Livestream\" -\r\n```\r\n\r\nVOD\r\n```\r\n$ streamlink -l debug --title '{author} - {title}' 'https://www.ardmediathek.de/video/dokus-im-ersten/wirecard-die-milliarden-luege/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3JlcG9ydGFnZSBfIGRva3VtZW50YXRpb24gaW0gZXJzdGVuL2NlMjQ0OWM4LTQ4YTUtNGIyNC1iMTdlLWNhOTNjMDQ5OTc4Zg/' best\r\n[cli.output][debug] Opening subprocess: mpv \"--force-media-title=Das Erste - Wirecard - Die Milliarden-L\u00c3\u00bcge\" -\r\n```\n", "before_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:(\\w+\\.)?ardmediathek\\.de/|mediathek\\.daserste\\.de/)\"\n))\nclass ARDMediathek(Plugin):\n def _get_streams(self):\n data_json = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_findtext(\".//script[@id='fetchedContextValue'][@type='application/json']\"),\n validate.any(None, validate.all(\n validate.parse_json(),\n {str: dict},\n validate.transform(lambda obj: list(obj.items())),\n validate.filter(lambda item: item[0].startswith(\"https://api.ardmediathek.de/page-gateway/pages/\")),\n validate.any(validate.get((0, 1)), [])\n ))\n ))\n if not data_json:\n return\n\n schema_data = validate.Schema({\n \"id\": str,\n \"widgets\": validate.all(\n [dict],\n validate.filter(lambda item: item.get(\"mediaCollection\")),\n validate.get(0),\n {\n \"geoblocked\": bool,\n \"publicationService\": {\n \"name\": str,\n },\n \"title\": str,\n \"mediaCollection\": {\n \"embedded\": {\n \"_mediaArray\": [{\n \"_mediaStreamArray\": [{\n \"_quality\": validate.any(str, int),\n \"_stream\": validate.url()\n }]\n }]\n }\n }\n }\n )\n })\n data = schema_data.validate(data_json)\n\n log.debug(f\"Found media id: {data['id']}\")\n data_media = data[\"widgets\"]\n\n if data_media[\"geoblocked\"]:\n log.info(\"The content is not available in your region\")\n return\n\n self.author = data_media[\"publicationService\"][\"name\"]\n self.title = data_media[\"title\"]\n\n for media in data_media[\"mediaCollection\"][\"embedded\"][\"_mediaArray\"]:\n for stream in media[\"_mediaStreamArray\"]:\n if stream[\"_quality\"] != \"auto\" or \".m3u8\" not in stream[\"_stream\"]:\n continue\n return HLSStream.parse_variant_playlist(self.session, stream[\"_stream\"])\n\n\n__plugin__ = ARDMediathek\n", "path": "src/streamlink/plugins/ard_mediathek.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.stream.http import HTTPStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n 
r\"https?://(?:(\\w+\\.)?ardmediathek\\.de/|mediathek\\.daserste\\.de/)\"\n))\nclass ARDMediathek(Plugin):\n _QUALITY_MAP = {\n 4: \"1080p\",\n 3: \"720p\",\n 2: \"540p\",\n 1: \"360p\",\n 0: \"270p\"\n }\n\n def _get_streams(self):\n data_json = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_findtext(\".//script[@id='fetchedContextValue'][@type='application/json']\"),\n validate.any(None, validate.all(\n validate.parse_json(),\n {str: dict},\n validate.transform(lambda obj: list(obj.items())),\n validate.filter(lambda item: item[0].startswith(\"https://api.ardmediathek.de/page-gateway/pages/\")),\n validate.any(validate.get((0, 1)), [])\n ))\n ))\n if not data_json:\n return\n\n schema_data = validate.Schema({\n \"id\": str,\n \"widgets\": validate.all(\n [dict],\n validate.filter(lambda item: item.get(\"mediaCollection\")),\n validate.get(0),\n validate.any(None, validate.all(\n {\n \"geoblocked\": bool,\n \"publicationService\": {\n \"name\": str,\n },\n \"show\": validate.any(None, validate.all(\n {\"title\": str},\n validate.get(\"title\")\n )),\n \"title\": str,\n \"mediaCollection\": {\n \"embedded\": {\n \"_mediaArray\": [validate.all(\n {\n \"_mediaStreamArray\": [validate.all(\n {\n \"_quality\": validate.any(str, int),\n \"_stream\": validate.url(),\n },\n validate.union_get(\"_quality\", \"_stream\")\n )]\n },\n validate.get(\"_mediaStreamArray\"),\n validate.transform(dict)\n )]\n }\n },\n },\n validate.union_get(\n \"geoblocked\",\n (\"mediaCollection\", \"embedded\", \"_mediaArray\", 0),\n (\"publicationService\", \"name\"),\n \"title\",\n \"show\",\n )\n ))\n )\n })\n data = schema_data.validate(data_json)\n\n log.debug(f\"Found media id: {data['id']}\")\n if not data[\"widgets\"]:\n log.info(\"The content is unavailable\")\n return\n\n geoblocked, media, self.author, self.title, show = data[\"widgets\"]\n if geoblocked:\n log.info(\"The content is not available in your region\")\n return\n if show:\n self.title = f\"{show}: {self.title}\"\n\n if media.get(\"auto\"):\n yield from HLSStream.parse_variant_playlist(self.session, media.get(\"auto\")).items()\n else:\n for quality, stream in media.items():\n yield self._QUALITY_MAP.get(quality, quality), HTTPStream(self.session, stream)\n\n\n__plugin__ = ARDMediathek\n", "path": "src/streamlink/plugins/ard_mediathek.py"}]}
| 1,483 | 922 |
gh_patches_debug_10487
|
rasdani/github-patches
|
git_diff
|
magenta__magenta-1365
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DRUMS_RNN: when trying to run --eval on drums_rnn I get "ValueError: invalid every_n_iter=0."
Some very strange behavior here.
I am comparing lstm behavior here to that of [LSTMetallica](https://github.com/keunwoochoi/LSTMetallica). So using the same midi library of metallica drums, I went through the normal drums_rnn pipeline:
1. I created a tfrecord (notesequence) of the entire metallica midi library
2. I turned that notesequence into a training and validation split of 10 percent
3. I started a training on the training sequence on the training data
4. I started a validation (--eval) on the eval data and IMMEDIATELY got a ValueError:
```
Traceback (most recent call last):
File ".\drums_rnn_train.py", line 113, in <module>
console_entry_point()
File ".\drums_rnn_train.py", line 109, in console_entry_point
tf.app.run(main)
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\platform\app.py", line 124, in run
_sys.exit(main(argv))
File ".\drums_rnn_train.py", line 99, in main
events_rnn_train.run_eval(build_graph_fn, train_dir, eval_dir, num_batches)
File "C:\Program Files\Python35\lib\site-packages\magenta\models\shared\events_rnn_train.py", line 114, in run_eval
EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),
File "C:\Program Files\Python35\lib\site-packages\tensorflow\python\training\basic_session_run_hooks.py", line 209, in __init__
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
ValueError: invalid every_n_iter=0.
```
I've played around with the training and the evaluation percentages, and if I do 40%!!! VALIDATION data then it doesn't give that error any more, but that is way too much data for validation. (the tfrecord size for training data is 103 megs and the eval data is 75 megs). 
The rest of the data sets I've tried this on have similar properties, which led me to believe that perhaps these other data sets were too small. But a weird quirk is that if I used a much smaller data set (training set size of about 20 megs, and eval data of 10 megs), it would still give the same error on using the validation data to run the eval, but it doesn't fail if I use the training data for evaluation. So somehow a 20 meg file of training data for a smaller data set works for validation, but it fails for this metallica data set if I don't give it at least 70 megs of evaluation data. Is there just a weird problem with "drums_rnn_create_dataset"?
Any ideas?
Thanks in advance
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `magenta/models/shared/events_rnn_train.py`
Content:
```
1 # Copyright 2016 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Train and evaluate an event sequence RNN model."""
15
16 import tensorflow as tf
17
18
19 def run_training(build_graph_fn, train_dir, num_training_steps=None,
20 summary_frequency=10, save_checkpoint_secs=60,
21 checkpoints_to_keep=10, keep_checkpoint_every_n_hours=1,
22 master='', task=0, num_ps_tasks=0):
23 """Runs the training loop.
24
25 Args:
26 build_graph_fn: A function that builds the graph ops.
27 train_dir: The path to the directory where checkpoints and summary events
28 will be written to.
29 num_training_steps: The number of steps to train for before exiting.
30 summary_frequency: The number of steps between each summary. A summary is
31 when graph values from the last step are logged to the console and
32 written to disk.
33 save_checkpoint_secs: The frequency at which to save checkpoints, in
34 seconds.
35 checkpoints_to_keep: The number of most recent checkpoints to keep in
36 `train_dir`. Keeps all if set to 0.
37 keep_checkpoint_every_n_hours: Keep a checkpoint every N hours, even if it
38 results in more checkpoints than checkpoints_to_keep.
39 master: URL of the Tensorflow master.
40 task: Task number for this worker.
41 num_ps_tasks: Number of parameter server tasks.
42 """
43 with tf.Graph().as_default():
44 with tf.device(tf.train.replica_device_setter(num_ps_tasks)):
45 build_graph_fn()
46
47 global_step = tf.train.get_or_create_global_step()
48 loss = tf.get_collection('loss')[0]
49 perplexity = tf.get_collection('metrics/perplexity')[0]
50 accuracy = tf.get_collection('metrics/accuracy')[0]
51 train_op = tf.get_collection('train_op')[0]
52
53 logging_dict = {
54 'Global Step': global_step,
55 'Loss': loss,
56 'Perplexity': perplexity,
57 'Accuracy': accuracy
58 }
59 hooks = [
60 tf.train.NanTensorHook(loss),
61 tf.train.LoggingTensorHook(
62 logging_dict, every_n_iter=summary_frequency),
63 tf.train.StepCounterHook(
64 output_dir=train_dir, every_n_steps=summary_frequency)
65 ]
66 if num_training_steps:
67 hooks.append(tf.train.StopAtStepHook(num_training_steps))
68
69 scaffold = tf.train.Scaffold(
70 saver=tf.train.Saver(
71 max_to_keep=checkpoints_to_keep,
72 keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours))
73
74 tf.logging.info('Starting training loop...')
75 tf.contrib.training.train(
76 train_op=train_op,
77 logdir=train_dir,
78 scaffold=scaffold,
79 hooks=hooks,
80 save_checkpoint_secs=save_checkpoint_secs,
81 save_summaries_steps=summary_frequency,
82 master=master,
83 is_chief=task == 0)
84 tf.logging.info('Training complete.')
85
86
87 # TODO(adarob): Limit to a single epoch each evaluation step.
88 def run_eval(build_graph_fn, train_dir, eval_dir, num_batches,
89 timeout_secs=300):
90 """Runs the training loop.
91
92 Args:
93 build_graph_fn: A function that builds the graph ops.
94 train_dir: The path to the directory where checkpoints will be loaded
95 from for evaluation.
96 eval_dir: The path to the directory where the evaluation summary events
97 will be written to.
98 num_batches: The number of full batches to use for each evaluation step.
99 timeout_secs: The number of seconds after which to stop waiting for a new
100 checkpoint.
101 """
102 with tf.Graph().as_default():
103 build_graph_fn()
104
105 global_step = tf.train.get_or_create_global_step()
106 loss = tf.get_collection('loss')[0]
107 perplexity = tf.get_collection('metrics/perplexity')[0]
108 accuracy = tf.get_collection('metrics/accuracy')[0]
109 eval_ops = tf.get_collection('eval_ops')
110
111 logging_dict = {
112 'Global Step': global_step,
113 'Loss': loss,
114 'Perplexity': perplexity,
115 'Accuracy': accuracy
116 }
117 hooks = [
118 EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),
119 tf.contrib.training.StopAfterNEvalsHook(num_batches),
120 tf.contrib.training.SummaryAtEndHook(eval_dir),
121 ]
122
123 tf.contrib.training.evaluate_repeatedly(
124 train_dir,
125 eval_ops=eval_ops,
126 hooks=hooks,
127 eval_interval_secs=60,
128 timeout=timeout_secs)
129
130
131 class EvalLoggingTensorHook(tf.train.LoggingTensorHook):
132 """A revised version of LoggingTensorHook to use during evaluation.
133
134 This version supports being reset and increments `_iter_count` before run
135 instead of after run.
136 """
137
138 def begin(self):
139 # Reset timer.
140 self._timer.update_last_triggered_step(0)
141 super(EvalLoggingTensorHook, self).begin()
142
143 def before_run(self, run_context):
144 self._iter_count += 1
145 return super(EvalLoggingTensorHook, self).before_run(run_context)
146
147 def after_run(self, run_context, run_values):
148 super(EvalLoggingTensorHook, self).after_run(run_context, run_values)
149 self._iter_count -= 1
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/magenta/models/shared/events_rnn_train.py b/magenta/models/shared/events_rnn_train.py
--- a/magenta/models/shared/events_rnn_train.py
+++ b/magenta/models/shared/events_rnn_train.py
@@ -98,7 +98,13 @@
num_batches: The number of full batches to use for each evaluation step.
timeout_secs: The number of seconds after which to stop waiting for a new
checkpoint.
+ Raises:
+ ValueError: If `num_batches` is less than or equal to 0.
"""
+ if num_batches <= 0:
+ raise ValueError(
+ '`num_batches` must be greater than 0. Check that the batch size is '
+ 'no larger than the number of records in the eval set.')
with tf.Graph().as_default():
build_graph_fn()
|
{"golden_diff": "diff --git a/magenta/models/shared/events_rnn_train.py b/magenta/models/shared/events_rnn_train.py\n--- a/magenta/models/shared/events_rnn_train.py\n+++ b/magenta/models/shared/events_rnn_train.py\n@@ -98,7 +98,13 @@\n num_batches: The number of full batches to use for each evaluation step.\n timeout_secs: The number of seconds after which to stop waiting for a new\n checkpoint.\n+ Raises:\n+ ValueError: If `num_batches` is less than or equal to 0.\n \"\"\"\n+ if num_batches <= 0:\n+ raise ValueError(\n+ '`num_batches` must be greater than 0. Check that the batch size is '\n+ 'no larger than the number of records in the eval set.')\n with tf.Graph().as_default():\n build_graph_fn()\n", "issue": "DRUMS_RNN: when trying to run --eval on drums_rnn I get \"ValueError: invalid every_n_iter=0.\"\nSome very strange behavior here.\r\n\r\nI am comparing lstm behavior here to that of [LSTMetallica](https://github.com/keunwoochoi/LSTMetallica). So using the same midi library of metallica drums, I went through the normal drums_rnn pipeline:\r\n\r\n1. I created a tfrecord (notesequence) of the entire metallica midi library\r\n2. I turned that notesequence into a training and validation split of 10 percent\r\n3. I started a training on the training sequence on the training data\r\n4. I started a validation (--eval) on the eval data and IMMEDIATELY get a valueerror:\r\n```\r\nTraceback (most recent call last):\r\n File \".\\drums_rnn_train.py\", line 113, in <module>\r\n console_entry_point()\r\n File \".\\drums_rnn_train.py\", line 109, in console_entry_point\r\n tf.app.run(main)\r\n File \"C:\\Program Files\\Python35\\lib\\site-packages\\tensorflow\\python\\platform\\app.py\", line 124, in run\r\n _sys.exit(main(argv))\r\n File \".\\drums_rnn_train.py\", line 99, in main\r\n events_rnn_train.run_eval(build_graph_fn, train_dir, eval_dir, num_batches)\r\n File \"C:\\Program Files\\Python35\\lib\\site-packages\\magenta\\models\\shared\\events_rnn_train.py\", line 114, in run_eval\r\n EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),\r\n File \"C:\\Program Files\\Python35\\lib\\site-packages\\tensorflow\\python\\training\\basic_session_run_hooks.py\", line 209, in __init__\r\n raise ValueError(\"invalid every_n_iter=%s.\" % every_n_iter)\r\nValueError: invalid every_n_iter=0.\r\n\r\n```\r\n\r\nI've played around with the training and the evaluation percentages, and if I do 40%!!! VALIDATION data then it doesn't give that error any more, but that is way too much data for validation. (the tfrecord size for training data is 103 megs and the the eval data is 75 megs). \r\n\r\nThe rest of the data sets i've tried this on has similar properties, which led me to believe that perhaps these other data sets were too small. But a weird quirk is that if I used a much smaller data set (training set size of about 20megs, and eval data of 10megs), it would still give the same error on using the validation data to run the eval, but it doesn't fail if i used the training data for evaluation. So somehow a 20 meg file of training data for a smaller data set works validation but it fails for this metallica data set if I don't give it at least 70 megs of evaluation data. Is there just a weird problem with \"drums_rnn_create_dataset\"\r\n\r\nAny ideas?\r\n\r\nThanks in advance\n", "before_files": [{"content": "# Copyright 2016 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Train and evaluate an event sequence RNN model.\"\"\"\n\nimport tensorflow as tf\n\n\ndef run_training(build_graph_fn, train_dir, num_training_steps=None,\n summary_frequency=10, save_checkpoint_secs=60,\n checkpoints_to_keep=10, keep_checkpoint_every_n_hours=1,\n master='', task=0, num_ps_tasks=0):\n \"\"\"Runs the training loop.\n\n Args:\n build_graph_fn: A function that builds the graph ops.\n train_dir: The path to the directory where checkpoints and summary events\n will be written to.\n num_training_steps: The number of steps to train for before exiting.\n summary_frequency: The number of steps between each summary. A summary is\n when graph values from the last step are logged to the console and\n written to disk.\n save_checkpoint_secs: The frequency at which to save checkpoints, in\n seconds.\n checkpoints_to_keep: The number of most recent checkpoints to keep in\n `train_dir`. Keeps all if set to 0.\n keep_checkpoint_every_n_hours: Keep a checkpoint every N hours, even if it\n results in more checkpoints than checkpoints_to_keep.\n master: URL of the Tensorflow master.\n task: Task number for this worker.\n num_ps_tasks: Number of parameter server tasks.\n \"\"\"\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(num_ps_tasks)):\n build_graph_fn()\n\n global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection('loss')[0]\n perplexity = tf.get_collection('metrics/perplexity')[0]\n accuracy = tf.get_collection('metrics/accuracy')[0]\n train_op = tf.get_collection('train_op')[0]\n\n logging_dict = {\n 'Global Step': global_step,\n 'Loss': loss,\n 'Perplexity': perplexity,\n 'Accuracy': accuracy\n }\n hooks = [\n tf.train.NanTensorHook(loss),\n tf.train.LoggingTensorHook(\n logging_dict, every_n_iter=summary_frequency),\n tf.train.StepCounterHook(\n output_dir=train_dir, every_n_steps=summary_frequency)\n ]\n if num_training_steps:\n hooks.append(tf.train.StopAtStepHook(num_training_steps))\n\n scaffold = tf.train.Scaffold(\n saver=tf.train.Saver(\n max_to_keep=checkpoints_to_keep,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours))\n\n tf.logging.info('Starting training loop...')\n tf.contrib.training.train(\n train_op=train_op,\n logdir=train_dir,\n scaffold=scaffold,\n hooks=hooks,\n save_checkpoint_secs=save_checkpoint_secs,\n save_summaries_steps=summary_frequency,\n master=master,\n is_chief=task == 0)\n tf.logging.info('Training complete.')\n\n\n# TODO(adarob): Limit to a single epoch each evaluation step.\ndef run_eval(build_graph_fn, train_dir, eval_dir, num_batches,\n timeout_secs=300):\n \"\"\"Runs the training loop.\n\n Args:\n build_graph_fn: A function that builds the graph ops.\n train_dir: The path to the directory where checkpoints will be loaded\n from for evaluation.\n eval_dir: The path to the directory where the evaluation summary events\n will be written to.\n num_batches: The number of full batches to use for each evaluation 
step.\n timeout_secs: The number of seconds after which to stop waiting for a new\n checkpoint.\n \"\"\"\n with tf.Graph().as_default():\n build_graph_fn()\n\n global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection('loss')[0]\n perplexity = tf.get_collection('metrics/perplexity')[0]\n accuracy = tf.get_collection('metrics/accuracy')[0]\n eval_ops = tf.get_collection('eval_ops')\n\n logging_dict = {\n 'Global Step': global_step,\n 'Loss': loss,\n 'Perplexity': perplexity,\n 'Accuracy': accuracy\n }\n hooks = [\n EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),\n tf.contrib.training.StopAfterNEvalsHook(num_batches),\n tf.contrib.training.SummaryAtEndHook(eval_dir),\n ]\n\n tf.contrib.training.evaluate_repeatedly(\n train_dir,\n eval_ops=eval_ops,\n hooks=hooks,\n eval_interval_secs=60,\n timeout=timeout_secs)\n\n\nclass EvalLoggingTensorHook(tf.train.LoggingTensorHook):\n \"\"\"A revised version of LoggingTensorHook to use during evaluation.\n\n This version supports being reset and increments `_iter_count` before run\n instead of after run.\n \"\"\"\n\n def begin(self):\n # Reset timer.\n self._timer.update_last_triggered_step(0)\n super(EvalLoggingTensorHook, self).begin()\n\n def before_run(self, run_context):\n self._iter_count += 1\n return super(EvalLoggingTensorHook, self).before_run(run_context)\n\n def after_run(self, run_context, run_values):\n super(EvalLoggingTensorHook, self).after_run(run_context, run_values)\n self._iter_count -= 1\n", "path": "magenta/models/shared/events_rnn_train.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Train and evaluate an event sequence RNN model.\"\"\"\n\nimport tensorflow as tf\n\n\ndef run_training(build_graph_fn, train_dir, num_training_steps=None,\n summary_frequency=10, save_checkpoint_secs=60,\n checkpoints_to_keep=10, keep_checkpoint_every_n_hours=1,\n master='', task=0, num_ps_tasks=0):\n \"\"\"Runs the training loop.\n\n Args:\n build_graph_fn: A function that builds the graph ops.\n train_dir: The path to the directory where checkpoints and summary events\n will be written to.\n num_training_steps: The number of steps to train for before exiting.\n summary_frequency: The number of steps between each summary. A summary is\n when graph values from the last step are logged to the console and\n written to disk.\n save_checkpoint_secs: The frequency at which to save checkpoints, in\n seconds.\n checkpoints_to_keep: The number of most recent checkpoints to keep in\n `train_dir`. 
Keeps all if set to 0.\n keep_checkpoint_every_n_hours: Keep a checkpoint every N hours, even if it\n results in more checkpoints than checkpoints_to_keep.\n master: URL of the Tensorflow master.\n task: Task number for this worker.\n num_ps_tasks: Number of parameter server tasks.\n \"\"\"\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(num_ps_tasks)):\n build_graph_fn()\n\n global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection('loss')[0]\n perplexity = tf.get_collection('metrics/perplexity')[0]\n accuracy = tf.get_collection('metrics/accuracy')[0]\n train_op = tf.get_collection('train_op')[0]\n\n logging_dict = {\n 'Global Step': global_step,\n 'Loss': loss,\n 'Perplexity': perplexity,\n 'Accuracy': accuracy\n }\n hooks = [\n tf.train.NanTensorHook(loss),\n tf.train.LoggingTensorHook(\n logging_dict, every_n_iter=summary_frequency),\n tf.train.StepCounterHook(\n output_dir=train_dir, every_n_steps=summary_frequency)\n ]\n if num_training_steps:\n hooks.append(tf.train.StopAtStepHook(num_training_steps))\n\n scaffold = tf.train.Scaffold(\n saver=tf.train.Saver(\n max_to_keep=checkpoints_to_keep,\n keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours))\n\n tf.logging.info('Starting training loop...')\n tf.contrib.training.train(\n train_op=train_op,\n logdir=train_dir,\n scaffold=scaffold,\n hooks=hooks,\n save_checkpoint_secs=save_checkpoint_secs,\n save_summaries_steps=summary_frequency,\n master=master,\n is_chief=task == 0)\n tf.logging.info('Training complete.')\n\n\n# TODO(adarob): Limit to a single epoch each evaluation step.\ndef run_eval(build_graph_fn, train_dir, eval_dir, num_batches,\n timeout_secs=300):\n \"\"\"Runs the training loop.\n\n Args:\n build_graph_fn: A function that builds the graph ops.\n train_dir: The path to the directory where checkpoints will be loaded\n from for evaluation.\n eval_dir: The path to the directory where the evaluation summary events\n will be written to.\n num_batches: The number of full batches to use for each evaluation step.\n timeout_secs: The number of seconds after which to stop waiting for a new\n checkpoint.\n Raises:\n ValueError: If `num_batches` is less than or equal to 0.\n \"\"\"\n if num_batches <= 0:\n raise ValueError(\n '`num_batches` must be greater than 0. 
Check that the batch size is '\n 'no larger than the number of records in the eval set.')\n with tf.Graph().as_default():\n build_graph_fn()\n\n global_step = tf.train.get_or_create_global_step()\n loss = tf.get_collection('loss')[0]\n perplexity = tf.get_collection('metrics/perplexity')[0]\n accuracy = tf.get_collection('metrics/accuracy')[0]\n eval_ops = tf.get_collection('eval_ops')\n\n logging_dict = {\n 'Global Step': global_step,\n 'Loss': loss,\n 'Perplexity': perplexity,\n 'Accuracy': accuracy\n }\n hooks = [\n EvalLoggingTensorHook(logging_dict, every_n_iter=num_batches),\n tf.contrib.training.StopAfterNEvalsHook(num_batches),\n tf.contrib.training.SummaryAtEndHook(eval_dir),\n ]\n\n tf.contrib.training.evaluate_repeatedly(\n train_dir,\n eval_ops=eval_ops,\n hooks=hooks,\n eval_interval_secs=60,\n timeout=timeout_secs)\n\n\nclass EvalLoggingTensorHook(tf.train.LoggingTensorHook):\n \"\"\"A revised version of LoggingTensorHook to use during evaluation.\n\n This version supports being reset and increments `_iter_count` before run\n instead of after run.\n \"\"\"\n\n def begin(self):\n # Reset timer.\n self._timer.update_last_triggered_step(0)\n super(EvalLoggingTensorHook, self).begin()\n\n def before_run(self, run_context):\n self._iter_count += 1\n return super(EvalLoggingTensorHook, self).before_run(run_context)\n\n def after_run(self, run_context, run_values):\n super(EvalLoggingTensorHook, self).after_run(run_context, run_values)\n self._iter_count -= 1\n", "path": "magenta/models/shared/events_rnn_train.py"}]}
| 2,496 | 179 |
gh_patches_debug_44084
|
rasdani/github-patches
|
git_diff
|
Gallopsled__pwntools-1998
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pwn.shellcraft.linkat second argument is incorrect
https://github.com/Gallopsled/pwntools/blob/bd12d1874f17e1fd6a9b26411ccc7ccd6c31f4cb/pwnlib/shellcraft/templates/common/linux/syscalls/linkat.asm#L30
`from` should be `from_`. The current implementation makes it impossible to pass a string.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/data/syscalls/generate.py`
Content:
```
1 #!/usr/bin/env python2
2 from __future__ import division
3 import argparse
4 import keyword
5 import os
6
7 from pwnlib import constants
8 from pwnlib.context import context
9
10 # github.com/zachriggle/functions
11 from functions import functions, Function, Argument
12
13 ARCHITECTURES = ['i386', 'amd64', 'arm', 'aarch64', 'mips']
14
15 HEADER = '''
16 <%
17 import collections
18 import pwnlib.abi
19 import pwnlib.constants
20 import pwnlib.shellcraft
21 import six
22 %>
23 '''
24
25 DOCSTRING = '''
26 <%docstring>{name}({arguments_comma_separated}) -> str
27
28 Invokes the syscall {name}.
29
30 See 'man 2 {name}' for more information.
31
32 Arguments:
33 {arg_docs}
34 Returns:
35 {return_type}
36 </%docstring>
37 '''
38
39 ARGUMENTS = """
40 <%page args="{arguments_default_values}"/>
41 """
42
43 CALL = """
44 <%
45 abi = pwnlib.abi.ABI.syscall()
46 stack = abi.stack
47 regs = abi.register_arguments[1:]
48 allregs = pwnlib.shellcraft.registers.current()
49
50 can_pushstr = {string_arguments!r}
51 can_pushstr_array = {array_arguments!r}
52
53 argument_names = {argument_names!r}
54 argument_values = [{arguments_comma_separated!s}]
55
56 # Load all of the arguments into their destination registers / stack slots.
57 register_arguments = dict()
58 stack_arguments = collections.OrderedDict()
59 string_arguments = dict()
60 dict_arguments = dict()
61 array_arguments = dict()
62 syscall_repr = []
63
64 for name, arg in zip(argument_names, argument_values):
65 if arg is not None:
66 syscall_repr.append('%s=%s' % (name, pwnlib.shellcraft.pretty(arg, False)))
67
68 # If the argument itself (input) is a register...
69 if arg in allregs:
70 index = argument_names.index(name)
71 if index < len(regs):
72 target = regs[index]
73 register_arguments[target] = arg
74 elif arg is not None:
75 stack_arguments[index] = arg
76
77 # The argument is not a register. It is a string value, and we
78 # are expecting a string value
79 elif name in can_pushstr and isinstance(arg, (six.binary_type, six.text_type)):
80 if isinstance(arg, six.text_type):
81 arg = arg.encode('utf-8')
82 string_arguments[name] = arg
83
84 # The argument is not a register. It is a dictionary, and we are
85 # expecting K:V paris.
86 elif name in can_pushstr_array and isinstance(arg, dict):
87 array_arguments[name] = ['%s=%s' % (k,v) for (k,v) in arg.items()]
88
89 # The arguent is not a register. It is a list, and we are expecting
90 # a list of arguments.
91 elif name in can_pushstr_array and isinstance(arg, (list, tuple)):
92 array_arguments[name] = arg
93
94 # The argument is not a register, string, dict, or list.
95 # It could be a constant string ('O_RDONLY') for an integer argument,
96 # an actual integer value, or a constant.
97 else:
98 index = argument_names.index(name)
99 if index < len(regs):
100 target = regs[index]
101 register_arguments[target] = arg
102 elif arg is not None:
103 stack_arguments[target] = arg
104
105 # Some syscalls have different names on various architectures.
106 # Determine which syscall number to use for the current architecture.
107 for syscall in {syscalls!r}:
108 if hasattr(pwnlib.constants, syscall):
109 break
110 else:
111 raise Exception("Could not locate any syscalls: %r" % syscalls)
112 %>
113 /* {name}(${{', '.join(syscall_repr)}}) */
114 %for name, arg in string_arguments.items():
115 ${{pwnlib.shellcraft.pushstr(arg, append_null=(b'\\x00' not in arg))}}
116 ${{pwnlib.shellcraft.mov(regs[argument_names.index(name)], abi.stack)}}
117 %endfor
118 %for name, arg in array_arguments.items():
119 ${{pwnlib.shellcraft.pushstr_array(regs[argument_names.index(name)], arg)}}
120 %endfor
121 %for name, arg in stack_arguments.items():
122 ${{pwnlib.shellcraft.push(arg)}}
123 %endfor
124 ${{pwnlib.shellcraft.setregs(register_arguments)}}
125 ${{pwnlib.shellcraft.syscall(syscall)}}
126 """
127
128
129 def can_be_constant(arg):
130 if arg.derefcnt == 0:
131 return True
132
133
134 def can_be_string(arg):
135 if arg.type == 'char' and arg.derefcnt == 1:
136 return True
137 if arg.type == 'void' and arg.derefcnt == 1:
138 return True
139
140 def can_be_array(arg):
141 if arg.type == 'char' and arg.derefcnt == 2:
142 return True
143 if arg.type == 'void' and arg.derefcnt == 2:
144 return True
145
146
147 def fix_bad_arg_names(func, arg):
148 if arg.name == 'len':
149 return 'length'
150
151 if arg.name in ('str', 'repr') or keyword.iskeyword(arg.name):
152 return arg.name + '_'
153
154 if func.name == 'open' and arg.name == 'vararg':
155 return 'mode'
156
157 return arg.name
158
159
160 def get_arg_default(arg):
161 return 0
162
163 def fix_rt_syscall_name(name):
164 if name.startswith('rt_'):
165 return name[3:]
166 return name
167
168 def fix_syscall_names(name):
169 # Do not use old_mmap
170 if name == 'SYS_mmap':
171 return ['SYS_mmap2', name]
172 # Some arches don't have vanilla sigreturn
173 if name.endswith('_sigreturn'):
174 return ['SYS_sigreturn', 'SYS_rt_sigreturn']
175 return [name]
176
177
178 def main(target):
179 for arch in ARCHITECTURES:
180 with context.local(arch=arch):
181 generate_one(target)
182
183 def generate_one(target):
184 SYSCALL_NAMES = [c for c in dir(constants) if c.startswith('SYS_')]
185
186 for syscall in SYSCALL_NAMES:
187 name = syscall[4:]
188
189 # Skip anything with uppercase
190 if name.lower() != name:
191 print('Skipping %s' % name)
192 continue
193
194 # Skip anything that starts with 'unused' or 'sys' after stripping
195 if name.startswith('unused'):
196 print('Skipping %s' % name)
197 continue
198
199 function = functions.get(name, None)
200
201 if name.startswith('rt_'):
202 name = name[3:]
203
204 # If we can't find a function, just stub it out with something
205 # that has a vararg argument.
206 if function is None:
207 print('Stubbing out %s' % name)
208 args = [Argument('int', 0, 'vararg')]
209 function = Function('long', 0, name, args)
210
211 # Some syscalls have different names on different architectures,
212 # or are superceded. We try to do the "best" thing at runtime.
213 syscalls = fix_syscall_names(syscall)
214
215 # Set up the argument string for Mako
216 argument_names = []
217 argument_defaults = []
218
219 #
220
221 for arg in function.args:
222 argname = fix_bad_arg_names(function, arg)
223 default = get_arg_default(arg)
224
225 # Mako is unable to use *vararg and *kwarg, so we just stub in
226 # a whole bunch of additional arguments.
227 if argname == 'vararg':
228 for j in range(5):
229 argname = 'vararg_%i' % j
230 argument_names.append(argname)
231 argument_defaults.append('%s=%s' % (argname, None))
232 break
233
234 argument_names.append(argname)
235 argument_defaults.append('%s=%s' % (argname, default))
236
237 arguments_default_values = ', '.join(argument_defaults)
238 arguments_comma_separated = ', '.join(argument_names)
239
240 string_arguments = []
241 array_arguments = []
242 arg_docs = []
243
244 for arg in function.args:
245
246 if can_be_array(arg):
247 array_arguments.append(arg.name)
248
249 if can_be_string(arg):
250 string_arguments.append(arg.name)
251
252 argname = arg.name
253 argtype = str(arg.type) + ('*' * arg.derefcnt)
254 arg_docs.append(
255 ' {argname}({argtype}): {argname}'.format(argname=argname,
256 argtype=argtype))
257
258 return_type = str(function.type) + ('*' * function.derefcnt)
259 arg_docs = '\n'.join(arg_docs)
260
261 template_variables = {
262 'name': name,
263 'arg_docs': arg_docs,
264 'syscalls': syscalls,
265 'arguments_default_values': arguments_default_values,
266 'arguments_comma_separated': arguments_comma_separated,
267 'return_type': return_type,
268 'string_arguments': string_arguments,
269 'array_arguments': array_arguments,
270 'argument_names': argument_names,
271 }
272
273 lines = [
274 HEADER,
275 DOCSTRING.format(**template_variables),
276 ARGUMENTS.format(**template_variables),
277 CALL.format(**template_variables)
278 ]
279
280 if keyword.iskeyword(name):
281 name += '_'
282 with open(os.path.join(target, name + '.asm'), 'wt') as f:
283 f.write('\n'.join(map(str.strip, lines)) + '\n')
284
285 if __name__ == '__main__':
286 p = argparse.ArgumentParser()
287 p.add_argument('target_directory')
288 args = p.parse_args()
289 main(args.target_directory)
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pwnlib/data/syscalls/generate.py b/pwnlib/data/syscalls/generate.py
--- a/pwnlib/data/syscalls/generate.py
+++ b/pwnlib/data/syscalls/generate.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
from __future__ import division
import argparse
import keyword
@@ -137,6 +137,7 @@
if arg.type == 'void' and arg.derefcnt == 1:
return True
+
def can_be_array(arg):
if arg.type == 'char' and arg.derefcnt == 2:
return True
@@ -160,11 +161,13 @@
def get_arg_default(arg):
return 0
+
def fix_rt_syscall_name(name):
if name.startswith('rt_'):
return name[3:]
return name
+
def fix_syscall_names(name):
# Do not use old_mmap
if name == 'SYS_mmap':
@@ -180,6 +183,7 @@
with context.local(arch=arch):
generate_one(target)
+
def generate_one(target):
SYSCALL_NAMES = [c for c in dir(constants) if c.startswith('SYS_')]
@@ -214,46 +218,50 @@
# Set up the argument string for Mako
argument_names = []
+ argument_names_ = []
argument_defaults = []
+ string_arguments = []
+ array_arguments = []
+ arg_docs = []
+
#
for arg in function.args:
- argname = fix_bad_arg_names(function, arg)
+ argname_ = fix_bad_arg_names(function, arg)
+ argname = argname_.rstrip('_')
default = get_arg_default(arg)
+ if can_be_array(arg):
+ array_arguments.append(argname)
+
+ if can_be_string(arg):
+ string_arguments.append(argname)
+
+ argtype = str(arg.type) + ('*' * arg.derefcnt)
+ arg_docs.append(
+ ' {argname_}({argtype}): {argname}'.format(
+ argname_=argname_,
+ argname=argname,
+ argtype=argtype,
+ ))
+
# Mako is unable to use *vararg and *kwarg, so we just stub in
# a whole bunch of additional arguments.
if argname == 'vararg':
for j in range(5):
argname = 'vararg_%i' % j
argument_names.append(argname)
+ argument_names_.append(argname)
argument_defaults.append('%s=%s' % (argname, None))
break
argument_names.append(argname)
- argument_defaults.append('%s=%s' % (argname, default))
+ argument_names_.append(argname_)
+ argument_defaults.append('%s=%s' % (argname_, default))
arguments_default_values = ', '.join(argument_defaults)
- arguments_comma_separated = ', '.join(argument_names)
-
- string_arguments = []
- array_arguments = []
- arg_docs = []
-
- for arg in function.args:
-
- if can_be_array(arg):
- array_arguments.append(arg.name)
-
- if can_be_string(arg):
- string_arguments.append(arg.name)
-
- argname = arg.name
- argtype = str(arg.type) + ('*' * arg.derefcnt)
- arg_docs.append(
- ' {argname}({argtype}): {argname}'.format(argname=argname,
- argtype=argtype))
+ arguments_comma_separated = ', '.join(argument_names_)
return_type = str(function.type) + ('*' * function.derefcnt)
arg_docs = '\n'.join(arg_docs)
@@ -282,6 +290,7 @@
with open(os.path.join(target, name + '.asm'), 'wt') as f:
f.write('\n'.join(map(str.strip, lines)) + '\n')
+
if __name__ == '__main__':
p = argparse.ArgumentParser()
p.add_argument('target_directory')
|
{"golden_diff": "diff --git a/pwnlib/data/syscalls/generate.py b/pwnlib/data/syscalls/generate.py\n--- a/pwnlib/data/syscalls/generate.py\n+++ b/pwnlib/data/syscalls/generate.py\n@@ -1,4 +1,4 @@\n-#!/usr/bin/env python2\n+#!/usr/bin/env python3\n from __future__ import division\n import argparse\n import keyword\n@@ -137,6 +137,7 @@\n if arg.type == 'void' and arg.derefcnt == 1:\n return True\n \n+\n def can_be_array(arg):\n if arg.type == 'char' and arg.derefcnt == 2:\n return True\n@@ -160,11 +161,13 @@\n def get_arg_default(arg):\n return 0\n \n+\n def fix_rt_syscall_name(name):\n if name.startswith('rt_'):\n return name[3:]\n return name\n \n+\n def fix_syscall_names(name):\n # Do not use old_mmap\n if name == 'SYS_mmap':\n@@ -180,6 +183,7 @@\n with context.local(arch=arch):\n generate_one(target)\n \n+\n def generate_one(target):\n SYSCALL_NAMES = [c for c in dir(constants) if c.startswith('SYS_')]\n \n@@ -214,46 +218,50 @@\n \n # Set up the argument string for Mako\n argument_names = []\n+ argument_names_ = []\n argument_defaults = []\n \n+ string_arguments = []\n+ array_arguments = []\n+ arg_docs = []\n+\n #\n \n for arg in function.args:\n- argname = fix_bad_arg_names(function, arg)\n+ argname_ = fix_bad_arg_names(function, arg)\n+ argname = argname_.rstrip('_')\n default = get_arg_default(arg)\n \n+ if can_be_array(arg):\n+ array_arguments.append(argname)\n+\n+ if can_be_string(arg):\n+ string_arguments.append(argname)\n+\n+ argtype = str(arg.type) + ('*' * arg.derefcnt)\n+ arg_docs.append(\n+ ' {argname_}({argtype}): {argname}'.format(\n+ argname_=argname_,\n+ argname=argname,\n+ argtype=argtype,\n+ ))\n+\n # Mako is unable to use *vararg and *kwarg, so we just stub in\n # a whole bunch of additional arguments.\n if argname == 'vararg':\n for j in range(5):\n argname = 'vararg_%i' % j\n argument_names.append(argname)\n+ argument_names_.append(argname)\n argument_defaults.append('%s=%s' % (argname, None))\n break\n \n argument_names.append(argname)\n- argument_defaults.append('%s=%s' % (argname, default))\n+ argument_names_.append(argname_)\n+ argument_defaults.append('%s=%s' % (argname_, default))\n \n arguments_default_values = ', '.join(argument_defaults)\n- arguments_comma_separated = ', '.join(argument_names)\n-\n- string_arguments = []\n- array_arguments = []\n- arg_docs = []\n-\n- for arg in function.args:\n-\n- if can_be_array(arg):\n- array_arguments.append(arg.name)\n-\n- if can_be_string(arg):\n- string_arguments.append(arg.name)\n-\n- argname = arg.name\n- argtype = str(arg.type) + ('*' * arg.derefcnt)\n- arg_docs.append(\n- ' {argname}({argtype}): {argname}'.format(argname=argname,\n- argtype=argtype))\n+ arguments_comma_separated = ', '.join(argument_names_)\n \n return_type = str(function.type) + ('*' * function.derefcnt)\n arg_docs = '\\n'.join(arg_docs)\n@@ -282,6 +290,7 @@\n with open(os.path.join(target, name + '.asm'), 'wt') as f:\n f.write('\\n'.join(map(str.strip, lines)) + '\\n')\n \n+\n if __name__ == '__main__':\n p = argparse.ArgumentParser()\n p.add_argument('target_directory')\n", "issue": "pwn.shellcraft.linkat second argument is incorrect\nhttps://github.com/Gallopsled/pwntools/blob/bd12d1874f17e1fd6a9b26411ccc7ccd6c31f4cb/pwnlib/shellcraft/templates/common/linux/syscalls/linkat.asm#L30\r\n\r\n`from` should be `from_`. 
The current implementation makes it not possible to pass a string.\n", "before_files": [{"content": "#!/usr/bin/env python2\nfrom __future__ import division\nimport argparse\nimport keyword\nimport os\n\nfrom pwnlib import constants\nfrom pwnlib.context import context\n\n# github.com/zachriggle/functions\nfrom functions import functions, Function, Argument\n\nARCHITECTURES = ['i386', 'amd64', 'arm', 'aarch64', 'mips']\n\nHEADER = '''\n<%\nimport collections\nimport pwnlib.abi\nimport pwnlib.constants\nimport pwnlib.shellcraft\nimport six\n%>\n'''\n\nDOCSTRING = '''\n<%docstring>{name}({arguments_comma_separated}) -> str\n\nInvokes the syscall {name}.\n\nSee 'man 2 {name}' for more information.\n\nArguments:\n{arg_docs}\nReturns:\n {return_type}\n</%docstring>\n'''\n\nARGUMENTS = \"\"\"\n<%page args=\"{arguments_default_values}\"/>\n\"\"\"\n\nCALL = \"\"\"\n<%\n abi = pwnlib.abi.ABI.syscall()\n stack = abi.stack\n regs = abi.register_arguments[1:]\n allregs = pwnlib.shellcraft.registers.current()\n\n can_pushstr = {string_arguments!r}\n can_pushstr_array = {array_arguments!r}\n\n argument_names = {argument_names!r}\n argument_values = [{arguments_comma_separated!s}]\n\n # Load all of the arguments into their destination registers / stack slots.\n register_arguments = dict()\n stack_arguments = collections.OrderedDict()\n string_arguments = dict()\n dict_arguments = dict()\n array_arguments = dict()\n syscall_repr = []\n\n for name, arg in zip(argument_names, argument_values):\n if arg is not None:\n syscall_repr.append('%s=%s' % (name, pwnlib.shellcraft.pretty(arg, False)))\n\n # If the argument itself (input) is a register...\n if arg in allregs:\n index = argument_names.index(name)\n if index < len(regs):\n target = regs[index]\n register_arguments[target] = arg\n elif arg is not None:\n stack_arguments[index] = arg\n\n # The argument is not a register. It is a string value, and we\n # are expecting a string value\n elif name in can_pushstr and isinstance(arg, (six.binary_type, six.text_type)):\n if isinstance(arg, six.text_type):\n arg = arg.encode('utf-8')\n string_arguments[name] = arg\n\n # The argument is not a register. It is a dictionary, and we are\n # expecting K:V paris.\n elif name in can_pushstr_array and isinstance(arg, dict):\n array_arguments[name] = ['%s=%s' % (k,v) for (k,v) in arg.items()]\n\n # The arguent is not a register. 
It is a list, and we are expecting\n # a list of arguments.\n elif name in can_pushstr_array and isinstance(arg, (list, tuple)):\n array_arguments[name] = arg\n\n # The argument is not a register, string, dict, or list.\n # It could be a constant string ('O_RDONLY') for an integer argument,\n # an actual integer value, or a constant.\n else:\n index = argument_names.index(name)\n if index < len(regs):\n target = regs[index]\n register_arguments[target] = arg\n elif arg is not None:\n stack_arguments[target] = arg\n\n # Some syscalls have different names on various architectures.\n # Determine which syscall number to use for the current architecture.\n for syscall in {syscalls!r}:\n if hasattr(pwnlib.constants, syscall):\n break\n else:\n raise Exception(\"Could not locate any syscalls: %r\" % syscalls)\n%>\n /* {name}(${{', '.join(syscall_repr)}}) */\n%for name, arg in string_arguments.items():\n ${{pwnlib.shellcraft.pushstr(arg, append_null=(b'\\\\x00' not in arg))}}\n ${{pwnlib.shellcraft.mov(regs[argument_names.index(name)], abi.stack)}}\n%endfor\n%for name, arg in array_arguments.items():\n ${{pwnlib.shellcraft.pushstr_array(regs[argument_names.index(name)], arg)}}\n%endfor\n%for name, arg in stack_arguments.items():\n ${{pwnlib.shellcraft.push(arg)}}\n%endfor\n ${{pwnlib.shellcraft.setregs(register_arguments)}}\n ${{pwnlib.shellcraft.syscall(syscall)}}\n\"\"\"\n\n\ndef can_be_constant(arg):\n if arg.derefcnt == 0:\n return True\n\n\ndef can_be_string(arg):\n if arg.type == 'char' and arg.derefcnt == 1:\n return True\n if arg.type == 'void' and arg.derefcnt == 1:\n return True\n\ndef can_be_array(arg):\n if arg.type == 'char' and arg.derefcnt == 2:\n return True\n if arg.type == 'void' and arg.derefcnt == 2:\n return True\n\n\ndef fix_bad_arg_names(func, arg):\n if arg.name == 'len':\n return 'length'\n\n if arg.name in ('str', 'repr') or keyword.iskeyword(arg.name):\n return arg.name + '_'\n\n if func.name == 'open' and arg.name == 'vararg':\n return 'mode'\n\n return arg.name\n\n\ndef get_arg_default(arg):\n return 0\n\ndef fix_rt_syscall_name(name):\n if name.startswith('rt_'):\n return name[3:]\n return name\n\ndef fix_syscall_names(name):\n # Do not use old_mmap\n if name == 'SYS_mmap':\n return ['SYS_mmap2', name]\n # Some arches don't have vanilla sigreturn\n if name.endswith('_sigreturn'):\n return ['SYS_sigreturn', 'SYS_rt_sigreturn']\n return [name]\n\n\ndef main(target):\n for arch in ARCHITECTURES:\n with context.local(arch=arch):\n generate_one(target)\n\ndef generate_one(target):\n SYSCALL_NAMES = [c for c in dir(constants) if c.startswith('SYS_')]\n\n for syscall in SYSCALL_NAMES:\n name = syscall[4:]\n\n # Skip anything with uppercase\n if name.lower() != name:\n print('Skipping %s' % name)\n continue\n\n # Skip anything that starts with 'unused' or 'sys' after stripping\n if name.startswith('unused'):\n print('Skipping %s' % name)\n continue\n\n function = functions.get(name, None)\n\n if name.startswith('rt_'):\n name = name[3:]\n\n # If we can't find a function, just stub it out with something\n # that has a vararg argument.\n if function is None:\n print('Stubbing out %s' % name)\n args = [Argument('int', 0, 'vararg')]\n function = Function('long', 0, name, args)\n\n # Some syscalls have different names on different architectures,\n # or are superceded. 
We try to do the \"best\" thing at runtime.\n syscalls = fix_syscall_names(syscall)\n\n # Set up the argument string for Mako\n argument_names = []\n argument_defaults = []\n\n #\n\n for arg in function.args:\n argname = fix_bad_arg_names(function, arg)\n default = get_arg_default(arg)\n\n # Mako is unable to use *vararg and *kwarg, so we just stub in\n # a whole bunch of additional arguments.\n if argname == 'vararg':\n for j in range(5):\n argname = 'vararg_%i' % j\n argument_names.append(argname)\n argument_defaults.append('%s=%s' % (argname, None))\n break\n\n argument_names.append(argname)\n argument_defaults.append('%s=%s' % (argname, default))\n\n arguments_default_values = ', '.join(argument_defaults)\n arguments_comma_separated = ', '.join(argument_names)\n\n string_arguments = []\n array_arguments = []\n arg_docs = []\n\n for arg in function.args:\n\n if can_be_array(arg):\n array_arguments.append(arg.name)\n\n if can_be_string(arg):\n string_arguments.append(arg.name)\n\n argname = arg.name\n argtype = str(arg.type) + ('*' * arg.derefcnt)\n arg_docs.append(\n ' {argname}({argtype}): {argname}'.format(argname=argname,\n argtype=argtype))\n\n return_type = str(function.type) + ('*' * function.derefcnt)\n arg_docs = '\\n'.join(arg_docs)\n\n template_variables = {\n 'name': name,\n 'arg_docs': arg_docs,\n 'syscalls': syscalls,\n 'arguments_default_values': arguments_default_values,\n 'arguments_comma_separated': arguments_comma_separated,\n 'return_type': return_type,\n 'string_arguments': string_arguments,\n 'array_arguments': array_arguments,\n 'argument_names': argument_names,\n }\n\n lines = [\n HEADER,\n DOCSTRING.format(**template_variables),\n ARGUMENTS.format(**template_variables),\n CALL.format(**template_variables)\n ]\n\n if keyword.iskeyword(name):\n name += '_'\n with open(os.path.join(target, name + '.asm'), 'wt') as f:\n f.write('\\n'.join(map(str.strip, lines)) + '\\n')\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser()\n p.add_argument('target_directory')\n args = p.parse_args()\n main(args.target_directory)\n", "path": "pwnlib/data/syscalls/generate.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nfrom __future__ import division\nimport argparse\nimport keyword\nimport os\n\nfrom pwnlib import constants\nfrom pwnlib.context import context\n\n# github.com/zachriggle/functions\nfrom functions import functions, Function, Argument\n\nARCHITECTURES = ['i386', 'amd64', 'arm', 'aarch64', 'mips']\n\nHEADER = '''\n<%\nimport collections\nimport pwnlib.abi\nimport pwnlib.constants\nimport pwnlib.shellcraft\nimport six\n%>\n'''\n\nDOCSTRING = '''\n<%docstring>{name}({arguments_comma_separated}) -> str\n\nInvokes the syscall {name}.\n\nSee 'man 2 {name}' for more information.\n\nArguments:\n{arg_docs}\nReturns:\n {return_type}\n</%docstring>\n'''\n\nARGUMENTS = \"\"\"\n<%page args=\"{arguments_default_values}\"/>\n\"\"\"\n\nCALL = \"\"\"\n<%\n abi = pwnlib.abi.ABI.syscall()\n stack = abi.stack\n regs = abi.register_arguments[1:]\n allregs = pwnlib.shellcraft.registers.current()\n\n can_pushstr = {string_arguments!r}\n can_pushstr_array = {array_arguments!r}\n\n argument_names = {argument_names!r}\n argument_values = [{arguments_comma_separated!s}]\n\n # Load all of the arguments into their destination registers / stack slots.\n register_arguments = dict()\n stack_arguments = collections.OrderedDict()\n string_arguments = dict()\n dict_arguments = dict()\n array_arguments = dict()\n syscall_repr = []\n\n for name, arg in zip(argument_names, 
argument_values):\n if arg is not None:\n syscall_repr.append('%s=%s' % (name, pwnlib.shellcraft.pretty(arg, False)))\n\n # If the argument itself (input) is a register...\n if arg in allregs:\n index = argument_names.index(name)\n if index < len(regs):\n target = regs[index]\n register_arguments[target] = arg\n elif arg is not None:\n stack_arguments[index] = arg\n\n # The argument is not a register. It is a string value, and we\n # are expecting a string value\n elif name in can_pushstr and isinstance(arg, (six.binary_type, six.text_type)):\n if isinstance(arg, six.text_type):\n arg = arg.encode('utf-8')\n string_arguments[name] = arg\n\n # The argument is not a register. It is a dictionary, and we are\n # expecting K:V paris.\n elif name in can_pushstr_array and isinstance(arg, dict):\n array_arguments[name] = ['%s=%s' % (k,v) for (k,v) in arg.items()]\n\n # The arguent is not a register. It is a list, and we are expecting\n # a list of arguments.\n elif name in can_pushstr_array and isinstance(arg, (list, tuple)):\n array_arguments[name] = arg\n\n # The argument is not a register, string, dict, or list.\n # It could be a constant string ('O_RDONLY') for an integer argument,\n # an actual integer value, or a constant.\n else:\n index = argument_names.index(name)\n if index < len(regs):\n target = regs[index]\n register_arguments[target] = arg\n elif arg is not None:\n stack_arguments[target] = arg\n\n # Some syscalls have different names on various architectures.\n # Determine which syscall number to use for the current architecture.\n for syscall in {syscalls!r}:\n if hasattr(pwnlib.constants, syscall):\n break\n else:\n raise Exception(\"Could not locate any syscalls: %r\" % syscalls)\n%>\n /* {name}(${{', '.join(syscall_repr)}}) */\n%for name, arg in string_arguments.items():\n ${{pwnlib.shellcraft.pushstr(arg, append_null=(b'\\\\x00' not in arg))}}\n ${{pwnlib.shellcraft.mov(regs[argument_names.index(name)], abi.stack)}}\n%endfor\n%for name, arg in array_arguments.items():\n ${{pwnlib.shellcraft.pushstr_array(regs[argument_names.index(name)], arg)}}\n%endfor\n%for name, arg in stack_arguments.items():\n ${{pwnlib.shellcraft.push(arg)}}\n%endfor\n ${{pwnlib.shellcraft.setregs(register_arguments)}}\n ${{pwnlib.shellcraft.syscall(syscall)}}\n\"\"\"\n\n\ndef can_be_constant(arg):\n if arg.derefcnt == 0:\n return True\n\n\ndef can_be_string(arg):\n if arg.type == 'char' and arg.derefcnt == 1:\n return True\n if arg.type == 'void' and arg.derefcnt == 1:\n return True\n\n\ndef can_be_array(arg):\n if arg.type == 'char' and arg.derefcnt == 2:\n return True\n if arg.type == 'void' and arg.derefcnt == 2:\n return True\n\n\ndef fix_bad_arg_names(func, arg):\n if arg.name == 'len':\n return 'length'\n\n if arg.name in ('str', 'repr') or keyword.iskeyword(arg.name):\n return arg.name + '_'\n\n if func.name == 'open' and arg.name == 'vararg':\n return 'mode'\n\n return arg.name\n\n\ndef get_arg_default(arg):\n return 0\n\n\ndef fix_rt_syscall_name(name):\n if name.startswith('rt_'):\n return name[3:]\n return name\n\n\ndef fix_syscall_names(name):\n # Do not use old_mmap\n if name == 'SYS_mmap':\n return ['SYS_mmap2', name]\n # Some arches don't have vanilla sigreturn\n if name.endswith('_sigreturn'):\n return ['SYS_sigreturn', 'SYS_rt_sigreturn']\n return [name]\n\n\ndef main(target):\n for arch in ARCHITECTURES:\n with context.local(arch=arch):\n generate_one(target)\n\n\ndef generate_one(target):\n SYSCALL_NAMES = [c for c in dir(constants) if c.startswith('SYS_')]\n\n for syscall in 
SYSCALL_NAMES:\n name = syscall[4:]\n\n # Skip anything with uppercase\n if name.lower() != name:\n print('Skipping %s' % name)\n continue\n\n # Skip anything that starts with 'unused' or 'sys' after stripping\n if name.startswith('unused'):\n print('Skipping %s' % name)\n continue\n\n function = functions.get(name, None)\n\n if name.startswith('rt_'):\n name = name[3:]\n\n # If we can't find a function, just stub it out with something\n # that has a vararg argument.\n if function is None:\n print('Stubbing out %s' % name)\n args = [Argument('int', 0, 'vararg')]\n function = Function('long', 0, name, args)\n\n # Some syscalls have different names on different architectures,\n # or are superceded. We try to do the \"best\" thing at runtime.\n syscalls = fix_syscall_names(syscall)\n\n # Set up the argument string for Mako\n argument_names = []\n argument_names_ = []\n argument_defaults = []\n\n string_arguments = []\n array_arguments = []\n arg_docs = []\n\n #\n\n for arg in function.args:\n argname_ = fix_bad_arg_names(function, arg)\n argname = argname_.rstrip('_')\n default = get_arg_default(arg)\n\n if can_be_array(arg):\n array_arguments.append(argname)\n\n if can_be_string(arg):\n string_arguments.append(argname)\n\n argtype = str(arg.type) + ('*' * arg.derefcnt)\n arg_docs.append(\n ' {argname_}({argtype}): {argname}'.format(\n argname_=argname_,\n argname=argname,\n argtype=argtype,\n ))\n\n # Mako is unable to use *vararg and *kwarg, so we just stub in\n # a whole bunch of additional arguments.\n if argname == 'vararg':\n for j in range(5):\n argname = 'vararg_%i' % j\n argument_names.append(argname)\n argument_names_.append(argname)\n argument_defaults.append('%s=%s' % (argname, None))\n break\n\n argument_names.append(argname)\n argument_names_.append(argname_)\n argument_defaults.append('%s=%s' % (argname_, default))\n\n arguments_default_values = ', '.join(argument_defaults)\n arguments_comma_separated = ', '.join(argument_names_)\n\n return_type = str(function.type) + ('*' * function.derefcnt)\n arg_docs = '\\n'.join(arg_docs)\n\n template_variables = {\n 'name': name,\n 'arg_docs': arg_docs,\n 'syscalls': syscalls,\n 'arguments_default_values': arguments_default_values,\n 'arguments_comma_separated': arguments_comma_separated,\n 'return_type': return_type,\n 'string_arguments': string_arguments,\n 'array_arguments': array_arguments,\n 'argument_names': argument_names,\n }\n\n lines = [\n HEADER,\n DOCSTRING.format(**template_variables),\n ARGUMENTS.format(**template_variables),\n CALL.format(**template_variables)\n ]\n\n if keyword.iskeyword(name):\n name += '_'\n with open(os.path.join(target, name + '.asm'), 'wt') as f:\n f.write('\\n'.join(map(str.strip, lines)) + '\\n')\n\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser()\n p.add_argument('target_directory')\n args = p.parse_args()\n main(args.target_directory)\n", "path": "pwnlib/data/syscalls/generate.py"}]}
| 3,256 | 933 |