problem_id (stringlengths 18–22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13–58) | prompt (stringlengths 1.71k–18.9k) | golden_diff (stringlengths 145–5.13k) | verification_info (stringlengths 465–23.6k) | num_tokens_prompt (int64 556–4.1k) | num_tokens_diff (int64 47–1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_2770 | rasdani/github-patches | git_diff | internetarchive__openlibrary-7672 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pre-commit: Add Python linting tool ruff
Add https://beta.ruff.rs to our pre-commit as a replacement for `flake8`, `isort`, `pyupgrade`, etc. but written in Rust instead of Python. It can lint the CPython codebase from scratch in 0.29 seconds.
Adopting Ruff should greatly accelerate our pre-commits and our pre-commit.ci jobs. We will run the new and old tools in parallel for a sprint or two to verify the coverage before we completely drop the old tools.
</issue>
<code>
[start of openlibrary/catalog/marc/marc_subject.py]
1 """ This entire module is deprecated,
2 openlibrary.catalog.marc.get_subjects is the preferred module
3 """
4
5 # Tell the flake8 linter to ignore this deprecated file.
6 # flake8: noqa
7
8 from collections import defaultdict
9 from deprecated import deprecated
10 from lxml import etree
11 import re
12
13
14 from openlibrary.catalog.utils.query import get_mc
15 from openlibrary.catalog.get_ia import (
16 get_from_archive,
17 marc_formats,
18 urlopen_keep_trying,
19 )
20 from openlibrary.catalog.marc import get_subjects
21 from openlibrary.catalog.marc.marc_binary import MarcBinary
22 from openlibrary.catalog.marc.marc_xml import (
23 read_marc_file,
24 MarcXml,
25 BlankTag,
26 BadSubtag,
27 )
28 from openlibrary.catalog.utils import (
29 remove_trailing_dot,
30 remove_trailing_number_dot,
31 flip_name,
32 )
33
34
35 subject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}
36
37 re_flip_name = re.compile('^(.+), ([A-Z].+)$')
38
39 # 'Rhodes, Dan (Fictitious character)'
40 re_fictitious_character = re.compile(r'^(.+), (.+)( \(.* character\))$')
41 re_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)
42 re_comma = re.compile('^([A-Z])([A-Za-z ]+?) *, ([A-Z][A-Z a-z]+)$')
43
44 re_place_comma = re.compile('^(.+), (.+)$')
45 re_paren = re.compile('[()]')
46
47
48 @deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')
49 def flip_place(s):
50 return get_subjects.flip_place(s)
51
52
53 @deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')
54 def flip_subject(s):
55 return get_subjects.flip_subject(s)
56
57
58 @deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')
59 def four_types(i):
60 return get_subjects.four_types(i)
61
62
63 archive_url = "http://archive.org/download/"
64
65
66 @deprecated
67 def load_binary(ia):
68 url = archive_url + ia + '/' + ia + '_meta.mrc'
69 f = urlopen_keep_trying(url)
70 data = f.content
71 assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]
72 if len(data) != int(data[:5]):
73 data = data.decode('utf-8').encode('raw_unicode_escape')
74 if len(data) != int(data[:5]):
75 return
76 return MarcBinary(data)
77
78
79 @deprecated
80 def load_xml(ia):
81 url = archive_url + ia + '/' + ia + '_marc.xml'
82 f = urlopen_keep_trying(url)
83 root = etree.fromstring(f.text).getroot()
84 if root.tag == '{http://www.loc.gov/MARC21/slim}collection':
85 root = root[0]
86 return MarcXml(root)
87
88
89 @deprecated
90 def subjects_for_work(rec):
91 field_map = {
92 'subject': 'subjects',
93 'place': 'subject_places',
94 'time': 'subject_times',
95 'person': 'subject_people',
96 }
97
98 subjects = four_types(read_subjects(rec))
99
100 return {field_map[k]: list(v) for k, v in subjects.items()}
101
102
103 re_edition_key = re.compile(r'^/(?:b|books)/(OL\d+M)$')
104
105
106 @deprecated
107 def get_subjects_from_ia(ia):
108 formats = marc_formats(ia)
109 if not any(formats.values()):
110 return {}
111 rec = None
112 if formats['bin']:
113 rec = load_binary(ia)
114 if not rec:
115 assert formats['xml']
116 rec = load_xml(ia)
117 return read_subjects(rec)
118
119
120 re_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\.xml|meta\.mrc)(:0:\d+)?$')
121
122
123 @deprecated
124 def get_work_subjects(w, do_get_mc=True):
125 found = set()
126 for e in w['editions']:
127 sr = e.get('source_records', [])
128 if sr:
129 for i in sr:
130 if i.endswith('initial import'):
131 continue
132 if i.startswith(('ia:', 'marc:')):
133 found.add(i)
134 continue
135 else:
136 mc = None
137 if do_get_mc:
138 m = re_edition_key.match(e['key'])
139 mc = get_mc('/b/' + m.group(1))
140 if mc:
141 if mc.endswith('initial import'):
142 continue
143 if not mc.startswith('amazon:') and not re_ia_marc.match(mc):
144 found.add('marc:' + mc)
145 subjects = []
146 for sr in found:
147 if sr.startswith('marc:ia:'):
148 subjects.append(get_subjects_from_ia(sr[8:]))
149 elif sr.startswith('marc:'):
150 loc = sr[5:]
151 data = get_from_archive(loc)
152 rec = MarcBinary(data)
153 subjects.append(read_subjects(rec))
154 else:
155 assert sr.startswith('ia:')
156 subjects.append(get_subjects_from_ia(sr[3:]))
157 return combine_subjects(subjects)
158
159
160 @deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')
161 def tidy_subject(s):
162 return get_subjects.tidy_subject(s)
163
164
165 re_aspects = re.compile(' [Aa]spects$')
166
167
168 @deprecated
169 def find_aspects(f):
170 cur = [(i, j) for i, j in f.get_subfields('ax')]
171 if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':
172 return
173 a, x = cur[0][1], cur[1][1]
174 x = x.strip('. ')
175 a = a.strip('. ')
176 if not re_aspects.search(x):
177 return
178 if a == 'Body, Human':
179 a = 'the Human body'
180 return x + ' of ' + flip_subject(a)
181
182
183 @deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')
184 def read_subjects(rec):
185 return get_subjects.read_subject(s)
186
187
188 @deprecated
189 def combine_subjects(subjects):
190 all_subjects = defaultdict(lambda: defaultdict(int))
191 for a in subjects:
192 for b, c in a.items():
193 for d, e in c.items():
194 all_subjects[b][d] += e
195 return {k: dict(v) for k, v in all_subjects.items()}
196
[end of openlibrary/catalog/marc/marc_subject.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openlibrary/catalog/marc/marc_subject.py b/openlibrary/catalog/marc/marc_subject.py
--- a/openlibrary/catalog/marc/marc_subject.py
+++ b/openlibrary/catalog/marc/marc_subject.py
@@ -2,8 +2,8 @@
openlibrary.catalog.marc.get_subjects is the preferred module
"""
-# Tell the flake8 linter to ignore this deprecated file.
-# flake8: noqa
+# Tell the ruff linter to ignore this deprecated file.
+# ruff: noqa
from collections import defaultdict
from deprecated import deprecated
| {"golden_diff": "diff --git a/openlibrary/catalog/marc/marc_subject.py b/openlibrary/catalog/marc/marc_subject.py\n--- a/openlibrary/catalog/marc/marc_subject.py\n+++ b/openlibrary/catalog/marc/marc_subject.py\n@@ -2,8 +2,8 @@\n openlibrary.catalog.marc.get_subjects is the preferred module\n \"\"\"\n \n-# Tell the flake8 linter to ignore this deprecated file.\n-# flake8: noqa\n+# Tell the ruff linter to ignore this deprecated file.\n+# ruff: noqa\n \n from collections import defaultdict\n from deprecated import deprecated\n", "issue": "pre-commit: Add Python linting tool ruff\nAdd https://beta.ruff.rs to our pre-commit as a replacement for `flake8`, `isort`, `pyupgrade`, etc. but written in Rust instead of Python. It can lint the CPython codebase from scratch in 0.29 seconds.\r\n\r\nAdopting Ruff should greatly accelerate our pre-commits and our pre-commit.ci jobs. We will run the new and old tools in parallel for a sprint or two to verify the coverage before we completely drop the old tools.\n", "before_files": [{"content": "\"\"\" This entire module is deprecated,\n openlibrary.catalog.marc.get_subjects is the preferred module\n\"\"\"\n\n# Tell the flake8 linter to ignore this deprecated file.\n# flake8: noqa\n\nfrom collections import defaultdict\nfrom deprecated import deprecated\nfrom lxml import etree\nimport re\n\n\nfrom openlibrary.catalog.utils.query import get_mc\nfrom openlibrary.catalog.get_ia import (\n get_from_archive,\n marc_formats,\n urlopen_keep_trying,\n)\nfrom openlibrary.catalog.marc import get_subjects\nfrom openlibrary.catalog.marc.marc_binary import MarcBinary\nfrom openlibrary.catalog.marc.marc_xml import (\n read_marc_file,\n MarcXml,\n BlankTag,\n BadSubtag,\n)\nfrom openlibrary.catalog.utils import (\n remove_trailing_dot,\n remove_trailing_number_dot,\n flip_name,\n)\n\n\nsubject_fields = {'600', '610', '611', '630', '648', '650', '651', '662'}\n\nre_flip_name = re.compile('^(.+), ([A-Z].+)$')\n\n# 'Rhodes, Dan (Fictitious character)'\nre_fictitious_character = re.compile(r'^(.+), (.+)( \\(.* character\\))$')\nre_etc = re.compile('^(.+?)[, .]+etc[, .]?$', re.I)\nre_comma = re.compile('^([A-Z])([A-Za-z ]+?) 
*, ([A-Z][A-Z a-z]+)$')\n\nre_place_comma = re.compile('^(.+), (.+)$')\nre_paren = re.compile('[()]')\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_place() instead.')\ndef flip_place(s):\n return get_subjects.flip_place(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.flip_subject() instead.')\ndef flip_subject(s):\n return get_subjects.flip_subject(s)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.four_types() instead.')\ndef four_types(i):\n return get_subjects.four_types(i)\n\n\narchive_url = \"http://archive.org/download/\"\n\n\n@deprecated\ndef load_binary(ia):\n url = archive_url + ia + '/' + ia + '_meta.mrc'\n f = urlopen_keep_trying(url)\n data = f.content\n assert '<title>Internet Archive: Page Not Found</title>' not in data[:200]\n if len(data) != int(data[:5]):\n data = data.decode('utf-8').encode('raw_unicode_escape')\n if len(data) != int(data[:5]):\n return\n return MarcBinary(data)\n\n\n@deprecated\ndef load_xml(ia):\n url = archive_url + ia + '/' + ia + '_marc.xml'\n f = urlopen_keep_trying(url)\n root = etree.fromstring(f.text).getroot()\n if root.tag == '{http://www.loc.gov/MARC21/slim}collection':\n root = root[0]\n return MarcXml(root)\n\n\n@deprecated\ndef subjects_for_work(rec):\n field_map = {\n 'subject': 'subjects',\n 'place': 'subject_places',\n 'time': 'subject_times',\n 'person': 'subject_people',\n }\n\n subjects = four_types(read_subjects(rec))\n\n return {field_map[k]: list(v) for k, v in subjects.items()}\n\n\nre_edition_key = re.compile(r'^/(?:b|books)/(OL\\d+M)$')\n\n\n@deprecated\ndef get_subjects_from_ia(ia):\n formats = marc_formats(ia)\n if not any(formats.values()):\n return {}\n rec = None\n if formats['bin']:\n rec = load_binary(ia)\n if not rec:\n assert formats['xml']\n rec = load_xml(ia)\n return read_subjects(rec)\n\n\nre_ia_marc = re.compile(r'^(?:.*/)?([^/]+)_(marc\\.xml|meta\\.mrc)(:0:\\d+)?$')\n\n\n@deprecated\ndef get_work_subjects(w, do_get_mc=True):\n found = set()\n for e in w['editions']:\n sr = e.get('source_records', [])\n if sr:\n for i in sr:\n if i.endswith('initial import'):\n continue\n if i.startswith(('ia:', 'marc:')):\n found.add(i)\n continue\n else:\n mc = None\n if do_get_mc:\n m = re_edition_key.match(e['key'])\n mc = get_mc('/b/' + m.group(1))\n if mc:\n if mc.endswith('initial import'):\n continue\n if not mc.startswith('amazon:') and not re_ia_marc.match(mc):\n found.add('marc:' + mc)\n subjects = []\n for sr in found:\n if sr.startswith('marc:ia:'):\n subjects.append(get_subjects_from_ia(sr[8:]))\n elif sr.startswith('marc:'):\n loc = sr[5:]\n data = get_from_archive(loc)\n rec = MarcBinary(data)\n subjects.append(read_subjects(rec))\n else:\n assert sr.startswith('ia:')\n subjects.append(get_subjects_from_ia(sr[3:]))\n return combine_subjects(subjects)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.tidy_subject() instead.')\ndef tidy_subject(s):\n return get_subjects.tidy_subject(s)\n\n\nre_aspects = re.compile(' [Aa]spects$')\n\n\n@deprecated\ndef find_aspects(f):\n cur = [(i, j) for i, j in f.get_subfields('ax')]\n if len(cur) < 2 or cur[0][0] != 'a' or cur[1][0] != 'x':\n return\n a, x = cur[0][1], cur[1][1]\n x = x.strip('. ')\n a = a.strip('. 
')\n if not re_aspects.search(x):\n return\n if a == 'Body, Human':\n a = 'the Human body'\n return x + ' of ' + flip_subject(a)\n\n\n@deprecated('Use openlibrary.catalog.marc.get_subjects.read_subject() instead.')\ndef read_subjects(rec):\n return get_subjects.read_subject(s)\n\n\n@deprecated\ndef combine_subjects(subjects):\n all_subjects = defaultdict(lambda: defaultdict(int))\n for a in subjects:\n for b, c in a.items():\n for d, e in c.items():\n all_subjects[b][d] += e\n return {k: dict(v) for k, v in all_subjects.items()}\n", "path": "openlibrary/catalog/marc/marc_subject.py"}]} | 2,590 | 126 |
gh_patches_debug_36850 | rasdani/github-patches | git_diff | buildbot__buildbot-4934 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bitbucket Cloud web hook returns 500
Started three weeks ago (missed it due to vacations), my guess is that Bitbucket changed something. Error log looks like this:
```
2019-07-02 14:33:40+0300 [_GenericHTTPChannelProtocol,13,18.234.32.226] adding changes from web hook
Traceback (most recent call last):
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py", line 106, in getAndSubmitChanges
changes, src = yield self.getChanges(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
return _cancellableInlineCallbacks(gen)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
_inlineCallbacks(None, g, status)
--- <exception caught here> ---
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py", line 106, in getAndSubmitChanges
changes, src = yield self.getChanges(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
result = g.send(result)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py", line 167, in getChanges
changes, src = yield handler.getChanges(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py", line 163, in getChanges
return self.process(request)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py", line 54, in process
return handler(payload)
File "/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py", line 97, in handle_repo_push
payload['actor']['username']),
builtins.KeyError: 'username'
```
Last line reveals the culprit.
</issue>
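The traceback bottoms out in the `'username'` lookup on `payload['actor']`, and the accepted fix further below switches that lookup to `nickname` and makes the `project` lookup optional. A minimal sketch of that defensive handling (toy helpers, not Buildbot code; the key names follow the golden diff below, not Bitbucket's documented payload):

```python
# Sketch of tolerant payload access; assumes only that 'display_name' and
# 'nickname' are present, per the fix applied in the golden diff below.
def format_author(actor: dict) -> str:
    display_name = actor.get('display_name', 'unknown')
    # 'username' no longer arrives in webhook payloads; fall back gracefully.
    handle = actor.get('nickname') or actor.get('username') or 'unknown'
    return '{} <{}>'.format(display_name, handle)


def project_name(repository: dict) -> str:
    # Bitbucket Cloud repositories are not always attached to a project.
    return repository.get('project', {'name': 'none'})['name']


if __name__ == '__main__':
    print(format_author({'display_name': 'Jane Doe', 'nickname': 'jdoe'}))  # Jane Doe <jdoe>
    print(project_name({'links': {}}))                                      # none
```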
<code>
[start of master/buildbot/www/hooks/bitbucketcloud.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15 # Copyright Mamba Team
16
17
18 import json
19
20 from twisted.python import log
21
22 from buildbot.util import bytes2unicode
23
24 GIT_BRANCH_REF = "refs/heads/{}"
25 GIT_MERGE_REF = "refs/pull-requests/{}/merge"
26 GIT_TAG_REF = "refs/tags/{}"
27
28 _HEADER_EVENT = b'X-Event-Key'
29
30
31 class BitbucketCloudEventHandler:
32
33 def __init__(self, master, options=None):
34 if options is None:
35 options = {}
36 self.master = master
37 if not isinstance(options, dict):
38 options = {}
39 self.options = options
40 self._codebase = self.options.get('codebase', None)
41
42 def process(self, request):
43 payload = self._get_payload(request)
44 event_type = request.getHeader(_HEADER_EVENT)
45 event_type = bytes2unicode(event_type)
46 log.msg("Processing event {header}: {event}"
47 .format(header=_HEADER_EVENT, event=event_type))
48 event_type = event_type.replace(":", "_")
49 handler = getattr(self, 'handle_{}'.format(event_type), None)
50
51 if handler is None:
52 raise ValueError('Unknown event: {}'.format(event_type))
53
54 return handler(payload)
55
56 def _get_payload(self, request):
57 content = request.content.read()
58 content = bytes2unicode(content)
59 content_type = request.getHeader(b'Content-Type')
60 content_type = bytes2unicode(content_type)
61 if content_type.startswith('application/json'):
62 payload = json.loads(content)
63 else:
64 raise ValueError('Unknown content type: {}'
65 .format(content_type))
66
67 log.msg("Payload: {}".format(payload))
68
69 return payload
70
71 def handle_repo_push(self, payload):
72 changes = []
73 project = payload['repository']['project']['name']
74 repo_url = payload['repository']['links']['self']['href']
75 web_url = payload['repository']['links']['html']['href']
76
77 for payload_change in payload['push']['changes']:
78 if payload_change['new']:
79 age = 'new'
80 category = 'push'
81 else: # when new is null the ref is deleted
82 age = 'old'
83 category = 'ref-deleted'
84
85 commit_hash = payload_change[age]['target']['hash']
86
87 if payload_change[age]['type'] == 'branch':
88 branch = GIT_BRANCH_REF.format(payload_change[age]['name'])
89 elif payload_change[age]['type'] == 'tag':
90 branch = GIT_TAG_REF.format(payload_change[age]['name'])
91
92 change = {
93 'revision': commit_hash,
94 'revlink': '{}/commits/{}'.format(web_url, commit_hash),
95 'repository': repo_url,
96 'author': '{} <{}>'.format(payload['actor']['display_name'],
97 payload['actor']['username']),
98 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),
99 'branch': branch,
100 'project': project,
101 'category': category
102 }
103
104 if callable(self._codebase):
105 change['codebase'] = self._codebase(payload)
106 elif self._codebase is not None:
107 change['codebase'] = self._codebase
108
109 changes.append(change)
110
111 return (changes, payload['repository']['scm'])
112
113 def handle_pullrequest_created(self, payload):
114 return self.handle_pullrequest(
115 payload,
116 GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),
117 "pull-created")
118
119 def handle_pullrequest_updated(self, payload):
120 return self.handle_pullrequest(
121 payload,
122 GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),
123 "pull-updated")
124
125 def handle_pullrequest_fulfilled(self, payload):
126 return self.handle_pullrequest(
127 payload,
128 GIT_BRANCH_REF.format(
129 payload['pullrequest']['toRef']['branch']['name']),
130 "pull-fulfilled")
131
132 def handle_pullrequest_rejected(self, payload):
133 return self.handle_pullrequest(
134 payload,
135 GIT_BRANCH_REF.format(
136 payload['pullrequest']['fromRef']['branch']['name']),
137 "pull-rejected")
138
139 def handle_pullrequest(self, payload, refname, category):
140 pr_number = int(payload['pullrequest']['id'])
141 repo_url = payload['repository']['links']['self']['href']
142 change = {
143 'revision': payload['pullrequest']['fromRef']['commit']['hash'],
144 'revlink': payload['pullrequest']['link'],
145 'repository': repo_url,
146 'author': '{} <{}>'.format(payload['actor']['display_name'],
147 payload['actor']['username']),
148 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),
149 'branch': refname,
150 'project': payload['repository']['project']['name'],
151 'category': category,
152 'properties': {'pullrequesturl': payload['pullrequest']['link']}
153 }
154
155 if callable(self._codebase):
156 change['codebase'] = self._codebase(payload)
157 elif self._codebase is not None:
158 change['codebase'] = self._codebase
159
160 return [change], payload['repository']['scm']
161
162 def getChanges(self, request):
163 return self.process(request)
164
165
166 bitbucketcloud = BitbucketCloudEventHandler
167
[end of master/buildbot/www/hooks/bitbucketcloud.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/master/buildbot/www/hooks/bitbucketcloud.py b/master/buildbot/www/hooks/bitbucketcloud.py
--- a/master/buildbot/www/hooks/bitbucketcloud.py
+++ b/master/buildbot/www/hooks/bitbucketcloud.py
@@ -70,7 +70,7 @@
def handle_repo_push(self, payload):
changes = []
- project = payload['repository']['project']['name']
+ project = payload['repository'].get('project', {'name': 'none'})['name']
repo_url = payload['repository']['links']['self']['href']
web_url = payload['repository']['links']['html']['href']
@@ -94,7 +94,7 @@
'revlink': '{}/commits/{}'.format(web_url, commit_hash),
'repository': repo_url,
'author': '{} <{}>'.format(payload['actor']['display_name'],
- payload['actor']['username']),
+ payload['actor']['nickname']),
'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),
'branch': branch,
'project': project,
@@ -139,15 +139,16 @@
def handle_pullrequest(self, payload, refname, category):
pr_number = int(payload['pullrequest']['id'])
repo_url = payload['repository']['links']['self']['href']
+ project = payload['repository'].get('project', {'name': 'none'})['name']
change = {
'revision': payload['pullrequest']['fromRef']['commit']['hash'],
'revlink': payload['pullrequest']['link'],
'repository': repo_url,
'author': '{} <{}>'.format(payload['actor']['display_name'],
- payload['actor']['username']),
+ payload['actor']['nickname']),
'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),
'branch': refname,
- 'project': payload['repository']['project']['name'],
+ 'project': project,
'category': category,
'properties': {'pullrequesturl': payload['pullrequest']['link']}
}
| {"golden_diff": "diff --git a/master/buildbot/www/hooks/bitbucketcloud.py b/master/buildbot/www/hooks/bitbucketcloud.py\n--- a/master/buildbot/www/hooks/bitbucketcloud.py\n+++ b/master/buildbot/www/hooks/bitbucketcloud.py\n@@ -70,7 +70,7 @@\n \n def handle_repo_push(self, payload):\n changes = []\n- project = payload['repository']['project']['name']\n+ project = payload['repository'].get('project', {'name': 'none'})['name']\n repo_url = payload['repository']['links']['self']['href']\n web_url = payload['repository']['links']['html']['href']\n \n@@ -94,7 +94,7 @@\n 'revlink': '{}/commits/{}'.format(web_url, commit_hash),\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n- payload['actor']['username']),\n+ payload['actor']['nickname']),\n 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),\n 'branch': branch,\n 'project': project,\n@@ -139,15 +139,16 @@\n def handle_pullrequest(self, payload, refname, category):\n pr_number = int(payload['pullrequest']['id'])\n repo_url = payload['repository']['links']['self']['href']\n+ project = payload['repository'].get('project', {'name': 'none'})['name']\n change = {\n 'revision': payload['pullrequest']['fromRef']['commit']['hash'],\n 'revlink': payload['pullrequest']['link'],\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n- payload['actor']['username']),\n+ payload['actor']['nickname']),\n 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),\n 'branch': refname,\n- 'project': payload['repository']['project']['name'],\n+ 'project': project,\n 'category': category,\n 'properties': {'pullrequesturl': payload['pullrequest']['link']}\n }\n", "issue": "Bitbucket Cloud web hook returns 500\nStarted three weeks ago (missed it due to vacations), my guess is that Bitbucket changed something. 
Error log looks like this:\r\n\r\n```\r\n2019-07-02 14:33:40+0300 [_GenericHTTPChannelProtocol,13,18.234.32.226] adding changes from web hook\r\n Traceback (most recent call last):\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py\", line 106, in getAndSubmitChanges\r\n changes, src = yield self.getChanges(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1613, in unwindGenerator\r\n return _cancellableInlineCallbacks(gen)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1529, in _cancellableInlineCallbacks\r\n _inlineCallbacks(None, g, status)\r\n --- <exception caught here> ---\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py\", line 106, in getAndSubmitChanges\r\n changes, src = yield self.getChanges(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/change_hook.py\", line 167, in getChanges\r\n changes, src = yield handler.getChanges(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py\", line 163, in getChanges\r\n return self.process(request)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py\", line 54, in process\r\n return handler(payload)\r\n File \"/home/buildbot/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/bitbucketcloud.py\", line 97, in handle_repo_push\r\n payload['actor']['username']),\r\n builtins.KeyError: 'username'\r\n```\r\n\r\nLast line reveals the culprit.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n# Copyright Mamba Team\n\n\nimport json\n\nfrom twisted.python import log\n\nfrom buildbot.util import bytes2unicode\n\nGIT_BRANCH_REF = \"refs/heads/{}\"\nGIT_MERGE_REF = \"refs/pull-requests/{}/merge\"\nGIT_TAG_REF = \"refs/tags/{}\"\n\n_HEADER_EVENT = b'X-Event-Key'\n\n\nclass BitbucketCloudEventHandler:\n\n def __init__(self, master, options=None):\n if options is None:\n options = {}\n self.master = master\n if not isinstance(options, dict):\n options = {}\n self.options = options\n self._codebase = self.options.get('codebase', None)\n\n def process(self, request):\n payload = self._get_payload(request)\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2unicode(event_type)\n log.msg(\"Processing event {header}: {event}\"\n .format(header=_HEADER_EVENT, event=event_type))\n event_type = event_type.replace(\":\", \"_\")\n handler = getattr(self, 'handle_{}'.format(event_type), None)\n\n if handler is None:\n raise ValueError('Unknown event: {}'.format(event_type))\n\n return handler(payload)\n\n def _get_payload(self, request):\n content = request.content.read()\n content = bytes2unicode(content)\n content_type = request.getHeader(b'Content-Type')\n content_type = bytes2unicode(content_type)\n if content_type.startswith('application/json'):\n payload = json.loads(content)\n else:\n raise ValueError('Unknown content type: {}'\n .format(content_type))\n\n log.msg(\"Payload: {}\".format(payload))\n\n return payload\n\n def handle_repo_push(self, payload):\n changes = []\n project = payload['repository']['project']['name']\n repo_url = payload['repository']['links']['self']['href']\n web_url = payload['repository']['links']['html']['href']\n\n for payload_change in payload['push']['changes']:\n if payload_change['new']:\n age = 'new'\n category = 'push'\n else: # when new is null the ref is deleted\n age = 'old'\n category = 'ref-deleted'\n\n commit_hash = payload_change[age]['target']['hash']\n\n if payload_change[age]['type'] == 'branch':\n branch = GIT_BRANCH_REF.format(payload_change[age]['name'])\n elif payload_change[age]['type'] == 'tag':\n branch = GIT_TAG_REF.format(payload_change[age]['name'])\n\n change = {\n 'revision': commit_hash,\n 'revlink': '{}/commits/{}'.format(web_url, commit_hash),\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n payload['actor']['username']),\n 'comments': 'Bitbucket Cloud commit {}'.format(commit_hash),\n 'branch': branch,\n 'project': project,\n 'category': category\n }\n\n if callable(self._codebase):\n change['codebase'] = self._codebase(payload)\n elif self._codebase is not None:\n change['codebase'] = self._codebase\n\n changes.append(change)\n\n return (changes, payload['repository']['scm'])\n\n def handle_pullrequest_created(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),\n \"pull-created\")\n\n def handle_pullrequest_updated(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_MERGE_REF.format(int(payload['pullrequest']['id'])),\n \"pull-updated\")\n\n def handle_pullrequest_fulfilled(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_BRANCH_REF.format(\n 
payload['pullrequest']['toRef']['branch']['name']),\n \"pull-fulfilled\")\n\n def handle_pullrequest_rejected(self, payload):\n return self.handle_pullrequest(\n payload,\n GIT_BRANCH_REF.format(\n payload['pullrequest']['fromRef']['branch']['name']),\n \"pull-rejected\")\n\n def handle_pullrequest(self, payload, refname, category):\n pr_number = int(payload['pullrequest']['id'])\n repo_url = payload['repository']['links']['self']['href']\n change = {\n 'revision': payload['pullrequest']['fromRef']['commit']['hash'],\n 'revlink': payload['pullrequest']['link'],\n 'repository': repo_url,\n 'author': '{} <{}>'.format(payload['actor']['display_name'],\n payload['actor']['username']),\n 'comments': 'Bitbucket Cloud Pull Request #{}'.format(pr_number),\n 'branch': refname,\n 'project': payload['repository']['project']['name'],\n 'category': category,\n 'properties': {'pullrequesturl': payload['pullrequest']['link']}\n }\n\n if callable(self._codebase):\n change['codebase'] = self._codebase(payload)\n elif self._codebase is not None:\n change['codebase'] = self._codebase\n\n return [change], payload['repository']['scm']\n\n def getChanges(self, request):\n return self.process(request)\n\n\nbitbucketcloud = BitbucketCloudEventHandler\n", "path": "master/buildbot/www/hooks/bitbucketcloud.py"}]} | 2,779 | 446 |
gh_patches_debug_21591 | rasdani/github-patches | git_diff | microsoft__torchgeo-16 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improvements to VHR-10 documentation
Need better docs describing the format of the dataset.
</issue>
<code>
[start of torchgeo/datasets/nwpu.py]
1 import os
2 from typing import Any, Callable, Optional, Tuple
3
4 from PIL import Image
5 from torchvision.datasets import VisionDataset
6 from torchvision.datasets.utils import (
7 check_integrity,
8 download_file_from_google_drive,
9 download_url,
10 )
11
12
13 class VHR10(VisionDataset):
14 """`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.
15
16 Includes object detection bounding boxes from original paper and instance
17 segmentation masks from follow-up publications. If you use this dataset in your
18 research, please cite the following papers:
19
20 * https://doi.org/10.1016/j.isprsjprs.2014.10.002
21 * https://doi.org/10.1109/IGARSS.2019.8898573
22 * https://doi.org/10.3390/rs12060989
23 """
24
25 base_folder = "vhr10"
26 image_meta = {
27 "file_id": "1--foZ3dV5OCsqXQXT84UeKtrAqc5CkAE",
28 "filename": "NWPU VHR-10 dataset.rar",
29 "md5": "d30a7ff99d92123ebb0b3a14d9102081",
30 }
31 target_meta = {
32 "url": (
33 "https://raw.githubusercontent.com/chaozhong2010/VHR-10_dataset_coco/"
34 "master/NWPU%20VHR-10_dataset_coco/annotations.json"
35 ),
36 "filename": "annotations.json",
37 "md5": "7c76ec50c17a61bb0514050d20f22c08",
38 }
39
40 def __init__(
41 self,
42 root: str,
43 transform: Optional[Callable[[Any], Any]] = None,
44 target_transform: Optional[Callable[[Any], Any]] = None,
45 transforms: Optional[Callable[[Any], Any]] = None,
46 download: bool = False,
47 ) -> None:
48 """Initialize a new VHR-10 dataset instance.
49
50 Parameters:
51 root: root directory where dataset can be found
52 transform: a function/transform that takes in a PIL image and returns a
53 transformed version
54 target_transform: a function/transform that takes in the target and
55 transforms it
56 transforms: a function/transform that takes input sample and its target as
57 entry and returns a transformed version
58 download: if True, download dataset and store it in the root directory
59 """
60 super().__init__(root, transforms, transform, target_transform)
61
62 if download:
63 self.download()
64
65 if not self._check_integrity():
66 raise RuntimeError(
67 "Dataset not found or corrupted. "
68 + "You can use download=True to download it"
69 )
70
71 # Must be installed to parse annotations file
72 from pycocotools.coco import COCO
73
74 self.coco = COCO(
75 os.path.join(
76 self.root,
77 self.base_folder,
78 "NWPU VHR-10 dataset",
79 self.target_meta["filename"],
80 )
81 )
82 self.ids = list(sorted(self.coco.imgs.keys()))
83
84 def __getitem__(self, index: int) -> Tuple[Any, Any]:
85 """Return an index within the dataset.
86
87 Parameters:
88 idx: index to return
89
90 Returns:
91 data and label at that index
92 """
93 id = self.ids[index]
94 image = self._load_image(id)
95 annot = self._load_target(id)
96
97 target = dict(image_id=id, annotations=annot)
98
99 if self.transforms is not None:
100 image, target = self.transforms(image, target)
101
102 return image, target
103
104 def __len__(self) -> int:
105 """Return the number of data points in the dataset.
106
107 Returns:
108 length of the dataset
109 """
110 return len(self.ids)
111
112 def _load_image(self, id: int) -> Image.Image:
113 """Load a single image.
114
115 Parameters:
116 id: unique ID of the image
117
118 Returns:
119 the image
120 """
121 path = self.coco.loadImgs(id)[0]["file_name"]
122 return Image.open(
123 os.path.join(
124 self.root,
125 self.base_folder,
126 "NWPU VHR-10 dataset",
127 "positive image set",
128 path,
129 )
130 ).convert("RGB")
131
132 def _load_target(self, id: int) -> Any:
133 """Load the annotations for a single image.
134
135 Parameters:
136 id: unique ID of the image
137
138 Returns:
139 the annotations
140 """
141 return self.coco.loadAnns(self.coco.getAnnIds(id))
142
143 def _check_integrity(self) -> bool:
144 """Check integrity of dataset.
145
146 Returns:
147 True if dataset MD5s match, else False
148 """
149 image: bool = check_integrity(
150 os.path.join(self.root, self.base_folder, self.image_meta["filename"]),
151 self.image_meta["md5"],
152 )
153 target: bool = check_integrity(
154 os.path.join(
155 self.root,
156 self.base_folder,
157 "NWPU VHR-10 dataset",
158 self.target_meta["filename"],
159 ),
160 self.target_meta["md5"],
161 )
162 return image and target
163
164 def download(self) -> None:
165 """Download the dataset and extract it."""
166
167 if self._check_integrity():
168 print("Files already downloaded and verified")
169 return
170
171 download_file_from_google_drive(
172 self.image_meta["file_id"],
173 os.path.join(self.root, self.base_folder),
174 self.image_meta["filename"],
175 self.image_meta["md5"],
176 )
177
178 # Must be installed to extract RAR file
179 import rarfile
180
181 with rarfile.RarFile(
182 os.path.join(self.root, self.base_folder, self.image_meta["filename"])
183 ) as f:
184 f.extractall(os.path.join(self.root, self.base_folder))
185
186 download_url(
187 self.target_meta["url"],
188 os.path.join(self.root, self.base_folder, "NWPU VHR-10 dataset"),
189 self.target_meta["filename"],
190 self.target_meta["md5"],
191 )
192
[end of torchgeo/datasets/nwpu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchgeo/datasets/nwpu.py b/torchgeo/datasets/nwpu.py
--- a/torchgeo/datasets/nwpu.py
+++ b/torchgeo/datasets/nwpu.py
@@ -11,7 +11,31 @@
class VHR10(VisionDataset):
- """`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.
+ """Northwestern Polytechnical University (NWPU) very-high-resolution ten-class
+ (VHR-10) remote sensing image dataset.
+
+ Consists of 800 VHR optical remote sensing images, where 715 color images were
+ acquired from Google Earth with the spatial resolution ranging from 0.5 to 2 m,
+ and 85 pansharpened color infrared (CIR) images were acquired from Vaihingen data
+ with a spatial resolution of 0.08 m.
+
+ The data set is divided into two sets:
+
+ 1. Positive image set (650 images) which contains at least one target in an image
+ 2. Negative image set (150 images) does not contain any targets
+
+ The positive image set consists of objects from ten classes:
+
+ 1. Airplanes (757)
+ 2. Ships (302)
+ 3. Storage tanks (655)
+ 4. Baseball diamonds (390)
+ 5. Tennis courts (524)
+ 6. Basketball courts (159)
+ 7. Ground track fields (163)
+ 8. Harbors (224)
+ 9. Bridges (124)
+ 10. Vehicles (477)
Includes object detection bounding boxes from original paper and instance
segmentation masks from follow-up publications. If you use this dataset in your
| {"golden_diff": "diff --git a/torchgeo/datasets/nwpu.py b/torchgeo/datasets/nwpu.py\n--- a/torchgeo/datasets/nwpu.py\n+++ b/torchgeo/datasets/nwpu.py\n@@ -11,7 +11,31 @@\n \n \n class VHR10(VisionDataset):\n- \"\"\"`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.\n+ \"\"\"Northwestern Polytechnical University (NWPU) very-high-resolution ten-class\n+ (VHR-10) remote sensing image dataset.\n+\n+ Consists of 800 VHR optical remote sensing images, where 715 color images were\n+ acquired from Google Earth with the spatial resolution ranging from 0.5 to 2 m,\n+ and 85 pansharpened color infrared (CIR) images were acquired from Vaihingen data\n+ with a spatial resolution of 0.08 m.\n+\n+ The data set is divided into two sets:\n+\n+ 1. Positive image set (650 images) which contains at least one target in an image\n+ 2. Negative image set (150 images) does not contain any targets\n+\n+ The positive image set consists of objects from ten classes:\n+\n+ 1. Airplanes (757)\n+ 2. Ships (302)\n+ 3. Storage tanks (655)\n+ 4. Baseball diamonds (390)\n+ 5. Tennis courts (524)\n+ 6. Basketball courts (159)\n+ 7. Ground track fields (163)\n+ 8. Harbors (224)\n+ 9. Bridges (124)\n+ 10. Vehicles (477)\n \n Includes object detection bounding boxes from original paper and instance\n segmentation masks from follow-up publications. If you use this dataset in your\n", "issue": "Improvements to VHR-10 documentation\nNeed better docs describing the format of the dataset.\n", "before_files": [{"content": "import os\nfrom typing import Any, Callable, Optional, Tuple\n\nfrom PIL import Image\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.datasets.utils import (\n check_integrity,\n download_file_from_google_drive,\n download_url,\n)\n\n\nclass VHR10(VisionDataset):\n \"\"\"`NWPU VHR-10 <https://doi.org/10.1016/j.isprsjprs.2014.10.002>`_ Dataset.\n\n Includes object detection bounding boxes from original paper and instance\n segmentation masks from follow-up publications. 
If you use this dataset in your\n research, please cite the following papers:\n\n * https://doi.org/10.1016/j.isprsjprs.2014.10.002\n * https://doi.org/10.1109/IGARSS.2019.8898573\n * https://doi.org/10.3390/rs12060989\n \"\"\"\n\n base_folder = \"vhr10\"\n image_meta = {\n \"file_id\": \"1--foZ3dV5OCsqXQXT84UeKtrAqc5CkAE\",\n \"filename\": \"NWPU VHR-10 dataset.rar\",\n \"md5\": \"d30a7ff99d92123ebb0b3a14d9102081\",\n }\n target_meta = {\n \"url\": (\n \"https://raw.githubusercontent.com/chaozhong2010/VHR-10_dataset_coco/\"\n \"master/NWPU%20VHR-10_dataset_coco/annotations.json\"\n ),\n \"filename\": \"annotations.json\",\n \"md5\": \"7c76ec50c17a61bb0514050d20f22c08\",\n }\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable[[Any], Any]] = None,\n target_transform: Optional[Callable[[Any], Any]] = None,\n transforms: Optional[Callable[[Any], Any]] = None,\n download: bool = False,\n ) -> None:\n \"\"\"Initialize a new VHR-10 dataset instance.\n\n Parameters:\n root: root directory where dataset can be found\n transform: a function/transform that takes in a PIL image and returns a\n transformed version\n target_transform: a function/transform that takes in the target and\n transforms it\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n \"\"\"\n super().__init__(root, transforms, transform, target_transform)\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n \"Dataset not found or corrupted. \"\n + \"You can use download=True to download it\"\n )\n\n # Must be installed to parse annotations file\n from pycocotools.coco import COCO\n\n self.coco = COCO(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n self.target_meta[\"filename\"],\n )\n )\n self.ids = list(sorted(self.coco.imgs.keys()))\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"Return an index within the dataset.\n\n Parameters:\n idx: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n id = self.ids[index]\n image = self._load_image(id)\n annot = self._load_target(id)\n\n target = dict(image_id=id, annotations=annot)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.ids)\n\n def _load_image(self, id: int) -> Image.Image:\n \"\"\"Load a single image.\n\n Parameters:\n id: unique ID of the image\n\n Returns:\n the image\n \"\"\"\n path = self.coco.loadImgs(id)[0][\"file_name\"]\n return Image.open(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n \"positive image set\",\n path,\n )\n ).convert(\"RGB\")\n\n def _load_target(self, id: int) -> Any:\n \"\"\"Load the annotations for a single image.\n\n Parameters:\n id: unique ID of the image\n\n Returns:\n the annotations\n \"\"\"\n return self.coco.loadAnns(self.coco.getAnnIds(id))\n\n def _check_integrity(self) -> bool:\n \"\"\"Check integrity of dataset.\n\n Returns:\n True if dataset MD5s match, else False\n \"\"\"\n image: bool = check_integrity(\n os.path.join(self.root, self.base_folder, self.image_meta[\"filename\"]),\n self.image_meta[\"md5\"],\n )\n target: bool = check_integrity(\n os.path.join(\n self.root,\n self.base_folder,\n \"NWPU VHR-10 dataset\",\n 
self.target_meta[\"filename\"],\n ),\n self.target_meta[\"md5\"],\n )\n return image and target\n\n def download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n download_file_from_google_drive(\n self.image_meta[\"file_id\"],\n os.path.join(self.root, self.base_folder),\n self.image_meta[\"filename\"],\n self.image_meta[\"md5\"],\n )\n\n # Must be installed to extract RAR file\n import rarfile\n\n with rarfile.RarFile(\n os.path.join(self.root, self.base_folder, self.image_meta[\"filename\"])\n ) as f:\n f.extractall(os.path.join(self.root, self.base_folder))\n\n download_url(\n self.target_meta[\"url\"],\n os.path.join(self.root, self.base_folder, \"NWPU VHR-10 dataset\"),\n self.target_meta[\"filename\"],\n self.target_meta[\"md5\"],\n )\n", "path": "torchgeo/datasets/nwpu.py"}]} | 2,447 | 456 |
gh_patches_debug_14225 | rasdani/github-patches | git_diff | rucio__rucio-6130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Keyword `type` gets overwritten
Motivation
----------
The keyword `type` in identities.py gets overwritten, this is a bad practice.
https://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134
Modification
------------
Use another variable name.
Keyword `type` gets overwritten
Motivation
----------
The keyword `type` in identities.py gets overwritten, this is a bad practice.
https://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134
Modification
------------
Use another variable name.
</issue>
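The complaint is plain builtin shadowing: while the parameter named `type` is in scope, the builtin `type()` is unreachable, which is what the accepted fix below avoids by renaming the parameter to `type_` (the conventional trailing-underscore rename). A toy illustration of the breakage and the rename (not Rucio code):

```python
# With the parameter named `type`, the builtin is shadowed inside the function.
def accounts_for(identity_key, type):
    # Here `type` is the string 'userpass'; calling type(identity_key) in this
    # scope would raise TypeError: 'str' object is not callable.
    return [(identity_key, type)]


# Trailing-underscore rename keeps the argument while restoring the builtin.
def accounts_for_fixed(identity_key, type_):
    assert type(identity_key) is str   # builtin `type` works again
    return [(identity_key, type_)]


if __name__ == '__main__':
    print(accounts_for('some-user', 'userpass'))
    print(accounts_for_fixed('some-user', 'userpass'))
```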
<code>
[start of lib/rucio/web/rest/flaskapi/v1/identities.py]
1 # -*- coding: utf-8 -*-
2 # Copyright European Organization for Nuclear Research (CERN) since 2012
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from flask import Flask, request, jsonify
17
18 from rucio.api.identity import add_identity, add_account_identity, list_accounts_for_identity
19 from rucio.web.rest.flaskapi.v1.common import response_headers, check_accept_header_wrapper_flask, \
20 ErrorHandlingMethodView
21 from rucio.web.rest.flaskapi.authenticated_bp import AuthenticatedBlueprint
22
23
24 class UserPass(ErrorHandlingMethodView):
25 """ Manage a username/password identity for an account. """
26
27 def put(self, account):
28 """
29 ---
30 summary: Create UserPass identity
31 description: Creates a new UserPass identity and maps it to an account.
32 tags:
33 - Identity
34 parameters:
35 - name: account
36 in: path
37 description: The account for the identity.
38 schema:
39 type: string
40 style: simple
41 - name: X-Rucio-Username
42 in: query
43 description: Username for the identity.
44 schema:
45 type: string
46 style: simple
47 required: true
48 - name: X-Rucio-Password
49 in: query
50 description: The password for the identity.
51 schema:
52 type: string
53 style: simple
54 required: true
55 - name: X-Rucio-Email
56 in: query
57 description: The email for the identity.
58 schema:
59 type: string
60 style: simple
61 required: false
62 responses:
63 201:
64 description: OK
65 content:
66 application/json:
67 schema:
68 type: string
69 enum: ['Created']
70 401:
71 description: Invalid Auth Token
72 400:
73 description: Missing username or password.
74 """
75 username = request.headers.get('X-Rucio-Username', default=None)
76 password = request.headers.get('X-Rucio-Password', default=None)
77 email = request.headers.get('X-Rucio-Email', default=None)
78
79 if not username or not password:
80 return 'Username and Password must be set.', 400
81
82 add_identity(username, 'userpass', email, password)
83
84 add_account_identity(
85 identity_key=username,
86 id_type='userpass',
87 account=account,
88 email=email,
89 password=password,
90 issuer=request.environ.get('issuer'),
91 vo=request.environ.get('vo'),
92 )
93
94 return 'Created', 201
95
96
97 class X509(ErrorHandlingMethodView):
98 """ Manage an x509 identity for an account. """
99
100 def put(self, account):
101 """
102 ---
103 summary: Create X509 identity
104 description: Creates a new X509 identity and maps it to an account.
105 tags:
106 - Identity
107 parameters:
108 - name: account
109 in: path
110 description: The account for the identity.
111 schema:
112 type: string
113 style: simple
114 - name: X-Rucio-Email
115 in: query
116 description: The email for the identity.
117 schema:
118 type: string
119 style: simple
120 required: false
121 responses:
122 201:
123 description: OK
124 content:
125 application/json:
126 schema:
127 type: string
128 enum: ['Created']
129 401:
130 description: Invalid Auth Token
131 """
132 dn = request.environ.get('SSL_CLIENT_S_DN')
133 email = request.headers.get('X-Rucio-Email', default=None)
134
135 add_identity(dn, 'x509', email=email)
136 add_account_identity(
137 identity_key=dn,
138 id_type='x509',
139 account=account,
140 email=email,
141 issuer=request.environ.get('issuer'),
142 vo=request.environ.get('vo'),
143 )
144
145 return 'Created', 201
146
147
148 class GSS(ErrorHandlingMethodView):
149 """ Manage a GSS identity for an account. """
150
151 def put(self, account):
152 """
153 ---
154 summary: Create GSS identity
155 description: Creates a new GSS identity and maps it to an account.
156 tags:
157 - Identity
158 parameters:
159 - name: account
160 in: path
161 description: The account for the identity.
162 schema:
163 type: string
164 style: simple
165 - name: X-Rucio-Email
166 in: query
167 description: The email for the identity.
168 schema:
169 type: string
170 style: simple
171 required: false
172 responses:
173 201:
174 description: OK
175 content:
176 application/json:
177 schema:
178 type: string
179 enum: ['Created']
180 401:
181 description: Invalid Auth Token
182 """
183 gsscred = request.environ.get('REMOTE_USER')
184 email = request.headers.get('X-Rucio-Email', default=None)
185
186 add_identity(gsscred, 'gss', email=email)
187 add_account_identity(
188 identity_key=gsscred,
189 id_type='gss',
190 account=account,
191 email=email,
192 issuer=request.environ.get('issuer'),
193 vo=request.environ.get('vo'),
194 )
195
196 return 'Created', 201
197
198
199 class Accounts(ErrorHandlingMethodView):
200 """ Retrieve list of accounts mapped to an identity. """
201
202 @check_accept_header_wrapper_flask(['application/json'])
203 def get(self, identity_key, type):
204 """
205 ---
206 summary: List
207 description: List all identities mapped to an account.
208 tags:
209 - Identity
210 parameters:
211 - name: identity_key
212 in: path
213 description: Identity string.
214 schema:
215 type: string
216 style: simple
217 - name: type
218 in: path
219 description: Identity type.
220 schema:
221 type: string
222 style: simple
223 required: false
224 responses:
225 200:
226 description: OK
227 content:
228 application/json:
229 schema:
230 type: array
231 items:
232 type: object
233 description: Account for the identity.
234 401:
235 description: Invalid Auth Token
236 401:
237 description: Not acceptable
238 """
239 accounts = list_accounts_for_identity(identity_key, type)
240 return jsonify(accounts)
241
242
243 def blueprint():
244 bp = AuthenticatedBlueprint('identities', __name__, url_prefix='/identities')
245
246 userpass_view = UserPass.as_view('userpass')
247 bp.add_url_rule('/<account>/userpass', view_func=userpass_view, methods=['put', ])
248 x509_view = X509.as_view('x509')
249 bp.add_url_rule('/<account>/x509', view_func=x509_view, methods=['put', ])
250 gss_view = GSS.as_view('gss')
251 bp.add_url_rule('/<account>/gss', view_func=gss_view, methods=['put', ])
252 accounts_view = Accounts.as_view('accounts')
253 bp.add_url_rule('/<identity_key>/<type>/accounts', view_func=accounts_view, methods=['get', ])
254
255 bp.after_request(response_headers)
256 return bp
257
258
259 def make_doc():
260 """ Only used for sphinx documentation """
261 doc_app = Flask(__name__)
262 doc_app.register_blueprint(blueprint())
263 return doc_app
264
[end of lib/rucio/web/rest/flaskapi/v1/identities.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/web/rest/flaskapi/v1/identities.py b/lib/rucio/web/rest/flaskapi/v1/identities.py
--- a/lib/rucio/web/rest/flaskapi/v1/identities.py
+++ b/lib/rucio/web/rest/flaskapi/v1/identities.py
@@ -200,7 +200,7 @@
""" Retrieve list of accounts mapped to an identity. """
@check_accept_header_wrapper_flask(['application/json'])
- def get(self, identity_key, type):
+ def get(self, identity_key, type_):
"""
---
summary: List
@@ -236,7 +236,7 @@
401:
description: Not acceptable
"""
- accounts = list_accounts_for_identity(identity_key, type)
+ accounts = list_accounts_for_identity(identity_key, type_)
return jsonify(accounts)
| {"golden_diff": "diff --git a/lib/rucio/web/rest/flaskapi/v1/identities.py b/lib/rucio/web/rest/flaskapi/v1/identities.py\n--- a/lib/rucio/web/rest/flaskapi/v1/identities.py\n+++ b/lib/rucio/web/rest/flaskapi/v1/identities.py\n@@ -200,7 +200,7 @@\n \"\"\" Retrieve list of accounts mapped to an identity. \"\"\"\n \n @check_accept_header_wrapper_flask(['application/json'])\n- def get(self, identity_key, type):\n+ def get(self, identity_key, type_):\n \"\"\"\n ---\n summary: List\n@@ -236,7 +236,7 @@\n 401:\n description: Not acceptable\n \"\"\"\n- accounts = list_accounts_for_identity(identity_key, type)\n+ accounts = list_accounts_for_identity(identity_key, type_)\n return jsonify(accounts)\n", "issue": "Keyword `type` gets overwritten\nMotivation\r\n----------\r\nThe keyword `type` in identities.py gets overwritten, this is a bad practice.\r\nhttps://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134\r\n\r\nModification\r\n------------\r\nUse another variable name.\nKeyword `type` gets overwritten\nMotivation\r\n----------\r\nThe keyword `type` in identities.py gets overwritten, this is a bad practice.\r\nhttps://github.com/rucio/rucio/blob/1b68bbf6d44013b193db3f668b1e4b608e488bce/lib/rucio/web/rest/flaskapi/v1/identities.py#L134\r\n\r\nModification\r\n------------\r\nUse another variable name.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright European Organization for Nuclear Research (CERN) since 2012\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom flask import Flask, request, jsonify\n\nfrom rucio.api.identity import add_identity, add_account_identity, list_accounts_for_identity\nfrom rucio.web.rest.flaskapi.v1.common import response_headers, check_accept_header_wrapper_flask, \\\n ErrorHandlingMethodView\nfrom rucio.web.rest.flaskapi.authenticated_bp import AuthenticatedBlueprint\n\n\nclass UserPass(ErrorHandlingMethodView):\n \"\"\" Manage a username/password identity for an account. 
\"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create UserPass identity\n description: Creates a new UserPass identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Username\n in: query\n description: Username for the identity.\n schema:\n type: string\n style: simple\n required: true\n - name: X-Rucio-Password\n in: query\n description: The password for the identity.\n schema:\n type: string\n style: simple\n required: true\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n 400:\n description: Missing username or password.\n \"\"\"\n username = request.headers.get('X-Rucio-Username', default=None)\n password = request.headers.get('X-Rucio-Password', default=None)\n email = request.headers.get('X-Rucio-Email', default=None)\n\n if not username or not password:\n return 'Username and Password must be set.', 400\n\n add_identity(username, 'userpass', email, password)\n\n add_account_identity(\n identity_key=username,\n id_type='userpass',\n account=account,\n email=email,\n password=password,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass X509(ErrorHandlingMethodView):\n \"\"\" Manage an x509 identity for an account. \"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create X509 identity\n description: Creates a new X509 identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n \"\"\"\n dn = request.environ.get('SSL_CLIENT_S_DN')\n email = request.headers.get('X-Rucio-Email', default=None)\n\n add_identity(dn, 'x509', email=email)\n add_account_identity(\n identity_key=dn,\n id_type='x509',\n account=account,\n email=email,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass GSS(ErrorHandlingMethodView):\n \"\"\" Manage a GSS identity for an account. 
\"\"\"\n\n def put(self, account):\n \"\"\"\n ---\n summary: Create GSS identity\n description: Creates a new GSS identity and maps it to an account.\n tags:\n - Identity\n parameters:\n - name: account\n in: path\n description: The account for the identity.\n schema:\n type: string\n style: simple\n - name: X-Rucio-Email\n in: query\n description: The email for the identity.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 201:\n description: OK\n content:\n application/json:\n schema:\n type: string\n enum: ['Created']\n 401:\n description: Invalid Auth Token\n \"\"\"\n gsscred = request.environ.get('REMOTE_USER')\n email = request.headers.get('X-Rucio-Email', default=None)\n\n add_identity(gsscred, 'gss', email=email)\n add_account_identity(\n identity_key=gsscred,\n id_type='gss',\n account=account,\n email=email,\n issuer=request.environ.get('issuer'),\n vo=request.environ.get('vo'),\n )\n\n return 'Created', 201\n\n\nclass Accounts(ErrorHandlingMethodView):\n \"\"\" Retrieve list of accounts mapped to an identity. \"\"\"\n\n @check_accept_header_wrapper_flask(['application/json'])\n def get(self, identity_key, type):\n \"\"\"\n ---\n summary: List\n description: List all identities mapped to an account.\n tags:\n - Identity\n parameters:\n - name: identity_key\n in: path\n description: Identity string.\n schema:\n type: string\n style: simple\n - name: type\n in: path\n description: Identity type.\n schema:\n type: string\n style: simple\n required: false\n responses:\n 200:\n description: OK\n content:\n application/json:\n schema:\n type: array\n items:\n type: object\n description: Account for the identity.\n 401:\n description: Invalid Auth Token\n 401:\n description: Not acceptable\n \"\"\"\n accounts = list_accounts_for_identity(identity_key, type)\n return jsonify(accounts)\n\n\ndef blueprint():\n bp = AuthenticatedBlueprint('identities', __name__, url_prefix='/identities')\n\n userpass_view = UserPass.as_view('userpass')\n bp.add_url_rule('/<account>/userpass', view_func=userpass_view, methods=['put', ])\n x509_view = X509.as_view('x509')\n bp.add_url_rule('/<account>/x509', view_func=x509_view, methods=['put', ])\n gss_view = GSS.as_view('gss')\n bp.add_url_rule('/<account>/gss', view_func=gss_view, methods=['put', ])\n accounts_view = Accounts.as_view('accounts')\n bp.add_url_rule('/<identity_key>/<type>/accounts', view_func=accounts_view, methods=['get', ])\n\n bp.after_request(response_headers)\n return bp\n\n\ndef make_doc():\n \"\"\" Only used for sphinx documentation \"\"\"\n doc_app = Flask(__name__)\n doc_app.register_blueprint(blueprint())\n return doc_app\n", "path": "lib/rucio/web/rest/flaskapi/v1/identities.py"}]} | 3,175 | 202 |
gh_patches_debug_51093 | rasdani/github-patches | git_diff | ivy-llc__ivy-16195 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
argmax
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/tensor.py]
1 # local
2 import ivy
3 import ivy.functional.frontends.paddle as paddle_frontend
4 from ivy.functional.frontends.paddle.func_wrapper import (
5 _to_ivy_array,
6 )
7 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
8
9
10 class Tensor:
11 def __init__(self, array, dtype=None, place="cpu", stop_gradient=True):
12 self._ivy_array = (
13 ivy.array(array, dtype=dtype, device=place)
14 if not isinstance(array, ivy.Array)
15 else array
16 )
17 self._dtype = dtype
18 self._place = place
19 self._stop_gradient = stop_gradient
20
21 def __repr__(self):
22 return (
23 str(self._ivy_array.__repr__())
24 .replace("ivy.array", "ivy.frontends.paddle.Tensor")
25 .replace("dev", "place")
26 )
27
28 # Properties #
29 # ---------- #
30
31 @property
32 def ivy_array(self):
33 return self._ivy_array
34
35 @property
36 def place(self):
37 return self.ivy_array.device
38
39 @property
40 def dtype(self):
41 return self._ivy_array.dtype
42
43 @property
44 def shape(self):
45 return self._ivy_array.shape
46
47 @property
48 def ndim(self):
49 return self.dim()
50
51 # Setters #
52 # --------#
53
54 @ivy_array.setter
55 def ivy_array(self, array):
56 self._ivy_array = (
57 ivy.array(array) if not isinstance(array, ivy.Array) else array
58 )
59
60 # Special Methods #
61 # -------------------#
62
63 def __getitem__(self, item):
64 ivy_args = ivy.nested_map([self, item], _to_ivy_array)
65 ret = ivy.get_item(*ivy_args)
66 return paddle_frontend.Tensor(ret)
67
68 def __setitem__(self, item, value):
69 item, value = ivy.nested_map([item, value], _to_ivy_array)
70 self.ivy_array[item] = value
71
72 def __iter__(self):
73 if self.ndim == 0:
74 raise TypeError("iteration over a 0-d tensor not supported")
75 for i in range(self.shape[0]):
76 yield self[i]
77
78 # Instance Methods #
79 # ---------------- #
80
81 def reshape(self, *args, shape=None):
82 if args and shape:
83 raise TypeError("reshape() got multiple values for argument 'shape'")
84 if shape is not None:
85 return paddle_frontend.reshape(self._ivy_array, shape)
86 if args:
87 if isinstance(args[0], (tuple, list)):
88 shape = args[0]
89 return paddle_frontend.reshape(self._ivy_array, shape)
90 else:
91 return paddle_frontend.reshape(self._ivy_array, args)
92 return paddle_frontend.reshape(self._ivy_array)
93
94 def dim(self):
95 return self.ivy_array.ndim
96
97 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
98 def abs(self):
99 return paddle_frontend.abs(self)
100
101 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
102 def asinh(self, name=None):
103 return ivy.asinh(self._ivy_array)
104
105 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
106 def asin(self, name=None):
107 return ivy.asin(self._ivy_array)
108
109 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
110 def log(self, name=None):
111 return ivy.log(self._ivy_array)
112
113 @with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle")
114 def sin(self, name=None):
115 return ivy.sin(self._ivy_array)
116
117
[end of ivy/functional/frontends/paddle/tensor/tensor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py
--- a/ivy/functional/frontends/paddle/tensor/tensor.py
+++ b/ivy/functional/frontends/paddle/tensor/tensor.py
@@ -114,3 +114,6 @@
def sin(self, name=None):
return ivy.sin(self._ivy_array)
+ @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle")
+ def argmax(self, axis=None, keepdim=False, dtype=None, name=None):
+ return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/tensor.py b/ivy/functional/frontends/paddle/tensor/tensor.py\n--- a/ivy/functional/frontends/paddle/tensor/tensor.py\n+++ b/ivy/functional/frontends/paddle/tensor/tensor.py\n@@ -114,3 +114,6 @@\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n \r\n+ @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n+ def argmax(self, axis=None, keepdim=False, dtype=None, name=None):\r\n+ return ivy.argmax(self._ivy_array, axis=axis, keepdims=keepdim, dtype=dtype)\n", "issue": "argmax\n\n", "before_files": [{"content": "# local\r\nimport ivy\r\nimport ivy.functional.frontends.paddle as paddle_frontend\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n _to_ivy_array,\r\n)\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\n\r\n\r\nclass Tensor:\r\n def __init__(self, array, dtype=None, place=\"cpu\", stop_gradient=True):\r\n self._ivy_array = (\r\n ivy.array(array, dtype=dtype, device=place)\r\n if not isinstance(array, ivy.Array)\r\n else array\r\n )\r\n self._dtype = dtype\r\n self._place = place\r\n self._stop_gradient = stop_gradient\r\n\r\n def __repr__(self):\r\n return (\r\n str(self._ivy_array.__repr__())\r\n .replace(\"ivy.array\", \"ivy.frontends.paddle.Tensor\")\r\n .replace(\"dev\", \"place\")\r\n )\r\n\r\n # Properties #\r\n # ---------- #\r\n\r\n @property\r\n def ivy_array(self):\r\n return self._ivy_array\r\n\r\n @property\r\n def place(self):\r\n return self.ivy_array.device\r\n\r\n @property\r\n def dtype(self):\r\n return self._ivy_array.dtype\r\n\r\n @property\r\n def shape(self):\r\n return self._ivy_array.shape\r\n\r\n @property\r\n def ndim(self):\r\n return self.dim()\r\n\r\n # Setters #\r\n # --------#\r\n\r\n @ivy_array.setter\r\n def ivy_array(self, array):\r\n self._ivy_array = (\r\n ivy.array(array) if not isinstance(array, ivy.Array) else array\r\n )\r\n\r\n # Special Methods #\r\n # -------------------#\r\n\r\n def __getitem__(self, item):\r\n ivy_args = ivy.nested_map([self, item], _to_ivy_array)\r\n ret = ivy.get_item(*ivy_args)\r\n return paddle_frontend.Tensor(ret)\r\n\r\n def __setitem__(self, item, value):\r\n item, value = ivy.nested_map([item, value], _to_ivy_array)\r\n self.ivy_array[item] = value\r\n\r\n def __iter__(self):\r\n if self.ndim == 0:\r\n raise TypeError(\"iteration over a 0-d tensor not supported\")\r\n for i in range(self.shape[0]):\r\n yield self[i]\r\n\r\n # Instance Methods #\r\n # ---------------- #\r\n\r\n def reshape(self, *args, shape=None):\r\n if args and shape:\r\n raise TypeError(\"reshape() got multiple values for argument 'shape'\")\r\n if shape is not None:\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n if args:\r\n if isinstance(args[0], (tuple, list)):\r\n shape = args[0]\r\n return paddle_frontend.reshape(self._ivy_array, shape)\r\n else:\r\n return paddle_frontend.reshape(self._ivy_array, args)\r\n return paddle_frontend.reshape(self._ivy_array)\r\n\r\n def dim(self):\r\n return self.ivy_array.ndim\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def abs(self):\r\n return paddle_frontend.abs(self)\r\n\r\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n def asinh(self, name=None):\r\n return ivy.asinh(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def asin(self, name=None):\r\n return 
ivy.asin(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def log(self, name=None):\r\n return ivy.log(self._ivy_array)\r\n\r\n @with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\r\n def sin(self, name=None):\r\n return ivy.sin(self._ivy_array)\r\n\r\n", "path": "ivy/functional/frontends/paddle/tensor/tensor.py"}]} | 1,669 | 175 |
gh_patches_debug_13991 | rasdani/github-patches | git_diff | pyodide__pyodide-2099 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.19 release
Opening an issue to track the 0.19 release.
I think we are fairly ready to make the 0.19.0 release. Ideally, the following would be nice to do before the 0.19 alpha release (but not critical),
- update browser versions used for testing https://github.com/pyodide/pyodide/pull/1952
- which should hopefully unblock emscripten update https://github.com/pyodide/pyodide/pull/2035
- run the update `meta.yaml` script for all pure python packages
For instance, maybe we could plan to have a
 - A release candidate 0.19.0rc0: 2021/12/22 or 23 -> to make it easier to still use the main branch up to the final release
- A final release 0.19.0: 2021/12/30
?
cc @hoodmane
</issue>
<code>
[start of src/py/pyodide/__init__.py]
1 # When the pyodide package is imported, both the js and the pyodide_js modules
2 # will be available to import from. Not all functions in pyodide_js will work
3 # until after pyodide is first imported, imported functions from pyodide_js
4 # should not be used at import time. It is fine to use js functions at import
5 # time.
6 #
7 # All pure Python code that does not require js or pyodide_js should go in
8 # the _pyodide package.
9 #
10 # This package is imported by the test suite as well, and currently we don't use
11 # pytest mocks for js or pyodide_js, so make sure to test "if IN_BROWSER" before
12 # importing from these.
13
14 from ._core import (
15 JsProxy,
16 JsException,
17 create_once_callable,
18 create_proxy,
19 to_js,
20 IN_BROWSER,
21 ConversionError,
22 destroy_proxies,
23 )
24 from _pyodide._base import (
25 eval_code,
26 eval_code_async,
27 find_imports,
28 CodeRunner,
29 should_quiet,
30 )
31 from .http import open_url
32 from . import _state # noqa
33
34 from _pyodide._importhook import register_js_module, unregister_js_module
35
36 if IN_BROWSER:
37 import asyncio
38 from .webloop import WebLoopPolicy
39
40 asyncio.set_event_loop_policy(WebLoopPolicy())
41
42
43 __version__ = "0.19.0dev0"
44
45 __all__ = [
46 "open_url",
47 "eval_code",
48 "eval_code_async",
49 "CodeRunner",
50 "find_imports",
51 "JsProxy",
52 "JsException",
53 "to_js",
54 "register_js_module",
55 "unregister_js_module",
56 "create_once_callable",
57 "create_proxy",
58 "console",
59 "should_quiet",
60 "ConversionError",
61 "destroy_proxies",
62 ]
63
[end of src/py/pyodide/__init__.py]
[start of docs/conf.py]
1 # -*- coding: utf-8 -*-
2 # Configuration file for the Sphinx documentation builder.
3
4 # -- Path setup --------------------------------------------------------------
5
6 import os
7 import sys
8 from typing import Dict, Any
9 import pathlib
10 import subprocess
11
12 base_dir = pathlib.Path(__file__).resolve().parent.parent
13 path_dirs = [
14 str(base_dir),
15 str(base_dir / "pyodide-build"),
16 str(base_dir / "docs/sphinx_pyodide"),
17 str(base_dir / "src/py"),
18 str(base_dir / "packages/micropip/src"),
19 ]
20 sys.path = path_dirs + sys.path
21
22 # -- Project information -----------------------------------------------------
23
24 project = "Pyodide"
25 copyright = "2019-2021, Pyodide contributors and Mozilla"
26
27 import pyodide
28 import micropip # noqa
29
30 # We hacked it so that autodoc will look for submodules, but only if we import
31 # them here. TODO: look these up in the source directory?
32 import pyodide.console
33 import pyodide.http
34 import pyodide.webloop
35
36 # The full version, including alpha/beta/rc tags.
37 release = version = pyodide.__version__
38
39
40 # -- General configuration ---------------------------------------------------
41
42 # If your documentation needs a minimal Sphinx version, state it here.
43 #
44 # needs_sphinx = '1.0'
45
46 extensions = [
47 "sphinx.ext.autodoc",
48 "sphinx.ext.autosummary",
49 "sphinxcontrib.napoleon",
50 "myst_parser",
51 "sphinx_js",
52 "autodocsumm",
53 "sphinx_panels",
54 "sphinx_pyodide",
55 "sphinx_argparse_cli",
56 # "versionwarning.extension",
57 "sphinx_issues",
58 ]
59
60 myst_enable_extensions = ["substitution"]
61 js_source_path = ["../src/js", "../src/core"]
62 jsdoc_config_path = "./jsdoc_conf.json"
63 root_for_relative_js_paths = "../src/"
64 issues_github_path = "pyodide/pyodide"
65
66 versionwarning_messages = {
67 "latest": (
68 "This is the development version of the documentation. ",
69 'See <a href="https://pyodide.org/">here</a> for latest stable '
70 "documentation. Please do not use Pyodide with non "
71 "versioned (`dev`) URLs from the CDN for deployed applications!",
72 )
73 }
74
75 autosummary_generate = True
76 autodoc_default_flags = ["members", "inherited-members"]
77
78 # Add any paths that contain templates here, relative to this directory.
79 templates_path = ["_templates"]
80
81 # The suffix(es) of source filenames.
82 source_suffix = [".rst", ".md"]
83
84 # The master toctree document.
85 master_doc = "index"
86
87 # The language for content autogenerated by Sphinx.
88 language = None
89
90 # List of patterns, relative to source directory, that match files and
91 # directories to ignore when looking for source files.
92 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "README.md"]
93
94 # The name of the Pygments (syntax highlighting) style to use.
95 pygments_style = None
96
97 # -- Options for HTML output -------------------------------------------------
98
99 # The theme to use for HTML and HTML Help pages. See the documentation for
100 # a list of builtin themes.
101 #
102 html_theme = "sphinx_book_theme"
103 html_logo = "_static/img/pyodide-logo.png"
104 html_title = f"Version {version}"
105
106 # theme-specific options
107 html_theme_options: Dict[str, Any] = {}
108
109 # paths that contain custom static files (such as style sheets)
110 html_static_path = ["_static"]
111
112
113 html_css_files = [
114 "css/pyodide.css",
115 ]
116
117 # Custom sidebar templates, must be a dictionary that maps document names
118 # to template names.
119 # html_sidebars = {}
120
121 # -- Options for HTMLHelp output ---------------------------------------------
122
123 # Output file base name for HTML help builder.
124 htmlhelp_basename = "Pyodidedoc"
125
126 # A list of files that should not be packed into the epub file.
127 epub_exclude_files = ["search.html"]
128
129 if "READTHEDOCS" in os.environ:
130 env = {"PYODIDE_BASE_URL": "https://cdn.jsdelivr.net/pyodide/dev/full/"}
131 os.makedirs("_build/html", exist_ok=True)
132 res = subprocess.check_output(
133 ["make", "-C", "..", "docs/_build/html/console.html"],
134 env=env,
135 stderr=subprocess.STDOUT,
136 encoding="utf-8",
137 )
138 print(res)
139
140
141 # Prevent API docs for webloop methods: they are the same as for base event loop
142 # and it clutters api docs too much
143
144
145 def delete_attrs(cls):
146 for name in dir(cls):
147 if not name.startswith("_"):
148 try:
149 delattr(cls, name)
150 except:
151 pass
152
153
154 delete_attrs(pyodide.webloop.WebLoop)
155 delete_attrs(pyodide.webloop.WebLoopPolicy)
156 delete_attrs(pyodide.console.PyodideConsole)
157
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -127,7 +127,7 @@
epub_exclude_files = ["search.html"]
if "READTHEDOCS" in os.environ:
- env = {"PYODIDE_BASE_URL": "https://cdn.jsdelivr.net/pyodide/dev/full/"}
+ env = {"PYODIDE_BASE_URL": "https://cdn.jsdelivr.net/pyodide/v0.19.0/full/"}
os.makedirs("_build/html", exist_ok=True)
res = subprocess.check_output(
["make", "-C", "..", "docs/_build/html/console.html"],
diff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py
--- a/src/py/pyodide/__init__.py
+++ b/src/py/pyodide/__init__.py
@@ -40,7 +40,7 @@
asyncio.set_event_loop_policy(WebLoopPolicy())
-__version__ = "0.19.0dev0"
+__version__ = "0.19.0"
__all__ = [
"open_url",
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -127,7 +127,7 @@\n epub_exclude_files = [\"search.html\"]\n \n if \"READTHEDOCS\" in os.environ:\n- env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/dev/full/\"}\n+ env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/v0.19.0/full/\"}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\ndiff --git a/src/py/pyodide/__init__.py b/src/py/pyodide/__init__.py\n--- a/src/py/pyodide/__init__.py\n+++ b/src/py/pyodide/__init__.py\n@@ -40,7 +40,7 @@\n asyncio.set_event_loop_policy(WebLoopPolicy())\n \n \n-__version__ = \"0.19.0dev0\"\n+__version__ = \"0.19.0\"\n \n __all__ = [\n \"open_url\",\n", "issue": "0.19 release\nOpening an issue to track the 0.19 release.\r\n\r\nI think we are fairly ready to make the 0.19.0 release. Ideally, the following would be nice to do before the 0.19 alpha release (but not critical),\r\n - update browser versions used for testing https://github.com/pyodide/pyodide/pull/1952 \r\n - which should hopefully unblock emscripten update https://github.com/pyodide/pyodide/pull/2035\r\n - run the update `meta.yaml` script for all pure python packages \r\n\r\nFor instance, maybe we could plan to have a\r\n - A release candidate 0.19.0rc0: 2021/12/22 or 23 -> to make it easier still use the main branch up to the final release\r\n - A final release 0.19.0: 2021/12/30\r\n \r\n?\r\n\r\ncc @hoodmane \n", "before_files": [{"content": "# When the pyodide package is imported, both the js and the pyodide_js modules\n# will be available to import from. Not all functions in pyodide_js will work\n# until after pyodide is first imported, imported functions from pyodide_js\n# should not be used at import time. It is fine to use js functions at import\n# time.\n#\n# All pure Python code that does not require js or pyodide_js should go in\n# the _pyodide package.\n#\n# This package is imported by the test suite as well, and currently we don't use\n# pytest mocks for js or pyodide_js, so make sure to test \"if IN_BROWSER\" before\n# importing from these.\n\nfrom ._core import (\n JsProxy,\n JsException,\n create_once_callable,\n create_proxy,\n to_js,\n IN_BROWSER,\n ConversionError,\n destroy_proxies,\n)\nfrom _pyodide._base import (\n eval_code,\n eval_code_async,\n find_imports,\n CodeRunner,\n should_quiet,\n)\nfrom .http import open_url\nfrom . 
import _state # noqa\n\nfrom _pyodide._importhook import register_js_module, unregister_js_module\n\nif IN_BROWSER:\n import asyncio\n from .webloop import WebLoopPolicy\n\n asyncio.set_event_loop_policy(WebLoopPolicy())\n\n\n__version__ = \"0.19.0dev0\"\n\n__all__ = [\n \"open_url\",\n \"eval_code\",\n \"eval_code_async\",\n \"CodeRunner\",\n \"find_imports\",\n \"JsProxy\",\n \"JsException\",\n \"to_js\",\n \"register_js_module\",\n \"unregister_js_module\",\n \"create_once_callable\",\n \"create_proxy\",\n \"console\",\n \"should_quiet\",\n \"ConversionError\",\n \"destroy_proxies\",\n]\n", "path": "src/py/pyodide/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\nfrom typing import Dict, Any\nimport pathlib\nimport subprocess\n\nbase_dir = pathlib.Path(__file__).resolve().parent.parent\npath_dirs = [\n str(base_dir),\n str(base_dir / \"pyodide-build\"),\n str(base_dir / \"docs/sphinx_pyodide\"),\n str(base_dir / \"src/py\"),\n str(base_dir / \"packages/micropip/src\"),\n]\nsys.path = path_dirs + sys.path\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Pyodide\"\ncopyright = \"2019-2021, Pyodide contributors and Mozilla\"\n\nimport pyodide\nimport micropip # noqa\n\n# We hacked it so that autodoc will look for submodules, but only if we import\n# them here. TODO: look these up in the source directory?\nimport pyodide.console\nimport pyodide.http\nimport pyodide.webloop\n\n# The full version, including alpha/beta/rc tags.\nrelease = version = pyodide.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinxcontrib.napoleon\",\n \"myst_parser\",\n \"sphinx_js\",\n \"autodocsumm\",\n \"sphinx_panels\",\n \"sphinx_pyodide\",\n \"sphinx_argparse_cli\",\n # \"versionwarning.extension\",\n \"sphinx_issues\",\n]\n\nmyst_enable_extensions = [\"substitution\"]\njs_source_path = [\"../src/js\", \"../src/core\"]\njsdoc_config_path = \"./jsdoc_conf.json\"\nroot_for_relative_js_paths = \"../src/\"\nissues_github_path = \"pyodide/pyodide\"\n\nversionwarning_messages = {\n \"latest\": (\n \"This is the development version of the documentation. \",\n 'See <a href=\"https://pyodide.org/\">here</a> for latest stable '\n \"documentation. 
Please do not use Pyodide with non \"\n \"versioned (`dev`) URLs from the CDN for deployed applications!\",\n )\n}\n\nautosummary_generate = True\nautodoc_default_flags = [\"members\", \"inherited-members\"]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\nsource_suffix = [\".rst\", \".md\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"README.md\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = None\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_book_theme\"\nhtml_logo = \"_static/img/pyodide-logo.png\"\nhtml_title = f\"Version {version}\"\n\n# theme-specific options\nhtml_theme_options: Dict[str, Any] = {}\n\n# paths that contain custom static files (such as style sheets)\nhtml_static_path = [\"_static\"]\n\n\nhtml_css_files = [\n \"css/pyodide.css\",\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n# html_sidebars = {}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyodidedoc\"\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\nif \"READTHEDOCS\" in os.environ:\n env = {\"PYODIDE_BASE_URL\": \"https://cdn.jsdelivr.net/pyodide/dev/full/\"}\n os.makedirs(\"_build/html\", exist_ok=True)\n res = subprocess.check_output(\n [\"make\", \"-C\", \"..\", \"docs/_build/html/console.html\"],\n env=env,\n stderr=subprocess.STDOUT,\n encoding=\"utf-8\",\n )\n print(res)\n\n\n# Prevent API docs for webloop methods: they are the same as for base event loop\n# and it clutters api docs too much\n\n\ndef delete_attrs(cls):\n for name in dir(cls):\n if not name.startswith(\"_\"):\n try:\n delattr(cls, name)\n except:\n pass\n\n\ndelete_attrs(pyodide.webloop.WebLoop)\ndelete_attrs(pyodide.webloop.WebLoopPolicy)\ndelete_attrs(pyodide.console.PyodideConsole)\n", "path": "docs/conf.py"}]} | 2,726 | 258 |
gh_patches_debug_20786 | rasdani/github-patches | git_diff | python-discord__bot-941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Publish messages to the #reddit channel
Since the #reddit channel has moved to the new News category, we should convert it to an announcement channel and use the `.publish()` method on messages sent there so users can subscribe to daily reddit updates.
[Message.publish() in the discord.py docs](https://discordpy.readthedocs.io/en/latest/api.html#discord.Message.publish)
</issue>
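For readers who have not used these discord.py calls before, here is a minimal sketch of the pattern the issue is asking for (the helper function and its arguments are illustrative assumptions, not code from this repository): `TextChannel.is_news()` reports whether a channel is an announcement channel, and `Message.publish()` pushes an already-sent message out to every server that follows the channel.

```python
import discord

async def send_and_publish(channel: discord.TextChannel, embed: discord.Embed) -> None:
    # Send the message as usual.
    message = await channel.send(embed=embed)
    # Only announcement ("news") channels can be published from, so guard the call.
    if message.channel.is_news():
        await message.publish()
```

Guarding on `is_news()` keeps the same code path working for ordinary text channels, so nothing breaks if the channel has not yet been converted to an announcement channel.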
<code>
[start of bot/cogs/reddit.py]
1 import asyncio
2 import logging
3 import random
4 import textwrap
5 from collections import namedtuple
6 from datetime import datetime, timedelta
7 from typing import List
8
9 from aiohttp import BasicAuth, ClientError
10 from discord import Colour, Embed, TextChannel
11 from discord.ext.commands import Cog, Context, group
12 from discord.ext.tasks import loop
13
14 from bot.bot import Bot
15 from bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks
16 from bot.converters import Subreddit
17 from bot.decorators import with_role
18 from bot.pagination import LinePaginator
19
20 log = logging.getLogger(__name__)
21
22 AccessToken = namedtuple("AccessToken", ["token", "expires_at"])
23
24
25 class Reddit(Cog):
26 """Track subreddit posts and show detailed statistics about them."""
27
28 HEADERS = {"User-Agent": "python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)"}
29 URL = "https://www.reddit.com"
30 OAUTH_URL = "https://oauth.reddit.com"
31 MAX_RETRIES = 3
32
33 def __init__(self, bot: Bot):
34 self.bot = bot
35
36 self.webhook = None
37 self.access_token = None
38 self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)
39
40 bot.loop.create_task(self.init_reddit_ready())
41 self.auto_poster_loop.start()
42
43 def cog_unload(self) -> None:
44 """Stop the loop task and revoke the access token when the cog is unloaded."""
45 self.auto_poster_loop.cancel()
46 if self.access_token and self.access_token.expires_at > datetime.utcnow():
47 asyncio.create_task(self.revoke_access_token())
48
49 async def init_reddit_ready(self) -> None:
50 """Sets the reddit webhook when the cog is loaded."""
51 await self.bot.wait_until_guild_available()
52 if not self.webhook:
53 self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)
54
55 @property
56 def channel(self) -> TextChannel:
57 """Get the #reddit channel object from the bot's cache."""
58 return self.bot.get_channel(Channels.reddit)
59
60 async def get_access_token(self) -> None:
61 """
62 Get a Reddit API OAuth2 access token and assign it to self.access_token.
63
64 A token is valid for 1 hour. There will be MAX_RETRIES to get a token, after which the cog
65 will be unloaded and a ClientError raised if retrieval was still unsuccessful.
66 """
67 for i in range(1, self.MAX_RETRIES + 1):
68 response = await self.bot.http_session.post(
69 url=f"{self.URL}/api/v1/access_token",
70 headers=self.HEADERS,
71 auth=self.client_auth,
72 data={
73 "grant_type": "client_credentials",
74 "duration": "temporary"
75 }
76 )
77
78 if response.status == 200 and response.content_type == "application/json":
79 content = await response.json()
80 expiration = int(content["expires_in"]) - 60 # Subtract 1 minute for leeway.
81 self.access_token = AccessToken(
82 token=content["access_token"],
83 expires_at=datetime.utcnow() + timedelta(seconds=expiration)
84 )
85
86 log.debug(f"New token acquired; expires on UTC {self.access_token.expires_at}")
87 return
88 else:
89 log.debug(
90 f"Failed to get an access token: "
91 f"status {response.status} & content type {response.content_type}; "
92 f"retrying ({i}/{self.MAX_RETRIES})"
93 )
94
95 await asyncio.sleep(3)
96
97 self.bot.remove_cog(self.qualified_name)
98 raise ClientError("Authentication with the Reddit API failed. Unloading the cog.")
99
100 async def revoke_access_token(self) -> None:
101 """
102 Revoke the OAuth2 access token for the Reddit API.
103
104 For security reasons, it's good practice to revoke the token when it's no longer being used.
105 """
106 response = await self.bot.http_session.post(
107 url=f"{self.URL}/api/v1/revoke_token",
108 headers=self.HEADERS,
109 auth=self.client_auth,
110 data={
111 "token": self.access_token.token,
112 "token_type_hint": "access_token"
113 }
114 )
115
116 if response.status == 204 and response.content_type == "application/json":
117 self.access_token = None
118 else:
119 log.warning(f"Unable to revoke access token: status {response.status}.")
120
121 async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:
122 """A helper method to fetch a certain amount of Reddit posts at a given route."""
123 # Reddit's JSON responses only provide 25 posts at most.
124 if not 25 >= amount > 0:
125 raise ValueError("Invalid amount of subreddit posts requested.")
126
127 # Renew the token if necessary.
128 if not self.access_token or self.access_token.expires_at < datetime.utcnow():
129 await self.get_access_token()
130
131 url = f"{self.OAUTH_URL}/{route}"
132 for _ in range(self.MAX_RETRIES):
133 response = await self.bot.http_session.get(
134 url=url,
135 headers={**self.HEADERS, "Authorization": f"bearer {self.access_token.token}"},
136 params=params
137 )
138 if response.status == 200 and response.content_type == 'application/json':
139 # Got appropriate response - process and return.
140 content = await response.json()
141 posts = content["data"]["children"]
142 return posts[:amount]
143
144 await asyncio.sleep(3)
145
146 log.debug(f"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}")
147 return list() # Failed to get appropriate response within allowed number of retries.
148
149 async def get_top_posts(self, subreddit: Subreddit, time: str = "all", amount: int = 5) -> Embed:
150 """
151 Get the top amount of posts for a given subreddit within a specified timeframe.
152
153 A time of "all" will get posts from all time, "day" will get top daily posts and "week" will get the top
154 weekly posts.
155
156 The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.
157 """
158 embed = Embed(description="")
159
160 posts = await self.fetch_posts(
161 route=f"{subreddit}/top",
162 amount=amount,
163 params={"t": time}
164 )
165
166 if not posts:
167 embed.title = random.choice(ERROR_REPLIES)
168 embed.colour = Colour.red()
169 embed.description = (
170 "Sorry! We couldn't find any posts from that subreddit. "
171 "If this problem persists, please let us know."
172 )
173
174 return embed
175
176 for post in posts:
177 data = post["data"]
178
179 text = data["selftext"]
180 if text:
181 text = textwrap.shorten(text, width=128, placeholder="...")
182 text += "\n" # Add newline to separate embed info
183
184 ups = data["ups"]
185 comments = data["num_comments"]
186 author = data["author"]
187
188 title = textwrap.shorten(data["title"], width=64, placeholder="...")
189 link = self.URL + data["permalink"]
190
191 embed.description += (
192 f"**[{title}]({link})**\n"
193 f"{text}"
194 f"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\n\n"
195 )
196
197 embed.colour = Colour.blurple()
198 return embed
199
200 @loop()
201 async def auto_poster_loop(self) -> None:
202 """Post the top 5 posts daily, and the top 5 posts weekly."""
203 # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter
204 now = datetime.utcnow()
205 tomorrow = now + timedelta(days=1)
206 midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)
207 seconds_until = (midnight_tomorrow - now).total_seconds()
208
209 await asyncio.sleep(seconds_until)
210
211 await self.bot.wait_until_guild_available()
212 if not self.webhook:
213 await self.bot.fetch_webhook(Webhooks.reddit)
214
215 if datetime.utcnow().weekday() == 0:
216 await self.top_weekly_posts()
217 # if it's a monday send the top weekly posts
218
219 for subreddit in RedditConfig.subreddits:
220 top_posts = await self.get_top_posts(subreddit=subreddit, time="day")
221 await self.webhook.send(username=f"{subreddit} Top Daily Posts", embed=top_posts)
222
223 async def top_weekly_posts(self) -> None:
224 """Post a summary of the top posts."""
225 for subreddit in RedditConfig.subreddits:
226 # Send and pin the new weekly posts.
227 top_posts = await self.get_top_posts(subreddit=subreddit, time="week")
228
229 message = await self.webhook.send(wait=True, username=f"{subreddit} Top Weekly Posts", embed=top_posts)
230
231 if subreddit.lower() == "r/python":
232 if not self.channel:
233 log.warning("Failed to get #reddit channel to remove pins in the weekly loop.")
234 return
235
236 # Remove the oldest pins so that only 12 remain at most.
237 pins = await self.channel.pins()
238
239 while len(pins) >= 12:
240 await pins[-1].unpin()
241 del pins[-1]
242
243 await message.pin()
244
245 @group(name="reddit", invoke_without_command=True)
246 async def reddit_group(self, ctx: Context) -> None:
247 """View the top posts from various subreddits."""
248 await ctx.send_help(ctx.command)
249
250 @reddit_group.command(name="top")
251 async def top_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
252 """Send the top posts of all time from a given subreddit."""
253 async with ctx.typing():
254 embed = await self.get_top_posts(subreddit=subreddit, time="all")
255
256 await ctx.send(content=f"Here are the top {subreddit} posts of all time!", embed=embed)
257
258 @reddit_group.command(name="daily")
259 async def daily_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
260 """Send the top posts of today from a given subreddit."""
261 async with ctx.typing():
262 embed = await self.get_top_posts(subreddit=subreddit, time="day")
263
264 await ctx.send(content=f"Here are today's top {subreddit} posts!", embed=embed)
265
266 @reddit_group.command(name="weekly")
267 async def weekly_command(self, ctx: Context, subreddit: Subreddit = "r/Python") -> None:
268 """Send the top posts of this week from a given subreddit."""
269 async with ctx.typing():
270 embed = await self.get_top_posts(subreddit=subreddit, time="week")
271
272 await ctx.send(content=f"Here are this week's top {subreddit} posts!", embed=embed)
273
274 @with_role(*STAFF_ROLES)
275 @reddit_group.command(name="subreddits", aliases=("subs",))
276 async def subreddits_command(self, ctx: Context) -> None:
277 """Send a paginated embed of all the subreddits we're relaying."""
278 embed = Embed()
279 embed.title = "Relayed subreddits."
280 embed.colour = Colour.blurple()
281
282 await LinePaginator.paginate(
283 RedditConfig.subreddits,
284 ctx, embed,
285 footer_text="Use the reddit commands along with these to view their posts.",
286 empty=False,
287 max_lines=15
288 )
289
290
291 def setup(bot: Bot) -> None:
292 """Load the Reddit cog."""
293 if not RedditConfig.secret or not RedditConfig.client_id:
294 log.error("Credentials not provided, cog not loaded.")
295 return
296 bot.add_cog(Reddit(bot))
297
[end of bot/cogs/reddit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/reddit.py b/bot/cogs/reddit.py
--- a/bot/cogs/reddit.py
+++ b/bot/cogs/reddit.py
@@ -218,7 +218,10 @@
for subreddit in RedditConfig.subreddits:
top_posts = await self.get_top_posts(subreddit=subreddit, time="day")
- await self.webhook.send(username=f"{subreddit} Top Daily Posts", embed=top_posts)
+ message = await self.webhook.send(username=f"{subreddit} Top Daily Posts", embed=top_posts)
+
+ if message.channel.is_news():
+ await message.publish()
async def top_weekly_posts(self) -> None:
"""Post a summary of the top posts."""
@@ -242,6 +245,9 @@
await message.pin()
+ if message.channel.is_news():
+ await message.publish()
+
@group(name="reddit", invoke_without_command=True)
async def reddit_group(self, ctx: Context) -> None:
"""View the top posts from various subreddits."""
| {"golden_diff": "diff --git a/bot/cogs/reddit.py b/bot/cogs/reddit.py\n--- a/bot/cogs/reddit.py\n+++ b/bot/cogs/reddit.py\n@@ -218,7 +218,10 @@\n \n for subreddit in RedditConfig.subreddits:\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n- await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n+ message = await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n+\n+ if message.channel.is_news():\n+ await message.publish()\n \n async def top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts.\"\"\"\n@@ -242,6 +245,9 @@\n \n await message.pin()\n \n+ if message.channel.is_news():\n+ await message.publish()\n+\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n", "issue": "Publish messages to the #reddit channel\nSince the #reddit channel has moved to the new News category we should convert it to an announcement channel and use the `.publish()` method on messages sent there so users can subscribe to daily reddit updates.\r\n\r\n[Message.publish() in the discord.py docs](https://discordpy.readthedocs.io/en/latest/api.html#discord.Message.publish)\n", "before_files": [{"content": "import asyncio\nimport logging\nimport random\nimport textwrap\nfrom collections import namedtuple\nfrom datetime import datetime, timedelta\nfrom typing import List\n\nfrom aiohttp import BasicAuth, ClientError\nfrom discord import Colour, Embed, TextChannel\nfrom discord.ext.commands import Cog, Context, group\nfrom discord.ext.tasks import loop\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, ERROR_REPLIES, Emojis, Reddit as RedditConfig, STAFF_ROLES, Webhooks\nfrom bot.converters import Subreddit\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nAccessToken = namedtuple(\"AccessToken\", [\"token\", \"expires_at\"])\n\n\nclass Reddit(Cog):\n \"\"\"Track subreddit posts and show detailed statistics about them.\"\"\"\n\n HEADERS = {\"User-Agent\": \"python3:python-discord/bot:1.0.0 (by /u/PythonDiscord)\"}\n URL = \"https://www.reddit.com\"\n OAUTH_URL = \"https://oauth.reddit.com\"\n MAX_RETRIES = 3\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.webhook = None\n self.access_token = None\n self.client_auth = BasicAuth(RedditConfig.client_id, RedditConfig.secret)\n\n bot.loop.create_task(self.init_reddit_ready())\n self.auto_poster_loop.start()\n\n def cog_unload(self) -> None:\n \"\"\"Stop the loop task and revoke the access token when the cog is unloaded.\"\"\"\n self.auto_poster_loop.cancel()\n if self.access_token and self.access_token.expires_at > datetime.utcnow():\n asyncio.create_task(self.revoke_access_token())\n\n async def init_reddit_ready(self) -> None:\n \"\"\"Sets the reddit webhook when the cog is loaded.\"\"\"\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n self.webhook = await self.bot.fetch_webhook(Webhooks.reddit)\n\n @property\n def channel(self) -> TextChannel:\n \"\"\"Get the #reddit channel object from the bot's cache.\"\"\"\n return self.bot.get_channel(Channels.reddit)\n\n async def get_access_token(self) -> None:\n \"\"\"\n Get a Reddit API OAuth2 access token and assign it to self.access_token.\n\n A token is valid for 1 hour. 
There will be MAX_RETRIES to get a token, after which the cog\n will be unloaded and a ClientError raised if retrieval was still unsuccessful.\n \"\"\"\n for i in range(1, self.MAX_RETRIES + 1):\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/access_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"grant_type\": \"client_credentials\",\n \"duration\": \"temporary\"\n }\n )\n\n if response.status == 200 and response.content_type == \"application/json\":\n content = await response.json()\n expiration = int(content[\"expires_in\"]) - 60 # Subtract 1 minute for leeway.\n self.access_token = AccessToken(\n token=content[\"access_token\"],\n expires_at=datetime.utcnow() + timedelta(seconds=expiration)\n )\n\n log.debug(f\"New token acquired; expires on UTC {self.access_token.expires_at}\")\n return\n else:\n log.debug(\n f\"Failed to get an access token: \"\n f\"status {response.status} & content type {response.content_type}; \"\n f\"retrying ({i}/{self.MAX_RETRIES})\"\n )\n\n await asyncio.sleep(3)\n\n self.bot.remove_cog(self.qualified_name)\n raise ClientError(\"Authentication with the Reddit API failed. Unloading the cog.\")\n\n async def revoke_access_token(self) -> None:\n \"\"\"\n Revoke the OAuth2 access token for the Reddit API.\n\n For security reasons, it's good practice to revoke the token when it's no longer being used.\n \"\"\"\n response = await self.bot.http_session.post(\n url=f\"{self.URL}/api/v1/revoke_token\",\n headers=self.HEADERS,\n auth=self.client_auth,\n data={\n \"token\": self.access_token.token,\n \"token_type_hint\": \"access_token\"\n }\n )\n\n if response.status == 204 and response.content_type == \"application/json\":\n self.access_token = None\n else:\n log.warning(f\"Unable to revoke access token: status {response.status}.\")\n\n async def fetch_posts(self, route: str, *, amount: int = 25, params: dict = None) -> List[dict]:\n \"\"\"A helper method to fetch a certain amount of Reddit posts at a given route.\"\"\"\n # Reddit's JSON responses only provide 25 posts at most.\n if not 25 >= amount > 0:\n raise ValueError(\"Invalid amount of subreddit posts requested.\")\n\n # Renew the token if necessary.\n if not self.access_token or self.access_token.expires_at < datetime.utcnow():\n await self.get_access_token()\n\n url = f\"{self.OAUTH_URL}/{route}\"\n for _ in range(self.MAX_RETRIES):\n response = await self.bot.http_session.get(\n url=url,\n headers={**self.HEADERS, \"Authorization\": f\"bearer {self.access_token.token}\"},\n params=params\n )\n if response.status == 200 and response.content_type == 'application/json':\n # Got appropriate response - process and return.\n content = await response.json()\n posts = content[\"data\"][\"children\"]\n return posts[:amount]\n\n await asyncio.sleep(3)\n\n log.debug(f\"Invalid response from: {url} - status code {response.status}, mimetype {response.content_type}\")\n return list() # Failed to get appropriate response within allowed number of retries.\n\n async def get_top_posts(self, subreddit: Subreddit, time: str = \"all\", amount: int = 5) -> Embed:\n \"\"\"\n Get the top amount of posts for a given subreddit within a specified timeframe.\n\n A time of \"all\" will get posts from all time, \"day\" will get top daily posts and \"week\" will get the top\n weekly posts.\n\n The amount should be between 0 and 25 as Reddit's JSON requests only provide 25 posts at most.\n \"\"\"\n embed = Embed(description=\"\")\n\n posts = await self.fetch_posts(\n 
route=f\"{subreddit}/top\",\n amount=amount,\n params={\"t\": time}\n )\n\n if not posts:\n embed.title = random.choice(ERROR_REPLIES)\n embed.colour = Colour.red()\n embed.description = (\n \"Sorry! We couldn't find any posts from that subreddit. \"\n \"If this problem persists, please let us know.\"\n )\n\n return embed\n\n for post in posts:\n data = post[\"data\"]\n\n text = data[\"selftext\"]\n if text:\n text = textwrap.shorten(text, width=128, placeholder=\"...\")\n text += \"\\n\" # Add newline to separate embed info\n\n ups = data[\"ups\"]\n comments = data[\"num_comments\"]\n author = data[\"author\"]\n\n title = textwrap.shorten(data[\"title\"], width=64, placeholder=\"...\")\n link = self.URL + data[\"permalink\"]\n\n embed.description += (\n f\"**[{title}]({link})**\\n\"\n f\"{text}\"\n f\"{Emojis.upvotes} {ups} {Emojis.comments} {comments} {Emojis.user} {author}\\n\\n\"\n )\n\n embed.colour = Colour.blurple()\n return embed\n\n @loop()\n async def auto_poster_loop(self) -> None:\n \"\"\"Post the top 5 posts daily, and the top 5 posts weekly.\"\"\"\n # once we upgrade to d.py 1.3 this can be removed and the loop can use the `time=datetime.time.min` parameter\n now = datetime.utcnow()\n tomorrow = now + timedelta(days=1)\n midnight_tomorrow = tomorrow.replace(hour=0, minute=0, second=0)\n seconds_until = (midnight_tomorrow - now).total_seconds()\n\n await asyncio.sleep(seconds_until)\n\n await self.bot.wait_until_guild_available()\n if not self.webhook:\n await self.bot.fetch_webhook(Webhooks.reddit)\n\n if datetime.utcnow().weekday() == 0:\n await self.top_weekly_posts()\n # if it's a monday send the top weekly posts\n\n for subreddit in RedditConfig.subreddits:\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n await self.webhook.send(username=f\"{subreddit} Top Daily Posts\", embed=top_posts)\n\n async def top_weekly_posts(self) -> None:\n \"\"\"Post a summary of the top posts.\"\"\"\n for subreddit in RedditConfig.subreddits:\n # Send and pin the new weekly posts.\n top_posts = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n message = await self.webhook.send(wait=True, username=f\"{subreddit} Top Weekly Posts\", embed=top_posts)\n\n if subreddit.lower() == \"r/python\":\n if not self.channel:\n log.warning(\"Failed to get #reddit channel to remove pins in the weekly loop.\")\n return\n\n # Remove the oldest pins so that only 12 remain at most.\n pins = await self.channel.pins()\n\n while len(pins) >= 12:\n await pins[-1].unpin()\n del pins[-1]\n\n await message.pin()\n\n @group(name=\"reddit\", invoke_without_command=True)\n async def reddit_group(self, ctx: Context) -> None:\n \"\"\"View the top posts from various subreddits.\"\"\"\n await ctx.send_help(ctx.command)\n\n @reddit_group.command(name=\"top\")\n async def top_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of all time from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"all\")\n\n await ctx.send(content=f\"Here are the top {subreddit} posts of all time!\", embed=embed)\n\n @reddit_group.command(name=\"daily\")\n async def daily_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of today from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"day\")\n\n await ctx.send(content=f\"Here are today's top {subreddit} posts!\", embed=embed)\n\n 
@reddit_group.command(name=\"weekly\")\n async def weekly_command(self, ctx: Context, subreddit: Subreddit = \"r/Python\") -> None:\n \"\"\"Send the top posts of this week from a given subreddit.\"\"\"\n async with ctx.typing():\n embed = await self.get_top_posts(subreddit=subreddit, time=\"week\")\n\n await ctx.send(content=f\"Here are this week's top {subreddit} posts!\", embed=embed)\n\n @with_role(*STAFF_ROLES)\n @reddit_group.command(name=\"subreddits\", aliases=(\"subs\",))\n async def subreddits_command(self, ctx: Context) -> None:\n \"\"\"Send a paginated embed of all the subreddits we're relaying.\"\"\"\n embed = Embed()\n embed.title = \"Relayed subreddits.\"\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n RedditConfig.subreddits,\n ctx, embed,\n footer_text=\"Use the reddit commands along with these to view their posts.\",\n empty=False,\n max_lines=15\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Reddit cog.\"\"\"\n if not RedditConfig.secret or not RedditConfig.client_id:\n log.error(\"Credentials not provided, cog not loaded.\")\n return\n bot.add_cog(Reddit(bot))\n", "path": "bot/cogs/reddit.py"}]} | 3,982 | 240 |
gh_patches_debug_10275 | rasdani/github-patches | git_diff | mindsdb__lightwood-524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`sktime` missing dependency
In requirements.txt
`sktime[arima] >= 0.5.0`
but
`(mindsdb) C:\Users\User>pip install sktime[arima]>=0.5.0
WARNING: sktime 0.7.0 does not provide the extra 'arima'`
install
`(mindsdb) C:\Users\User>pip install sktime[arima]==0.5.0`
</issue>
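For context, a minimal sketch of how the import could fail fast with an actionable message when the ARIMA backend is missing — the error wording and the `pmdarima` hint are assumptions for illustration, not part of the lightwood codebase:

```python
# Sketch only: guard the optional ARIMA dependency at import time.
try:
    from sktime.forecasting.arima import AutoARIMA  # backed by pmdarima
except ImportError as exc:  # raised when the soft dependency is absent
    raise ImportError(
        "AutoARIMA needs pmdarima; install it directly (`pip install pmdarima`) "
        "or pin `sktime[arima]==0.5.0` as described in the issue."
    ) from exc
```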
<code>
[start of lightwood/model/sktime.py]
1 import numpy as np
2 import pandas as pd
3 from typing import Dict, List, Union
4 from sktime.forecasting.arima import AutoARIMA
5
6 from lightwood.api import dtype
7 from lightwood.helpers.log import log
8 from lightwood.model.base import BaseModel
9 from lightwood.encoder.time_series.helpers.common import get_group_matches
10 from lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs
11
12
13 class SkTime(BaseModel):
14 forecaster: str
15 n_ts_predictions: int
16 target: str
17 supports_proba: bool
18
19 def __init__(
20 self, stop_after: int, target: str, dtype_dict: Dict[str, str],
21 n_ts_predictions: int, ts_analysis: Dict):
22 super().__init__(stop_after)
23 self.target = target
24 dtype_dict[target] = dtype.float
25 self.model_class = AutoARIMA
26 self.models = {}
27 self.n_ts_predictions = n_ts_predictions
28 self.ts_analysis = ts_analysis
29 self.forecasting_horizon = np.arange(1, self.n_ts_predictions)
30 self.cutoff_index = {} # marks index at which training data stops and forecasting window starts
31 self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by
32 self.supports_proba = False
33 self.stable = True
34
35 def fit(self, ds_arr: List[EncodedDs]) -> None:
36 log.info('Started fitting sktime forecaster for array prediction')
37
38 all_folds = ConcatedEncodedDs(ds_arr)
39 df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis["tss"].order_by[0]}')
40 data = {'data': df[self.target],
41 'group_info': {gcol: df[gcol].tolist()
42 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
43
44 for group in self.ts_analysis['group_combinations']:
45 self.models[group] = self.model_class()
46
47 if self.grouped_by == ['__default']:
48 series_idxs = data['data'].index
49 series_data = data['data'].values
50 else:
51 series_idxs, series_data = get_group_matches(data, group)
52
53 if series_data.size > 0:
54 series = pd.Series(series_data.squeeze(), index=series_idxs)
55 series = series.sort_index(ascending=True)
56 series = series.reset_index(drop=True)
57 try:
58 self.models[group].fit(series)
59 except ValueError:
60 self.models[group] = self.model_class(deseasonalize=False)
61 self.models[group].fit(series)
62
63 self.cutoff_index[group] = len(series)
64
65 if self.grouped_by == ['__default']:
66 break
67
68 def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = False) -> pd.DataFrame:
69 if predict_proba:
70 log.warning('This model does not output probability estimates')
71
72 length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)
73 ydf = pd.DataFrame(0, # zero-filled
74 index=np.arange(length),
75 columns=['prediction'],
76 dtype=object)
77
78 data = {'data': ds.data_frame[self.target].reset_index(drop=True),
79 'group_info': {gcol: ds.data_frame[gcol].tolist()
80 for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
81
82 # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder
83
84 for group in self.ts_analysis['group_combinations']:
85
86 if self.grouped_by == ['__default']:
87 series_idxs = data['data'].index
88 series_data = data['data'].values
89 else:
90 series_idxs, series_data = get_group_matches(data, group)
91
92 if series_data.size > 0:
93 series = pd.Series(series_data.squeeze(), index=series_idxs)
94 series = series.sort_index(ascending=True)
95 series = series.reset_index(drop=True)
96
97 for idx, _ in enumerate(series.iteritems()):
98 ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict(
99 np.arange(idx, # +cutoff
100 idx + self.n_ts_predictions)).tolist() # +cutoff
101
102 if self.grouped_by == ['__default']:
103 break
104
105 return ydf[['prediction']]
106
[end of lightwood/model/sktime.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightwood/model/sktime.py b/lightwood/model/sktime.py
--- a/lightwood/model/sktime.py
+++ b/lightwood/model/sktime.py
@@ -42,7 +42,8 @@
for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}
for group in self.ts_analysis['group_combinations']:
- self.models[group] = self.model_class()
+ # many warnings might be thrown inside of statsmodels during stepwise procedure
+ self.models[group] = self.model_class(suppress_warnings=True)
if self.grouped_by == ['__default']:
series_idxs = data['data'].index
| {"golden_diff": "diff --git a/lightwood/model/sktime.py b/lightwood/model/sktime.py\n--- a/lightwood/model/sktime.py\n+++ b/lightwood/model/sktime.py\n@@ -42,7 +42,8 @@\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n \n for group in self.ts_analysis['group_combinations']:\n- self.models[group] = self.model_class()\n+ # many warnings might be thrown inside of statsmodels during stepwise procedure\n+ self.models[group] = self.model_class(suppress_warnings=True)\n \n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n", "issue": "`sktime` missing dependency\nIn requirements.txt\r\n`sktime[arima] >= 0.5.0`\r\nbat \r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]>=0.5.0\r\nWARNING: sktime 0.7.0 does not provide the extra 'arima'`\r\ninstall\r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]==0.5.0`\n`sktime` missing dependency\nIn requirements.txt\r\n`sktime[arima] >= 0.5.0`\r\nbat \r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]>=0.5.0\r\nWARNING: sktime 0.7.0 does not provide the extra 'arima'`\r\ninstall\r\n`(mindsdb) C:\\Users\\User>pip install sktime[arima]==0.5.0`\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom typing import Dict, List, Union\nfrom sktime.forecasting.arima import AutoARIMA\n\nfrom lightwood.api import dtype\nfrom lightwood.helpers.log import log\nfrom lightwood.model.base import BaseModel\nfrom lightwood.encoder.time_series.helpers.common import get_group_matches\nfrom lightwood.data.encoded_ds import EncodedDs, ConcatedEncodedDs\n\n\nclass SkTime(BaseModel):\n forecaster: str\n n_ts_predictions: int\n target: str\n supports_proba: bool\n\n def __init__(\n self, stop_after: int, target: str, dtype_dict: Dict[str, str],\n n_ts_predictions: int, ts_analysis: Dict):\n super().__init__(stop_after)\n self.target = target\n dtype_dict[target] = dtype.float\n self.model_class = AutoARIMA\n self.models = {}\n self.n_ts_predictions = n_ts_predictions\n self.ts_analysis = ts_analysis\n self.forecasting_horizon = np.arange(1, self.n_ts_predictions)\n self.cutoff_index = {} # marks index at which training data stops and forecasting window starts\n self.grouped_by = ['__default'] if not ts_analysis['tss'].group_by else ts_analysis['tss'].group_by\n self.supports_proba = False\n self.stable = True\n\n def fit(self, ds_arr: List[EncodedDs]) -> None:\n log.info('Started fitting sktime forecaster for array prediction')\n\n all_folds = ConcatedEncodedDs(ds_arr)\n df = all_folds.data_frame.sort_values(by=f'__mdb_original_{self.ts_analysis[\"tss\"].order_by[0]}')\n data = {'data': df[self.target],\n 'group_info': {gcol: df[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n for group in self.ts_analysis['group_combinations']:\n self.models[group] = self.model_class()\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n try:\n self.models[group].fit(series)\n except ValueError:\n self.models[group] = self.model_class(deseasonalize=False)\n self.models[group].fit(series)\n\n self.cutoff_index[group] = len(series)\n\n if self.grouped_by == ['__default']:\n break\n\n def __call__(self, ds: Union[EncodedDs, ConcatedEncodedDs], predict_proba: bool = 
False) -> pd.DataFrame:\n if predict_proba:\n log.warning('This model does not output probability estimates')\n\n length = sum(ds.encoded_ds_lenghts) if isinstance(ds, ConcatedEncodedDs) else len(ds)\n ydf = pd.DataFrame(0, # zero-filled\n index=np.arange(length),\n columns=['prediction'],\n dtype=object)\n\n data = {'data': ds.data_frame[self.target].reset_index(drop=True),\n 'group_info': {gcol: ds.data_frame[gcol].tolist()\n for gcol in self.grouped_by} if self.ts_analysis['tss'].group_by else {}}\n\n # all_idxs = list(range(length)) # @TODO: substract, and assign empty predictions to remainder\n\n for group in self.ts_analysis['group_combinations']:\n\n if self.grouped_by == ['__default']:\n series_idxs = data['data'].index\n series_data = data['data'].values\n else:\n series_idxs, series_data = get_group_matches(data, group)\n\n if series_data.size > 0:\n series = pd.Series(series_data.squeeze(), index=series_idxs)\n series = series.sort_index(ascending=True)\n series = series.reset_index(drop=True)\n\n for idx, _ in enumerate(series.iteritems()):\n ydf['prediction'].iloc[series_idxs[idx]] = self.models[group].predict(\n np.arange(idx, # +cutoff\n idx + self.n_ts_predictions)).tolist() # +cutoff\n\n if self.grouped_by == ['__default']:\n break\n\n return ydf[['prediction']]\n", "path": "lightwood/model/sktime.py"}]} | 1,913 | 154 |
gh_patches_debug_26823 | rasdani/github-patches | git_diff | ydataai__ydata-profiling-67 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add badges for "highly skewed", "zeros"
I enjoy the badges on the report and am happy to submit a PR for this if it isn't being worked on elsewhere.
</issue>
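For orientation, a rough sketch of the kind of template tweak the request implies — the Bootstrap `label label-info` markup is an assumption modeled on the existing badges in `templates.py`:

```python
# Sketch: extend the report's message templates with badge spans
# (the "label label-info" class is assumed from the existing badge markup).
messages = dict()
messages['skewness'] = (
    u'{varname} is highly skewed (γ1 = {0[skewness]}) '
    u'<span class="label label-info">Skewed</span>'
)
messages['p_zeros'] = (
    u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros '
    u'<span class="label label-info">Zeros</span>'
)
```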
<code>
[start of pandas_profiling/templates.py]
1 # coding=UTF-8
2
3 '''This file contains all templates used for generating the HTML profile report'''
4
5 from jinja2 import Environment, PackageLoader
6
7 # Initializing Jinja
8 pl = PackageLoader('pandas_profiling', 'templates')
9 jinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)
10
11 # Mapping between template name and file
12 templates = {'freq_table_row': 'freq_table_row.html',
13 'mini_freq_table_row': 'mini_freq_table_row.html',
14 'freq_table': 'freq_table.html',
15 'mini_freq_table': 'mini_freq_table.html',
16 'row_num': 'row_num.html',
17 'row_date': 'row_date.html',
18 'row_cat': 'row_cat.html',
19 'row_bool': 'row_bool.html',
20 'row_corr': 'row_corr.html',
21 'row_recoded': 'row_recoded.html',
22 'row_const': 'row_const.html',
23 'row_unique': 'row_unique.html',
24 'overview': 'overview.html',
25 'sample': 'sample.html',
26 'base': 'base.html',
27 'wrapper': 'wrapper.html'
28 }
29
30 # Mapping between row type and var type
31 var_type = {'NUM': 'Numeric',
32 'DATE': 'Date',
33 'CAT': 'Categorical',
34 'UNIQUE': 'Categorical, Unique',
35 'BOOL': 'Boolean',
36 'CONST': 'Constant',
37 'CORR': 'Highly correlated',
38 'RECODED': 'Recoded'
39 }
40
41
42 def template(template_name):
43 """Return a jinja template ready for rendering. If needed, global variables are initialized.
44
45 Parameters
46 ----------
47 template_name: str, the name of the template as defined in the templates mapping
48
49 Returns
50 -------
51 The Jinja template ready for rendering
52 """
53 globals = None
54 if template_name.startswith('row_'):
55 # This is a row template setting global variable
56 globals = dict()
57 globals['vartype'] = var_type[template_name.split('_')[1].upper()]
58 return jinja2_env.get_template(templates[template_name], globals=globals)
59
60
61 # mapping between row type and template name
62 row_templates_dict = {'NUM': template('row_num'),
63 'DATE': template('row_date'),
64 'DISCRETE': template('row_num'),
65 'CAT': template('row_cat'),
66 'BOOL': template('row_bool'),
67 'UNIQUE': template('row_unique'),
68 'CONST': template('row_const'),
69 'CORR': template('row_corr'),
70 'RECODED': template('row_recoded')
71 }
72
73 # The number of column to use in the display of the frequency table according to the category
74 mini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}
75
76 messages = dict()
77 messages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class="label label-primary">Rejected</span>'
78 messages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (ρ = {0[correlation]}) <span class="label label-primary">Rejected</span>'
79 messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class="label label-primary">Rejected</span>'
80 messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>'
81 messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>'
82 messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]})'
83 messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class="label label-default">Missing</span>'
84 messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>'
85 messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'
86
87 message_row = u'<li>{message}</li>'
88
[end of pandas_profiling/templates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandas_profiling/templates.py b/pandas_profiling/templates.py
--- a/pandas_profiling/templates.py
+++ b/pandas_profiling/templates.py
@@ -79,9 +79,9 @@
messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class="label label-primary">Rejected</span>'
messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>'
messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>'
-messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]})'
+messages['skewness'] = u'{varname} is highly skewed (γ1 = {0[skewness]}) <span class="label label-info">Skewed</span>'
messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class="label label-default">Missing</span>'
messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>'
-messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'
+messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class="label label-info">Zeros</span>'
message_row = u'<li>{message}</li>'
| {"golden_diff": "diff --git a/pandas_profiling/templates.py b/pandas_profiling/templates.py\n--- a/pandas_profiling/templates.py\n+++ b/pandas_profiling/templates.py\n@@ -79,9 +79,9 @@\n messages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\n messages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\n messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\n-messages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]})'\n+messages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]}) <span class=\"label label-info\">Skewed</span>'\n messages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\n messages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\n-messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'\n+messages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros <span class=\"label label-info\">Zeros</span>'\n \n message_row = u'<li>{message}</li>'\n", "issue": "Add badges for \"highly skewed\", \"zeros\"\nI enjoy the badges on the report and am happy to submit a PR for this if it isn't being worked on elsewhere.\n", "before_files": [{"content": "# coding=UTF-8\n\n'''This file contains all templates used for generating the HTML profile report'''\n\nfrom jinja2 import Environment, PackageLoader\n\n# Initializing Jinja\npl = PackageLoader('pandas_profiling', 'templates')\njinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)\n\n# Mapping between template name and file\ntemplates = {'freq_table_row': 'freq_table_row.html',\n 'mini_freq_table_row': 'mini_freq_table_row.html',\n 'freq_table': 'freq_table.html',\n 'mini_freq_table': 'mini_freq_table.html',\n 'row_num': 'row_num.html',\n 'row_date': 'row_date.html',\n 'row_cat': 'row_cat.html',\n 'row_bool': 'row_bool.html',\n 'row_corr': 'row_corr.html',\n 'row_recoded': 'row_recoded.html',\n 'row_const': 'row_const.html',\n 'row_unique': 'row_unique.html',\n 'overview': 'overview.html',\n 'sample': 'sample.html',\n 'base': 'base.html',\n 'wrapper': 'wrapper.html'\n }\n\n# Mapping between row type and var type\nvar_type = {'NUM': 'Numeric',\n 'DATE': 'Date',\n 'CAT': 'Categorical',\n 'UNIQUE': 'Categorical, Unique',\n 'BOOL': 'Boolean',\n 'CONST': 'Constant',\n 'CORR': 'Highly correlated',\n 'RECODED': 'Recoded'\n }\n\n\ndef template(template_name):\n \"\"\"Return a jinja template ready for rendering. 
If needed, global variables are initialized.\n\n Parameters\n ----------\n template_name: str, the name of the template as defined in the templates mapping\n\n Returns\n -------\n The Jinja template ready for rendering\n \"\"\"\n globals = None\n if template_name.startswith('row_'):\n # This is a row template setting global variable\n globals = dict()\n globals['vartype'] = var_type[template_name.split('_')[1].upper()]\n return jinja2_env.get_template(templates[template_name], globals=globals)\n\n\n# mapping between row type and template name\nrow_templates_dict = {'NUM': template('row_num'),\n 'DATE': template('row_date'),\n 'DISCRETE': template('row_num'),\n 'CAT': template('row_cat'),\n 'BOOL': template('row_bool'),\n 'UNIQUE': template('row_unique'),\n 'CONST': template('row_const'),\n 'CORR': template('row_corr'),\n 'RECODED': template('row_recoded')\n }\n\n# The number of column to use in the display of the frequency table according to the category\nmini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}\n\nmessages = dict()\nmessages['CONST'] = u'{0[varname]} has constant value {0[mode]} <span class=\"label label-primary\">Rejected</span>'\nmessages['CORR'] = u'{0[varname]} is highly correlated with {0[correlation_var]} (\u03c1 = {0[correlation]}) <span class=\"label label-primary\">Rejected</span>'\nmessages['RECODED'] = u'{0[varname]} is a recoding of {0[correlation_var]} <span class=\"label label-primary\">Rejected</span>'\nmessages['HIGH_CARDINALITY'] = u'{varname} has a high cardinality: {0[distinct_count]} distinct values <span class=\"label label-warning\">Warning</span>'\nmessages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class=\"label label-warning\">Warning</span>'\nmessages['skewness'] = u'{varname} is highly skewed (\u03b31 = {0[skewness]})'\nmessages['p_missing'] = u'{varname} has {0[n_missing]} / {0[p_missing]} missing values <span class=\"label label-default\">Missing</span>'\nmessages['p_infinite'] = u'{varname} has {0[n_infinite]} / {0[p_infinite]} infinite values <span class=\"label label-default\">Infinite</span>'\nmessages['p_zeros'] = u'{varname} has {0[n_zeros]} / {0[p_zeros]} zeros'\n\nmessage_row = u'<li>{message}</li>'\n", "path": "pandas_profiling/templates.py"}]} | 1,670 | 377 |
gh_patches_debug_29517 | rasdani/github-patches | git_diff | pypa__setuptools-2878 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate setuptools.installer but not setup_requires
> You're going to break entire workflows and distros with this, please reconsider.
I'm tempted to un-deprecate `setup_requires` as a parameter, but to deprecate/remove only the `fetch_build_eggs` part. In other words, continue to allow `setup_requires` to supply requirements for PEP 517 hooks (`get_requires_for_build_sdist` and `get_requires_for_build_wheel`).
_Originally posted by @jaraco in https://github.com/pypa/setuptools/issues/2824#issuecomment-967430754_
</issue>
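To illustrate the direction hinted at above, here is a hedged sketch of how a PEP 517 backend can surface `setup_requires` through the `get_requires_for_build_*` hooks instead of fetching eggs itself; `_read_setup_requires` is a hypothetical helper, not an actual setuptools API:

```python
# Sketch of the PEP 517 side of the idea; not the real setuptools backend.
from typing import List, Optional


def _read_setup_requires() -> List[str]:
    """Hypothetical helper: parse setup_requires from setup.cfg / setup.py."""
    return []


def get_requires_for_build_wheel(config_settings: Optional[dict] = None) -> List[str]:
    # The frontend (e.g. pip) installs these before calling build_wheel,
    # so setuptools never needs to fetch eggs on its own.
    return ["wheel"] + _read_setup_requires()


def get_requires_for_build_sdist(config_settings: Optional[dict] = None) -> List[str]:
    return _read_setup_requires()
```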
<code>
[start of setuptools/installer.py]
1 import glob
2 import os
3 import subprocess
4 import sys
5 import tempfile
6 from distutils import log
7 from distutils.errors import DistutilsError
8
9 import pkg_resources
10 from setuptools.wheel import Wheel
11
12
13 def _fixup_find_links(find_links):
14 """Ensure find-links option end-up being a list of strings."""
15 if isinstance(find_links, str):
16 return find_links.split()
17 assert isinstance(find_links, (tuple, list))
18 return find_links
19
20
21 def fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME
22 """Fetch an egg needed for building.
23
24 Use pip/wheel to fetch/build a wheel."""
25 # Warn if wheel is not available
26 try:
27 pkg_resources.get_distribution('wheel')
28 except pkg_resources.DistributionNotFound:
29 dist.announce('WARNING: The wheel package is not available.', log.WARN)
30 # Ignore environment markers; if supplied, it is required.
31 req = strip_marker(req)
32 # Take easy_install options into account, but do not override relevant
33 # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
34 # take precedence.
35 opts = dist.get_option_dict('easy_install')
36 if 'allow_hosts' in opts:
37 raise DistutilsError('the `allow-hosts` option is not supported '
38 'when using pip to install requirements.')
39 quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ
40 if 'PIP_INDEX_URL' in os.environ:
41 index_url = None
42 elif 'index_url' in opts:
43 index_url = opts['index_url'][1]
44 else:
45 index_url = None
46 find_links = (
47 _fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts
48 else []
49 )
50 if dist.dependency_links:
51 find_links.extend(dist.dependency_links)
52 eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
53 environment = pkg_resources.Environment()
54 for egg_dist in pkg_resources.find_distributions(eggs_dir):
55 if egg_dist in req and environment.can_add(egg_dist):
56 return egg_dist
57 with tempfile.TemporaryDirectory() as tmpdir:
58 cmd = [
59 sys.executable, '-m', 'pip',
60 '--disable-pip-version-check',
61 'wheel', '--no-deps',
62 '-w', tmpdir,
63 ]
64 if quiet:
65 cmd.append('--quiet')
66 if index_url is not None:
67 cmd.extend(('--index-url', index_url))
68 for link in find_links or []:
69 cmd.extend(('--find-links', link))
70 # If requirement is a PEP 508 direct URL, directly pass
71 # the URL to pip, as `req @ url` does not work on the
72 # command line.
73 cmd.append(req.url or str(req))
74 try:
75 subprocess.check_call(cmd)
76 except subprocess.CalledProcessError as e:
77 raise DistutilsError(str(e)) from e
78 wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
79 dist_location = os.path.join(eggs_dir, wheel.egg_name())
80 wheel.install_as_egg(dist_location)
81 dist_metadata = pkg_resources.PathMetadata(
82 dist_location, os.path.join(dist_location, 'EGG-INFO'))
83 dist = pkg_resources.Distribution.from_filename(
84 dist_location, metadata=dist_metadata)
85 return dist
86
87
88 def strip_marker(req):
89 """
90 Return a new requirement without the environment marker to avoid
91 calling pip with something like `babel; extra == "i18n"`, which
92 would always be ignored.
93 """
94 # create a copy to avoid mutating the input
95 req = pkg_resources.Requirement.parse(str(req))
96 req.marker = None
97 return req
98
[end of setuptools/installer.py]
[start of setuptools/__init__.py]
1 """Extensions to the 'distutils' for large or complex distributions"""
2
3 from fnmatch import fnmatchcase
4 import functools
5 import os
6 import re
7 import warnings
8
9 import _distutils_hack.override # noqa: F401
10
11 import distutils.core
12 from distutils.errors import DistutilsOptionError
13 from distutils.util import convert_path
14
15 from ._deprecation_warning import SetuptoolsDeprecationWarning
16
17 import setuptools.version
18 from setuptools.extension import Extension
19 from setuptools.dist import Distribution
20 from setuptools.depends import Require
21 from . import monkey
22
23
24 __all__ = [
25 'setup',
26 'Distribution',
27 'Command',
28 'Extension',
29 'Require',
30 'SetuptoolsDeprecationWarning',
31 'find_packages',
32 'find_namespace_packages',
33 ]
34
35 __version__ = setuptools.version.__version__
36
37 bootstrap_install_from = None
38
39
40 class PackageFinder:
41 """
42 Generate a list of all Python packages found within a directory
43 """
44
45 @classmethod
46 def find(cls, where='.', exclude=(), include=('*',)):
47 """Return a list all Python packages found within directory 'where'
48
49 'where' is the root directory which will be searched for packages. It
50 should be supplied as a "cross-platform" (i.e. URL-style) path; it will
51 be converted to the appropriate local path syntax.
52
53 'exclude' is a sequence of package names to exclude; '*' can be used
54 as a wildcard in the names, such that 'foo.*' will exclude all
55 subpackages of 'foo' (but not 'foo' itself).
56
57 'include' is a sequence of package names to include. If it's
58 specified, only the named packages will be included. If it's not
59 specified, all found packages will be included. 'include' can contain
60 shell style wildcard patterns just like 'exclude'.
61 """
62
63 return list(
64 cls._find_packages_iter(
65 convert_path(where),
66 cls._build_filter('ez_setup', '*__pycache__', *exclude),
67 cls._build_filter(*include),
68 )
69 )
70
71 @classmethod
72 def _find_packages_iter(cls, where, exclude, include):
73 """
74 All the packages found in 'where' that pass the 'include' filter, but
75 not the 'exclude' filter.
76 """
77 for root, dirs, files in os.walk(where, followlinks=True):
78 # Copy dirs to iterate over it, then empty dirs.
79 all_dirs = dirs[:]
80 dirs[:] = []
81
82 for dir in all_dirs:
83 full_path = os.path.join(root, dir)
84 rel_path = os.path.relpath(full_path, where)
85 package = rel_path.replace(os.path.sep, '.')
86
87 # Skip directory trees that are not valid packages
88 if '.' in dir or not cls._looks_like_package(full_path):
89 continue
90
91 # Should this package be included?
92 if include(package) and not exclude(package):
93 yield package
94
95 # Keep searching subdirectories, as there may be more packages
96 # down there, even if the parent was excluded.
97 dirs.append(dir)
98
99 @staticmethod
100 def _looks_like_package(path):
101 """Does a directory look like a package?"""
102 return os.path.isfile(os.path.join(path, '__init__.py'))
103
104 @staticmethod
105 def _build_filter(*patterns):
106 """
107 Given a list of patterns, return a callable that will be true only if
108 the input matches at least one of the patterns.
109 """
110 return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
111
112
113 class PEP420PackageFinder(PackageFinder):
114 @staticmethod
115 def _looks_like_package(path):
116 return True
117
118
119 find_packages = PackageFinder.find
120 find_namespace_packages = PEP420PackageFinder.find
121
122
123 def _install_setup_requires(attrs):
124 # Note: do not use `setuptools.Distribution` directly, as
125 # our PEP 517 backend patch `distutils.core.Distribution`.
126 class MinimalDistribution(distutils.core.Distribution):
127 """
128 A minimal version of a distribution for supporting the
129 fetch_build_eggs interface.
130 """
131
132 def __init__(self, attrs):
133 _incl = 'dependency_links', 'setup_requires'
134 filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}
135 distutils.core.Distribution.__init__(self, filtered)
136
137 def finalize_options(self):
138 """
139 Disable finalize_options to avoid building the working set.
140 Ref #2158.
141 """
142
143 dist = MinimalDistribution(attrs)
144
145 # Honor setup.cfg's options.
146 dist.parse_config_files(ignore_option_errors=True)
147 if dist.setup_requires:
148 warnings.warn(
149 "setup_requires is deprecated. Supply build "
150 "dependencies using PEP 517 pyproject.toml build-requires.",
151 SetuptoolsDeprecationWarning,
152 )
153 dist.fetch_build_eggs(dist.setup_requires)
154
155
156 def setup(**attrs):
157 # Make sure we have any requirements needed to interpret 'attrs'.
158 _install_setup_requires(attrs)
159 return distutils.core.setup(**attrs)
160
161
162 setup.__doc__ = distutils.core.setup.__doc__
163
164
165 _Command = monkey.get_unpatched(distutils.core.Command)
166
167
168 class Command(_Command):
169 __doc__ = _Command.__doc__
170
171 command_consumes_arguments = False
172
173 def __init__(self, dist, **kw):
174 """
175 Construct the command for dist, updating
176 vars(self) with any keyword parameters.
177 """
178 _Command.__init__(self, dist)
179 vars(self).update(kw)
180
181 def _ensure_stringlike(self, option, what, default=None):
182 val = getattr(self, option)
183 if val is None:
184 setattr(self, option, default)
185 return default
186 elif not isinstance(val, str):
187 raise DistutilsOptionError(
188 "'%s' must be a %s (got `%s`)" % (option, what, val)
189 )
190 return val
191
192 def ensure_string_list(self, option):
193 r"""Ensure that 'option' is a list of strings. If 'option' is
194 currently a string, we split it either on /,\s*/ or /\s+/, so
195 "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
196 ["foo", "bar", "baz"].
197 """
198 val = getattr(self, option)
199 if val is None:
200 return
201 elif isinstance(val, str):
202 setattr(self, option, re.split(r',\s*|\s+', val))
203 else:
204 if isinstance(val, list):
205 ok = all(isinstance(v, str) for v in val)
206 else:
207 ok = False
208 if not ok:
209 raise DistutilsOptionError(
210 "'%s' must be a list of strings (got %r)" % (option, val)
211 )
212
213 def reinitialize_command(self, command, reinit_subcommands=0, **kw):
214 cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
215 vars(cmd).update(kw)
216 return cmd
217
218
219 def _find_all_simple(path):
220 """
221 Find all files under 'path'
222 """
223 results = (
224 os.path.join(base, file)
225 for base, dirs, files in os.walk(path, followlinks=True)
226 for file in files
227 )
228 return filter(os.path.isfile, results)
229
230
231 def findall(dir=os.curdir):
232 """
233 Find all files under 'dir' and return the list of full filenames.
234 Unless dir is '.', return full filenames with dir prepended.
235 """
236 files = _find_all_simple(dir)
237 if dir == os.curdir:
238 make_rel = functools.partial(os.path.relpath, start=dir)
239 files = map(make_rel, files)
240 return list(files)
241
242
243 class sic(str):
244 """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
245
246
247 # Apply monkey patches
248 monkey.patch_all()
249
[end of setuptools/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/__init__.py b/setuptools/__init__.py
--- a/setuptools/__init__.py
+++ b/setuptools/__init__.py
@@ -4,7 +4,6 @@
import functools
import os
import re
-import warnings
import _distutils_hack.override # noqa: F401
@@ -145,11 +144,6 @@
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
- warnings.warn(
- "setup_requires is deprecated. Supply build "
- "dependencies using PEP 517 pyproject.toml build-requires.",
- SetuptoolsDeprecationWarning,
- )
dist.fetch_build_eggs(dist.setup_requires)
diff --git a/setuptools/installer.py b/setuptools/installer.py
--- a/setuptools/installer.py
+++ b/setuptools/installer.py
@@ -3,11 +3,13 @@
import subprocess
import sys
import tempfile
+import warnings
from distutils import log
from distutils.errors import DistutilsError
import pkg_resources
from setuptools.wheel import Wheel
+from ._deprecation_warning import SetuptoolsDeprecationWarning
def _fixup_find_links(find_links):
@@ -22,6 +24,11 @@
"""Fetch an egg needed for building.
Use pip/wheel to fetch/build a wheel."""
+ warnings.warn(
+ "setuptools.installer is deprecated. Requirements should "
+ "be satisfied by a PEP 517 installer.",
+ SetuptoolsDeprecationWarning,
+ )
# Warn if wheel is not available
try:
pkg_resources.get_distribution('wheel')
| {"golden_diff": "diff --git a/setuptools/__init__.py b/setuptools/__init__.py\n--- a/setuptools/__init__.py\n+++ b/setuptools/__init__.py\n@@ -4,7 +4,6 @@\n import functools\n import os\n import re\n-import warnings\n \n import _distutils_hack.override # noqa: F401\n \n@@ -145,11 +144,6 @@\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n- warnings.warn(\n- \"setup_requires is deprecated. Supply build \"\n- \"dependencies using PEP 517 pyproject.toml build-requires.\",\n- SetuptoolsDeprecationWarning,\n- )\n dist.fetch_build_eggs(dist.setup_requires)\n \n \ndiff --git a/setuptools/installer.py b/setuptools/installer.py\n--- a/setuptools/installer.py\n+++ b/setuptools/installer.py\n@@ -3,11 +3,13 @@\n import subprocess\n import sys\n import tempfile\n+import warnings\n from distutils import log\n from distutils.errors import DistutilsError\n \n import pkg_resources\n from setuptools.wheel import Wheel\n+from ._deprecation_warning import SetuptoolsDeprecationWarning\n \n \n def _fixup_find_links(find_links):\n@@ -22,6 +24,11 @@\n \"\"\"Fetch an egg needed for building.\n \n Use pip/wheel to fetch/build a wheel.\"\"\"\n+ warnings.warn(\n+ \"setuptools.installer is deprecated. Requirements should \"\n+ \"be satisfied by a PEP 517 installer.\",\n+ SetuptoolsDeprecationWarning,\n+ )\n # Warn if wheel is not available\n try:\n pkg_resources.get_distribution('wheel')\n", "issue": "Deprecate setuptools.installer but not setup_requires\n> You're going to break entire workflows and distros with this, please reconsider.\r\n\r\nI'm tempted to un-deprecate `setup_requires` as a parameter, but to deprecate/remove only the `fetch_build_eggs` part. In other words, continue to allow `setup_requires` to supply requirements for PEP 517 hooks (`get_requires_for_build_sdist` and `get_requires_for_build_wheel`).\r\n\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/setuptools/issues/2824#issuecomment-967430754_\n", "before_files": [{"content": "import glob\nimport os\nimport subprocess\nimport sys\nimport tempfile\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nimport pkg_resources\nfrom setuptools.wheel import Wheel\n\n\ndef _fixup_find_links(find_links):\n \"\"\"Ensure find-links option end-up being a list of strings.\"\"\"\n if isinstance(find_links, str):\n return find_links.split()\n assert isinstance(find_links, (tuple, list))\n return find_links\n\n\ndef fetch_build_egg(dist, req): # noqa: C901 # is too complex (16) # FIXME\n \"\"\"Fetch an egg needed for building.\n\n Use pip/wheel to fetch/build a wheel.\"\"\"\n # Warn if wheel is not available\n try:\n pkg_resources.get_distribution('wheel')\n except pkg_resources.DistributionNotFound:\n dist.announce('WARNING: The wheel package is not available.', log.WARN)\n # Ignore environment markers; if supplied, it is required.\n req = strip_marker(req)\n # Take easy_install options into account, but do not override relevant\n # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll\n # take precedence.\n opts = dist.get_option_dict('easy_install')\n if 'allow_hosts' in opts:\n raise DistutilsError('the `allow-hosts` option is not supported '\n 'when using pip to install requirements.')\n quiet = 'PIP_QUIET' not in os.environ and 'PIP_VERBOSE' not in os.environ\n if 'PIP_INDEX_URL' in os.environ:\n index_url = None\n elif 'index_url' in opts:\n index_url = opts['index_url'][1]\n else:\n index_url = None\n find_links = (\n 
_fixup_find_links(opts['find_links'][1])[:] if 'find_links' in opts\n else []\n )\n if dist.dependency_links:\n find_links.extend(dist.dependency_links)\n eggs_dir = os.path.realpath(dist.get_egg_cache_dir())\n environment = pkg_resources.Environment()\n for egg_dist in pkg_resources.find_distributions(eggs_dir):\n if egg_dist in req and environment.can_add(egg_dist):\n return egg_dist\n with tempfile.TemporaryDirectory() as tmpdir:\n cmd = [\n sys.executable, '-m', 'pip',\n '--disable-pip-version-check',\n 'wheel', '--no-deps',\n '-w', tmpdir,\n ]\n if quiet:\n cmd.append('--quiet')\n if index_url is not None:\n cmd.extend(('--index-url', index_url))\n for link in find_links or []:\n cmd.extend(('--find-links', link))\n # If requirement is a PEP 508 direct URL, directly pass\n # the URL to pip, as `req @ url` does not work on the\n # command line.\n cmd.append(req.url or str(req))\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n raise DistutilsError(str(e)) from e\n wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])\n dist_location = os.path.join(eggs_dir, wheel.egg_name())\n wheel.install_as_egg(dist_location)\n dist_metadata = pkg_resources.PathMetadata(\n dist_location, os.path.join(dist_location, 'EGG-INFO'))\n dist = pkg_resources.Distribution.from_filename(\n dist_location, metadata=dist_metadata)\n return dist\n\n\ndef strip_marker(req):\n \"\"\"\n Return a new requirement without the environment marker to avoid\n calling pip with something like `babel; extra == \"i18n\"`, which\n would always be ignored.\n \"\"\"\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req\n", "path": "setuptools/installer.py"}, {"content": "\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nfrom fnmatch import fnmatchcase\nimport functools\nimport os\nimport re\nimport warnings\n\nimport _distutils_hack.override # noqa: F401\n\nimport distutils.core\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path\n\nfrom ._deprecation_warning import SetuptoolsDeprecationWarning\n\nimport setuptools.version\nfrom setuptools.extension import Extension\nfrom setuptools.dist import Distribution\nfrom setuptools.depends import Require\nfrom . import monkey\n\n\n__all__ = [\n 'setup',\n 'Distribution',\n 'Command',\n 'Extension',\n 'Require',\n 'SetuptoolsDeprecationWarning',\n 'find_packages',\n 'find_namespace_packages',\n]\n\n__version__ = setuptools.version.__version__\n\nbootstrap_install_from = None\n\n\nclass PackageFinder:\n \"\"\"\n Generate a list of all Python packages found within a directory\n \"\"\"\n\n @classmethod\n def find(cls, where='.', exclude=(), include=('*',)):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' is the root directory which will be searched for packages. It\n should be supplied as a \"cross-platform\" (i.e. URL-style) path; it will\n be converted to the appropriate local path syntax.\n\n 'exclude' is a sequence of package names to exclude; '*' can be used\n as a wildcard in the names, such that 'foo.*' will exclude all\n subpackages of 'foo' (but not 'foo' itself).\n\n 'include' is a sequence of package names to include. If it's\n specified, only the named packages will be included. If it's not\n specified, all found packages will be included. 
'include' can contain\n shell style wildcard patterns just like 'exclude'.\n \"\"\"\n\n return list(\n cls._find_packages_iter(\n convert_path(where),\n cls._build_filter('ez_setup', '*__pycache__', *exclude),\n cls._build_filter(*include),\n )\n )\n\n @classmethod\n def _find_packages_iter(cls, where, exclude, include):\n \"\"\"\n All the packages found in 'where' that pass the 'include' filter, but\n not the 'exclude' filter.\n \"\"\"\n for root, dirs, files in os.walk(where, followlinks=True):\n # Copy dirs to iterate over it, then empty dirs.\n all_dirs = dirs[:]\n dirs[:] = []\n\n for dir in all_dirs:\n full_path = os.path.join(root, dir)\n rel_path = os.path.relpath(full_path, where)\n package = rel_path.replace(os.path.sep, '.')\n\n # Skip directory trees that are not valid packages\n if '.' in dir or not cls._looks_like_package(full_path):\n continue\n\n # Should this package be included?\n if include(package) and not exclude(package):\n yield package\n\n # Keep searching subdirectories, as there may be more packages\n # down there, even if the parent was excluded.\n dirs.append(dir)\n\n @staticmethod\n def _looks_like_package(path):\n \"\"\"Does a directory look like a package?\"\"\"\n return os.path.isfile(os.path.join(path, '__init__.py'))\n\n @staticmethod\n def _build_filter(*patterns):\n \"\"\"\n Given a list of patterns, return a callable that will be true only if\n the input matches at least one of the patterns.\n \"\"\"\n return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)\n\n\nclass PEP420PackageFinder(PackageFinder):\n @staticmethod\n def _looks_like_package(path):\n return True\n\n\nfind_packages = PackageFinder.find\nfind_namespace_packages = PEP420PackageFinder.find\n\n\ndef _install_setup_requires(attrs):\n # Note: do not use `setuptools.Distribution` directly, as\n # our PEP 517 backend patch `distutils.core.Distribution`.\n class MinimalDistribution(distutils.core.Distribution):\n \"\"\"\n A minimal version of a distribution for supporting the\n fetch_build_eggs interface.\n \"\"\"\n\n def __init__(self, attrs):\n _incl = 'dependency_links', 'setup_requires'\n filtered = {k: attrs[k] for k in set(_incl) & set(attrs)}\n distutils.core.Distribution.__init__(self, filtered)\n\n def finalize_options(self):\n \"\"\"\n Disable finalize_options to avoid building the working set.\n Ref #2158.\n \"\"\"\n\n dist = MinimalDistribution(attrs)\n\n # Honor setup.cfg's options.\n dist.parse_config_files(ignore_option_errors=True)\n if dist.setup_requires:\n warnings.warn(\n \"setup_requires is deprecated. 
Supply build \"\n \"dependencies using PEP 517 pyproject.toml build-requires.\",\n SetuptoolsDeprecationWarning,\n )\n dist.fetch_build_eggs(dist.setup_requires)\n\n\ndef setup(**attrs):\n # Make sure we have any requirements needed to interpret 'attrs'.\n _install_setup_requires(attrs)\n return distutils.core.setup(**attrs)\n\n\nsetup.__doc__ = distutils.core.setup.__doc__\n\n\n_Command = monkey.get_unpatched(distutils.core.Command)\n\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n \"\"\"\n Construct the command for dist, updating\n vars(self) with any keyword parameters.\n \"\"\"\n _Command.__init__(self, dist)\n vars(self).update(kw)\n\n def _ensure_stringlike(self, option, what, default=None):\n val = getattr(self, option)\n if val is None:\n setattr(self, option, default)\n return default\n elif not isinstance(val, str):\n raise DistutilsOptionError(\n \"'%s' must be a %s (got `%s`)\" % (option, what, val)\n )\n return val\n\n def ensure_string_list(self, option):\n r\"\"\"Ensure that 'option' is a list of strings. If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n \"\"\"\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\" % (option, val)\n )\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n vars(cmd).update(kw)\n return cmd\n\n\ndef _find_all_simple(path):\n \"\"\"\n Find all files under 'path'\n \"\"\"\n results = (\n os.path.join(base, file)\n for base, dirs, files in os.walk(path, followlinks=True)\n for file in files\n )\n return filter(os.path.isfile, results)\n\n\ndef findall(dir=os.curdir):\n \"\"\"\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n \"\"\"\n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n\nclass sic(str):\n \"\"\"Treat this string as-is (https://en.wikipedia.org/wiki/Sic)\"\"\"\n\n\n# Apply monkey patches\nmonkey.patch_all()\n", "path": "setuptools/__init__.py"}]} | 4,089 | 377 |
gh_patches_debug_23934 | rasdani/github-patches | git_diff | microsoft__playwright-python-159 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stdout/stderr pipe throws stacktrace if stdout/stderr is mocked by Pytest
I think this is more of an upstream issue (here) than one for the Pytest plugin.
Stacktrace:
```
pytest project-name/browser_tests --browser chromium --base-url http://localhost:8000/
============================= test session starts ==============================
platform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1
baseurl: http://localhost:8000/
django: settings: config.settings.test (from option)
Using --randomly-seed=1
rootdir: /home/runner/work/project-name/project-name, configfile: setup.cfg
plugins: freezegun-0.4.2, base-url-1.4.2, playwright-0.0.5, cov-2.10.0, sugar-0.9.4, django-3.9.0, randomly-3.4.1, celery-4.4.7, factoryboy-2.0.3, Faker-4.1.1, env-0.6.2
collected 15 items
project-name/browser_tests/test_internal_manager_dashboard.py E [ 6%]
==================================== ERRORS ====================================
_ ERROR at setup of test123[chromium] _
launch_browser = <function launch_browser.<locals>.launch at 0x7f6bc73de430>
@pytest.fixture(scope="session")
def browser(launch_browser: Callable[[], Browser]) -> Generator[Browser, None, None]:
> browser = launch_browser()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:97: in launch
pw_context = sync_playwright()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/__init__.py:27: in sync_playwright
return SyncPlaywrightContextManager()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:72: in __init__
self._connection = run_driver()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:67: in run_driver
return loop.run_until_complete(run_driver_async())
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:616: in run_until_complete
return future.result()
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:48: in run_driver_async
proc = await asyncio.create_subprocess_exec(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/subprocess.py:236: in create_subprocess_exec
transport, protocol = await loop.subprocess_exec(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:1630: in subprocess_exec
transport = await self._make_subprocess_transport(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:197: in _make_subprocess_transport
transp = _UnixSubprocessTransport(self, protocol, args, shell,
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_subprocess.py:36: in __init__
self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:789: in _start
self._proc = subprocess.Popen(
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:804: in __init__
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <subprocess.Popen object at 0x7f6bc73dfa60>
stdin = <socket.socket [closed] fd=-1, family=AddressFamily.AF_UNIX, type=SocketKind.SOCK_STREAM, proto=0>
stdout = -1, stderr = <_io.TextIOWrapper encoding='UTF-8'>
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
if c2pwrite != -1:
errwrite = c2pwrite
else: # child's stdout is not set, use parent's stdout
errwrite = sys.__stdout__.fileno()
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
> errwrite = stderr.fileno()
E io.UnsupportedOperation: fileno
/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:1504: UnsupportedOperation
---------------------------- Captured stdout setup -----------------------------
kektus <_io.TextIOWrapper encoding='UTF-8'>
=========================== short test summary info ============================
ERROR project-name/browser_tests/test123.py::test123[chromium]
!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 1 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!
=============================== 1 error in 0.67s ===============================
Makefile:23: recipe for target 'browser_test' failed
```
Using the `-s` parameter is a workaround for this. The issue has been present since https://github.com/microsoft/playwright-python/pull/145.
</issue>
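For reference, a minimal sketch of how the driver launch could tolerate a captured stderr that lacks a usable `fileno()` (as under pytest's capture). The fallback choice is an assumption for illustration, not necessarily what the eventual fix does:

```python
import io
import sys
from typing import Optional


def _get_stderr_fileno() -> Optional[int]:
    # pytest's capture swaps sys.stderr for an object whose fileno() raises
    # io.UnsupportedOperation; fall back to the original stream if possible.
    try:
        return sys.stderr.fileno()
    except (AttributeError, io.UnsupportedOperation):
        return sys.__stderr__.fileno() if sys.__stderr__ else None
```

The launch code could then pass this value (or `subprocess.DEVNULL` when it is `None`) instead of `sys.stderr` when creating the driver subprocess.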
<code>
[start of playwright/main.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import subprocess
17 import sys
18 from typing import Any
19
20 from greenlet import greenlet
21
22 from playwright.async_api import Playwright as AsyncPlaywright
23 from playwright.connection import Connection
24 from playwright.helper import Error
25 from playwright.object_factory import create_remote_object
26 from playwright.path_utils import get_file_dirname
27 from playwright.playwright import Playwright
28 from playwright.sync_api import Playwright as SyncPlaywright
29 from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber
30
31
32 def compute_driver_name() -> str:
33 platform = sys.platform
34 if platform == "darwin":
35 result = "driver-macos"
36 elif platform == "linux":
37 result = "driver-linux"
38 elif platform == "win32":
39 result = "driver-win.exe"
40 return result
41
42
43 async def run_driver_async() -> Connection:
44 package_path = get_file_dirname()
45 driver_name = compute_driver_name()
46 driver_executable = package_path / "drivers" / driver_name
47
48 proc = await asyncio.create_subprocess_exec(
49 str(driver_executable),
50 stdin=asyncio.subprocess.PIPE,
51 stdout=asyncio.subprocess.PIPE,
52 stderr=sys.stderr,
53 limit=32768,
54 )
55 assert proc.stdout
56 assert proc.stdin
57 connection = Connection(
58 proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()
59 )
60 return connection
61
62
63 def run_driver() -> Connection:
64 loop = asyncio.get_event_loop()
65 if loop.is_running():
66 raise Error("Can only run one Playwright at a time.")
67 return loop.run_until_complete(run_driver_async())
68
69
70 class SyncPlaywrightContextManager:
71 def __init__(self) -> None:
72 self._connection = run_driver()
73 self._playwright: SyncPlaywright
74
75 def __enter__(self) -> SyncPlaywright:
76 g_self = greenlet.getcurrent()
77
78 def callback_wrapper(playwright_impl: Playwright) -> None:
79 self._playwright = SyncPlaywright(playwright_impl)
80 g_self.switch()
81
82 self._connection.call_on_object_with_known_name("Playwright", callback_wrapper)
83 set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))
84 dispatcher_fiber().switch()
85 return self._playwright
86
87 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
88 self._connection.stop_sync()
89
90
91 class AsyncPlaywrightContextManager:
92 def __init__(self) -> None:
93 self._connection: Connection
94
95 async def __aenter__(self) -> AsyncPlaywright:
96 self._connection = await run_driver_async()
97 self._connection.run_async()
98 return AsyncPlaywright(
99 await self._connection.wait_for_object_with_known_name("Playwright")
100 )
101
102 async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
103 self._connection.stop_async()
104
105
106 if sys.platform == "win32":
107 # Use ProactorEventLoop in 3.7, which is default in 3.8
108 loop = asyncio.ProactorEventLoop()
109 asyncio.set_event_loop(loop)
110
111
112 def main() -> None:
113 if "install" not in sys.argv:
114 print('Run "python -m playwright install" to complete installation')
115 return
116 package_path = get_file_dirname()
117 driver_name = compute_driver_name()
118 driver_executable = package_path / "drivers" / driver_name
119 print("Installing the browsers...")
120 subprocess.check_call(f"{driver_executable} install", shell=True)
121
122 print("Playwright is now ready for use")
123
[end of playwright/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/playwright/main.py b/playwright/main.py
--- a/playwright/main.py
+++ b/playwright/main.py
@@ -13,6 +13,7 @@
# limitations under the License.
import asyncio
+import io
import subprocess
import sys
from typing import Any
@@ -45,11 +46,21 @@
driver_name = compute_driver_name()
driver_executable = package_path / "drivers" / driver_name
+ # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80
+ def _get_stderr_fileno() -> int:
+ try:
+ return sys.stderr.fileno()
+ except io.UnsupportedOperation:
+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
+ # This is potentially dangerous, but the best we can do.
+ return sys.__stderr__.fileno()
+
proc = await asyncio.create_subprocess_exec(
str(driver_executable),
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=sys.stderr,
+ stderr=_get_stderr_fileno(),
limit=32768,
)
assert proc.stdout
| {"golden_diff": "diff --git a/playwright/main.py b/playwright/main.py\n--- a/playwright/main.py\n+++ b/playwright/main.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import asyncio\n+import io\n import subprocess\n import sys\n from typing import Any\n@@ -45,11 +46,21 @@\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n \n+ # Sourced from: https://github.com/pytest-dev/pytest/blob/49827adcb9256c9c9c06a25729421dcc3c385edc/src/_pytest/faulthandler.py#L73-L80\n+ def _get_stderr_fileno() -> int:\n+ try:\n+ return sys.stderr.fileno()\n+ except io.UnsupportedOperation:\n+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n+ # This is potentially dangerous, but the best we can do.\n+ return sys.__stderr__.fileno()\n+\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n- stderr=sys.stderr,\n+ stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n", "issue": "Stdout/stderr pipe throws stacktrace if stdout/stderr is mocked by Pytest\nI think more an upstream issue (here) instead of for the Pytest plugin.\r\n\r\nStacktrace:\r\n\r\n```\r\npytest project-name/browser_tests --browser chromium --base-url http://localhost:8000/\r\n============================= test session starts ==============================\r\nplatform linux -- Python 3.8.5, pytest-6.0.1, py-1.9.0, pluggy-0.13.1\r\nbaseurl: http://localhost:8000/\r\ndjango: settings: config.settings.test (from option)\r\nUsing --randomly-seed=1\r\nrootdir: /home/runner/work/project-name/project-name, configfile: setup.cfg\r\nplugins: freezegun-0.4.2, base-url-1.4.2, playwright-0.0.5, cov-2.10.0, sugar-0.9.4, django-3.9.0, randomly-3.4.1, celery-4.4.7, factoryboy-2.0.3, Faker-4.1.1, env-0.6.2\r\ncollected 15 items\r\n\r\nproject-name/browser_tests/test_internal_manager_dashboard.py E [ 6%]\r\n\r\n\r\n==================================== ERRORS ====================================\r\n_ ERROR at setup of test123[chromium] _\r\n\r\nlaunch_browser = <function launch_browser.<locals>.launch at 0x7f6bc73de430>\r\n\r\n @pytest.fixture(scope=\"session\")\r\n def browser(launch_browser: Callable[[], Browser]) -> Generator[Browser, None, None]:\r\n> browser = launch_browser()\r\n\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:114: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/pytest_playwright/pytest_playwright.py:97: in launch\r\n pw_context = sync_playwright()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/__init__.py:27: in sync_playwright\r\n return SyncPlaywrightContextManager()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:72: in __init__\r\n self._connection = run_driver()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:67: in run_driver\r\n return loop.run_until_complete(run_driver_async())\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:616: in run_until_complete\r\n return future.result()\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/site-packages/playwright/main.py:48: in run_driver_async\r\n proc = await 
asyncio.create_subprocess_exec(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/subprocess.py:236: in create_subprocess_exec\r\n transport, protocol = await loop.subprocess_exec(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_events.py:1630: in subprocess_exec\r\n transport = await self._make_subprocess_transport(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:197: in _make_subprocess_transport\r\n transp = _UnixSubprocessTransport(self, protocol, args, shell,\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/base_subprocess.py:36: in __init__\r\n self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/asyncio/unix_events.py:789: in _start\r\n self._proc = subprocess.Popen(\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:804: in __init__\r\n errread, errwrite) = self._get_handles(stdin, stdout, stderr)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nself = <subprocess.Popen object at 0x7f6bc73dfa60>\r\nstdin = <socket.socket [closed] fd=-1, family=AddressFamily.AF_UNIX, type=SocketKind.SOCK_STREAM, proto=0>\r\nstdout = -1, stderr = <_io.TextIOWrapper encoding='UTF-8'>\r\n\r\n def _get_handles(self, stdin, stdout, stderr):\r\n \"\"\"Construct and return tuple with IO objects:\r\n p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite\r\n \"\"\"\r\n p2cread, p2cwrite = -1, -1\r\n c2pread, c2pwrite = -1, -1\r\n errread, errwrite = -1, -1\r\n \r\n if stdin is None:\r\n pass\r\n elif stdin == PIPE:\r\n p2cread, p2cwrite = os.pipe()\r\n elif stdin == DEVNULL:\r\n p2cread = self._get_devnull()\r\n elif isinstance(stdin, int):\r\n p2cread = stdin\r\n else:\r\n # Assuming file-like object\r\n p2cread = stdin.fileno()\r\n \r\n if stdout is None:\r\n pass\r\n elif stdout == PIPE:\r\n c2pread, c2pwrite = os.pipe()\r\n elif stdout == DEVNULL:\r\n c2pwrite = self._get_devnull()\r\n elif isinstance(stdout, int):\r\n c2pwrite = stdout\r\n else:\r\n # Assuming file-like object\r\n c2pwrite = stdout.fileno()\r\n \r\n if stderr is None:\r\n pass\r\n elif stderr == PIPE:\r\n errread, errwrite = os.pipe()\r\n elif stderr == STDOUT:\r\n if c2pwrite != -1:\r\n errwrite = c2pwrite\r\n else: # child's stdout is not set, use parent's stdout\r\n errwrite = sys.__stdout__.fileno()\r\n elif stderr == DEVNULL:\r\n errwrite = self._get_devnull()\r\n elif isinstance(stderr, int):\r\n errwrite = stderr\r\n else:\r\n # Assuming file-like object\r\n> errwrite = stderr.fileno()\r\nE io.UnsupportedOperation: fileno\r\n\r\n/opt/hostedtoolcache/Python/3.8.5/x64/lib/python3.8/subprocess.py:1504: UnsupportedOperation\r\n---------------------------- Captured stdout setup -----------------------------\r\nkektus <_io.TextIOWrapper encoding='UTF-8'>\r\n=========================== short test summary info ============================\r\nERROR project-name/browser_tests/test123.py::test123[chromium]\r\n!!!!!!!!!!!!!!!!!!!!!!!!!! stopping after 1 failures !!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n=============================== 1 error in 0.67s ===============================\r\nMakefile:23: recipe for target 'browser_test' failed\r\n\r\n```\r\n\r\nUsing the `-s` parameter is a workaround for that. 
Issue is persistent since https://github.com/microsoft/playwright-python/pull/145.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> 
None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}]} | 3,393 | 336 |
gh_patches_debug_19278 | rasdani/github-patches | git_diff | Pycord-Development__pycord-645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Redesign ApplicationContext.respond
### Summary
The ApplicationContext.respond property needs to be redesigned.
### What is the feature request for?
The core library
### The Problem
The `ApplicationContext.respond` property returns 2 different functions depending on `InteractionResponse.is_done()`. Both take different parameters and return different objects. This is bad practice because it can result in unexpected behavior. Also, type-hinting is kinda hard.
### The Ideal Solution
It might make sense to create a function `ApplicationContext.respond` that merges all parameters. Depending on `InteractionResponse.is_done()` it ignores the parameter of the other function. The only remaining problem is the different return types. This would simplify the situation, but isn't perfect yet. Maybe someone got a better idea.
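As a rough illustration of that idea (the parameter forwarding and the `Union` return type below are assumptions for the sketch, not a settled design), a single coroutine could dispatch on `is_done()` and forward whatever it receives to whichever callable applies:

```python
from typing import Any, Union


class ApplicationContext:  # abbreviated: the real class also defines response, followup, interaction
    async def respond(self, *args: Any, **kwargs: Any) -> Union["Interaction", "Webhook"]:
        """Send the initial interaction response, or a followup if one was already sent."""
        if self.response.is_done():
            # An initial response exists, so reuse the followup webhook.
            return await self.followup.send(*args, **kwargs)
        # No response yet, so answer the interaction directly.
        return await self.interaction.response.send_message(*args, **kwargs)
```

The different return types remain visible in the `Union`, which is the part of the problem this sketch does not solve.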
### The Current Solution
A property that returns 2 different functions depending on `InteractionResponse.is_done()`.
### Additional Context
_No response_
</issue>
<code>
[start of discord/commands/context.py]
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25 from __future__ import annotations
26
27 from typing import TYPE_CHECKING, Optional, Union
28
29 import discord.abc
30
31 if TYPE_CHECKING:
32 import discord
33 from discord import Bot
34 from discord.state import ConnectionState
35
36 from .commands import ApplicationCommand, Option
37 from ..cog import Cog
38
39 from ..guild import Guild
40 from ..interactions import Interaction, InteractionResponse
41 from ..member import Member
42 from ..message import Message
43 from ..user import User
44 from ..utils import cached_property
45
46 __all__ = (
47 "ApplicationContext",
48 "AutocompleteContext"
49 )
50
51 class ApplicationContext(discord.abc.Messageable):
52 """Represents a Discord application command interaction context.
53
54 This class is not created manually and is instead passed to application
55 commands as the first parameter.
56
57 .. versionadded:: 2.0
58
59 Attributes
60 -----------
61 bot: :class:`.Bot`
62 The bot that the command belongs to.
63 interaction: :class:`.Interaction`
64 The interaction object that invoked the command.
65 command: :class:`.ApplicationCommand`
66 The command that this context belongs to.
67 """
68
69 def __init__(self, bot: Bot, interaction: Interaction):
70 self.bot = bot
71 self.interaction = interaction
72
73 # below attributes will be set after initialization
74 self.command: ApplicationCommand = None # type: ignore
75 self.focused: Option = None # type: ignore
76 self.value: str = None # type: ignore
77 self.options: dict = None # type: ignore
78
79 self._state: ConnectionState = self.interaction._state
80
81 async def _get_channel(self) -> discord.abc.Messageable:
82 return self.channel
83
84 @cached_property
85 def channel(self):
86 return self.interaction.channel
87
88 @cached_property
89 def channel_id(self) -> Optional[int]:
90 return self.interaction.channel_id
91
92 @cached_property
93 def guild(self) -> Optional[Guild]:
94 return self.interaction.guild
95
96 @cached_property
97 def guild_id(self) -> Optional[int]:
98 return self.interaction.guild_id
99
100 @cached_property
101 def locale(self) -> Optional[str]:
102 return self.interaction.locale
103
104 @cached_property
105 def guild_locale(self) -> Optional[str]:
106 return self.interaction.guild_locale
107
108 @cached_property
109 def me(self) -> Union[Member, User]:
110 return self.guild.me if self.guild is not None else self.bot.user
111
112 @cached_property
113 def message(self) -> Optional[Message]:
114 return self.interaction.message
115
116 @cached_property
117 def user(self) -> Optional[Union[Member, User]]:
118 return self.interaction.user
119
120 @cached_property
121 def author(self) -> Optional[Union[Member, User]]:
122 return self.user
123
124 @property
125 def voice_client(self):
126 if self.guild is None:
127 return None
128
129 return self.guild.voice_client
130
131 @cached_property
132 def response(self) -> InteractionResponse:
133 return self.interaction.response
134
135 @property
136 def respond(self):
137 return self.followup.send if self.response.is_done() else self.interaction.response.send_message
138
139 @property
140 def defer(self):
141 return self.interaction.response.defer
142
143 @property
144 def followup(self):
145 return self.interaction.followup
146
147 async def delete(self):
148 """Calls :attr:`~discord.commands.ApplicationContext.respond`.
149 If the response is done, then calls :attr:`~discord.commands.ApplicationContext.respond` first."""
150 if not self.response.is_done():
151 await self.defer()
152
153 return await self.interaction.delete_original_message()
154
155 @property
156 def edit(self):
157 return self.interaction.edit_original_message
158
159 @property
160 def cog(self) -> Optional[Cog]:
161 """Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist."""
162 if self.command is None:
163 return None
164
165 return self.command.cog
166
167
168 class AutocompleteContext:
169 """Represents context for a slash command's option autocomplete.
170
171 This class is not created manually and is instead passed to an Option's autocomplete callback.
172
173 .. versionadded:: 2.0
174
175 Attributes
176 -----------
177 bot: :class:`.Bot`
178 The bot that the command belongs to.
179 interaction: :class:`.Interaction`
180 The interaction object that invoked the autocomplete.
181 command: :class:`.ApplicationCommand`
182 The command that this context belongs to.
183 focused: :class:`.Option`
184 The option the user is currently typing.
185 value: :class:`.str`
186 The content of the focused option.
187 options :class:`.dict`
188 A name to value mapping of the options that the user has selected before this option.
189 """
190
191 __slots__ = ("bot", "interaction", "command", "focused", "value", "options")
192
193 def __init__(self, bot: Bot, interaction: Interaction) -> None:
194 self.bot = bot
195 self.interaction = interaction
196
197 self.command: ApplicationCommand = None # type: ignore
198 self.focused: Option = None # type: ignore
199 self.value: str = None # type: ignore
200 self.options: dict = None # type: ignore
201
202 @property
203 def cog(self) -> Optional[Cog]:
204 """Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist."""
205 if self.command is None:
206 return None
207
208 return self.command.cog
209
[end of discord/commands/context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/discord/commands/context.py b/discord/commands/context.py
--- a/discord/commands/context.py
+++ b/discord/commands/context.py
@@ -134,7 +134,26 @@
@property
def respond(self):
- return self.followup.send if self.response.is_done() else self.interaction.response.send_message
+ if not self.response.is_done():
+ return self.interaction.response.send_message
+ else:
+ raise RuntimeError(f"Interaction was already issued a response. Try using {type(self).__name__}.send_followup() instead.")
+
+ @property
+ async def send_response(self) -> Callable[..., Union[Interaction, Webhook]]:
+ """Callable[..., Union[:class:`~.Interaction`, :class:`~.Webhook`]]: Sends either a response
+ or a followup response depending if the interaction has been responded to yet or not."""
+ if not self.response.is_done():
+ return self.interaction.response.send_message # self.response
+ else:
+ return self.followup.send # self.send_followup
+
+ @property
+ def send_followup(self):
+ if self.response.is_done():
+ return self.followup.send
+ else:
+ raise RuntimeError(f"Interaction was not yet issued a response. Try using {type(self).__name__}.respond() first.")
@property
def defer(self):
| {"golden_diff": "diff --git a/discord/commands/context.py b/discord/commands/context.py\n--- a/discord/commands/context.py\n+++ b/discord/commands/context.py\n@@ -134,7 +134,26 @@\n \n @property\n def respond(self):\n- return self.followup.send if self.response.is_done() else self.interaction.response.send_message\n+ if not self.response.is_done():\n+ return self.interaction.response.send_message\n+ else:\n+ raise RuntimeError(f\"Interaction was already issued a response. Try using {type(self).__name__}.send_followup() instead.\")\n+\n+ @property\n+ async def send_response(self) -> Callable[..., Union[Interaction, Webhook]]:\n+ \"\"\"Callable[..., Union[:class:`~.Interaction`, :class:`~.Webhook`]]: Sends either a response\n+ or a followup response depending if the interaction has been responded to yet or not.\"\"\"\n+ if not self.response.is_done():\n+ return self.interaction.response.send_message # self.response\n+ else:\n+ return self.followup.send # self.send_followup\n+\n+ @property\n+ def send_followup(self):\n+ if self.response.is_done():\n+ return self.followup.send\n+ else:\n+ raise RuntimeError(f\"Interaction was not yet issued a response. Try using {type(self).__name__}.respond() first.\")\n \n @property\n def defer(self):\n", "issue": "Redesign ApplicationContext.respond\n### Summary\n\nThe ApplicationContext.respond property needs to be redesigned.\n\n### What is the feature request for?\n\nThe core library\n\n### The Problem\n\nThe `ApplicationContext.respond` property returns 2 different functions depending on `InteractionResponse.is_done()`. Both take different parameters and return different objects. This is bad practice because it can result in unexpected behavior. Also, type-hinting is kinda hard. \n\n### The Ideal Solution\n\nIt might make sense to create a function `ApplicationContext.respond` that merges all parameters. Depending on `InteractionResponse.is_done()` it ignores the parameter of the other function. The only remaining problem is the different return types. This would simplify the situation, but isn't perfect yet. Maybe someone got a better idea.\n\n### The Current Solution\n\nA property that returns 2 different functions depending on `InteractionResponse.is_done()`.\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Optional, Union\n\nimport discord.abc\n\nif TYPE_CHECKING:\n import discord\n from discord import Bot\n from discord.state import ConnectionState\n\n from .commands import ApplicationCommand, Option\n from ..cog import Cog\n\nfrom ..guild import Guild\nfrom ..interactions import Interaction, InteractionResponse\nfrom ..member import Member\nfrom ..message import Message\nfrom ..user import User\nfrom ..utils import cached_property\n\n__all__ = (\n \"ApplicationContext\",\n \"AutocompleteContext\"\n)\n\nclass ApplicationContext(discord.abc.Messageable):\n \"\"\"Represents a Discord application command interaction context.\n\n This class is not created manually and is instead passed to application\n commands as the first parameter.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n bot: :class:`.Bot`\n The bot that the command belongs to.\n interaction: :class:`.Interaction`\n The interaction object that invoked the command.\n command: :class:`.ApplicationCommand`\n The command that this context belongs to.\n \"\"\"\n\n def __init__(self, bot: Bot, interaction: Interaction):\n self.bot = bot\n self.interaction = interaction\n\n # below attributes will be set after initialization\n self.command: ApplicationCommand = None # type: ignore\n self.focused: Option = None # type: ignore\n self.value: str = None # type: ignore\n self.options: dict = None # type: ignore\n\n self._state: ConnectionState = self.interaction._state\n\n async def _get_channel(self) -> discord.abc.Messageable:\n return self.channel\n\n @cached_property\n def channel(self):\n return self.interaction.channel\n\n @cached_property\n def channel_id(self) -> Optional[int]:\n return self.interaction.channel_id\n\n @cached_property\n def guild(self) -> Optional[Guild]:\n return self.interaction.guild\n\n @cached_property\n def guild_id(self) -> Optional[int]:\n return self.interaction.guild_id\n\n @cached_property\n def locale(self) -> Optional[str]:\n return self.interaction.locale\n\n @cached_property\n def guild_locale(self) -> Optional[str]:\n return self.interaction.guild_locale\n\n @cached_property\n def me(self) -> Union[Member, User]:\n return self.guild.me if self.guild is not None else self.bot.user\n\n @cached_property\n def message(self) -> Optional[Message]:\n return self.interaction.message\n\n @cached_property\n def user(self) -> Optional[Union[Member, User]]:\n return self.interaction.user\n\n @cached_property\n def author(self) -> Optional[Union[Member, User]]:\n return self.user\n\n @property\n def voice_client(self):\n if self.guild is None:\n return None\n \n return self.guild.voice_client\n\n @cached_property\n def response(self) -> InteractionResponse:\n return self.interaction.response\n\n @property\n def respond(self):\n return self.followup.send if self.response.is_done() else self.interaction.response.send_message\n\n @property\n def defer(self):\n return self.interaction.response.defer\n\n @property\n def followup(self):\n return self.interaction.followup\n\n async def delete(self):\n \"\"\"Calls :attr:`~discord.commands.ApplicationContext.respond`.\n If the response is done, then calls :attr:`~discord.commands.ApplicationContext.respond` first.\"\"\"\n if not 
self.response.is_done():\n await self.defer()\n\n return await self.interaction.delete_original_message()\n\n @property\n def edit(self):\n return self.interaction.edit_original_message\n\n @property\n def cog(self) -> Optional[Cog]:\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist.\"\"\"\n if self.command is None:\n return None\n \n return self.command.cog\n\n\nclass AutocompleteContext:\n \"\"\"Represents context for a slash command's option autocomplete.\n\n This class is not created manually and is instead passed to an Option's autocomplete callback.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n bot: :class:`.Bot`\n The bot that the command belongs to. \n interaction: :class:`.Interaction`\n The interaction object that invoked the autocomplete.\n command: :class:`.ApplicationCommand`\n The command that this context belongs to.\n focused: :class:`.Option`\n The option the user is currently typing.\n value: :class:`.str`\n The content of the focused option.\n options :class:`.dict`\n A name to value mapping of the options that the user has selected before this option.\n \"\"\"\n\n __slots__ = (\"bot\", \"interaction\", \"command\", \"focused\", \"value\", \"options\")\n \n def __init__(self, bot: Bot, interaction: Interaction) -> None:\n self.bot = bot\n self.interaction = interaction\n\n self.command: ApplicationCommand = None # type: ignore\n self.focused: Option = None # type: ignore\n self.value: str = None # type: ignore\n self.options: dict = None # type: ignore\n\n @property\n def cog(self) -> Optional[Cog]:\n \"\"\"Optional[:class:`.Cog`]: Returns the cog associated with this context's command. ``None`` if it does not exist.\"\"\"\n if self.command is None:\n return None\n \n return self.command.cog\n", "path": "discord/commands/context.py"}]} | 2,716 | 314 |
gh_patches_debug_19915 | rasdani/github-patches | git_diff | apache__airflow-15247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tasks in an infinite slots pool are never scheduled
**Apache Airflow version**: v2.0.0 and up
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): not tested with K8
**Environment**:
all
**What happened**:
Executing the unit test included below, or create an infinite pool ( `-1` slots ) and tasks that should be executed in that pool.
```
INFO airflow.jobs.scheduler_job.SchedulerJob:scheduler_job.py:991 Not scheduling since there are -1 open slots in pool test_scheduler_verify_infinite_pool
```
**What you expected to happen**:
To schedule tasks, or to drop support for infinite slots pools?
**How to reproduce it**:
easiest one is this unit test:
```
def test_scheduler_verify_infinite_pool(self):
"""
Test that TIs are still scheduled if we only have one infinite pool.
"""
dag = DAG(dag_id='test_scheduler_verify_infinite_pool', start_date=DEFAULT_DATE)
BashOperator(
task_id='test_scheduler_verify_infinite_pool_t0',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_infinite_pool',
bash_command='echo hi',
)
dagbag = DagBag(
dag_folder=os.path.join(settings.DAGS_FOLDER, "no_dags.py"),
include_examples=False,
read_dags_from_db=True,
)
dagbag.bag_dag(dag=dag, root_dag=dag)
dagbag.sync_to_db()
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_infinite_pool', slots=-1)
session.add(pool)
session.commit()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
scheduler = SchedulerJob(executor=self.null_exec)
scheduler.processor_agent = mock.MagicMock()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
scheduler._schedule_dag_run(dr, {}, session)
task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)
# Let's make sure we don't end up with a `max_tis` == 0
assert len(task_instances_list) >= 1
```
**Anything else we need to know**:
Overall I'm not sure whether it's worth fixing in those various spots:
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L908
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L971
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L988
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1041
https://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1056
Or whether to restrict `-1` ( infinite ) slots in pools:
https://github.com/bperson/airflow/blob/master/airflow/models/pool.py#L49
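One low-risk direction, sketched below (the helper name is made up for illustration and this is not the committed fix): treat the `-1` sentinel as unbounded wherever pool capacity is computed, the same way `Pool.open_slots()` already maps `-1` to `float('inf')`, so the scheduler's open-slot checks never compare against a negative number.

```python
def normalize_pool_slots(total_slots: int) -> float:
    """Map the -1 sentinel ("infinite" pool) to an unbounded slot count.

    Mirrors Pool.open_slots(), which already returns float('inf') for -1.
    """
    return float('inf') if total_slots == -1 else float(total_slots)


# e.g. inside Pool.slots_stats(), before building the per-pool stats:
# pools[pool_name] = PoolStats(total=normalize_pool_slots(total_slots),
#                              running=0, queued=0, open=0)
```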
</issue>
<code>
[start of airflow/models/pool.py]
1 #
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 from typing import Dict, Iterable, Optional, Tuple
20
21 from sqlalchemy import Column, Integer, String, Text, func
22 from sqlalchemy.orm.session import Session
23
24 from airflow.exceptions import AirflowException
25 from airflow.models.base import Base
26 from airflow.ti_deps.dependencies_states import EXECUTION_STATES
27 from airflow.typing_compat import TypedDict
28 from airflow.utils.session import provide_session
29 from airflow.utils.sqlalchemy import nowait, with_row_locks
30 from airflow.utils.state import State
31
32
33 class PoolStats(TypedDict):
34 """Dictionary containing Pool Stats"""
35
36 total: int
37 running: int
38 queued: int
39 open: int
40
41
42 class Pool(Base):
43 """the class to get Pool info."""
44
45 __tablename__ = "slot_pool"
46
47 id = Column(Integer, primary_key=True)
48 pool = Column(String(256), unique=True)
49 # -1 for infinite
50 slots = Column(Integer, default=0)
51 description = Column(Text)
52
53 DEFAULT_POOL_NAME = 'default_pool'
54
55 def __repr__(self):
56 return str(self.pool) # pylint: disable=E0012
57
58 @staticmethod
59 @provide_session
60 def get_pool(pool_name, session: Session = None):
61 """
62 Get the Pool with specific pool name from the Pools.
63
64 :param pool_name: The pool name of the Pool to get.
65 :param session: SQLAlchemy ORM Session
66 :return: the pool object
67 """
68 return session.query(Pool).filter(Pool.pool == pool_name).first()
69
70 @staticmethod
71 @provide_session
72 def get_default_pool(session: Session = None):
73 """
74 Get the Pool of the default_pool from the Pools.
75
76 :param session: SQLAlchemy ORM Session
77 :return: the pool object
78 """
79 return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)
80
81 @staticmethod
82 @provide_session
83 def slots_stats(
84 *,
85 lock_rows: bool = False,
86 session: Session = None,
87 ) -> Dict[str, PoolStats]:
88 """
89 Get Pool stats (Number of Running, Queued, Open & Total tasks)
90
91 If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a
92 non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an
93 OperationalError.
94
95 :param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns
96 :param session: SQLAlchemy ORM Session
97 """
98 from airflow.models.taskinstance import TaskInstance # Avoid circular import
99
100 pools: Dict[str, PoolStats] = {}
101
102 query = session.query(Pool.pool, Pool.slots)
103
104 if lock_rows:
105 query = with_row_locks(query, session=session, **nowait(session))
106
107 pool_rows: Iterable[Tuple[str, int]] = query.all()
108 for (pool_name, total_slots) in pool_rows:
109 pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
110
111 state_count_by_pool = (
112 session.query(TaskInstance.pool, TaskInstance.state, func.sum(TaskInstance.pool_slots))
113 .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
114 .group_by(TaskInstance.pool, TaskInstance.state)
115 ).all()
116
117 # calculate queued and running metrics
118 count: int
119 for (pool_name, state, count) in state_count_by_pool:
120 stats_dict: Optional[PoolStats] = pools.get(pool_name)
121 if not stats_dict:
122 continue
123 # TypedDict key must be a string literal, so we use if-statements to set value
124 if state == "running":
125 stats_dict["running"] = count
126 elif state == "queued":
127 stats_dict["queued"] = count
128 else:
129 raise AirflowException(f"Unexpected state. Expected values: {EXECUTION_STATES}.")
130
131 # calculate open metric
132 for pool_name, stats_dict in pools.items():
133 if stats_dict["total"] == -1:
134 # -1 means infinite
135 stats_dict["open"] = -1
136 else:
137 stats_dict["open"] = stats_dict["total"] - stats_dict["running"] - stats_dict["queued"]
138
139 return pools
140
141 def to_json(self):
142 """
143 Get the Pool in a json structure
144
145 :return: the pool object in json format
146 """
147 return {
148 'id': self.id,
149 'pool': self.pool,
150 'slots': self.slots,
151 'description': self.description,
152 }
153
154 @provide_session
155 def occupied_slots(self, session: Session):
156 """
157 Get the number of slots used by running/queued tasks at the moment.
158
159 :param session: SQLAlchemy ORM Session
160 :return: the used number of slots
161 """
162 from airflow.models.taskinstance import TaskInstance # Avoid circular import
163
164 return int(
165 session.query(func.sum(TaskInstance.pool_slots))
166 .filter(TaskInstance.pool == self.pool)
167 .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))
168 .scalar()
169 or 0
170 )
171
172 @provide_session
173 def running_slots(self, session: Session):
174 """
175 Get the number of slots used by running tasks at the moment.
176
177 :param session: SQLAlchemy ORM Session
178 :return: the used number of slots
179 """
180 from airflow.models.taskinstance import TaskInstance # Avoid circular import
181
182 return int(
183 session.query(func.sum(TaskInstance.pool_slots))
184 .filter(TaskInstance.pool == self.pool)
185 .filter(TaskInstance.state == State.RUNNING)
186 .scalar()
187 or 0
188 )
189
190 @provide_session
191 def queued_slots(self, session: Session):
192 """
193 Get the number of slots used by queued tasks at the moment.
194
195 :param session: SQLAlchemy ORM Session
196 :return: the used number of slots
197 """
198 from airflow.models.taskinstance import TaskInstance # Avoid circular import
199
200 return int(
201 session.query(func.sum(TaskInstance.pool_slots))
202 .filter(TaskInstance.pool == self.pool)
203 .filter(TaskInstance.state == State.QUEUED)
204 .scalar()
205 or 0
206 )
207
208 @provide_session
209 def open_slots(self, session: Session) -> float:
210 """
211 Get the number of slots open at the moment.
212
213 :param session: SQLAlchemy ORM Session
214 :return: the number of slots
215 """
216 if self.slots == -1:
217 return float('inf')
218 else:
219 return self.slots - self.occupied_slots(session)
220
[end of airflow/models/pool.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/models/pool.py b/airflow/models/pool.py
--- a/airflow/models/pool.py
+++ b/airflow/models/pool.py
@@ -106,6 +106,8 @@
pool_rows: Iterable[Tuple[str, int]] = query.all()
for (pool_name, total_slots) in pool_rows:
+ if total_slots == -1:
+ total_slots = float('inf') # type: ignore
pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)
state_count_by_pool = (
@@ -115,8 +117,10 @@
).all()
# calculate queued and running metrics
- count: int
for (pool_name, state, count) in state_count_by_pool:
+ # Some databases return decimal.Decimal here.
+ count = int(count)
+
stats_dict: Optional[PoolStats] = pools.get(pool_name)
if not stats_dict:
continue
| {"golden_diff": "diff --git a/airflow/models/pool.py b/airflow/models/pool.py\n--- a/airflow/models/pool.py\n+++ b/airflow/models/pool.py\n@@ -106,6 +106,8 @@\n \n pool_rows: Iterable[Tuple[str, int]] = query.all()\n for (pool_name, total_slots) in pool_rows:\n+ if total_slots == -1:\n+ total_slots = float('inf') # type: ignore\n pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)\n \n state_count_by_pool = (\n@@ -115,8 +117,10 @@\n ).all()\n \n # calculate queued and running metrics\n- count: int\n for (pool_name, state, count) in state_count_by_pool:\n+ # Some databases return decimal.Decimal here.\n+ count = int(count)\n+\n stats_dict: Optional[PoolStats] = pools.get(pool_name)\n if not stats_dict:\n continue\n", "issue": "Tasks in an infinite slots pool are never scheduled\n**Apache Airflow version**: v2.0.0 and up\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): not tested with K8\r\n**Environment**:\r\nall\r\n\r\n**What happened**:\r\n\r\nExecuting the unit test included below, or create an infinite pool ( `-1` slots ) and tasks that should be executed in that pool.\r\n```\r\nINFO airflow.jobs.scheduler_job.SchedulerJob:scheduler_job.py:991 Not scheduling since there are -1 open slots in pool test_scheduler_verify_infinite_pool\r\n```\r\n\r\n**What you expected to happen**:\r\n\r\nTo schedule tasks, or to drop support for infinite slots pools?\r\n\r\n**How to reproduce it**:\r\neasiest one is this unit test:\r\n```\r\ndef test_scheduler_verify_infinite_pool(self):\r\n \"\"\"\r\n Test that TIs are still scheduled if we only have one infinite pool.\r\n \"\"\"\r\n dag = DAG(dag_id='test_scheduler_verify_infinite_pool', start_date=DEFAULT_DATE)\r\n BashOperator(\r\n task_id='test_scheduler_verify_infinite_pool_t0',\r\n dag=dag,\r\n owner='airflow',\r\n pool='test_scheduler_verify_infinite_pool',\r\n bash_command='echo hi',\r\n )\r\n\r\n dagbag = DagBag(\r\n dag_folder=os.path.join(settings.DAGS_FOLDER, \"no_dags.py\"),\r\n include_examples=False,\r\n read_dags_from_db=True,\r\n )\r\n dagbag.bag_dag(dag=dag, root_dag=dag)\r\n dagbag.sync_to_db()\r\n\r\n session = settings.Session()\r\n pool = Pool(pool='test_scheduler_verify_infinite_pool', slots=-1)\r\n session.add(pool)\r\n session.commit()\r\n\r\n dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))\r\n\r\n scheduler = SchedulerJob(executor=self.null_exec)\r\n scheduler.processor_agent = mock.MagicMock()\r\n\r\n dr = dag.create_dagrun(\r\n run_type=DagRunType.SCHEDULED,\r\n execution_date=DEFAULT_DATE,\r\n state=State.RUNNING,\r\n )\r\n scheduler._schedule_dag_run(dr, {}, session)\r\n\r\n task_instances_list = scheduler._executable_task_instances_to_queued(max_tis=32, session=session)\r\n\r\n # Let's make sure we don't end up with a `max_tis` == 0\r\n assert len(task_instances_list) >= 1\r\n```\r\n\r\n**Anything else we need to know**:\r\n\r\nOverall I'm not sure whether it's worth fixing in those various spots:\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L908\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L971\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L988\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1041\r\nhttps://github.com/bperson/airflow/blob/master/airflow/jobs/scheduler_job.py#L1056\r\n\r\nOr whether to restrict `-1` ( infinite ) slots in pools:\r\nhttps://github.com/bperson/airflow/blob/master/airflow/models/pool.py#L49\n", 
"before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import Dict, Iterable, Optional, Tuple\n\nfrom sqlalchemy import Column, Integer, String, Text, func\nfrom sqlalchemy.orm.session import Session\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.models.base import Base\nfrom airflow.ti_deps.dependencies_states import EXECUTION_STATES\nfrom airflow.typing_compat import TypedDict\nfrom airflow.utils.session import provide_session\nfrom airflow.utils.sqlalchemy import nowait, with_row_locks\nfrom airflow.utils.state import State\n\n\nclass PoolStats(TypedDict):\n \"\"\"Dictionary containing Pool Stats\"\"\"\n\n total: int\n running: int\n queued: int\n open: int\n\n\nclass Pool(Base):\n \"\"\"the class to get Pool info.\"\"\"\n\n __tablename__ = \"slot_pool\"\n\n id = Column(Integer, primary_key=True)\n pool = Column(String(256), unique=True)\n # -1 for infinite\n slots = Column(Integer, default=0)\n description = Column(Text)\n\n DEFAULT_POOL_NAME = 'default_pool'\n\n def __repr__(self):\n return str(self.pool) # pylint: disable=E0012\n\n @staticmethod\n @provide_session\n def get_pool(pool_name, session: Session = None):\n \"\"\"\n Get the Pool with specific pool name from the Pools.\n\n :param pool_name: The pool name of the Pool to get.\n :param session: SQLAlchemy ORM Session\n :return: the pool object\n \"\"\"\n return session.query(Pool).filter(Pool.pool == pool_name).first()\n\n @staticmethod\n @provide_session\n def get_default_pool(session: Session = None):\n \"\"\"\n Get the Pool of the default_pool from the Pools.\n\n :param session: SQLAlchemy ORM Session\n :return: the pool object\n \"\"\"\n return Pool.get_pool(Pool.DEFAULT_POOL_NAME, session=session)\n\n @staticmethod\n @provide_session\n def slots_stats(\n *,\n lock_rows: bool = False,\n session: Session = None,\n ) -> Dict[str, PoolStats]:\n \"\"\"\n Get Pool stats (Number of Running, Queued, Open & Total tasks)\n\n If ``lock_rows`` is True, and the database engine in use supports the ``NOWAIT`` syntax, then a\n non-blocking lock will be attempted -- if the lock is not available then SQLAlchemy will throw an\n OperationalError.\n\n :param lock_rows: Should we attempt to obtain a row-level lock on all the Pool rows returns\n :param session: SQLAlchemy ORM Session\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n pools: Dict[str, PoolStats] = {}\n\n query = session.query(Pool.pool, Pool.slots)\n\n if lock_rows:\n query = with_row_locks(query, session=session, **nowait(session))\n\n pool_rows: Iterable[Tuple[str, int]] = query.all()\n for (pool_name, total_slots) in pool_rows:\n pools[pool_name] = PoolStats(total=total_slots, running=0, queued=0, open=0)\n\n state_count_by_pool = (\n 
session.query(TaskInstance.pool, TaskInstance.state, func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))\n .group_by(TaskInstance.pool, TaskInstance.state)\n ).all()\n\n # calculate queued and running metrics\n count: int\n for (pool_name, state, count) in state_count_by_pool:\n stats_dict: Optional[PoolStats] = pools.get(pool_name)\n if not stats_dict:\n continue\n # TypedDict key must be a string literal, so we use if-statements to set value\n if state == \"running\":\n stats_dict[\"running\"] = count\n elif state == \"queued\":\n stats_dict[\"queued\"] = count\n else:\n raise AirflowException(f\"Unexpected state. Expected values: {EXECUTION_STATES}.\")\n\n # calculate open metric\n for pool_name, stats_dict in pools.items():\n if stats_dict[\"total\"] == -1:\n # -1 means infinite\n stats_dict[\"open\"] = -1\n else:\n stats_dict[\"open\"] = stats_dict[\"total\"] - stats_dict[\"running\"] - stats_dict[\"queued\"]\n\n return pools\n\n def to_json(self):\n \"\"\"\n Get the Pool in a json structure\n\n :return: the pool object in json format\n \"\"\"\n return {\n 'id': self.id,\n 'pool': self.pool,\n 'slots': self.slots,\n 'description': self.description,\n }\n\n @provide_session\n def occupied_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by running/queued tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state.in_(list(EXECUTION_STATES)))\n .scalar()\n or 0\n )\n\n @provide_session\n def running_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by running tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state == State.RUNNING)\n .scalar()\n or 0\n )\n\n @provide_session\n def queued_slots(self, session: Session):\n \"\"\"\n Get the number of slots used by queued tasks at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the used number of slots\n \"\"\"\n from airflow.models.taskinstance import TaskInstance # Avoid circular import\n\n return int(\n session.query(func.sum(TaskInstance.pool_slots))\n .filter(TaskInstance.pool == self.pool)\n .filter(TaskInstance.state == State.QUEUED)\n .scalar()\n or 0\n )\n\n @provide_session\n def open_slots(self, session: Session) -> float:\n \"\"\"\n Get the number of slots open at the moment.\n\n :param session: SQLAlchemy ORM Session\n :return: the number of slots\n \"\"\"\n if self.slots == -1:\n return float('inf')\n else:\n return self.slots - self.occupied_slots(session)\n", "path": "airflow/models/pool.py"}]} | 3,393 | 233 |
gh_patches_debug_6036 | rasdani/github-patches | git_diff | sunpy__sunpy-2824 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
show_colormaps() is not big enough to show the colormap names
See attached picture. This affects the gallery too - see http://docs.sunpy.org/en/stable/generated/gallery/plotting/sunpy_colormaps_reference.html#sphx-glr-generated-gallery-plotting-sunpy-colormaps-reference-py

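A sketch of the general fix direction (this is not necessarily how `show_colormaps()` is implemented internally; the function name, margins and per-row height below are illustrative): scale the figure height with the number of colormaps so every label row keeps enough room to stay readable.

```python
import numpy as np
import matplotlib.pyplot as plt


def show_colormap_swatches(cmap_names):
    """Draw one labelled swatch per registered colormap, sized to fit the names."""
    gradient = np.outer(np.ones(10), np.linspace(0, 1, 256))
    # Reserve roughly 0.4 inch of height per row so long names such as
    # 'sswidlsoholasco2' are not squeezed together.
    fig, axes = plt.subplots(nrows=len(cmap_names),
                             figsize=(7, 0.4 * len(cmap_names)))
    fig.subplots_adjust(top=0.99, bottom=0.01, left=0.3, right=0.99)
    for ax, name in zip(axes, cmap_names):
        ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
        ax.set_axis_off()
        ax.text(-0.02, 0.5, name, va='center', ha='right',
                fontsize=10, transform=ax.transAxes)
    plt.show()
```

Calling it with `sorted(cmlist)` would work because the module registers every colormap with matplotlib, so `plt.get_cmap(name)` resolves each entry.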
</issue>
<code>
[start of sunpy/cm/cm.py]
1 """
2 This module provides a set of colormaps specific for solar data.
3 """
4 from __future__ import absolute_import, division, print_function
5
6 from copy import deepcopy
7
8 import numpy as np
9 import matplotlib.pyplot as plt
10 import matplotlib.cm as mplcm
11
12 from sunpy.cm import color_tables as ct
13 from sunpy.util import deprecated
14
15 __all__ = ['get_cmap', 'show_colormaps', 'cmlist']
16
17 sdoaia94 = ct.aia_color_table(94)
18 sdoaia131 = ct.aia_color_table(131)
19 sdoaia171 = ct.aia_color_table(171)
20 sdoaia193 = ct.aia_color_table(193)
21 sdoaia211 = ct.aia_color_table(211)
22 sdoaia304 = ct.aia_color_table(304)
23 sdoaia335 = ct.aia_color_table(335)
24 sdoaia1600 = ct.aia_color_table(1600)
25 sdoaia1700 = ct.aia_color_table(1700)
26 sdoaia4500 = ct.aia_color_table(4500)
27
28 sohoeit171 = ct.eit_color_table(171)
29 sohoeit195 = ct.eit_color_table(195)
30 sohoeit284 = ct.eit_color_table(284)
31 sohoeit304 = ct.eit_color_table(304)
32
33 # The color tables below returns one of the fundamental color tables for SOHO
34 # LASCO images. These are not the same as those used in SSWIDL. This is
35 # because the SSWIDL color scaling for LASCO level 0.5 and 1.0 is highly
36 # compressed and does not display the data well.
37 soholasco2 = deepcopy(mplcm.get_cmap("gist_heat"))
38 soholasco2.name = 'SOHO LASCO C2'
39 soholasco3 = deepcopy(mplcm.get_cmap("bone"))
40 soholasco3.name = 'SOHO LASCO C3'
41
42 # These are the SSWIDL color tables.
43 sswidlsoholasco2 = ct.sswidl_lasco_color_table(2)
44 sswidlsoholasco3 = ct.sswidl_lasco_color_table(3)
45
46 stereocor1 = ct.cor_color_table(1)
47 stereocor2 = ct.cor_color_table(2)
48
49 stereohi1 = ct.stereo_hi_color_table(1)
50 stereohi2 = ct.stereo_hi_color_table(2)
51
52 yohkohsxtal = ct.sxt_color_table('al')
53 yohkohsxtwh = ct.sxt_color_table('wh')
54
55 hinodexrt = ct.xrt_color_table()
56 hinodesotintensity = ct.sot_color_table('intensity')
57
58 trace171 = ct.trace_color_table('171')
59 trace195 = ct.trace_color_table('195')
60 trace284 = ct.trace_color_table('284')
61 trace1216 = ct.trace_color_table('1216')
62 trace1550 = ct.trace_color_table('1550')
63 trace1600 = ct.trace_color_table('1600')
64 trace1700 = ct.trace_color_table('1700')
65 traceWL = ct.trace_color_table('WL')
66
67 hmimag = ct.hmi_mag_color_table()
68
69 cmlist = {
70 'sdoaia94': sdoaia94,
71 'sdoaia131': sdoaia131,
72 'sdoaia171': sdoaia171,
73 'sdoaia193': sdoaia193,
74 'sdoaia211': sdoaia211,
75 'sdoaia304': sdoaia304,
76 'sdoaia335': sdoaia335,
77 'sdoaia1600': sdoaia1600,
78 'sdoaia1700': sdoaia1700,
79 'sdoaia4500': sdoaia4500,
80 'sohoeit171': sohoeit171,
81 'sohoeit195': sohoeit195,
82 'sohoeit284': sohoeit284,
83 'sohoeit304': sohoeit304,
84 'soholasco2': soholasco2,
85 'soholasco3': soholasco3,
86 'sswidlsoholasco2': sswidlsoholasco2,
87 'sswidlsoholasco3': sswidlsoholasco3,
88 'stereocor1': stereocor1,
89 'stereocor2': stereocor2,
90 'stereohi1': stereohi1,
91 'stereohi2': stereohi2,
92 'rhessi': mplcm.jet,
93 'yohkohsxtal': yohkohsxtal,
94 'yohkohsxtwh': yohkohsxtwh,
95 'hinodexrt': hinodexrt,
96 'hinodesotintensity': hinodesotintensity,
97 'trace171': trace171,
98 'trace195': trace195,
99 'trace284': trace284,
100 'trace1216': trace1216,
101 'trace1550': trace1550,
102 'trace1600': trace1600,
103 'trace1700': trace1700,
104 'traceWL': traceWL,
105 'hmimag': hmimag,
106 'irissji1330': ct.iris_sji_color_table('1330'),
107 'irissji1400': ct.iris_sji_color_table('1400'),
108 'irissji1600': ct.iris_sji_color_table('1600'),
109 'irissji2796': ct.iris_sji_color_table('2796'),
110 'irissji2832': ct.iris_sji_color_table('2832'),
111 'irissji5000': ct.iris_sji_color_table('5000'),
112 'irissjiFUV': ct.iris_sji_color_table('FUV'),
113 'irissjiNUV': ct.iris_sji_color_table('NUV'),
114 'irissjiSJI_NUV': ct.iris_sji_color_table('SJI_NUV')
115 }
116
117 # Register the colormaps with matplotlib so plt.get_cmap('sdoaia171') works
118 for name, cmap in cmlist.items():
119 mplcm.register_cmap(name=name, cmap=cmap)
120
121
122 @deprecated("0.9",
123 "'sunpy.cm.get_cmap' is dprecated, use 'plt.get_cmap' from Matplotlib "
124 "to load the colormaps instead.",
125 alternative='plt.get_cmap')
126 def get_cmap(name):
127 """
128 Get a colormap.
129
130 Parameters
131 ----------
132 name : string
133 The name of a color map.
134
135 Returns
136 -------
137 value : matplotlib colormap
138
139 See Also
140 --------
141
142 Examples
143 --------
144 >>> import sunpy.cm as cm
145 >>> colormap = cm.get_cmap(name = 'sdoaia94')
146
147 References
148 ----------
149 | https://matplotlib.org/api/cm_api.html
150
151 """
152 if name in cmlist:
153 return cmlist.get(name)
154 else:
155 raise ValueError("Colormap {name!s} is not recognized".format(name=name))
156
157
158 def show_colormaps(search=None):
159 """Displays a plot of the custom color maps supported in SunPy.
160
161 Parameters
162 ----------
163 search : str
164 A string to search for in the names of the color maps (e.g. aia, EIT,
165 171). Case insensitive.
166
167 Returns
168 -------
169 None : none
170
171 Examples
172 --------
173 >>> import sunpy.cm as cm
174 >>> cm.show_colormaps()
175 >>> cm.show_colormaps(search='aia')
176 >>> cm.show_colormaps(search='171')
177
178 References
179 ----------
180
181 """
182
183 if search is not None:
184 maps = sorted({k: v for (k, v) in cmlist.items() if k.lower().count(search.lower())})
185 if len(maps) == 0:
186 raise KeyError('No color maps found for search term "{:s}"'.format(search))
187 else:
188 maps = sorted(cmlist)
189
190 nmaps = len(maps) + 1
191
192 a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103
193 a = np.vstack((a, a))
194
195 fig = plt.figure(figsize=(5, 10),dpi=64)
196 fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)
197 for i, name in enumerate(maps):
198 ax = plt.subplot(nmaps, 1, i + 1)
199 plt.axis("off")
200 plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')
201 pos = list(ax.get_position().bounds)
202 fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,
203 horizontalalignment='right')
204 plt.show()
205
[end of sunpy/cm/cm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/cm/cm.py b/sunpy/cm/cm.py
--- a/sunpy/cm/cm.py
+++ b/sunpy/cm/cm.py
@@ -192,7 +192,7 @@
a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103
a = np.vstack((a, a))
- fig = plt.figure(figsize=(5, 10),dpi=64)
+ fig = plt.figure(figsize=(7, 10),dpi=128)
fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)
for i, name in enumerate(maps):
ax = plt.subplot(nmaps, 1, i + 1)
| {"golden_diff": "diff --git a/sunpy/cm/cm.py b/sunpy/cm/cm.py\n--- a/sunpy/cm/cm.py\n+++ b/sunpy/cm/cm.py\n@@ -192,7 +192,7 @@\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n \n- fig = plt.figure(figsize=(5, 10),dpi=64)\n+ fig = plt.figure(figsize=(7, 10),dpi=128)\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n", "issue": "show_colormaps() is not big enough to show the colormap names\nSee attached picture. This affects the gallery too - see http://docs.sunpy.org/en/stable/generated/gallery/plotting/sunpy_colormaps_reference.html#sphx-glr-generated-gallery-plotting-sunpy-colormaps-reference-py\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis module provides a set of colormaps specific for solar data.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nfrom copy import deepcopy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as mplcm\n\nfrom sunpy.cm import color_tables as ct\nfrom sunpy.util import deprecated\n\n__all__ = ['get_cmap', 'show_colormaps', 'cmlist']\n\nsdoaia94 = ct.aia_color_table(94)\nsdoaia131 = ct.aia_color_table(131)\nsdoaia171 = ct.aia_color_table(171)\nsdoaia193 = ct.aia_color_table(193)\nsdoaia211 = ct.aia_color_table(211)\nsdoaia304 = ct.aia_color_table(304)\nsdoaia335 = ct.aia_color_table(335)\nsdoaia1600 = ct.aia_color_table(1600)\nsdoaia1700 = ct.aia_color_table(1700)\nsdoaia4500 = ct.aia_color_table(4500)\n\nsohoeit171 = ct.eit_color_table(171)\nsohoeit195 = ct.eit_color_table(195)\nsohoeit284 = ct.eit_color_table(284)\nsohoeit304 = ct.eit_color_table(304)\n\n# The color tables below returns one of the fundamental color tables for SOHO\n# LASCO images. These are not the same as those used in SSWIDL. 
This is\n# because the SSWIDL color scaling for LASCO level 0.5 and 1.0 is highly\n# compressed and does not display the data well.\nsoholasco2 = deepcopy(mplcm.get_cmap(\"gist_heat\"))\nsoholasco2.name = 'SOHO LASCO C2'\nsoholasco3 = deepcopy(mplcm.get_cmap(\"bone\"))\nsoholasco3.name = 'SOHO LASCO C3'\n\n# These are the SSWIDL color tables.\nsswidlsoholasco2 = ct.sswidl_lasco_color_table(2)\nsswidlsoholasco3 = ct.sswidl_lasco_color_table(3)\n\nstereocor1 = ct.cor_color_table(1)\nstereocor2 = ct.cor_color_table(2)\n\nstereohi1 = ct.stereo_hi_color_table(1)\nstereohi2 = ct.stereo_hi_color_table(2)\n\nyohkohsxtal = ct.sxt_color_table('al')\nyohkohsxtwh = ct.sxt_color_table('wh')\n\nhinodexrt = ct.xrt_color_table()\nhinodesotintensity = ct.sot_color_table('intensity')\n\ntrace171 = ct.trace_color_table('171')\ntrace195 = ct.trace_color_table('195')\ntrace284 = ct.trace_color_table('284')\ntrace1216 = ct.trace_color_table('1216')\ntrace1550 = ct.trace_color_table('1550')\ntrace1600 = ct.trace_color_table('1600')\ntrace1700 = ct.trace_color_table('1700')\ntraceWL = ct.trace_color_table('WL')\n\nhmimag = ct.hmi_mag_color_table()\n\ncmlist = {\n 'sdoaia94': sdoaia94,\n 'sdoaia131': sdoaia131,\n 'sdoaia171': sdoaia171,\n 'sdoaia193': sdoaia193,\n 'sdoaia211': sdoaia211,\n 'sdoaia304': sdoaia304,\n 'sdoaia335': sdoaia335,\n 'sdoaia1600': sdoaia1600,\n 'sdoaia1700': sdoaia1700,\n 'sdoaia4500': sdoaia4500,\n 'sohoeit171': sohoeit171,\n 'sohoeit195': sohoeit195,\n 'sohoeit284': sohoeit284,\n 'sohoeit304': sohoeit304,\n 'soholasco2': soholasco2,\n 'soholasco3': soholasco3,\n 'sswidlsoholasco2': sswidlsoholasco2,\n 'sswidlsoholasco3': sswidlsoholasco3,\n 'stereocor1': stereocor1,\n 'stereocor2': stereocor2,\n 'stereohi1': stereohi1,\n 'stereohi2': stereohi2,\n 'rhessi': mplcm.jet,\n 'yohkohsxtal': yohkohsxtal,\n 'yohkohsxtwh': yohkohsxtwh,\n 'hinodexrt': hinodexrt,\n 'hinodesotintensity': hinodesotintensity,\n 'trace171': trace171,\n 'trace195': trace195,\n 'trace284': trace284,\n 'trace1216': trace1216,\n 'trace1550': trace1550,\n 'trace1600': trace1600,\n 'trace1700': trace1700,\n 'traceWL': traceWL,\n 'hmimag': hmimag,\n 'irissji1330': ct.iris_sji_color_table('1330'),\n 'irissji1400': ct.iris_sji_color_table('1400'),\n 'irissji1600': ct.iris_sji_color_table('1600'),\n 'irissji2796': ct.iris_sji_color_table('2796'),\n 'irissji2832': ct.iris_sji_color_table('2832'),\n 'irissji5000': ct.iris_sji_color_table('5000'),\n 'irissjiFUV': ct.iris_sji_color_table('FUV'),\n 'irissjiNUV': ct.iris_sji_color_table('NUV'),\n 'irissjiSJI_NUV': ct.iris_sji_color_table('SJI_NUV')\n}\n\n# Register the colormaps with matplotlib so plt.get_cmap('sdoaia171') works\nfor name, cmap in cmlist.items():\n mplcm.register_cmap(name=name, cmap=cmap)\n\n\n@deprecated(\"0.9\",\n \"'sunpy.cm.get_cmap' is dprecated, use 'plt.get_cmap' from Matplotlib \"\n \"to load the colormaps instead.\",\n alternative='plt.get_cmap')\ndef get_cmap(name):\n \"\"\"\n Get a colormap.\n\n Parameters\n ----------\n name : string\n The name of a color map.\n\n Returns\n -------\n value : matplotlib colormap\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> colormap = cm.get_cmap(name = 'sdoaia94')\n\n References\n ----------\n | https://matplotlib.org/api/cm_api.html\n\n \"\"\"\n if name in cmlist:\n return cmlist.get(name)\n else:\n raise ValueError(\"Colormap {name!s} is not recognized\".format(name=name))\n\n\ndef show_colormaps(search=None):\n \"\"\"Displays a plot of the custom color maps supported in SunPy.\n\n 
Parameters\n ----------\n search : str\n A string to search for in the names of the color maps (e.g. aia, EIT,\n 171). Case insensitive.\n\n Returns\n -------\n None : none\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> cm.show_colormaps()\n >>> cm.show_colormaps(search='aia')\n >>> cm.show_colormaps(search='171')\n\n References\n ----------\n\n \"\"\"\n\n if search is not None:\n maps = sorted({k: v for (k, v) in cmlist.items() if k.lower().count(search.lower())})\n if len(maps) == 0:\n raise KeyError('No color maps found for search term \"{:s}\"'.format(search))\n else:\n maps = sorted(cmlist)\n\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(5, 10),dpi=64)\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n plt.show()\n", "path": "sunpy/cm/cm.py"}]} | 3,352 | 191 |
gh_patches_debug_13624 | rasdani/github-patches | git_diff | encode__httpx-1391 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Path encoding broken for non-ASCII in WSGI mode
### Describe the bug
When using httpx to call into a WSGI application (my preferred way to test Flask app nowadays), the web view receives urlencoded paths.
I'm not sure where this really belongs to (is it a bug in httpx or Flask!?), but I've been working around it for months, so I guess I'll make myself unpopular and try it here first. If it turns out to be Flask's fault, I'l headl over and bother the Pallets folks.
### To reproduce
```python
from flask import Flask, request
import httpx
app = Flask(__name__)
@app.route('/<path>')
def hello_world(path):
return f"path: { path }, query: { request.args['a'] }, url: { request.url }"
if __name__ == "__main__":
with httpx.Client(app=app, base_url="http://testserver") as client:
resp = client.get("/ä", params={"a": "ä"})
print("httpx", resp.text)
with app.test_client() as client:
resp = client.get("/ä?a=%C3%A4")
print("flask", resp.get_data().decode("utf-8"))
```
### Expected behavior
```
httpx path: ä, query: ä, url: http://testserver/ä?a=ä
flask path: ä, query: ä, url: http://localhost/ä?a=ä
```
### Actual behavior
```
httpx path: %C3%A4, query: ä, url: http://testserver/%25C3%25A4?a=ä
flask path: ä, query: ä, url: http://localhost/ä?a=ä
```
NB
- it seems to handle query parameters just fine.
- `%25` is the percent-encoding of the percent sign (ASCII 0x25), i.e. the already-encoded path was encoded a second time
### Environment
- OS: macOS
- Python version: 3.8.6
- HTTPX version: 0.16.1
- Async environment: n/a
- HTTP proxy: n/a
- Custom certificates: no
</issue>
<code>
[start of httpx/_transports/wsgi.py]
1 import io
2 import itertools
3 import typing
4
5 import httpcore
6
7
8 def _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:
9 body = iter(body)
10 for chunk in body:
11 if chunk:
12 return itertools.chain([chunk], body)
13 return []
14
15
16 class WSGITransport(httpcore.SyncHTTPTransport):
17 """
18 A custom transport that handles sending requests directly to an WSGI app.
19 The simplest way to use this functionality is to use the `app` argument.
20
21 ```
22 client = httpx.Client(app=app)
23 ```
24
25 Alternatively, you can setup the transport instance explicitly.
26 This allows you to include any additional configuration arguments specific
27 to the WSGITransport class:
28
29 ```
30 transport = httpx.WSGITransport(
31 app=app,
32 script_name="/submount",
33 remote_addr="1.2.3.4"
34 )
35 client = httpx.Client(transport=transport)
36 ```
37
38 Arguments:
39
40 * `app` - The ASGI application.
41 * `raise_app_exceptions` - Boolean indicating if exceptions in the application
42 should be raised. Default to `True`. Can be set to `False` for use cases
43 such as testing the content of a client 500 response.
44 * `script_name` - The root path on which the ASGI application should be mounted.
45 * `remote_addr` - A string indicating the client IP of incoming requests.
46 ```
47 """
48
49 def __init__(
50 self,
51 app: typing.Callable,
52 raise_app_exceptions: bool = True,
53 script_name: str = "",
54 remote_addr: str = "127.0.0.1",
55 ) -> None:
56 self.app = app
57 self.raise_app_exceptions = raise_app_exceptions
58 self.script_name = script_name
59 self.remote_addr = remote_addr
60
61 def request(
62 self,
63 method: bytes,
64 url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],
65 headers: typing.List[typing.Tuple[bytes, bytes]] = None,
66 stream: httpcore.SyncByteStream = None,
67 ext: dict = None,
68 ) -> typing.Tuple[
69 int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict
70 ]:
71 headers = [] if headers is None else headers
72 stream = httpcore.PlainByteStream(content=b"") if stream is None else stream
73
74 scheme, host, port, full_path = url
75 path, _, query = full_path.partition(b"?")
76 environ = {
77 "wsgi.version": (1, 0),
78 "wsgi.url_scheme": scheme.decode("ascii"),
79 "wsgi.input": io.BytesIO(b"".join(stream)),
80 "wsgi.errors": io.BytesIO(),
81 "wsgi.multithread": True,
82 "wsgi.multiprocess": False,
83 "wsgi.run_once": False,
84 "REQUEST_METHOD": method.decode(),
85 "SCRIPT_NAME": self.script_name,
86 "PATH_INFO": path.decode("ascii"),
87 "QUERY_STRING": query.decode("ascii"),
88 "SERVER_NAME": host.decode("ascii"),
89 "SERVER_PORT": str(port),
90 "REMOTE_ADDR": self.remote_addr,
91 }
92 for header_key, header_value in headers:
93 key = header_key.decode("ascii").upper().replace("-", "_")
94 if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
95 key = "HTTP_" + key
96 environ[key] = header_value.decode("ascii")
97
98 seen_status = None
99 seen_response_headers = None
100 seen_exc_info = None
101
102 def start_response(
103 status: str, response_headers: list, exc_info: typing.Any = None
104 ) -> None:
105 nonlocal seen_status, seen_response_headers, seen_exc_info
106 seen_status = status
107 seen_response_headers = response_headers
108 seen_exc_info = exc_info
109
110 result = self.app(environ, start_response)
111 # This is needed because the status returned by start_response
112 # shouldn't be used until the first non-empty chunk has been served.
113 result = _skip_leading_empty_chunks(result)
114
115 assert seen_status is not None
116 assert seen_response_headers is not None
117 if seen_exc_info and self.raise_app_exceptions:
118 raise seen_exc_info[1]
119
120 status_code = int(seen_status.split()[0])
121 headers = [
122 (key.encode("ascii"), value.encode("ascii"))
123 for key, value in seen_response_headers
124 ]
125 stream = httpcore.IteratorByteStream(iterator=result)
126 ext = {}
127
128 return (status_code, headers, stream, ext)
129
[end of httpx/_transports/wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/_transports/wsgi.py b/httpx/_transports/wsgi.py
--- a/httpx/_transports/wsgi.py
+++ b/httpx/_transports/wsgi.py
@@ -1,6 +1,7 @@
import io
import itertools
import typing
+from urllib.parse import unquote
import httpcore
@@ -83,7 +84,7 @@
"wsgi.run_once": False,
"REQUEST_METHOD": method.decode(),
"SCRIPT_NAME": self.script_name,
- "PATH_INFO": path.decode("ascii"),
+ "PATH_INFO": unquote(path.decode("ascii")),
"QUERY_STRING": query.decode("ascii"),
"SERVER_NAME": host.decode("ascii"),
"SERVER_PORT": str(port),
| {"golden_diff": "diff --git a/httpx/_transports/wsgi.py b/httpx/_transports/wsgi.py\n--- a/httpx/_transports/wsgi.py\n+++ b/httpx/_transports/wsgi.py\n@@ -1,6 +1,7 @@\n import io\n import itertools\n import typing\n+from urllib.parse import unquote\n \n import httpcore\n \n@@ -83,7 +84,7 @@\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n- \"PATH_INFO\": path.decode(\"ascii\"),\n+ \"PATH_INFO\": unquote(path.decode(\"ascii\")),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n", "issue": "Path encoding broken for non-ASCII in WSGI mode\n### Describe the bug\r\n\r\nWhen using httpx to call into a WSGI application (my preferred way to test Flask app nowadays), the web view receives urlencoded paths.\r\n\r\nI'm not sure where this really belongs to (is it a bug in httpx or Flask!?), but I've been working around it for months, so I guess I'll make myself unpopular and try it here first. If it turns out to be Flask's fault, I'l headl over and bother the Pallets folks.\r\n\r\n### To reproduce\r\n\r\n```python\r\nfrom flask import Flask, request\r\n\r\nimport httpx\r\n\r\n\r\napp = Flask(__name__)\r\n\r\[email protected]('/<path>')\r\ndef hello_world(path):\r\n return f\"path: { path }, query: { request.args['a'] }, url: { request.url }\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n with httpx.Client(app=app, base_url=\"http://testserver\") as client:\r\n resp = client.get(\"/\u00e4\", params={\"a\": \"\u00e4\"})\r\n\r\n print(\"httpx\", resp.text)\r\n\r\n with app.test_client() as client:\r\n resp = client.get(\"/\u00e4?a=%C3%A4\")\r\n\r\n print(\"flask\", resp.get_data().decode(\"utf-8\"))\r\n```\r\n\r\n### Expected behavior\r\n\r\n```\r\nhttpx path: \u00e4, query: \u00e4, url: http://testserver/\u00e4?a=\u00e4\r\nflask path: \u00e4, query: \u00e4, url: http://localhost/\u00e4?a=\u00e4\r\n```\r\n\r\n### Actual behavior\r\n\r\n```\r\nhttpx path: %C3%A4, query: \u00e4, url: http://testserver/%25C3%25A4?a=\u00e4\r\nflask path: \u00e4, query: \u00e4, url: http://localhost/\u00e4?a=\u00e4\r\n```\r\n\r\nNB\r\n\r\n- it seems to handle query parameters just fine.\r\n- `%25` is the ASCII code of the percent sign\r\n\r\n### Environment\r\n\r\n- OS: macOS\r\n- Python version: 3.8.6\r\n- HTTPX version: 0.16.1\r\n- Async environment: n/a\r\n- HTTP proxy: n/a\r\n- Custom certificates: no\r\n\n", "before_files": [{"content": "import io\nimport itertools\nimport typing\n\nimport httpcore\n\n\ndef _skip_leading_empty_chunks(body: typing.Iterable) -> typing.Iterable:\n body = iter(body)\n for chunk in body:\n if chunk:\n return itertools.chain([chunk], body)\n return []\n\n\nclass WSGITransport(httpcore.SyncHTTPTransport):\n \"\"\"\n A custom transport that handles sending requests directly to an WSGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.Client(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the WSGITransport class:\n\n ```\n transport = httpx.WSGITransport(\n app=app,\n script_name=\"/submount\",\n remote_addr=\"1.2.3.4\"\n )\n client = httpx.Client(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. 
Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `script_name` - The root path on which the ASGI application should be mounted.\n * `remote_addr` - A string indicating the client IP of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: typing.Callable,\n raise_app_exceptions: bool = True,\n script_name: str = \"\",\n remote_addr: str = \"127.0.0.1\",\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.script_name = script_name\n self.remote_addr = remote_addr\n\n def request(\n self,\n method: bytes,\n url: typing.Tuple[bytes, bytes, typing.Optional[int], bytes],\n headers: typing.List[typing.Tuple[bytes, bytes]] = None,\n stream: httpcore.SyncByteStream = None,\n ext: dict = None,\n ) -> typing.Tuple[\n int, typing.List[typing.Tuple[bytes, bytes]], httpcore.SyncByteStream, dict\n ]:\n headers = [] if headers is None else headers\n stream = httpcore.PlainByteStream(content=b\"\") if stream is None else stream\n\n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n environ = {\n \"wsgi.version\": (1, 0),\n \"wsgi.url_scheme\": scheme.decode(\"ascii\"),\n \"wsgi.input\": io.BytesIO(b\"\".join(stream)),\n \"wsgi.errors\": io.BytesIO(),\n \"wsgi.multithread\": True,\n \"wsgi.multiprocess\": False,\n \"wsgi.run_once\": False,\n \"REQUEST_METHOD\": method.decode(),\n \"SCRIPT_NAME\": self.script_name,\n \"PATH_INFO\": path.decode(\"ascii\"),\n \"QUERY_STRING\": query.decode(\"ascii\"),\n \"SERVER_NAME\": host.decode(\"ascii\"),\n \"SERVER_PORT\": str(port),\n \"REMOTE_ADDR\": self.remote_addr,\n }\n for header_key, header_value in headers:\n key = header_key.decode(\"ascii\").upper().replace(\"-\", \"_\")\n if key not in (\"CONTENT_TYPE\", \"CONTENT_LENGTH\"):\n key = \"HTTP_\" + key\n environ[key] = header_value.decode(\"ascii\")\n\n seen_status = None\n seen_response_headers = None\n seen_exc_info = None\n\n def start_response(\n status: str, response_headers: list, exc_info: typing.Any = None\n ) -> None:\n nonlocal seen_status, seen_response_headers, seen_exc_info\n seen_status = status\n seen_response_headers = response_headers\n seen_exc_info = exc_info\n\n result = self.app(environ, start_response)\n # This is needed because the status returned by start_response\n # shouldn't be used until the first non-empty chunk has been served.\n result = _skip_leading_empty_chunks(result)\n\n assert seen_status is not None\n assert seen_response_headers is not None\n if seen_exc_info and self.raise_app_exceptions:\n raise seen_exc_info[1]\n\n status_code = int(seen_status.split()[0])\n headers = [\n (key.encode(\"ascii\"), value.encode(\"ascii\"))\n for key, value in seen_response_headers\n ]\n stream = httpcore.IteratorByteStream(iterator=result)\n ext = {}\n\n return (status_code, headers, stream, ext)\n", "path": "httpx/_transports/wsgi.py"}]} | 2,296 | 165 |
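For the httpx record above: the `unquote` change works because a WSGI server is expected to pass `PATH_INFO` already percent-decoded, while `QUERY_STRING` stays encoded for the framework to parse; that is why the query parameter survived but the path did not. A minimal check of the decoding step the patch relies on, using only the standard library:

```python
from urllib.parse import unquote

# The raw request target httpx builds is percent-encoded; WSGI expects PATH_INFO
# to already be decoded, so the transport must unquote it before handing it over.
print(unquote("/%C3%A4"))  # -> '/ä'
```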
gh_patches_debug_14076 | rasdani/github-patches | git_diff | google__clusterfuzz-1726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
in local development, zip uploads are broken
```
| File "/build/clusterfuzz/src/appengine/handlers/jobs.py", line 132, in post
| blob_info = self.get_upload()
| File "/build/clusterfuzz/src/appengine/handlers/base_handler.py", line 317, in get_upload
| blob_info = storage.GcsBlobInfo.from_key(upload_key)
| File "/build/clusterfuzz/src/python/google_cloud_utils/storage.py", line 556, in from_key
| logs.log_error('Failed to get blob from key %s.' % key)
| LogError: Failed to get blob from key 0e3179ad-31b4-4ba5-a45c-86a610e065c7.
| Traceback (most recent call last):
| File "/build/clusterfuzz/src/python/google_cloud_utils/storage.py", line 554, in from_key
| return GcsBlobInfo(blobs_bucket(), key)
| File "/build/clusterfuzz/src/python/google_cloud_utils/storage.py", line 536, in __init__
| self.filename = gcs_object['metadata'].get(BLOB_FILENAME_METADATA_KEY)
| TypeError: 'NoneType' object is not subscriptable
```
</issue>
<code>
[start of src/local/butler/run_server.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """run_server.py run the Clusterfuzz server locally."""
15 from __future__ import print_function
16 from future import standard_library
17 standard_library.install_aliases()
18 import os
19 import shutil
20 import threading
21 import time
22 import urllib.request
23
24 from local.butler import appengine
25 from local.butler import common
26 from local.butler import constants
27 from python.config import local_config
28 from python.tests.test_libs import test_utils
29
30
31 def bootstrap_db():
32 """Bootstrap the DB."""
33
34 def bootstrap():
35 # Wait for the server to run.
36 time.sleep(10)
37 print('Bootstrapping datastore...')
38 common.execute(
39 ('python butler.py run setup '
40 '--non-dry-run --local --config-dir={config_dir}'
41 ).format(config_dir=constants.TEST_CONFIG_DIR),
42 exit_on_error=False)
43
44 thread = threading.Thread(target=bootstrap)
45 thread.start()
46
47
48 def create_local_bucket(local_gcs_buckets_path, name):
49 """Create a local bucket."""
50 blobs_bucket = os.path.join(local_gcs_buckets_path, name)
51 if not os.path.exists(blobs_bucket):
52 os.mkdir(blobs_bucket)
53
54
55 def bootstrap_gcs(storage_path):
56 """Bootstrap GCS."""
57 local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs')
58 if not os.path.exists(local_gcs_buckets_path):
59 os.mkdir(local_gcs_buckets_path)
60
61 config = local_config.ProjectConfig()
62 create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))
63 create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))
64 create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))
65 create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))
66 create_local_bucket(local_gcs_buckets_path, config.get('logs.fuzzer.bucket'))
67 create_local_bucket(local_gcs_buckets_path, config.get('env.CORPUS_BUCKET'))
68 create_local_bucket(local_gcs_buckets_path,
69 config.get('env.QUARANTINE_BUCKET'))
70 create_local_bucket(local_gcs_buckets_path,
71 config.get('env.SHARED_CORPUS_BUCKET'))
72 create_local_bucket(local_gcs_buckets_path,
73 config.get('env.FUZZ_LOGS_BUCKET'))
74 create_local_bucket(local_gcs_buckets_path,
75 config.get('env.MUTATOR_PLUGINS_BUCKET'))
76
77 # Symlink local GCS bucket path to appengine src dir to bypass sandboxing
78 # issues.
79 common.symlink(
80 src=local_gcs_buckets_path,
81 target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))
82
83
84 def start_cron_threads():
85 """Start threads to trigger essential cron jobs."""
86
87 request_timeout = 10 * 60 # 10 minutes.
88
89 def trigger(interval_seconds, target):
90 """Trigger a cron job."""
91 while True:
92 time.sleep(interval_seconds)
93
94 try:
95 url = 'http://{host}/{target}'.format(
96 host=constants.CRON_SERVICE_HOST, target=target)
97 request = urllib.request.Request(url)
98 request.add_header('X-Appengine-Cron', 'true')
99 response = urllib.request.urlopen(request, timeout=request_timeout)
100 response.read(60) # wait for request to finish.
101 except Exception:
102 continue
103
104 crons = (
105 (90, 'cleanup'),
106 (60, 'triage'),
107 (6 * 3600, 'schedule-progression-tasks'),
108 (12 * 3600, 'schedule-corpus-pruning'),
109 )
110
111 for interval, cron in crons:
112 thread = threading.Thread(target=trigger, args=(interval, cron))
113 thread.daemon = True
114 thread.start()
115
116
117 def execute(args):
118 """Run the server."""
119 os.environ['LOCAL_DEVELOPMENT'] = 'True'
120 common.kill_leftover_emulators()
121
122 if not args.skip_install_deps:
123 common.install_dependencies()
124
125 # Do this everytime as a past deployment might have changed these.
126 appengine.symlink_dirs()
127
128 # Deploy all yaml files from test project for basic appengine deployment and
129 # local testing to work. This needs to be called on every iteration as a past
130 # deployment might have overwritten or deleted these config files.
131 yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod3')
132 appengine.copy_yamls_and_preprocess(yaml_paths)
133
134 # Build templates.
135 appengine.build_templates()
136
137 # Clean storage directory if needed.
138 if args.bootstrap or args.clean:
139 if os.path.exists(args.storage_path):
140 print('Clearing local datastore by removing %s.' % args.storage_path)
141 shutil.rmtree(args.storage_path)
142 if not os.path.exists(args.storage_path):
143 os.makedirs(args.storage_path)
144
145 # Set up local GCS buckets and symlinks.
146 bootstrap_gcs(args.storage_path)
147
148 # Start pubsub emulator.
149 pubsub_emulator = test_utils.start_cloud_emulator(
150 'pubsub',
151 args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST],
152 data_dir=args.storage_path)
153 test_utils.setup_pubsub(constants.TEST_APP_ID)
154
155 # Start Datastore emulator
156 datastore_emulator = test_utils.start_cloud_emulator(
157 'datastore',
158 args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST],
159 data_dir=args.storage_path,
160 store_on_disk=True)
161
162 # Start our custom GCS emulator.
163 local_gcs = common.execute_async(
164 'go run emulators/gcs.go -storage-path=' + args.storage_path, cwd='local')
165
166 if args.bootstrap:
167 bootstrap_db()
168
169 start_cron_threads()
170
171 os.environ['APPLICATION_ID'] = constants.TEST_APP_ID
172 os.environ['LOCAL_DEVELOPMENT'] = 'True'
173 os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs'
174 os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST
175 os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST
176 os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST
177 os.environ['GAE_ENV'] = 'dev'
178 try:
179 cron_server = common.execute_async(
180 'gunicorn -b :{port} main:app'.format(port=constants.CRON_SERVICE_PORT),
181 cwd=os.path.join('src', 'appengine'))
182
183 common.execute(
184 'gunicorn -b :{port} main:app'.format(
185 port=constants.DEV_APPSERVER_PORT),
186 cwd=os.path.join('src', 'appengine'))
187 except KeyboardInterrupt:
188 print('Server has been stopped. Exit.')
189 cron_server.terminate()
190 datastore_emulator.cleanup()
191 pubsub_emulator.cleanup()
192 local_gcs.terminate()
193
[end of src/local/butler/run_server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/local/butler/run_server.py b/src/local/butler/run_server.py
--- a/src/local/butler/run_server.py
+++ b/src/local/butler/run_server.py
@@ -59,7 +59,12 @@
os.mkdir(local_gcs_buckets_path)
config = local_config.ProjectConfig()
- create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))
+ test_blobs_bucket = os.environ.get('TEST_BLOBS_BUCKET')
+ if test_blobs_bucket:
+ create_local_bucket(local_gcs_buckets_path, test_blobs_bucket)
+ else:
+ create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))
+
create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))
create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))
create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))
| {"golden_diff": "diff --git a/src/local/butler/run_server.py b/src/local/butler/run_server.py\n--- a/src/local/butler/run_server.py\n+++ b/src/local/butler/run_server.py\n@@ -59,7 +59,12 @@\n os.mkdir(local_gcs_buckets_path)\n \n config = local_config.ProjectConfig()\n- create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n+ test_blobs_bucket = os.environ.get('TEST_BLOBS_BUCKET')\n+ if test_blobs_bucket:\n+ create_local_bucket(local_gcs_buckets_path, test_blobs_bucket)\n+ else:\n+ create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n+\n create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))\n", "issue": "in local development, zip uploads are broken\n```\r\n| File \"/build/clusterfuzz/src/appengine/handlers/jobs.py\", line 132, in post\r\n| blob_info = self.get_upload()\r\n| File \"/build/clusterfuzz/src/appengine/handlers/base_handler.py\", line 317, in get_upload\r\n| blob_info = storage.GcsBlobInfo.from_key(upload_key)\r\n| File \"/build/clusterfuzz/src/python/google_cloud_utils/storage.py\", line 556, in from_key\r\n| logs.log_error('Failed to get blob from key %s.' % key)\r\n| LogError: Failed to get blob from key 0e3179ad-31b4-4ba5-a45c-86a610e065c7.\r\n| Traceback (most recent call last):\r\n| File \"/build/clusterfuzz/src/python/google_cloud_utils/storage.py\", line 554, in from_key\r\n| return GcsBlobInfo(blobs_bucket(), key)\r\n| File \"/build/clusterfuzz/src/python/google_cloud_utils/storage.py\", line 536, in __init__\r\n| self.filename = gcs_object['metadata'].get(BLOB_FILENAME_METADATA_KEY)\r\n| TypeError: 'NoneType' object is not subscriptable\r\n```\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"run_server.py run the Clusterfuzz server locally.\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nimport os\nimport shutil\nimport threading\nimport time\nimport urllib.request\n\nfrom local.butler import appengine\nfrom local.butler import common\nfrom local.butler import constants\nfrom python.config import local_config\nfrom python.tests.test_libs import test_utils\n\n\ndef bootstrap_db():\n \"\"\"Bootstrap the DB.\"\"\"\n\n def bootstrap():\n # Wait for the server to run.\n time.sleep(10)\n print('Bootstrapping datastore...')\n common.execute(\n ('python butler.py run setup '\n '--non-dry-run --local --config-dir={config_dir}'\n ).format(config_dir=constants.TEST_CONFIG_DIR),\n exit_on_error=False)\n\n thread = threading.Thread(target=bootstrap)\n thread.start()\n\n\ndef create_local_bucket(local_gcs_buckets_path, name):\n \"\"\"Create a local bucket.\"\"\"\n blobs_bucket = os.path.join(local_gcs_buckets_path, name)\n if not os.path.exists(blobs_bucket):\n os.mkdir(blobs_bucket)\n\n\ndef bootstrap_gcs(storage_path):\n 
\"\"\"Bootstrap GCS.\"\"\"\n local_gcs_buckets_path = os.path.join(storage_path, 'local_gcs')\n if not os.path.exists(local_gcs_buckets_path):\n os.mkdir(local_gcs_buckets_path)\n\n config = local_config.ProjectConfig()\n create_local_bucket(local_gcs_buckets_path, config.get('blobs.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('deployment.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('bigquery.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('backup.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('logs.fuzzer.bucket'))\n create_local_bucket(local_gcs_buckets_path, config.get('env.CORPUS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.QUARANTINE_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.SHARED_CORPUS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.FUZZ_LOGS_BUCKET'))\n create_local_bucket(local_gcs_buckets_path,\n config.get('env.MUTATOR_PLUGINS_BUCKET'))\n\n # Symlink local GCS bucket path to appengine src dir to bypass sandboxing\n # issues.\n common.symlink(\n src=local_gcs_buckets_path,\n target=os.path.join(appengine.SRC_DIR_PY, 'local_gcs'))\n\n\ndef start_cron_threads():\n \"\"\"Start threads to trigger essential cron jobs.\"\"\"\n\n request_timeout = 10 * 60 # 10 minutes.\n\n def trigger(interval_seconds, target):\n \"\"\"Trigger a cron job.\"\"\"\n while True:\n time.sleep(interval_seconds)\n\n try:\n url = 'http://{host}/{target}'.format(\n host=constants.CRON_SERVICE_HOST, target=target)\n request = urllib.request.Request(url)\n request.add_header('X-Appengine-Cron', 'true')\n response = urllib.request.urlopen(request, timeout=request_timeout)\n response.read(60) # wait for request to finish.\n except Exception:\n continue\n\n crons = (\n (90, 'cleanup'),\n (60, 'triage'),\n (6 * 3600, 'schedule-progression-tasks'),\n (12 * 3600, 'schedule-corpus-pruning'),\n )\n\n for interval, cron in crons:\n thread = threading.Thread(target=trigger, args=(interval, cron))\n thread.daemon = True\n thread.start()\n\n\ndef execute(args):\n \"\"\"Run the server.\"\"\"\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n common.kill_leftover_emulators()\n\n if not args.skip_install_deps:\n common.install_dependencies()\n\n # Do this everytime as a past deployment might have changed these.\n appengine.symlink_dirs()\n\n # Deploy all yaml files from test project for basic appengine deployment and\n # local testing to work. This needs to be called on every iteration as a past\n # deployment might have overwritten or deleted these config files.\n yaml_paths = local_config.GAEConfig().get_absolute_path('deployment.prod3')\n appengine.copy_yamls_and_preprocess(yaml_paths)\n\n # Build templates.\n appengine.build_templates()\n\n # Clean storage directory if needed.\n if args.bootstrap or args.clean:\n if os.path.exists(args.storage_path):\n print('Clearing local datastore by removing %s.' 
% args.storage_path)\n shutil.rmtree(args.storage_path)\n if not os.path.exists(args.storage_path):\n os.makedirs(args.storage_path)\n\n # Set up local GCS buckets and symlinks.\n bootstrap_gcs(args.storage_path)\n\n # Start pubsub emulator.\n pubsub_emulator = test_utils.start_cloud_emulator(\n 'pubsub',\n args=['--host-port=' + constants.PUBSUB_EMULATOR_HOST],\n data_dir=args.storage_path)\n test_utils.setup_pubsub(constants.TEST_APP_ID)\n\n # Start Datastore emulator\n datastore_emulator = test_utils.start_cloud_emulator(\n 'datastore',\n args=['--host-port=' + constants.DATASTORE_EMULATOR_HOST],\n data_dir=args.storage_path,\n store_on_disk=True)\n\n # Start our custom GCS emulator.\n local_gcs = common.execute_async(\n 'go run emulators/gcs.go -storage-path=' + args.storage_path, cwd='local')\n\n if args.bootstrap:\n bootstrap_db()\n\n start_cron_threads()\n\n os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = 'local_gcs'\n os.environ['LOCAL_GCS_SERVER_HOST'] = constants.LOCAL_GCS_SERVER_HOST\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n os.environ['GAE_ENV'] = 'dev'\n try:\n cron_server = common.execute_async(\n 'gunicorn -b :{port} main:app'.format(port=constants.CRON_SERVICE_PORT),\n cwd=os.path.join('src', 'appengine'))\n\n common.execute(\n 'gunicorn -b :{port} main:app'.format(\n port=constants.DEV_APPSERVER_PORT),\n cwd=os.path.join('src', 'appengine'))\n except KeyboardInterrupt:\n print('Server has been stopped. Exit.')\n cron_server.terminate()\n datastore_emulator.cleanup()\n pubsub_emulator.cleanup()\n local_gcs.terminate()\n", "path": "src/local/butler/run_server.py"}]} | 2,889 | 206 |
gh_patches_debug_27208 | rasdani/github-patches | git_diff | scrapy__scrapy-4721 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Smarter generator check for combined yield/return statements: ignore nested functions
## Summary
Currently, if a spider method is a generator that yields results and contains a nested function, then the following warning is issued:
```
[py.warnings] WARNING: /Library/Python/3.7/site-packages/scrapy/core/scraper.py:148: UserWarning: The "MySpider.parse" method is a generator and includes a "return" statement with a value different than None. This could lead to unexpected behaviour. Please see https://docs.python.org/3/reference/simple_stmts.html#the-return-statement for details about the semantics of the "return" statement within generators
warn_on_generator_with_return_value(spider, callback)
```
An example of a simple spider that results in the warning:
```
import scrapy
class MySpider(scrapy.Spider):
name = "MySpider"
start_urls = ["https://scrapy.org"]
def parse(self, response):
def is_external(url):
href = url.css('::attr(href)').get()
return href.startswith('http') and 'scrapy.org' not in href
links = [link for link in response.css('a') if is_external(link)]
for link in links:
yield {'link': link.css('::attr(href)').get(), 'text': link.css('::text').get()}
```
I know it's a bit of an artificial example, as the nested function can be moved, but there is nothing wrong with nested functions conceptually.
## Motivation
I have a midsize spider function that includes some nested helper functions that I'd like to keep close to where they are called.
## Describe alternatives you've considered
Moving the nested function out of the generator is an easy fix, but it constrains the expressivity of the code.
## Additional context
Related function: is_generator_with_return_value
</issue>
<code>
[start of scrapy/utils/misc.py]
1 """Helper functions which don't fit anywhere else"""
2 import ast
3 import inspect
4 import os
5 import re
6 import hashlib
7 import warnings
8 from contextlib import contextmanager
9 from importlib import import_module
10 from pkgutil import iter_modules
11 from textwrap import dedent
12
13 from w3lib.html import replace_entities
14
15 from scrapy.utils.datatypes import LocalWeakReferencedCache
16 from scrapy.utils.python import flatten, to_unicode
17 from scrapy.item import _BaseItem
18 from scrapy.utils.deprecate import ScrapyDeprecationWarning
19
20
21 _ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes
22
23
24 def arg_to_iter(arg):
25 """Convert an argument to an iterable. The argument can be a None, single
26 value, or an iterable.
27
28 Exception: if arg is a dict, [arg] will be returned
29 """
30 if arg is None:
31 return []
32 elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):
33 return arg
34 else:
35 return [arg]
36
37
38 def load_object(path):
39 """Load an object given its absolute object path, and return it.
40
41 object can be the import path of a class, function, variable or an
42 instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'
43 """
44
45 try:
46 dot = path.rindex('.')
47 except ValueError:
48 raise ValueError("Error loading object '%s': not a full path" % path)
49
50 module, name = path[:dot], path[dot + 1:]
51 mod = import_module(module)
52
53 try:
54 obj = getattr(mod, name)
55 except AttributeError:
56 raise NameError("Module '%s' doesn't define any object named '%s'" % (module, name))
57
58 return obj
59
60
61 def walk_modules(path):
62 """Loads a module and all its submodules from the given module path and
63 returns them. If *any* module throws an exception while importing, that
64 exception is thrown back.
65
66 For example: walk_modules('scrapy.utils')
67 """
68
69 mods = []
70 mod = import_module(path)
71 mods.append(mod)
72 if hasattr(mod, '__path__'):
73 for _, subpath, ispkg in iter_modules(mod.__path__):
74 fullpath = path + '.' + subpath
75 if ispkg:
76 mods += walk_modules(fullpath)
77 else:
78 submod = import_module(fullpath)
79 mods.append(submod)
80 return mods
81
82
83 def extract_regex(regex, text, encoding='utf-8'):
84 """Extract a list of unicode strings from the given text/encoding using the following policies:
85
86 * if the regex contains a named group called "extract" that will be returned
87 * if the regex contains multiple numbered groups, all those will be returned (flattened)
88 * if the regex doesn't contain any group the entire regex matching is returned
89 """
90 warnings.warn(
91 "scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.",
92 ScrapyDeprecationWarning,
93 stacklevel=2
94 )
95
96 if isinstance(regex, str):
97 regex = re.compile(regex, re.UNICODE)
98
99 try:
100 strings = [regex.search(text).group('extract')] # named group
101 except Exception:
102 strings = regex.findall(text) # full regex or numbered groups
103 strings = flatten(strings)
104
105 if isinstance(text, str):
106 return [replace_entities(s, keep=['lt', 'amp']) for s in strings]
107 else:
108 return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])
109 for s in strings]
110
111
112 def md5sum(file):
113 """Calculate the md5 checksum of a file-like object without reading its
114 whole content in memory.
115
116 >>> from io import BytesIO
117 >>> md5sum(BytesIO(b'file content to hash'))
118 '784406af91dd5a54fbb9c84c2236595a'
119 """
120 m = hashlib.md5()
121 while True:
122 d = file.read(8096)
123 if not d:
124 break
125 m.update(d)
126 return m.hexdigest()
127
128
129 def rel_has_nofollow(rel):
130 """Return True if link rel attribute has nofollow type"""
131 return rel is not None and 'nofollow' in rel.split()
132
133
134 def create_instance(objcls, settings, crawler, *args, **kwargs):
135 """Construct a class instance using its ``from_crawler`` or
136 ``from_settings`` constructors, if available.
137
138 At least one of ``settings`` and ``crawler`` needs to be different from
139 ``None``. If ``settings `` is ``None``, ``crawler.settings`` will be used.
140 If ``crawler`` is ``None``, only the ``from_settings`` constructor will be
141 tried.
142
143 ``*args`` and ``**kwargs`` are forwarded to the constructors.
144
145 Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.
146
147 .. versionchanged:: 2.2
148 Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an
149 extension has not been implemented correctly).
150 """
151 if settings is None:
152 if crawler is None:
153 raise ValueError("Specify at least one of settings and crawler.")
154 settings = crawler.settings
155 if crawler and hasattr(objcls, 'from_crawler'):
156 instance = objcls.from_crawler(crawler, *args, **kwargs)
157 method_name = 'from_crawler'
158 elif hasattr(objcls, 'from_settings'):
159 instance = objcls.from_settings(settings, *args, **kwargs)
160 method_name = 'from_settings'
161 else:
162 instance = objcls(*args, **kwargs)
163 method_name = '__new__'
164 if instance is None:
165 raise TypeError("%s.%s returned None" % (objcls.__qualname__, method_name))
166 return instance
167
168
169 @contextmanager
170 def set_environ(**kwargs):
171 """Temporarily set environment variables inside the context manager and
172 fully restore previous environment afterwards
173 """
174
175 original_env = {k: os.environ.get(k) for k in kwargs}
176 os.environ.update(kwargs)
177 try:
178 yield
179 finally:
180 for k, v in original_env.items():
181 if v is None:
182 del os.environ[k]
183 else:
184 os.environ[k] = v
185
186
187 _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
188
189
190 def is_generator_with_return_value(callable):
191 """
192 Returns True if a callable is a generator function which includes a
193 'return' statement with a value different than None, False otherwise
194 """
195 if callable in _generator_callbacks_cache:
196 return _generator_callbacks_cache[callable]
197
198 def returns_none(return_node):
199 value = return_node.value
200 return value is None or isinstance(value, ast.NameConstant) and value.value is None
201
202 if inspect.isgeneratorfunction(callable):
203 tree = ast.parse(dedent(inspect.getsource(callable)))
204 for node in ast.walk(tree):
205 if isinstance(node, ast.Return) and not returns_none(node):
206 _generator_callbacks_cache[callable] = True
207 return _generator_callbacks_cache[callable]
208
209 _generator_callbacks_cache[callable] = False
210 return _generator_callbacks_cache[callable]
211
212
213 def warn_on_generator_with_return_value(spider, callable):
214 """
215 Logs a warning if a callable is a generator function and includes
216 a 'return' statement with a value different than None
217 """
218 if is_generator_with_return_value(callable):
219 warnings.warn(
220 'The "{}.{}" method is a generator and includes a "return" statement with a '
221 'value different than None. This could lead to unexpected behaviour. Please see '
222 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '
223 'for details about the semantics of the "return" statement within generators'
224 .format(spider.__class__.__name__, callable.__name__), stacklevel=2,
225 )
226
[end of scrapy/utils/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py
--- a/scrapy/utils/misc.py
+++ b/scrapy/utils/misc.py
@@ -5,6 +5,7 @@
import re
import hashlib
import warnings
+from collections import deque
from contextlib import contextmanager
from importlib import import_module
from pkgutil import iter_modules
@@ -184,6 +185,22 @@
os.environ[k] = v
+def walk_callable(node):
+ """Similar to ``ast.walk``, but walks only function body and skips nested
+ functions defined within the node.
+ """
+ todo = deque([node])
+ walked_func_def = False
+ while todo:
+ node = todo.popleft()
+ if isinstance(node, ast.FunctionDef):
+ if walked_func_def:
+ continue
+ walked_func_def = True
+ todo.extend(ast.iter_child_nodes(node))
+ yield node
+
+
_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)
@@ -201,7 +218,7 @@
if inspect.isgeneratorfunction(callable):
tree = ast.parse(dedent(inspect.getsource(callable)))
- for node in ast.walk(tree):
+ for node in walk_callable(tree):
if isinstance(node, ast.Return) and not returns_none(node):
_generator_callbacks_cache[callable] = True
return _generator_callbacks_cache[callable]
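With this patch applied, `walk_callable` descends only into the first function definition it encounters, so a `return` inside a nested helper is never visited. A small sketch of the difference on a spider body similar to the one in the issue (the source string here is only illustrative):

```python
import ast
from textwrap import dedent

from scrapy.utils.misc import walk_callable  # added to scrapy.utils.misc by the patch above

src = dedent("""\
    def parse(self, response):
        def is_external(link):
            return 'scrapy.org' not in link
        for link in response.css('a'):
            yield link
    """)
tree = ast.parse(src)

# ast.walk() descends into the nested helper and sees its `return`,
# which is what produced the false-positive warning.
print(any(isinstance(n, ast.Return) for n in ast.walk(tree)))       # True

# walk_callable() skips nested FunctionDef nodes, so the check now passes.
print(any(isinstance(n, ast.Return) for n in walk_callable(tree)))  # False
```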
| {"golden_diff": "diff --git a/scrapy/utils/misc.py b/scrapy/utils/misc.py\n--- a/scrapy/utils/misc.py\n+++ b/scrapy/utils/misc.py\n@@ -5,6 +5,7 @@\n import re\n import hashlib\n import warnings\n+from collections import deque\n from contextlib import contextmanager\n from importlib import import_module\n from pkgutil import iter_modules\n@@ -184,6 +185,22 @@\n os.environ[k] = v\n \n \n+def walk_callable(node):\n+ \"\"\"Similar to ``ast.walk``, but walks only function body and skips nested\n+ functions defined within the node.\n+ \"\"\"\n+ todo = deque([node])\n+ walked_func_def = False\n+ while todo:\n+ node = todo.popleft()\n+ if isinstance(node, ast.FunctionDef):\n+ if walked_func_def:\n+ continue\n+ walked_func_def = True\n+ todo.extend(ast.iter_child_nodes(node))\n+ yield node\n+\n+\n _generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n \n \n@@ -201,7 +218,7 @@\n \n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n- for node in ast.walk(tree):\n+ for node in walk_callable(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n", "issue": "Smarter generator check for combined yield/return statements: ignore nested functions\n## Summary\r\nCurrently, if spider methods are generators that yield results and contain nested function, then the following warning is issued:\r\n\r\n```\r\n[py.warnings] WARNING: /Library/Python/3.7/site-packages/scrapy/core/scraper.py:148: UserWarning: The \"MySpider.parse\" method is a generator and includes a \"return\" statement with a value different than None. This could lead to unexpected behaviour. Please see https://docs.python.org/3/reference/simple_stmts.html#the-return-statement for details about the semantics of the \"return\" statement within generators\r\n warn_on_generator_with_return_value(spider, callback)\r\n```\r\n\r\nThe example of a simple spider that results in the warning:\r\n```\r\nimport scrapy\r\n\r\nclass MySpider(scrapy.Spider):\r\n name = \"MySpider\"\r\n start_urls = [\"https://scrapy.org\"]\r\n \r\n def parse(self, response):\r\n \r\n def is_external(url):\r\n href = url.css('::attr(href)').get()\r\n return href.startswith('http') and 'scrapy.org' not in href\r\n \r\n links = [link for link in response.css('a') if is_external(link)]\r\n for link in links:\r\n yield {'link': link.css('::attr(href)').get(), 'text': link.css('::text').get()}\r\n```\r\n\r\nI know it's a bit artificial example as the nested function can be moved, but there is nothing wrong with nested function conceptually.\r\n\r\n## Motivation\r\n\r\nI have a midsize spider function that includes some nested helper functions that I'd like to keep close to where they are called.\r\n\r\n## Describe alternatives you've considered\r\n\r\nMoving nested function out of the generator is an easy fix, but it constrains expressivity of the code.\r\n\r\n## Additional context\r\n\r\nRelated function: is_generator_with_return_value\r\n\n", "before_files": [{"content": "\"\"\"Helper functions which don't fit anywhere else\"\"\"\nimport ast\nimport inspect\nimport os\nimport re\nimport hashlib\nimport warnings\nfrom contextlib import contextmanager\nfrom importlib import import_module\nfrom pkgutil import iter_modules\nfrom textwrap import dedent\n\nfrom w3lib.html import replace_entities\n\nfrom scrapy.utils.datatypes import LocalWeakReferencedCache\nfrom scrapy.utils.python import flatten, to_unicode\nfrom 
scrapy.item import _BaseItem\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\n\n\n_ITERABLE_SINGLE_VALUES = dict, _BaseItem, str, bytes\n\n\ndef arg_to_iter(arg):\n \"\"\"Convert an argument to an iterable. The argument can be a None, single\n value, or an iterable.\n\n Exception: if arg is a dict, [arg] will be returned\n \"\"\"\n if arg is None:\n return []\n elif not isinstance(arg, _ITERABLE_SINGLE_VALUES) and hasattr(arg, '__iter__'):\n return arg\n else:\n return [arg]\n\n\ndef load_object(path):\n \"\"\"Load an object given its absolute object path, and return it.\n\n object can be the import path of a class, function, variable or an\n instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'\n \"\"\"\n\n try:\n dot = path.rindex('.')\n except ValueError:\n raise ValueError(\"Error loading object '%s': not a full path\" % path)\n\n module, name = path[:dot], path[dot + 1:]\n mod = import_module(module)\n\n try:\n obj = getattr(mod, name)\n except AttributeError:\n raise NameError(\"Module '%s' doesn't define any object named '%s'\" % (module, name))\n\n return obj\n\n\ndef walk_modules(path):\n \"\"\"Loads a module and all its submodules from the given module path and\n returns them. If *any* module throws an exception while importing, that\n exception is thrown back.\n\n For example: walk_modules('scrapy.utils')\n \"\"\"\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' + subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods\n\n\ndef extract_regex(regex, text, encoding='utf-8'):\n \"\"\"Extract a list of unicode strings from the given text/encoding using the following policies:\n\n * if the regex contains a named group called \"extract\" that will be returned\n * if the regex contains multiple numbered groups, all those will be returned (flattened)\n * if the regex doesn't contain any group the entire regex matching is returned\n \"\"\"\n warnings.warn(\n \"scrapy.utils.misc.extract_regex has moved to parsel.utils.extract_regex.\",\n ScrapyDeprecationWarning,\n stacklevel=2\n )\n\n if isinstance(regex, str):\n regex = re.compile(regex, re.UNICODE)\n\n try:\n strings = [regex.search(text).group('extract')] # named group\n except Exception:\n strings = regex.findall(text) # full regex or numbered groups\n strings = flatten(strings)\n\n if isinstance(text, str):\n return [replace_entities(s, keep=['lt', 'amp']) for s in strings]\n else:\n return [replace_entities(to_unicode(s, encoding), keep=['lt', 'amp'])\n for s in strings]\n\n\ndef md5sum(file):\n \"\"\"Calculate the md5 checksum of a file-like object without reading its\n whole content in memory.\n\n >>> from io import BytesIO\n >>> md5sum(BytesIO(b'file content to hash'))\n '784406af91dd5a54fbb9c84c2236595a'\n \"\"\"\n m = hashlib.md5()\n while True:\n d = file.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef rel_has_nofollow(rel):\n \"\"\"Return True if link rel attribute has nofollow type\"\"\"\n return rel is not None and 'nofollow' in rel.split()\n\n\ndef create_instance(objcls, settings, crawler, *args, **kwargs):\n \"\"\"Construct a class instance using its ``from_crawler`` or\n ``from_settings`` constructors, if available.\n\n At least one of ``settings`` and ``crawler`` needs to be different from\n ``None``. 
If ``settings `` is ``None``, ``crawler.settings`` will be used.\n If ``crawler`` is ``None``, only the ``from_settings`` constructor will be\n tried.\n\n ``*args`` and ``**kwargs`` are forwarded to the constructors.\n\n Raises ``ValueError`` if both ``settings`` and ``crawler`` are ``None``.\n\n .. versionchanged:: 2.2\n Raises ``TypeError`` if the resulting instance is ``None`` (e.g. if an\n extension has not been implemented correctly).\n \"\"\"\n if settings is None:\n if crawler is None:\n raise ValueError(\"Specify at least one of settings and crawler.\")\n settings = crawler.settings\n if crawler and hasattr(objcls, 'from_crawler'):\n instance = objcls.from_crawler(crawler, *args, **kwargs)\n method_name = 'from_crawler'\n elif hasattr(objcls, 'from_settings'):\n instance = objcls.from_settings(settings, *args, **kwargs)\n method_name = 'from_settings'\n else:\n instance = objcls(*args, **kwargs)\n method_name = '__new__'\n if instance is None:\n raise TypeError(\"%s.%s returned None\" % (objcls.__qualname__, method_name))\n return instance\n\n\n@contextmanager\ndef set_environ(**kwargs):\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n\n original_env = {k: os.environ.get(k) for k in kwargs}\n os.environ.update(kwargs)\n try:\n yield\n finally:\n for k, v in original_env.items():\n if v is None:\n del os.environ[k]\n else:\n os.environ[k] = v\n\n\n_generator_callbacks_cache = LocalWeakReferencedCache(limit=128)\n\n\ndef is_generator_with_return_value(callable):\n \"\"\"\n Returns True if a callable is a generator function which includes a\n 'return' statement with a value different than None, False otherwise\n \"\"\"\n if callable in _generator_callbacks_cache:\n return _generator_callbacks_cache[callable]\n\n def returns_none(return_node):\n value = return_node.value\n return value is None or isinstance(value, ast.NameConstant) and value.value is None\n\n if inspect.isgeneratorfunction(callable):\n tree = ast.parse(dedent(inspect.getsource(callable)))\n for node in ast.walk(tree):\n if isinstance(node, ast.Return) and not returns_none(node):\n _generator_callbacks_cache[callable] = True\n return _generator_callbacks_cache[callable]\n\n _generator_callbacks_cache[callable] = False\n return _generator_callbacks_cache[callable]\n\n\ndef warn_on_generator_with_return_value(spider, callable):\n \"\"\"\n Logs a warning if a callable is a generator function and includes\n a 'return' statement with a value different than None\n \"\"\"\n if is_generator_with_return_value(callable):\n warnings.warn(\n 'The \"{}.{}\" method is a generator and includes a \"return\" statement with a '\n 'value different than None. This could lead to unexpected behaviour. Please see '\n 'https://docs.python.org/3/reference/simple_stmts.html#the-return-statement '\n 'for details about the semantics of the \"return\" statement within generators'\n .format(spider.__class__.__name__, callable.__name__), stacklevel=2,\n )\n", "path": "scrapy/utils/misc.py"}]} | 3,235 | 320 |
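As an aside for readers skimming this row: the patch above avoids the false positive by walking only the outermost function's AST and skipping any nested `def`s. A condensed, self-contained sketch of that idea (the caching, async handling and `NameConstant` check of the real code are omitted):

```python
import ast
import inspect
from collections import deque
from textwrap import dedent


def walk_callable(node):
    # Breadth-first walk that descends into the first (outermost) FunctionDef
    # only; return statements inside nested helper functions are ignored.
    todo = deque([node])
    walked_func_def = False
    while todo:
        node = todo.popleft()
        if isinstance(node, ast.FunctionDef):
            if walked_func_def:
                continue
            walked_func_def = True
        todo.extend(ast.iter_child_nodes(node))
        yield node


def is_generator_with_return_value(callable_):
    # Simplified: no caching, plain `is not None` check on the return value.
    if not inspect.isgeneratorfunction(callable_):
        return False
    tree = ast.parse(dedent(inspect.getsource(callable_)))
    return any(
        isinstance(node, ast.Return) and node.value is not None
        for node in walk_callable(tree)
    )
```

With this walk, a `parse` callback that merely contains a nested helper with a `return` no longer triggers the warning.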
gh_patches_debug_29181 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
project_list in container add project form contains all projects
@CarolingerSeilchenspringer at the moment all projects (not only the ones belonging to the organisation) can be added to a container. Should that stay that way?
</issue>
<code>
[start of meinberlin/apps/projectcontainers/forms.py]
1 from django.db.models import Q
2 from django.utils.translation import ugettext_lazy as _
3
4 from adhocracy4.dashboard.forms import ProjectCreateForm
5 from adhocracy4.dashboard.forms import ProjectDashboardForm
6 from meinberlin.apps.contrib.widgets import Select2MultipleWidget
7
8 from . import models
9
10 LABELS = {
11 'name': _('Title of your container'),
12 'description': _('Short description of your container'),
13 'tile_image': _('Logo'),
14 }
15
16 HELP_TEXTS = {
17 'name': _('This title will appear on the '
18 'teaser card and on top of the container '
19 'detail page. It should be max. 120 characters long'),
20 'description': _('This short description will appear on '
21 'the header of the container and in the teaser. '
22 'It should briefly state the goal of the '
23 'projects in max. 250 chars.'),
24 'tile_image': _(
25 'The image will be shown in the container tile.'
26 ),
27 }
28
29
30 class ContainerCreateForm(ProjectCreateForm):
31
32 class Meta:
33 model = models.ProjectContainer
34 fields = ['name', 'description',
35 'tile_image', 'tile_image_copyright']
36 labels = LABELS
37 help_texts = HELP_TEXTS
38
39
40 class ContainerBasicForm(ProjectDashboardForm):
41
42 class Meta:
43 model = models.ProjectContainer
44 fields = ['name', 'description', 'tile_image',
45 'tile_image_copyright', 'is_archived']
46 required_for_project_publish = ['name', 'description']
47 labels = LABELS
48 help_texts = HELP_TEXTS
49
50
51 class ContainerInformationForm(ProjectDashboardForm):
52
53 class Meta:
54 model = models.ProjectContainer
55 fields = ['information']
56 required_for_project_publish = ['information']
57 labels = {
58 'information': _('Description of your container'),
59 }
60
61
62 class ContainerProjectsForm(ProjectDashboardForm):
63
64 def __init__(self, *args, **kwargs):
65 super().__init__(*args, **kwargs)
66
67 # We tried to find a good balance between a short list and
68 # all necessary projects. The details may change over time.
69 # Projects that are already selected should remain in the queryset.
70 self.fields['projects'].queryset = self.fields['projects'].queryset \
71 .filter(projectcontainer=None)\
72 .filter(Q(containers=self.instance) |
73 (Q(containers=None) &
74 Q(is_archived=False) &
75 Q(is_public=True)))\
76 .order_by('name')
77
78 class Meta:
79 model = models.ProjectContainer
80 fields = ['projects']
81 required_for_project_publish = ['projects']
82 widgets = {
83 'projects': Select2MultipleWidget,
84 }
85
[end of meinberlin/apps/projectcontainers/forms.py]
[start of meinberlin/apps/projectcontainers/views.py]
1 from django.utils.translation import ugettext_lazy as _
2 from django.views import generic
3
4 from adhocracy4.dashboard import mixins as dashboard_mixins
5 from adhocracy4.dashboard.blueprints import ProjectBlueprint
6 from adhocracy4.dashboard.components.forms.views import \
7 ProjectComponentFormView
8 from adhocracy4.dashboard.views import ProjectCreateView
9
10 from . import forms
11 from . import models
12
13
14 class ContainerCreateView(ProjectCreateView):
15 model = models.ProjectContainer
16 slug_url_kwarg = 'project_slug'
17 form_class = forms.ContainerCreateForm
18 template_name = 'meinberlin_projectcontainers/container_create_form.html'
19 success_message = _('Container successfully created.')
20
21 blueprint = ProjectBlueprint(
22 title=_('Container'),
23 description=_(
24 'A container contains multiple projects.'
25 ),
26 content=[],
27 image='',
28 settings_model=None,
29 )
30
31
32 class ContainerBasicFormView(ProjectComponentFormView):
33 model = models.ProjectContainer
34
35 @property
36 def project(self):
37 project = super().project
38 return project.projectcontainer
39
40 def get_object(self, queryset=None):
41 return self.project
42
43
44 class ContainerProjectsView(ProjectComponentFormView):
45 model = models.ProjectContainer
46
47 @property
48 def project(self):
49 project = super().project
50 return project.projectcontainer
51
52 def get_object(self, queryset=None):
53 return self.project
54
55
56 class ContainerListView(dashboard_mixins.DashboardBaseMixin,
57 generic.ListView):
58 model = models.ProjectContainer
59 paginate_by = 12
60 template_name = 'meinberlin_projectcontainers/container_list.html'
61 permission_required = 'a4projects.add_project'
62 menu_item = 'project'
63
64 def get_queryset(self):
65 return super().get_queryset().filter(
66 organisation=self.organisation
67 )
68
69 def get_permission_object(self):
70 return self.organisation
71
[end of meinberlin/apps/projectcontainers/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/meinberlin/apps/projectcontainers/forms.py b/meinberlin/apps/projectcontainers/forms.py
--- a/meinberlin/apps/projectcontainers/forms.py
+++ b/meinberlin/apps/projectcontainers/forms.py
@@ -62,12 +62,20 @@
class ContainerProjectsForm(ProjectDashboardForm):
def __init__(self, *args, **kwargs):
+ self.user = kwargs.pop('user')
+ self.organisation = kwargs.pop('organisation')
super().__init__(*args, **kwargs)
- # We tried to find a good balance between a short list and
- # all necessary projects. The details may change over time.
- # Projects that are already selected should remain in the queryset.
- self.fields['projects'].queryset = self.fields['projects'].queryset \
+ projects = self.fields['projects']\
+ .queryset.filter(organisation=self.organisation)
+ if not self.organisation.has_initiator(self.user):
+ user_groups = self.user.groups.all()
+ org_groups = self.organisation.groups.all()
+ shared_groups = user_groups & org_groups
+ group = shared_groups.distinct().first()
+ projects = projects.filter(group=group)
+
+ self.fields['projects'].queryset = projects \
.filter(projectcontainer=None)\
.filter(Q(containers=self.instance) |
(Q(containers=None) &
diff --git a/meinberlin/apps/projectcontainers/views.py b/meinberlin/apps/projectcontainers/views.py
--- a/meinberlin/apps/projectcontainers/views.py
+++ b/meinberlin/apps/projectcontainers/views.py
@@ -52,6 +52,12 @@
def get_object(self, queryset=None):
return self.project
+ def get_form_kwargs(self):
+ kwargs = super().get_form_kwargs()
+ kwargs['user'] = self.request.user
+ kwargs['organisation'] = self.project.organisation
+ return kwargs
+
class ContainerListView(dashboard_mixins.DashboardBaseMixin,
generic.ListView):
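In plain terms, the diff threads the requesting user and their organisation from the dashboard view into the form and then narrows the selectable projects. A framework-free sketch of that access rule (the data shapes here are invented for illustration; only the filtering logic mirrors the patch):

```python
def selectable_projects(projects, organisation, user, initiators, user_groups):
    # Only projects of the container's organisation are offered at all.
    candidates = [p for p in projects if p["organisation"] == organisation]
    # Initiators may pick any of them; other users only projects of a shared group.
    if user not in initiators:
        candidates = [p for p in candidates if p.get("group") in user_groups]
    return candidates


projects = [
    {"name": "A", "organisation": "org1", "group": "g1"},
    {"name": "B", "organisation": "org1", "group": "g2"},
    {"name": "C", "organisation": "org2", "group": "g1"},
]
print(selectable_projects(projects, "org1", "alice", initiators=set(), user_groups={"g1"}))
# -> [{'name': 'A', 'organisation': 'org1', 'group': 'g1'}]
```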
| {"golden_diff": "diff --git a/meinberlin/apps/projectcontainers/forms.py b/meinberlin/apps/projectcontainers/forms.py\n--- a/meinberlin/apps/projectcontainers/forms.py\n+++ b/meinberlin/apps/projectcontainers/forms.py\n@@ -62,12 +62,20 @@\n class ContainerProjectsForm(ProjectDashboardForm):\n \n def __init__(self, *args, **kwargs):\n+ self.user = kwargs.pop('user')\n+ self.organisation = kwargs.pop('organisation')\n super().__init__(*args, **kwargs)\n \n- # We tried to find a good balance between a short list and\n- # all necessary projects. The details may change over time.\n- # Projects that are already selected should remain in the queryset.\n- self.fields['projects'].queryset = self.fields['projects'].queryset \\\n+ projects = self.fields['projects']\\\n+ .queryset.filter(organisation=self.organisation)\n+ if not self.organisation.has_initiator(self.user):\n+ user_groups = self.user.groups.all()\n+ org_groups = self.organisation.groups.all()\n+ shared_groups = user_groups & org_groups\n+ group = shared_groups.distinct().first()\n+ projects = projects.filter(group=group)\n+\n+ self.fields['projects'].queryset = projects \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\ndiff --git a/meinberlin/apps/projectcontainers/views.py b/meinberlin/apps/projectcontainers/views.py\n--- a/meinberlin/apps/projectcontainers/views.py\n+++ b/meinberlin/apps/projectcontainers/views.py\n@@ -52,6 +52,12 @@\n def get_object(self, queryset=None):\n return self.project\n \n+ def get_form_kwargs(self):\n+ kwargs = super().get_form_kwargs()\n+ kwargs['user'] = self.request.user\n+ kwargs['organisation'] = self.project.organisation\n+ return kwargs\n+\n \n class ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n", "issue": "project_list in container add project form contains all projects\n@CarolingerSeilchenspringer at the moment all projects (not only the ones belonging to the organisation) can be added to a container. Should that stay that way?\r\n\n", "before_files": [{"content": "from django.db.models import Q\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.forms import ProjectCreateForm\nfrom adhocracy4.dashboard.forms import ProjectDashboardForm\nfrom meinberlin.apps.contrib.widgets import Select2MultipleWidget\n\nfrom . import models\n\nLABELS = {\n 'name': _('Title of your container'),\n 'description': _('Short description of your container'),\n 'tile_image': _('Logo'),\n}\n\nHELP_TEXTS = {\n 'name': _('This title will appear on the '\n 'teaser card and on top of the container '\n 'detail page. It should be max. 120 characters long'),\n 'description': _('This short description will appear on '\n 'the header of the container and in the teaser. '\n 'It should briefly state the goal of the '\n 'projects in max. 
250 chars.'),\n 'tile_image': _(\n 'The image will be shown in the container tile.'\n ),\n}\n\n\nclass ContainerCreateForm(ProjectCreateForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description',\n 'tile_image', 'tile_image_copyright']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerBasicForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['name', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived']\n required_for_project_publish = ['name', 'description']\n labels = LABELS\n help_texts = HELP_TEXTS\n\n\nclass ContainerInformationForm(ProjectDashboardForm):\n\n class Meta:\n model = models.ProjectContainer\n fields = ['information']\n required_for_project_publish = ['information']\n labels = {\n 'information': _('Description of your container'),\n }\n\n\nclass ContainerProjectsForm(ProjectDashboardForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # We tried to find a good balance between a short list and\n # all necessary projects. The details may change over time.\n # Projects that are already selected should remain in the queryset.\n self.fields['projects'].queryset = self.fields['projects'].queryset \\\n .filter(projectcontainer=None)\\\n .filter(Q(containers=self.instance) |\n (Q(containers=None) &\n Q(is_archived=False) &\n Q(is_public=True)))\\\n .order_by('name')\n\n class Meta:\n model = models.ProjectContainer\n fields = ['projects']\n required_for_project_publish = ['projects']\n widgets = {\n 'projects': Select2MultipleWidget,\n }\n", "path": "meinberlin/apps/projectcontainers/forms.py"}, {"content": "from django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom adhocracy4.dashboard.components.forms.views import \\\n ProjectComponentFormView\nfrom adhocracy4.dashboard.views import ProjectCreateView\n\nfrom . import forms\nfrom . 
import models\n\n\nclass ContainerCreateView(ProjectCreateView):\n model = models.ProjectContainer\n slug_url_kwarg = 'project_slug'\n form_class = forms.ContainerCreateForm\n template_name = 'meinberlin_projectcontainers/container_create_form.html'\n success_message = _('Container successfully created.')\n\n blueprint = ProjectBlueprint(\n title=_('Container'),\n description=_(\n 'A container contains multiple projects.'\n ),\n content=[],\n image='',\n settings_model=None,\n )\n\n\nclass ContainerBasicFormView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerProjectsView(ProjectComponentFormView):\n model = models.ProjectContainer\n\n @property\n def project(self):\n project = super().project\n return project.projectcontainer\n\n def get_object(self, queryset=None):\n return self.project\n\n\nclass ContainerListView(dashboard_mixins.DashboardBaseMixin,\n generic.ListView):\n model = models.ProjectContainer\n paginate_by = 12\n template_name = 'meinberlin_projectcontainers/container_list.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n\n def get_queryset(self):\n return super().get_queryset().filter(\n organisation=self.organisation\n )\n\n def get_permission_object(self):\n return self.organisation\n", "path": "meinberlin/apps/projectcontainers/views.py"}]} | 1,885 | 443 |
gh_patches_debug_6159 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-3155 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Invalid MarkdownV1 parsing
### Steps to Reproduce
1. Call the `mention_markdown(123, "Keeping_the_underscores", version=1)` function from `telegram.helpers`
### Expected behaviour
`[Keeping_the_underscores](tg://user?id=123)`, that is valid MarkdownV1 and consistent with https://core.telegram.org/bots/api#markdown-style.
### Actual behaviour
`[Keeping\\_the\\_underscores](tg://user?id=123)`. However this will fail to parse into well-formed MarkdownV1 -- in fact Telegram will respond with an error if it occurs in a message with `parse_mode = Markdown`.
### Operating System
Linux
### Version of Python, python-telegram-bot & dependencies
```shell
python-telegram-bot==20.0a2
```
### Relevant log output
_No response_
### Additional Context
_No response_
</issue>
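To make the two behaviours above concrete, here is a standalone snippet; the `escape_markdown` below is a simplified re-implementation for illustration, not the library function:

```python
import re


def escape_markdown(text: str, version: int = 1) -> str:
    # Markdown V1 escapes only _ * ` [ ; MarkdownV2 escapes a much larger set.
    chars = r"_*`[" if version == 1 else r"\_*[]()~`>#+-=|{}.!"
    return re.sub(f"([{re.escape(chars)}])", r"\\\1", text)


name, user_id = "Keeping_the_underscores", 123

# Actual behaviour described above: the name is escaped, so the V1 link text
# contains backslashes, which Telegram rejects for parse_mode=Markdown.
print(f"[{escape_markdown(name, version=1)}](tg://user?id={user_id})")

# Expected behaviour for version=1: the name is left untouched.
print(f"[{name}](tg://user?id={user_id})")
```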
<code>
[start of telegram/helpers.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2022
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains convenience helper functions.
20
21 .. versionchanged:: 20.0
22 Previously, the contents of this module were available through the (no longer existing)
23 module ``telegram.utils.helpers``.
24 """
25
26 __all__ = (
27 "create_deep_linked_url",
28 "effective_message_type",
29 "escape_markdown",
30 "mention_html",
31 "mention_markdown",
32 )
33
34 import re
35 from html import escape
36 from typing import TYPE_CHECKING, Optional, Union
37
38 from telegram.constants import MessageType
39
40 if TYPE_CHECKING:
41 from telegram import Message, Update
42
43
44 def escape_markdown(text: str, version: int = 1, entity_type: str = None) -> str:
45 """Helper function to escape telegram markup symbols.
46
47 Args:
48 text (:obj:`str`): The text.
49 version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.
50 Either ``1`` or ``2``. Defaults to ``1``.
51 entity_type (:obj:`str`, optional): For the entity types
52 :tg-const:`telegram.MessageEntity.PRE`, :tg-const:`telegram.MessageEntity.CODE` and
53 the link part of :tg-const:`telegram.MessageEntity.TEXT_LINK`, only certain characters
54 need to be escaped in :tg-const:`telegram.constants.ParseMode.MARKDOWN_V2`.
55 See the official API documentation for details. Only valid in combination with
56 ``version=2``, will be ignored else.
57 """
58 if int(version) == 1:
59 escape_chars = r"_*`["
60 elif int(version) == 2:
61 if entity_type in ["pre", "code"]:
62 escape_chars = r"\`"
63 elif entity_type == "text_link":
64 escape_chars = r"\)"
65 else:
66 escape_chars = r"\_*[]()~`>#+-=|{}.!"
67 else:
68 raise ValueError("Markdown version must be either 1 or 2!")
69
70 return re.sub(f"([{re.escape(escape_chars)}])", r"\\\1", text)
71
72
73 def mention_html(user_id: Union[int, str], name: str) -> str:
74 """
75 Args:
76 user_id (:obj:`int`): The user's id which you want to mention.
77 name (:obj:`str`): The name the mention is showing.
78
79 Returns:
80 :obj:`str`: The inline mention for the user as HTML.
81 """
82 return f'<a href="tg://user?id={user_id}">{escape(name)}</a>'
83
84
85 def mention_markdown(user_id: Union[int, str], name: str, version: int = 1) -> str:
86 """
87 Args:
88 user_id (:obj:`int`): The user's id which you want to mention.
89 name (:obj:`str`): The name the mention is showing.
90 version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.
91 Either ``1`` or ``2``. Defaults to ``1``.
92
93 Returns:
94 :obj:`str`: The inline mention for the user as Markdown.
95 """
96 return f"[{escape_markdown(name, version=version)}](tg://user?id={user_id})"
97
98
99 def effective_message_type(entity: Union["Message", "Update"]) -> Optional[str]:
100 """
101 Extracts the type of message as a string identifier from a :class:`telegram.Message` or a
102 :class:`telegram.Update`.
103
104 Args:
105 entity (:class:`telegram.Update` | :class:`telegram.Message`): The ``update`` or
106 ``message`` to extract from.
107
108 Returns:
109 :obj:`str` | :obj:`None`: One of :class:`telegram.constants.MessageType` if the entity
110 contains a message that matches one of those types. :obj:`None` otherwise.
111
112 """
113 # Importing on file-level yields cyclic Import Errors
114 from telegram import Message, Update # pylint: disable=import-outside-toplevel
115
116 if isinstance(entity, Message):
117 message = entity
118 elif isinstance(entity, Update):
119 if not entity.effective_message:
120 return None
121 message = entity.effective_message
122 else:
123 raise TypeError(f"The entity is neither Message nor Update (got: {type(entity)})")
124
125 for message_type in MessageType:
126 if message[message_type]:
127 return message_type
128
129 return None
130
131
132 def create_deep_linked_url(bot_username: str, payload: str = None, group: bool = False) -> str:
133 """
134 Creates a deep-linked URL for this :paramref:`bot_username` with the specified
135 :paramref:`payload`. See https://core.telegram.org/bots#deep-linking to learn more.
136
137 The :paramref:`payload` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``
138
139 Note:
140 Works well in conjunction with
141 ``CommandHandler("start", callback, filters=filters.Regex('payload'))``
142
143 Examples:
144 ``create_deep_linked_url(bot.get_me().username, "some-params")``
145
146 Args:
147 bot_username (:obj:`str`): The username to link to
148 payload (:obj:`str`, optional): Parameters to encode in the created URL
149 group (:obj:`bool`, optional): If :obj:`True` the user is prompted to select a group to
150 add the bot to. If :obj:`False`, opens a one-on-one conversation with the bot.
151 Defaults to :obj:`False`.
152
153 Returns:
154 :obj:`str`: An URL to start the bot with specific parameters
155 """
156 if bot_username is None or len(bot_username) <= 3:
157 raise ValueError("You must provide a valid bot_username.")
158
159 base_url = f"https://t.me/{bot_username}"
160 if not payload:
161 return base_url
162
163 if len(payload) > 64:
164 raise ValueError("The deep-linking payload must not exceed 64 characters.")
165
166 if not re.match(r"^[A-Za-z0-9_-]+$", payload):
167 raise ValueError(
168 "Only the following characters are allowed for deep-linked "
169 "URLs: A-Z, a-z, 0-9, _ and -"
170 )
171
172 if group:
173 key = "startgroup"
174 else:
175 key = "start"
176
177 return f"{base_url}?{key}={payload}"
178
[end of telegram/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/telegram/helpers.py b/telegram/helpers.py
--- a/telegram/helpers.py
+++ b/telegram/helpers.py
@@ -93,7 +93,10 @@
Returns:
:obj:`str`: The inline mention for the user as Markdown.
"""
- return f"[{escape_markdown(name, version=version)}](tg://user?id={user_id})"
+ tg_link = f"tg://user?id={user_id}"
+ if version == 1:
+ return f"[{name}]({tg_link})"
+ return f"[{escape_markdown(name, version=version)}]({tg_link})"
def effective_message_type(entity: Union["Message", "Update"]) -> Optional[str]:
| {"golden_diff": "diff --git a/telegram/helpers.py b/telegram/helpers.py\n--- a/telegram/helpers.py\n+++ b/telegram/helpers.py\n@@ -93,7 +93,10 @@\n Returns:\n :obj:`str`: The inline mention for the user as Markdown.\n \"\"\"\n- return f\"[{escape_markdown(name, version=version)}](tg://user?id={user_id})\"\n+ tg_link = f\"tg://user?id={user_id}\"\n+ if version == 1:\n+ return f\"[{name}]({tg_link})\"\n+ return f\"[{escape_markdown(name, version=version)}]({tg_link})\"\n \n \n def effective_message_type(entity: Union[\"Message\", \"Update\"]) -> Optional[str]:\n", "issue": "[BUG] Invalid MarkdownV1 parsing\n### Steps to Reproduce\r\n\r\n1. Call the `mention_markdown(123, \"Keeping_the_underscores\", version=1)` function from `telegram.helpers`\r\n\r\n### Expected behaviour\r\n\r\n`[Keeping_the_underscores](tg://user?id=123)`, that is valid MarkdownV1 and consistent with https://core.telegram.org/bots/api#markdown-style.\r\n\r\n### Actual behaviour\r\n\r\n`[Keeping\\\\_the\\\\_underscores](tg://user?id=123)`. However this will fail to parse into well-formed MarkdownV1 -- in fact Telegram will respond with an error if it occurs in a message with `parse_mode = Markdown`.\r\n\r\n### Operating System\r\n\r\nLinux\r\n\r\n### Version of Python, python-telegram-bot & dependencies\r\n\r\n```shell\r\npython-telegram-bot==20.0a2\r\n```\r\n\r\n\r\n### Relevant log output\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2022\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains convenience helper functions.\n\n.. versionchanged:: 20.0\n Previously, the contents of this module were available through the (no longer existing)\n module ``telegram.utils.helpers``.\n\"\"\"\n\n__all__ = (\n \"create_deep_linked_url\",\n \"effective_message_type\",\n \"escape_markdown\",\n \"mention_html\",\n \"mention_markdown\",\n)\n\nimport re\nfrom html import escape\nfrom typing import TYPE_CHECKING, Optional, Union\n\nfrom telegram.constants import MessageType\n\nif TYPE_CHECKING:\n from telegram import Message, Update\n\n\ndef escape_markdown(text: str, version: int = 1, entity_type: str = None) -> str:\n \"\"\"Helper function to escape telegram markup symbols.\n\n Args:\n text (:obj:`str`): The text.\n version (:obj:`int` | :obj:`str`): Use to specify the version of telegrams Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n entity_type (:obj:`str`, optional): For the entity types\n :tg-const:`telegram.MessageEntity.PRE`, :tg-const:`telegram.MessageEntity.CODE` and\n the link part of :tg-const:`telegram.MessageEntity.TEXT_LINK`, only certain characters\n need to be escaped in :tg-const:`telegram.constants.ParseMode.MARKDOWN_V2`.\n See the official API documentation for details. 
Only valid in combination with\n ``version=2``, will be ignored else.\n \"\"\"\n if int(version) == 1:\n escape_chars = r\"_*`[\"\n elif int(version) == 2:\n if entity_type in [\"pre\", \"code\"]:\n escape_chars = r\"\\`\"\n elif entity_type == \"text_link\":\n escape_chars = r\"\\)\"\n else:\n escape_chars = r\"\\_*[]()~`>#+-=|{}.!\"\n else:\n raise ValueError(\"Markdown version must be either 1 or 2!\")\n\n return re.sub(f\"([{re.escape(escape_chars)}])\", r\"\\\\\\1\", text)\n\n\ndef mention_html(user_id: Union[int, str], name: str) -> str:\n \"\"\"\n Args:\n user_id (:obj:`int`): The user's id which you want to mention.\n name (:obj:`str`): The name the mention is showing.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n \"\"\"\n return f'<a href=\"tg://user?id={user_id}\">{escape(name)}</a>'\n\n\ndef mention_markdown(user_id: Union[int, str], name: str, version: int = 1) -> str:\n \"\"\"\n Args:\n user_id (:obj:`int`): The user's id which you want to mention.\n name (:obj:`str`): The name the mention is showing.\n version (:obj:`int` | :obj:`str`): Use to specify the version of Telegram's Markdown.\n Either ``1`` or ``2``. Defaults to ``1``.\n\n Returns:\n :obj:`str`: The inline mention for the user as Markdown.\n \"\"\"\n return f\"[{escape_markdown(name, version=version)}](tg://user?id={user_id})\"\n\n\ndef effective_message_type(entity: Union[\"Message\", \"Update\"]) -> Optional[str]:\n \"\"\"\n Extracts the type of message as a string identifier from a :class:`telegram.Message` or a\n :class:`telegram.Update`.\n\n Args:\n entity (:class:`telegram.Update` | :class:`telegram.Message`): The ``update`` or\n ``message`` to extract from.\n\n Returns:\n :obj:`str` | :obj:`None`: One of :class:`telegram.constants.MessageType` if the entity\n contains a message that matches one of those types. :obj:`None` otherwise.\n\n \"\"\"\n # Importing on file-level yields cyclic Import Errors\n from telegram import Message, Update # pylint: disable=import-outside-toplevel\n\n if isinstance(entity, Message):\n message = entity\n elif isinstance(entity, Update):\n if not entity.effective_message:\n return None\n message = entity.effective_message\n else:\n raise TypeError(f\"The entity is neither Message nor Update (got: {type(entity)})\")\n\n for message_type in MessageType:\n if message[message_type]:\n return message_type\n\n return None\n\n\ndef create_deep_linked_url(bot_username: str, payload: str = None, group: bool = False) -> str:\n \"\"\"\n Creates a deep-linked URL for this :paramref:`bot_username` with the specified\n :paramref:`payload`. See https://core.telegram.org/bots#deep-linking to learn more.\n\n The :paramref:`payload` may consist of the following characters: ``A-Z, a-z, 0-9, _, -``\n\n Note:\n Works well in conjunction with\n ``CommandHandler(\"start\", callback, filters=filters.Regex('payload'))``\n\n Examples:\n ``create_deep_linked_url(bot.get_me().username, \"some-params\")``\n\n Args:\n bot_username (:obj:`str`): The username to link to\n payload (:obj:`str`, optional): Parameters to encode in the created URL\n group (:obj:`bool`, optional): If :obj:`True` the user is prompted to select a group to\n add the bot to. 
If :obj:`False`, opens a one-on-one conversation with the bot.\n Defaults to :obj:`False`.\n\n Returns:\n :obj:`str`: An URL to start the bot with specific parameters\n \"\"\"\n if bot_username is None or len(bot_username) <= 3:\n raise ValueError(\"You must provide a valid bot_username.\")\n\n base_url = f\"https://t.me/{bot_username}\"\n if not payload:\n return base_url\n\n if len(payload) > 64:\n raise ValueError(\"The deep-linking payload must not exceed 64 characters.\")\n\n if not re.match(r\"^[A-Za-z0-9_-]+$\", payload):\n raise ValueError(\n \"Only the following characters are allowed for deep-linked \"\n \"URLs: A-Z, a-z, 0-9, _ and -\"\n )\n\n if group:\n key = \"startgroup\"\n else:\n key = \"start\"\n\n return f\"{base_url}?{key}={payload}\"\n", "path": "telegram/helpers.py"}]} | 2,741 | 158 |
gh_patches_debug_6613 | rasdani/github-patches | git_diff | CTFd__CTFd-2515 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Getting dynamic challenges by ID does not return decay function
When getting a dynamic challenge from `GET /api/v1/challenges/<challenge-id>`, the challenge does not return its decay function.
This seems to be caused by [this](https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L60-L90) function which does not include `challenge.function`.
This seems to have been missed out when implementing different decay functions for dynamic challenges. This does not have any impact on normal usage of CTFd, but I see no reason not to include the function in the response.
</issue>
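For orientation, this is roughly the payload in question, written as the dict that the plugin's `read()` method (shown below) assembles. The field values here are made up; `"function"` is the key the issue says is missing:

```python
# Hypothetical response data for GET /api/v1/challenges/<challenge-id>
# when the challenge is of type "dynamic".
data = {
    "id": 1,
    "type": "dynamic",
    "name": "example",
    "value": 100,
    "initial": 100,
    "decay": 10,
    "minimum": 10,
    "function": "logarithmic",  # the decay function that is currently not returned
}
```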
<code>
[start of CTFd/plugins/dynamic_challenges/__init__.py]
1 from flask import Blueprint
2
3 from CTFd.models import Challenges, db
4 from CTFd.plugins import register_plugin_assets_directory
5 from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge
6 from CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic
7 from CTFd.plugins.migrations import upgrade
8
9
10 class DynamicChallenge(Challenges):
11 __mapper_args__ = {"polymorphic_identity": "dynamic"}
12 id = db.Column(
13 db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True
14 )
15 initial = db.Column(db.Integer, default=0)
16 minimum = db.Column(db.Integer, default=0)
17 decay = db.Column(db.Integer, default=0)
18 function = db.Column(db.String(32), default="logarithmic")
19
20 def __init__(self, *args, **kwargs):
21 super(DynamicChallenge, self).__init__(**kwargs)
22 self.value = kwargs["initial"]
23
24
25 class DynamicValueChallenge(BaseChallenge):
26 id = "dynamic" # Unique identifier used to register challenges
27 name = "dynamic" # Name of a challenge type
28 templates = (
29 { # Handlebars templates used for each aspect of challenge editing & viewing
30 "create": "/plugins/dynamic_challenges/assets/create.html",
31 "update": "/plugins/dynamic_challenges/assets/update.html",
32 "view": "/plugins/dynamic_challenges/assets/view.html",
33 }
34 )
35 scripts = { # Scripts that are loaded when a template is loaded
36 "create": "/plugins/dynamic_challenges/assets/create.js",
37 "update": "/plugins/dynamic_challenges/assets/update.js",
38 "view": "/plugins/dynamic_challenges/assets/view.js",
39 }
40 # Route at which files are accessible. This must be registered using register_plugin_assets_directory()
41 route = "/plugins/dynamic_challenges/assets/"
42 # Blueprint used to access the static_folder directory.
43 blueprint = Blueprint(
44 "dynamic_challenges",
45 __name__,
46 template_folder="templates",
47 static_folder="assets",
48 )
49 challenge_model = DynamicChallenge
50
51 @classmethod
52 def calculate_value(cls, challenge):
53 f = DECAY_FUNCTIONS.get(challenge.function, logarithmic)
54 value = f(challenge)
55
56 challenge.value = value
57 db.session.commit()
58 return challenge
59
60 @classmethod
61 def read(cls, challenge):
62 """
63 This method is in used to access the data of a challenge in a format processable by the front end.
64
65 :param challenge:
66 :return: Challenge object, data dictionary to be returned to the user
67 """
68 challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()
69 data = {
70 "id": challenge.id,
71 "name": challenge.name,
72 "value": challenge.value,
73 "initial": challenge.initial,
74 "decay": challenge.decay,
75 "minimum": challenge.minimum,
76 "description": challenge.description,
77 "connection_info": challenge.connection_info,
78 "next_id": challenge.next_id,
79 "category": challenge.category,
80 "state": challenge.state,
81 "max_attempts": challenge.max_attempts,
82 "type": challenge.type,
83 "type_data": {
84 "id": cls.id,
85 "name": cls.name,
86 "templates": cls.templates,
87 "scripts": cls.scripts,
88 },
89 }
90 return data
91
92 @classmethod
93 def update(cls, challenge, request):
94 """
95 This method is used to update the information associated with a challenge. This should be kept strictly to the
96 Challenges table and any child tables.
97
98 :param challenge:
99 :param request:
100 :return:
101 """
102 data = request.form or request.get_json()
103
104 for attr, value in data.items():
105 # We need to set these to floats so that the next operations don't operate on strings
106 if attr in ("initial", "minimum", "decay"):
107 value = float(value)
108 setattr(challenge, attr, value)
109
110 return DynamicValueChallenge.calculate_value(challenge)
111
112 @classmethod
113 def solve(cls, user, team, challenge, request):
114 super().solve(user, team, challenge, request)
115
116 DynamicValueChallenge.calculate_value(challenge)
117
118
119 def load(app):
120 upgrade(plugin_name="dynamic_challenges")
121 CHALLENGE_CLASSES["dynamic"] = DynamicValueChallenge
122 register_plugin_assets_directory(
123 app, base_path="/plugins/dynamic_challenges/assets/"
124 )
125
[end of CTFd/plugins/dynamic_challenges/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py
--- a/CTFd/plugins/dynamic_challenges/__init__.py
+++ b/CTFd/plugins/dynamic_challenges/__init__.py
@@ -73,6 +73,7 @@
"initial": challenge.initial,
"decay": challenge.decay,
"minimum": challenge.minimum,
+ "function": challenge.function,
"description": challenge.description,
"connection_info": challenge.connection_info,
"next_id": challenge.next_id,
| {"golden_diff": "diff --git a/CTFd/plugins/dynamic_challenges/__init__.py b/CTFd/plugins/dynamic_challenges/__init__.py\n--- a/CTFd/plugins/dynamic_challenges/__init__.py\n+++ b/CTFd/plugins/dynamic_challenges/__init__.py\n@@ -73,6 +73,7 @@\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n+ \"function\": challenge.function,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n", "issue": "Getting dynamic challenges by ID does not return decay function\nWhen getting a dynamic challenge from `GET /api/v1/challenges/<challenge-id>`, the challenge does not return its decay function.\r\n\r\nThis seems to be caused by [this](https://github.com/CTFd/CTFd/blob/master/CTFd/plugins/dynamic_challenges/__init__.py#L60-L90) function which does not include `challenge.function`.\r\n\r\nThis seems be have been missed out when implementing different decay functions for dynamic challenges. This does not have any impact on normal usage of CTFd, but I see no reason not to include the function in the response.\r\n\n", "before_files": [{"content": "from flask import Blueprint\n\nfrom CTFd.models import Challenges, db\nfrom CTFd.plugins import register_plugin_assets_directory\nfrom CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge\nfrom CTFd.plugins.dynamic_challenges.decay import DECAY_FUNCTIONS, logarithmic\nfrom CTFd.plugins.migrations import upgrade\n\n\nclass DynamicChallenge(Challenges):\n __mapper_args__ = {\"polymorphic_identity\": \"dynamic\"}\n id = db.Column(\n db.Integer, db.ForeignKey(\"challenges.id\", ondelete=\"CASCADE\"), primary_key=True\n )\n initial = db.Column(db.Integer, default=0)\n minimum = db.Column(db.Integer, default=0)\n decay = db.Column(db.Integer, default=0)\n function = db.Column(db.String(32), default=\"logarithmic\")\n\n def __init__(self, *args, **kwargs):\n super(DynamicChallenge, self).__init__(**kwargs)\n self.value = kwargs[\"initial\"]\n\n\nclass DynamicValueChallenge(BaseChallenge):\n id = \"dynamic\" # Unique identifier used to register challenges\n name = \"dynamic\" # Name of a challenge type\n templates = (\n { # Handlebars templates used for each aspect of challenge editing & viewing\n \"create\": \"/plugins/dynamic_challenges/assets/create.html\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.html\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.html\",\n }\n )\n scripts = { # Scripts that are loaded when a template is loaded\n \"create\": \"/plugins/dynamic_challenges/assets/create.js\",\n \"update\": \"/plugins/dynamic_challenges/assets/update.js\",\n \"view\": \"/plugins/dynamic_challenges/assets/view.js\",\n }\n # Route at which files are accessible. 
This must be registered using register_plugin_assets_directory()\n route = \"/plugins/dynamic_challenges/assets/\"\n # Blueprint used to access the static_folder directory.\n blueprint = Blueprint(\n \"dynamic_challenges\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"assets\",\n )\n challenge_model = DynamicChallenge\n\n @classmethod\n def calculate_value(cls, challenge):\n f = DECAY_FUNCTIONS.get(challenge.function, logarithmic)\n value = f(challenge)\n\n challenge.value = value\n db.session.commit()\n return challenge\n\n @classmethod\n def read(cls, challenge):\n \"\"\"\n This method is in used to access the data of a challenge in a format processable by the front end.\n\n :param challenge:\n :return: Challenge object, data dictionary to be returned to the user\n \"\"\"\n challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()\n data = {\n \"id\": challenge.id,\n \"name\": challenge.name,\n \"value\": challenge.value,\n \"initial\": challenge.initial,\n \"decay\": challenge.decay,\n \"minimum\": challenge.minimum,\n \"description\": challenge.description,\n \"connection_info\": challenge.connection_info,\n \"next_id\": challenge.next_id,\n \"category\": challenge.category,\n \"state\": challenge.state,\n \"max_attempts\": challenge.max_attempts,\n \"type\": challenge.type,\n \"type_data\": {\n \"id\": cls.id,\n \"name\": cls.name,\n \"templates\": cls.templates,\n \"scripts\": cls.scripts,\n },\n }\n return data\n\n @classmethod\n def update(cls, challenge, request):\n \"\"\"\n This method is used to update the information associated with a challenge. This should be kept strictly to the\n Challenges table and any child tables.\n\n :param challenge:\n :param request:\n :return:\n \"\"\"\n data = request.form or request.get_json()\n\n for attr, value in data.items():\n # We need to set these to floats so that the next operations don't operate on strings\n if attr in (\"initial\", \"minimum\", \"decay\"):\n value = float(value)\n setattr(challenge, attr, value)\n\n return DynamicValueChallenge.calculate_value(challenge)\n\n @classmethod\n def solve(cls, user, team, challenge, request):\n super().solve(user, team, challenge, request)\n\n DynamicValueChallenge.calculate_value(challenge)\n\n\ndef load(app):\n upgrade(plugin_name=\"dynamic_challenges\")\n CHALLENGE_CLASSES[\"dynamic\"] = DynamicValueChallenge\n register_plugin_assets_directory(\n app, base_path=\"/plugins/dynamic_challenges/assets/\"\n )\n", "path": "CTFd/plugins/dynamic_challenges/__init__.py"}]} | 1,907 | 127 |
gh_patches_debug_9313 | rasdani/github-patches | git_diff | facebookresearch__hydra-1364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Ax-Plugin] Add support for Python 3.9
Python 3.9 support pending on scikit 2.4.0 release. Relevant comment: https://github.com/scikit-learn/scikit-learn/issues/18621#issuecomment-733078676
Related to #1062
</issue>
<code>
[start of plugins/hydra_ax_sweeper/setup.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 # type: ignore
3 from setuptools import find_namespace_packages, setup
4
5 with open("README.md", "r") as fh:
6 LONG_DESC = fh.read()
7 setup(
8 name="hydra-ax-sweeper",
9 version="1.1.0rc1",
10 author="Omry Yadan, Shagun Sodhani",
11 author_email="[email protected], [email protected]",
12 description="Hydra Ax Sweeper plugin",
13 long_description=LONG_DESC,
14 long_description_content_type="text/markdown",
15 url="https://github.com/facebookresearch/hydra/",
16 packages=find_namespace_packages(include=["hydra_plugins.*"]),
17 classifiers=[
18 "License :: OSI Approved :: MIT License",
19 "Programming Language :: Python :: 3.7",
20 "Programming Language :: Python :: 3.8",
21 # "Programming Language :: Python :: 3.9",
22 "Operating System :: POSIX :: Linux",
23 "Operating System :: MacOS",
24 "Development Status :: 4 - Beta",
25 ],
26 install_requires=["hydra-core>=1.0.0", "ax-platform>=0.1.13"],
27 include_package_data=True,
28 )
29
[end of plugins/hydra_ax_sweeper/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py
--- a/plugins/hydra_ax_sweeper/setup.py
+++ b/plugins/hydra_ax_sweeper/setup.py
@@ -18,7 +18,7 @@
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
- # "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.9",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS",
"Development Status :: 4 - Beta",
| {"golden_diff": "diff --git a/plugins/hydra_ax_sweeper/setup.py b/plugins/hydra_ax_sweeper/setup.py\n--- a/plugins/hydra_ax_sweeper/setup.py\n+++ b/plugins/hydra_ax_sweeper/setup.py\n@@ -18,7 +18,7 @@\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n- # \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n", "issue": "[Ax-Plugin] Add support for Python 3.9\nPython 3.9 support pending on scikit 2.4.0 release. Relevant comment: https://github.com/scikit-learn/scikit-learn/issues/18621#issuecomment-733078676\r\n\r\nRelated to #1062 \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# type: ignore\nfrom setuptools import find_namespace_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n LONG_DESC = fh.read()\n setup(\n name=\"hydra-ax-sweeper\",\n version=\"1.1.0rc1\",\n author=\"Omry Yadan, Shagun Sodhani\",\n author_email=\"[email protected], [email protected]\",\n description=\"Hydra Ax Sweeper plugin\",\n long_description=LONG_DESC,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/hydra/\",\n packages=find_namespace_packages(include=[\"hydra_plugins.*\"]),\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n # \"Programming Language :: Python :: 3.9\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS\",\n \"Development Status :: 4 - Beta\",\n ],\n install_requires=[\"hydra-core>=1.0.0\", \"ax-platform>=0.1.13\"],\n include_package_data=True,\n )\n", "path": "plugins/hydra_ax_sweeper/setup.py"}]} | 942 | 151 |
gh_patches_debug_14560 | rasdani/github-patches | git_diff | python-trio__trio-1527 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
trio.open_signal_receiver should give an error if no arguments are passed
User confusion spotted in the wild: https://gitter.im/python-trio/general?at=5ebfa9d613878c30b581b9fe
</issue>
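One possible shape of the guard being requested, sketched here only to illustrate the idea; whatever check Trio actually adds, and its exact wording, may differ:

```python
def open_signal_receiver(*signals):
    # Fail loudly instead of silently waiting forever when no signals are given.
    if not signals:
        raise TypeError(
            "No signals were passed; use e.g. open_signal_receiver(signal.SIGINT)"
        )
    ...


try:
    open_signal_receiver()
except TypeError as exc:
    print(exc)
```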
<code>
[start of trio/_signals.py]
1 import signal
2 from contextlib import contextmanager
3 from collections import OrderedDict
4
5 import trio
6 from ._util import signal_raise, is_main_thread, ConflictDetector
7
8 # Discussion of signal handling strategies:
9 #
10 # - On Windows signals barely exist. There are no options; signal handlers are
11 # the only available API.
12 #
13 # - On Linux signalfd is arguably the natural way. Semantics: signalfd acts as
14 # an *alternative* signal delivery mechanism. The way you use it is to mask
15 # out the relevant signals process-wide (so that they don't get delivered
16 # the normal way), and then when you read from signalfd that actually counts
17 # as delivering it (despite the mask). The problem with this is that we
18 # don't have any reliable way to mask out signals process-wide -- the only
19 # way to do that in Python is to call pthread_sigmask from the main thread
20 # *before starting any other threads*, and as a library we can't really
21 # impose that, and the failure mode is annoying (signals get delivered via
22 # signal handlers whether we want them to or not).
23 #
24 # - on macOS/*BSD, kqueue is the natural way. Semantics: kqueue acts as an
25 # *extra* signal delivery mechanism. Signals are delivered the normal
26 # way, *and* are delivered to kqueue. So you want to set them to SIG_IGN so
27 # that they don't end up pending forever (I guess?). I can't find any actual
28 # docs on how masking and EVFILT_SIGNAL interact. I did see someone note
29 # that if a signal is pending when the kqueue filter is added then you
30 # *don't* get notified of that, which makes sense. But still, we have to
31 # manipulate signal state (e.g. setting SIG_IGN) which as far as Python is
32 # concerned means we have to do this from the main thread.
33 #
34 # So in summary, there don't seem to be any compelling advantages to using the
35 # platform-native signal notification systems; they're kinda nice, but it's
36 # simpler to implement the naive signal-handler-based system once and be
37 # done. (The big advantage would be if there were a reliable way to monitor
38 # for SIGCHLD from outside the main thread and without interfering with other
39 # libraries that also want to monitor for SIGCHLD. But there isn't. I guess
40 # kqueue might give us that, but in kqueue we don't need it, because kqueue
41 # can directly monitor for child process state changes.)
42
43
44 @contextmanager
45 def _signal_handler(signals, handler):
46 original_handlers = {}
47 try:
48 for signum in set(signals):
49 original_handlers[signum] = signal.signal(signum, handler)
50 yield
51 finally:
52 for signum, original_handler in original_handlers.items():
53 signal.signal(signum, original_handler)
54
55
56 class SignalReceiver:
57 def __init__(self):
58 # {signal num: None}
59 self._pending = OrderedDict()
60 self._lot = trio.lowlevel.ParkingLot()
61 self._conflict_detector = ConflictDetector(
62 "only one task can iterate on a signal receiver at a time"
63 )
64 self._closed = False
65
66 def _add(self, signum):
67 if self._closed:
68 signal_raise(signum)
69 else:
70 self._pending[signum] = None
71 self._lot.unpark()
72
73 def _redeliver_remaining(self):
74 # First make sure that any signals still in the delivery pipeline will
75 # get redelivered
76 self._closed = True
77
78 # And then redeliver any that are sitting in pending. This is done
79 # using a weird recursive construct to make sure we process everything
80 # even if some of the handlers raise exceptions.
81 def deliver_next():
82 if self._pending:
83 signum, _ = self._pending.popitem(last=False)
84 try:
85 signal_raise(signum)
86 finally:
87 deliver_next()
88
89 deliver_next()
90
91 # Helper for tests, not public or otherwise used
92 def _pending_signal_count(self):
93 return len(self._pending)
94
95 def __aiter__(self):
96 return self
97
98 async def __anext__(self):
99 if self._closed:
100 raise RuntimeError("open_signal_receiver block already exited")
101 # In principle it would be possible to support multiple concurrent
102 # calls to __anext__, but doing it without race conditions is quite
103 # tricky, and there doesn't seem to be any point in trying.
104 with self._conflict_detector:
105 if not self._pending:
106 await self._lot.park()
107 else:
108 await trio.lowlevel.checkpoint()
109 signum, _ = self._pending.popitem(last=False)
110 return signum
111
112
113 @contextmanager
114 def open_signal_receiver(*signals):
115 """A context manager for catching signals.
116
117 Entering this context manager starts listening for the given signals and
118 returns an async iterator; exiting the context manager stops listening.
119
120 The async iterator blocks until a signal arrives, and then yields it.
121
122 Note that if you leave the ``with`` block while the iterator has
123 unextracted signals still pending inside it, then they will be
124 re-delivered using Python's regular signal handling logic. This avoids a
125 race condition when signals arrives just before we exit the ``with``
126 block.
127
128 Args:
129 signals: the signals to listen for.
130
131 Raises:
132 RuntimeError: if you try to use this anywhere except Python's main
133 thread. (This is a Python limitation.)
134
135 Example:
136
137 A common convention for Unix daemons is that they should reload their
138 configuration when they receive a ``SIGHUP``. Here's a sketch of what
139 that might look like using :func:`open_signal_receiver`::
140
141 with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter:
142 async for signum in signal_aiter:
143 assert signum == signal.SIGHUP
144 reload_configuration()
145
146 """
147 if not is_main_thread():
148 raise RuntimeError(
149 "Sorry, open_signal_receiver is only possible when running in "
150 "Python interpreter's main thread"
151 )
152 token = trio.lowlevel.current_trio_token()
153 queue = SignalReceiver()
154
155 def handler(signum, _):
156 token.run_sync_soon(queue._add, signum, idempotent=True)
157
158 try:
159 with _signal_handler(signals, handler):
160 yield queue
161 finally:
162 queue._redeliver_remaining()
163
[end of trio/_signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/trio/_signals.py b/trio/_signals.py
--- a/trio/_signals.py
+++ b/trio/_signals.py
@@ -129,6 +129,8 @@
signals: the signals to listen for.
Raises:
+ TypeError: if no signals were provided.
+
RuntimeError: if you try to use this anywhere except Python's main
thread. (This is a Python limitation.)
@@ -144,6 +146,9 @@
reload_configuration()
"""
+ if not signals:
+ raise TypeError("No signals were provided")
+
if not is_main_thread():
raise RuntimeError(
"Sorry, open_signal_receiver is only possible when running in "
| {"golden_diff": "diff --git a/trio/_signals.py b/trio/_signals.py\n--- a/trio/_signals.py\n+++ b/trio/_signals.py\n@@ -129,6 +129,8 @@\n signals: the signals to listen for.\n \n Raises:\n+ TypeError: if no signals were provided.\n+\n RuntimeError: if you try to use this anywhere except Python's main\n thread. (This is a Python limitation.)\n \n@@ -144,6 +146,9 @@\n reload_configuration()\n \n \"\"\"\n+ if not signals:\n+ raise TypeError(\"No signals were provided\")\n+\n if not is_main_thread():\n raise RuntimeError(\n \"Sorry, open_signal_receiver is only possible when running in \"\n", "issue": "trio.open_signal_receiver should give an error if no arguments are passed\nUser confusion spotted in the wild: https://gitter.im/python-trio/general?at=5ebfa9d613878c30b581b9fe\r\n\n", "before_files": [{"content": "import signal\nfrom contextlib import contextmanager\nfrom collections import OrderedDict\n\nimport trio\nfrom ._util import signal_raise, is_main_thread, ConflictDetector\n\n# Discussion of signal handling strategies:\n#\n# - On Windows signals barely exist. There are no options; signal handlers are\n# the only available API.\n#\n# - On Linux signalfd is arguably the natural way. Semantics: signalfd acts as\n# an *alternative* signal delivery mechanism. The way you use it is to mask\n# out the relevant signals process-wide (so that they don't get delivered\n# the normal way), and then when you read from signalfd that actually counts\n# as delivering it (despite the mask). The problem with this is that we\n# don't have any reliable way to mask out signals process-wide -- the only\n# way to do that in Python is to call pthread_sigmask from the main thread\n# *before starting any other threads*, and as a library we can't really\n# impose that, and the failure mode is annoying (signals get delivered via\n# signal handlers whether we want them to or not).\n#\n# - on macOS/*BSD, kqueue is the natural way. Semantics: kqueue acts as an\n# *extra* signal delivery mechanism. Signals are delivered the normal\n# way, *and* are delivered to kqueue. So you want to set them to SIG_IGN so\n# that they don't end up pending forever (I guess?). I can't find any actual\n# docs on how masking and EVFILT_SIGNAL interact. I did see someone note\n# that if a signal is pending when the kqueue filter is added then you\n# *don't* get notified of that, which makes sense. But still, we have to\n# manipulate signal state (e.g. setting SIG_IGN) which as far as Python is\n# concerned means we have to do this from the main thread.\n#\n# So in summary, there don't seem to be any compelling advantages to using the\n# platform-native signal notification systems; they're kinda nice, but it's\n# simpler to implement the naive signal-handler-based system once and be\n# done. (The big advantage would be if there were a reliable way to monitor\n# for SIGCHLD from outside the main thread and without interfering with other\n# libraries that also want to monitor for SIGCHLD. But there isn't. 
I guess\n# kqueue might give us that, but in kqueue we don't need it, because kqueue\n# can directly monitor for child process state changes.)\n\n\n@contextmanager\ndef _signal_handler(signals, handler):\n original_handlers = {}\n try:\n for signum in set(signals):\n original_handlers[signum] = signal.signal(signum, handler)\n yield\n finally:\n for signum, original_handler in original_handlers.items():\n signal.signal(signum, original_handler)\n\n\nclass SignalReceiver:\n def __init__(self):\n # {signal num: None}\n self._pending = OrderedDict()\n self._lot = trio.lowlevel.ParkingLot()\n self._conflict_detector = ConflictDetector(\n \"only one task can iterate on a signal receiver at a time\"\n )\n self._closed = False\n\n def _add(self, signum):\n if self._closed:\n signal_raise(signum)\n else:\n self._pending[signum] = None\n self._lot.unpark()\n\n def _redeliver_remaining(self):\n # First make sure that any signals still in the delivery pipeline will\n # get redelivered\n self._closed = True\n\n # And then redeliver any that are sitting in pending. This is done\n # using a weird recursive construct to make sure we process everything\n # even if some of the handlers raise exceptions.\n def deliver_next():\n if self._pending:\n signum, _ = self._pending.popitem(last=False)\n try:\n signal_raise(signum)\n finally:\n deliver_next()\n\n deliver_next()\n\n # Helper for tests, not public or otherwise used\n def _pending_signal_count(self):\n return len(self._pending)\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n if self._closed:\n raise RuntimeError(\"open_signal_receiver block already exited\")\n # In principle it would be possible to support multiple concurrent\n # calls to __anext__, but doing it without race conditions is quite\n # tricky, and there doesn't seem to be any point in trying.\n with self._conflict_detector:\n if not self._pending:\n await self._lot.park()\n else:\n await trio.lowlevel.checkpoint()\n signum, _ = self._pending.popitem(last=False)\n return signum\n\n\n@contextmanager\ndef open_signal_receiver(*signals):\n \"\"\"A context manager for catching signals.\n\n Entering this context manager starts listening for the given signals and\n returns an async iterator; exiting the context manager stops listening.\n\n The async iterator blocks until a signal arrives, and then yields it.\n\n Note that if you leave the ``with`` block while the iterator has\n unextracted signals still pending inside it, then they will be\n re-delivered using Python's regular signal handling logic. This avoids a\n race condition when signals arrives just before we exit the ``with``\n block.\n\n Args:\n signals: the signals to listen for.\n\n Raises:\n RuntimeError: if you try to use this anywhere except Python's main\n thread. (This is a Python limitation.)\n\n Example:\n\n A common convention for Unix daemons is that they should reload their\n configuration when they receive a ``SIGHUP``. 
Here's a sketch of what\n that might look like using :func:`open_signal_receiver`::\n\n with trio.open_signal_receiver(signal.SIGHUP) as signal_aiter:\n async for signum in signal_aiter:\n assert signum == signal.SIGHUP\n reload_configuration()\n\n \"\"\"\n if not is_main_thread():\n raise RuntimeError(\n \"Sorry, open_signal_receiver is only possible when running in \"\n \"Python interpreter's main thread\"\n )\n token = trio.lowlevel.current_trio_token()\n queue = SignalReceiver()\n\n def handler(signum, _):\n token.run_sync_soon(queue._add, signum, idempotent=True)\n\n try:\n with _signal_handler(signals, handler):\n yield queue\n finally:\n queue._redeliver_remaining()\n", "path": "trio/_signals.py"}]} | 2,399 | 159 |
gh_patches_debug_30561 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Edit Organization: url is not displayed
When editing an organization that has a url, the url is not displayed in the field.
Dataset Creation [BUG]: Cannot save multiple countries
1. Dataset creation: when adding multiple countries, it saves only one
2. Dataset edit: when adding multiple countries, it saves only one
3. Dataset creation, steps:
   a. add one country
   b. click on "next: add data" (without filling in the other fields)
   c. it resets the country selection (not ok!)
</issue>
<code>
[start of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py]
1 import ckanext.hdx_package.helpers.licenses as hdx_licenses
2
3 import ckan.plugins as plugins
4 import ckan.plugins.toolkit as toolkit
5 import ckan.model.package as package
6 import ckan.model.license as license
7 import pylons.config as config
8 import version
9
10 import ckanext.hdx_package.helpers.caching as caching
11 import ckanext.hdx_theme.helpers.auth as auth
12
13
14 # def run_on_startup():
15 # cache_on_startup = config.get('hdx.cache.onstartup', 'true')
16 # if 'true' == cache_on_startup:
17 # _generate_license_list()
18 # caching.cached_get_group_package_stuff()
19
20
21 # def _generate_license_list():
22 # package.Package._license_register = license.LicenseRegister()
23 # package.Package._license_register.licenses = [
24 # license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
25 # license.License(license.LicenseCreativeCommonsAttribution()),
26 # license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
27 # license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
28 # license.License(hdx_licenses.LicenseHdxMultiple()),
29 # license.License(hdx_licenses.LicenseHdxOther())
30 # ]
31
32 class HDXThemePlugin(plugins.SingletonPlugin):
33 plugins.implements(plugins.IConfigurer)
34 plugins.implements(plugins.IRoutes, inherit=True)
35 plugins.implements(plugins.ITemplateHelpers)
36 plugins.implements(plugins.IActions)
37 plugins.implements(plugins.IAuthFunctions)
38 plugins.implements(plugins.IGroupController, inherit=True)
39 plugins.implements(plugins.IMiddleware, inherit=True)
40
41 def update_config(self, config):
42 toolkit.add_template_directory(config, 'templates')
43 toolkit.add_template_directory(config, 'templates_legacy')
44 toolkit.add_public_directory(config, 'public')
45 toolkit.add_resource('fanstatic', 'hdx_theme')
46
47
48 def before_map(self, map):
49 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
50 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
51 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
52 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
53 #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
54 #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
55
56 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
57 map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
58
59 #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')
60
61 return map
62
63 def create(self, entity):
64 caching.invalidate_group_caches()
65
66 def edit(self, entity):
67 caching.invalidate_group_caches()
68
69 def get_helpers(self):
70 from ckanext.hdx_theme.helpers import helpers as hdx_helpers
71 return {
72 'is_downloadable': hdx_helpers.is_downloadable,
73 'get_facet_items_dict': hdx_helpers.get_facet_items_dict,
74 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
75 'get_filtered_params_list': hdx_helpers.get_filtered_params_list,
76 'get_last_revision_package': hdx_helpers.get_last_revision_package,
77 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
78 'get_last_revision_group': hdx_helpers.get_last_revision_group,
79 'get_group_followers': hdx_helpers.get_group_followers,
80 'get_group_members': hdx_helpers.get_group_members,
81 'markdown_extract_strip': hdx_helpers.markdown_extract_strip,
82 'render_date_from_concat_str': hdx_helpers.render_date_from_concat_str,
83 'hdx_version': hdx_helpers.hdx_version,
84 'hdx_build_nav_icon_with_message': hdx_helpers.hdx_build_nav_icon_with_message,
85 'hdx_num_of_new_related_items': hdx_helpers.hdx_num_of_new_related_items,
86 'hdx_get_extras_element': hdx_helpers.hdx_get_extras_element,
87 'hdx_get_user_info': hdx_helpers.hdx_get_user_info,
88 'hdx_linked_user': hdx_helpers.hdx_linked_user,
89 'hdx_show_singular_plural': hdx_helpers.hdx_show_singular_plural,
90 'hdx_member_roles_list': hdx_helpers.hdx_member_roles_list,
91 'hdx_organizations_available_with_roles': hdx_helpers.hdx_organizations_available_with_roles,
92 'hdx_group_followee_list': hdx_helpers.hdx_group_followee_list,
93 'hdx_remove_schema_and_domain_from_url': hdx_helpers.hdx_remove_schema_and_domain_from_url,
94 'hdx_get_ckan_config': hdx_helpers.hdx_get_ckan_config
95 }
96
97 def get_actions(self):
98 from ckanext.hdx_theme.helpers import actions as hdx_actions
99 return {
100 'organization_list_for_user':hdx_actions.organization_list_for_user,
101 'cached_group_list': hdx_actions.cached_group_list,
102 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,
103 'member_list': hdx_actions.member_list,
104 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins,
105 'hdx_send_new_org_request': hdx_actions.hdx_send_new_org_request,
106 'hdx_send_editor_request_for_org': hdx_actions.hdx_send_editor_request_for_org,
107 'hdx_send_request_membership': hdx_actions.hdx_send_request_membership,
108 'hdx_user_show' : hdx_actions.hdx_user_show
109
110 }
111 def get_auth_functions(self):
112 return {
113 'hdx_basic_user_info': auth.hdx_basic_user_info,
114 'group_member_create': auth.group_member_create,
115 'hdx_send_new_org_request': auth.hdx_send_new_org_request,
116 'hdx_send_editor_request_for_org': auth.hdx_send_editor_request_for_org,
117 'hdx_send_request_membership': auth.hdx_send_request_membership
118 }
119
120 # def make_middleware(self, app, config):
121 # run_on_startup()
122 # return app
123
124
125
126
127
[end of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
@@ -47,13 +47,13 @@
def before_map(self, map):
map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
- map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
- map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
- map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
+ map.connect('/count/dataset', controller='ckanext.hdx_theme.helpers.count:CountController', action='dataset')
+ map.connect('/count/country', controller='ckanext.hdx_theme.helpers.count:CountController', action='country')
+ map.connect('/count/source', controller='ckanext.hdx_theme.helpers.count:CountController', action='source')
#map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
#map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
- map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
+ map.connect('/count/test', controller='ckanext.hdx_theme.helpers.count:CountController', action='test')
map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
#map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -47,13 +47,13 @@\n \n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n- map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n- map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n- map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n+ map.connect('/count/dataset', controller='ckanext.hdx_theme.helpers.count:CountController', action='dataset')\n+ map.connect('/count/country', controller='ckanext.hdx_theme.helpers.count:CountController', action='country')\n+ map.connect('/count/source', controller='ckanext.hdx_theme.helpers.count:CountController', action='source')\n #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n- map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n+ map.connect('/count/test', controller='ckanext.hdx_theme.helpers.count:CountController', action='test')\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n \n #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n", "issue": "Edit Organization: url is not displayed\nWhen edit an organization which has an url, the url is not displayed in the field\n\nDataset Creation [BUG]: Can not save multiple countries \n1. Dataset creation: When adding multiple countries, it is saving only one\n2. Dataset edit: When adding multiple countries, it is saving only one\n3. Dataset creation, steps:\n a. add one country\n b. click on next:add data (no filling other fields)\n c. 
it will reset the country selection (not ok!)\n\n", "before_files": [{"content": "import ckanext.hdx_package.helpers.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport pylons.config as config\nimport version\n\nimport ckanext.hdx_package.helpers.caching as caching\nimport ckanext.hdx_theme.helpers.auth as auth\n\n\n# def run_on_startup():\n# cache_on_startup = config.get('hdx.cache.onstartup', 'true')\n# if 'true' == cache_on_startup:\n# _generate_license_list()\n# caching.cached_get_group_package_stuff()\n\n\n# def _generate_license_list():\n# package.Package._license_register = license.LicenseRegister() \n# package.Package._license_register.licenses = [\n# license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n# license.License(license.LicenseCreativeCommonsAttribution()),\n# license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n# license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n# license.License(hdx_licenses.LicenseHdxMultiple()),\n# license.License(hdx_licenses.LicenseHdxOther())\n# ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_template_directory(config, 'templates_legacy')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n #map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n #map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n\n #map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n\n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme.helpers import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict': hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list': hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package': hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 
'get_last_revision_group': hdx_helpers.get_last_revision_group,\n 'get_group_followers': hdx_helpers.get_group_followers,\n 'get_group_members': hdx_helpers.get_group_members,\n 'markdown_extract_strip': hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str': hdx_helpers.render_date_from_concat_str,\n 'hdx_version': hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message': hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items': hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element': hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info': hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user': hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural': hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list': hdx_helpers.hdx_member_roles_list,\n 'hdx_organizations_available_with_roles': hdx_helpers.hdx_organizations_available_with_roles,\n 'hdx_group_followee_list': hdx_helpers.hdx_group_followee_list,\n 'hdx_remove_schema_and_domain_from_url': hdx_helpers.hdx_remove_schema_and_domain_from_url,\n 'hdx_get_ckan_config': hdx_helpers.hdx_get_ckan_config\n }\n \n def get_actions(self):\n from ckanext.hdx_theme.helpers import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins,\n 'hdx_send_new_org_request': hdx_actions.hdx_send_new_org_request,\n 'hdx_send_editor_request_for_org': hdx_actions.hdx_send_editor_request_for_org,\n 'hdx_send_request_membership': hdx_actions.hdx_send_request_membership,\n 'hdx_user_show' : hdx_actions.hdx_user_show\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info,\n 'group_member_create': auth.group_member_create,\n 'hdx_send_new_org_request': auth.hdx_send_new_org_request,\n 'hdx_send_editor_request_for_org': auth.hdx_send_editor_request_for_org,\n 'hdx_send_request_membership': auth.hdx_send_request_membership\n }\n \n # def make_middleware(self, app, config):\n # run_on_startup()\n # return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]} | 2,369 | 463 |
gh_patches_debug_34844 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-375 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ENH] Naming conventions
I wanted to discuss naming conventions for the various functions and arguments for consistency. `expand_column` has a parameter `column` whereas `add_column` had `col_name`.
Also, is `_column` necessary in each function? Would it be ok to just have an `add()` or `transform()` method? In general I'm more on the side of more verbose function names, but just wanted to throw the idea out there.
Similarly, when following the format of `limit_column_characters`, functions like `change_type` should probably be named `change_column_type`.
I'm not married to any of this (except the function arguments, those should be consistent) but wanted to get people's ideas.
</issue>
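Renaming parameters for consistency raises the backwards-compatibility question, and the accepted patch further down reaches for a `deprecated_alias` decorator from `janitor.utils`. A rough, self-contained sketch of how such a decorator can remap old keyword names onto new ones (the warning text here is illustrative, not the library's exact wording) looks like:

```python
import functools
import warnings


def deprecated_alias(**aliases):
    # Remap old keyword names (e.g. seperate_df) to their new spellings
    # (separate_df) so existing callers keep working, with a warning.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for old, new in aliases.items():
                if old in kwargs:
                    warnings.warn(f"'{old}' is deprecated; use '{new}'", DeprecationWarning)
                    kwargs.setdefault(new, kwargs.pop(old))
            return func(*args, **kwargs)
        return wrapper
    return decorator


@deprecated_alias(seperate_df="separate_df", filespath="files_path")
def read_csvs(files_path, separate_df=False, **kwargs):
    return files_path, separate_df


print(read_csvs(filespath="*.csv", seperate_df=True))  # ('*.csv', True)
```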
<code>
[start of janitor/io.py]
1 import os
2 from glob import glob
3
4 import pandas as pd
5
6
7 def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):
8 """
9 :param filespath: The filepath pattern matching the CSVs files.
10 Accepts regular expressions, with or without csv extension.
11 :param seperate_df: If False (default) returns a single Dataframe
12 with the concatenation of the csv files.
13 If True, returns a dictionary of seperate dataframes
14 for each CSV file.
15 :param kwargs: Keyword arguments to pass into the
16 original pandas `read_csv`.
17 """
18 # Sanitize input
19 assert filespath is not None
20 assert len(filespath) != 0
21
22 # Read the csv files
23 dfs = {
24 os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)
25 }
26 # Check if dataframes have been read
27 if len(dfs) == 0:
28 raise ValueError("No CSV files to read with the given filespath")
29 # Concatenate the dataframes if requested (default)
30 col_names = list(dfs.values())[0].columns
31 if not seperate_df:
32 # If columns do not match raise an error
33 for df in dfs.values():
34 if not all(df.columns == col_names):
35 raise ValueError(
36 "Columns in input CSV files do not match."
37 "Files cannot be concatenated"
38 )
39 return pd.concat(list(dfs.values()), ignore_index=True, sort=False)
40 else:
41 return dfs
42
[end of janitor/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/io.py b/janitor/io.py
--- a/janitor/io.py
+++ b/janitor/io.py
@@ -1,34 +1,49 @@
import os
from glob import glob
+from typing import Iterable, Union
import pandas as pd
+from .utils import deprecated_alias
-def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):
+
+@deprecated_alias(seperate_df="separate_df", filespath="files_path")
+def read_csvs(
+ files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs
+) -> Union[pd.DataFrame, dict]:
"""
- :param filespath: The filepath pattern matching the CSVs files.
+ :param files_path: The filepath pattern matching the CSVs files.
Accepts regular expressions, with or without csv extension.
- :param seperate_df: If False (default) returns a single Dataframe
+ Also accepts iterable of file paths.
+ :param separate_df: If False (default) returns a single Dataframe
with the concatenation of the csv files.
- If True, returns a dictionary of seperate dataframes
+ If True, returns a dictionary of separate dataframes
for each CSV file.
:param kwargs: Keyword arguments to pass into the
original pandas `read_csv`.
"""
# Sanitize input
- assert filespath is not None
- assert len(filespath) != 0
+ assert files_path is not None
+ assert len(files_path) != 0
# Read the csv files
- dfs = {
- os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)
- }
+ # String to file/folder or file pattern provided
+ if isinstance(files_path, str):
+ dfs = {
+ os.path.basename(f): pd.read_csv(f, **kwargs)
+ for f in glob(files_path)
+ }
+ # Iterable of file paths provided
+ else:
+ dfs = {
+ os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path
+ }
# Check if dataframes have been read
if len(dfs) == 0:
- raise ValueError("No CSV files to read with the given filespath")
+ raise ValueError("No CSV files to read with the given `files_path`")
# Concatenate the dataframes if requested (default)
col_names = list(dfs.values())[0].columns
- if not seperate_df:
+ if not separate_df:
# If columns do not match raise an error
for df in dfs.values():
if not all(df.columns == col_names):
| {"golden_diff": "diff --git a/janitor/io.py b/janitor/io.py\n--- a/janitor/io.py\n+++ b/janitor/io.py\n@@ -1,34 +1,49 @@\n import os\n from glob import glob\n+from typing import Iterable, Union\n \n import pandas as pd\n \n+from .utils import deprecated_alias\n \n-def read_csvs(filespath: str, seperate_df: bool = False, **kwargs):\n+\n+@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\n+def read_csvs(\n+ files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n+) -> Union[pd.DataFrame, dict]:\n \"\"\"\n- :param filespath: The filepath pattern matching the CSVs files.\n+ :param files_path: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n- :param seperate_df: If False (default) returns a single Dataframe\n+ Also accepts iterable of file paths.\n+ :param separate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n- If True, returns a dictionary of seperate dataframes\n+ If True, returns a dictionary of separate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n- assert filespath is not None\n- assert len(filespath) != 0\n+ assert files_path is not None\n+ assert len(files_path) != 0\n \n # Read the csv files\n- dfs = {\n- os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)\n- }\n+ # String to file/folder or file pattern provided\n+ if isinstance(files_path, str):\n+ dfs = {\n+ os.path.basename(f): pd.read_csv(f, **kwargs)\n+ for f in glob(files_path)\n+ }\n+ # Iterable of file paths provided\n+ else:\n+ dfs = {\n+ os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n+ }\n # Check if dataframes have been read\n if len(dfs) == 0:\n- raise ValueError(\"No CSV files to read with the given filespath\")\n+ raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n- if not seperate_df:\n+ if not separate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n", "issue": "[ENH] Naming conventions\nI wanted to discuss naming conventions for the various functions and arguments for consistency. `expand_column` has a parameter `column` whereas `add_column` had `col_name`.\r\n\r\nAlso, is `_column` necessary in each function? Would it be ok to just have an `add()` or `transform()` method? 
In general I'm more on the side of more verbose function names, but just wanted to throw the idea out there.\r\n\r\nSimilarly, when following the format of `limit_column_characters` functions like `change_type` should probably be named `change_column_type`.\r\n\r\nI'm not married to any of this (except the function arguments, those should be consistent) but wanted to get peoples' ideas.\n", "before_files": [{"content": "import os\nfrom glob import glob\n\nimport pandas as pd\n\n\ndef read_csvs(filespath: str, seperate_df: bool = False, **kwargs):\n \"\"\"\n :param filespath: The filepath pattern matching the CSVs files.\n Accepts regular expressions, with or without csv extension.\n :param seperate_df: If False (default) returns a single Dataframe\n with the concatenation of the csv files.\n If True, returns a dictionary of seperate dataframes\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n \"\"\"\n # Sanitize input\n assert filespath is not None\n assert len(filespath) != 0\n\n # Read the csv files\n dfs = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in glob(filespath)\n }\n # Check if dataframes have been read\n if len(dfs) == 0:\n raise ValueError(\"No CSV files to read with the given filespath\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs.values())[0].columns\n if not seperate_df:\n # If columns do not match raise an error\n for df in dfs.values():\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(list(dfs.values()), ignore_index=True, sort=False)\n else:\n return dfs\n", "path": "janitor/io.py"}]} | 1,088 | 601 |
gh_patches_debug_2608 | rasdani/github-patches | git_diff | ultralytics__yolov5-296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects
Hi,
I've run into a problem:
Traceback (most recent call last):
File "train.py", line 394, in <module>
train(hyp)
File "train.py", line 331, in train
torch.save(ckpt, last)
File "/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py", line 328, in save
_legacy_save(obj, opened_file, pickle_module, pickle_protocol)
File "/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py", line 401, in _legacy_save
pickler.dump(obj)
**TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects**
Thanks!
environment:
ubuntu 16.04
GPU 2080Ti *4
pytorch 1.4.0
</issue>
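The unpicklable object is the NCCL process group that DDP hangs on the model and that `ModelEMA.update_attr` then copies onto the EMA shadow; the accepted fix further down simply skips those attributes. A standalone sketch of that kind of attribute filter, with the skip list taken from the patch and stub classes standing in for the real `nn.Module` objects, is:

```python
SKIP_ATTRS = {"process_group", "reducer"}  # DDP machinery that torch.save cannot pickle


def copy_attrs(src, dst, skip=SKIP_ATTRS):
    # Copy public attributes from the training model onto its EMA shadow,
    # leaving out anything tied to the distributed backend.
    for k, v in src.__dict__.items():
        if not k.startswith("_") and k not in skip:
            setattr(dst, k, v)


class _Stub:  # stand-in for the real nn.Module instances
    pass


model, ema = _Stub(), _Stub()
model.nc, model.process_group = 80, object()  # object() mimics ProcessGroupNCCL
copy_attrs(model, ema)
print(hasattr(ema, "nc"), hasattr(ema, "process_group"))  # True False
```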
<code>
[start of utils/torch_utils.py]
1 import math
2 import os
3 import time
4 from copy import deepcopy
5
6 import torch
7 import torch.backends.cudnn as cudnn
8 import torch.nn as nn
9 import torch.nn.functional as F
10 import torchvision.models as models
11
12
13 def init_seeds(seed=0):
14 torch.manual_seed(seed)
15
16 # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html
17 if seed == 0: # slower, more reproducible
18 cudnn.deterministic = True
19 cudnn.benchmark = False
20 else: # faster, less reproducible
21 cudnn.deterministic = False
22 cudnn.benchmark = True
23
24
25 def select_device(device='', apex=False, batch_size=None):
26 # device = 'cpu' or '0' or '0,1,2,3'
27 cpu_request = device.lower() == 'cpu'
28 if device and not cpu_request: # if device requested other than 'cpu'
29 os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
30 assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity
31
32 cuda = False if cpu_request else torch.cuda.is_available()
33 if cuda:
34 c = 1024 ** 2 # bytes to MB
35 ng = torch.cuda.device_count()
36 if ng > 1 and batch_size: # check that batch_size is compatible with device_count
37 assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)
38 x = [torch.cuda.get_device_properties(i) for i in range(ng)]
39 s = 'Using CUDA ' + ('Apex ' if apex else '') # apex for mixed precision https://github.com/NVIDIA/apex
40 for i in range(0, ng):
41 if i == 1:
42 s = ' ' * len(s)
43 print("%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)" %
44 (s, i, x[i].name, x[i].total_memory / c))
45 else:
46 print('Using CPU')
47
48 print('') # skip a line
49 return torch.device('cuda:0' if cuda else 'cpu')
50
51
52 def time_synchronized():
53 torch.cuda.synchronize() if torch.cuda.is_available() else None
54 return time.time()
55
56
57 def is_parallel(model):
58 # is model is parallel with DP or DDP
59 return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
60
61
62 def initialize_weights(model):
63 for m in model.modules():
64 t = type(m)
65 if t is nn.Conv2d:
66 pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
67 elif t is nn.BatchNorm2d:
68 m.eps = 1e-4
69 m.momentum = 0.03
70 elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:
71 m.inplace = True
72
73
74 def find_modules(model, mclass=nn.Conv2d):
75 # finds layer indices matching module class 'mclass'
76 return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]
77
78
79 def fuse_conv_and_bn(conv, bn):
80 # https://tehnokv.com/posts/fusing-batchnorm-and-conv/
81 with torch.no_grad():
82 # init
83 fusedconv = torch.nn.Conv2d(conv.in_channels,
84 conv.out_channels,
85 kernel_size=conv.kernel_size,
86 stride=conv.stride,
87 padding=conv.padding,
88 bias=True)
89
90 # prepare filters
91 w_conv = conv.weight.clone().view(conv.out_channels, -1)
92 w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
93 fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))
94
95 # prepare spatial bias
96 if conv.bias is not None:
97 b_conv = conv.bias
98 else:
99 b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)
100 b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
101 fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
102
103 return fusedconv
104
105
106 def model_info(model, verbose=False):
107 # Plots a line-by-line description of a PyTorch model
108 n_p = sum(x.numel() for x in model.parameters()) # number parameters
109 n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
110 if verbose:
111 print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
112 for i, (name, p) in enumerate(model.named_parameters()):
113 name = name.replace('module_list.', '')
114 print('%5g %40s %9s %12g %20s %10.3g %10.3g' %
115 (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
116
117 try: # FLOPS
118 from thop import profile
119 flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2
120 fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS
121 except:
122 fs = ''
123
124 print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))
125
126
127 def load_classifier(name='resnet101', n=2):
128 # Loads a pretrained model reshaped to n-class output
129 model = models.__dict__[name](pretrained=True)
130
131 # Display model properties
132 input_size = [3, 224, 224]
133 input_space = 'RGB'
134 input_range = [0, 1]
135 mean = [0.485, 0.456, 0.406]
136 std = [0.229, 0.224, 0.225]
137 for x in [input_size, input_space, input_range, mean, std]:
138 print(x + ' =', eval(x))
139
140 # Reshape output to n classes
141 filters = model.fc.weight.shape[1]
142 model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)
143 model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)
144 model.fc.out_features = n
145 return model
146
147
148 def scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio
149 # scales img(bs,3,y,x) by ratio
150 h, w = img.shape[2:]
151 s = (int(h * ratio), int(w * ratio)) # new size
152 img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize
153 if not same_shape: # pad/crop img
154 gs = 32 # (pixels) grid size
155 h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]
156 return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean
157
158
159 class ModelEMA:
160 """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models
161 Keep a moving average of everything in the model state_dict (parameters and buffers).
162 This is intended to allow functionality like
163 https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
164 A smoothed version of the weights is necessary for some training schemes to perform well.
165 E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
166 RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
167 smoothing of weights to match results. Pay attention to the decay constant you are using
168 relative to your update count per epoch.
169 To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
170 disable validation of the EMA weights. Validation will have to be done manually in a separate
171 process, or after the training stops converging.
172 This class is sensitive where it is initialized in the sequence of model init,
173 GPU assignment and distributed training wrappers.
174 I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
175 """
176
177 def __init__(self, model, decay=0.9999, device=''):
178 # Create EMA
179 self.ema = deepcopy(model.module if is_parallel(model) else model) # FP32 EMA
180 self.ema.eval()
181 self.updates = 0 # number of EMA updates
182 self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)
183 self.device = device # perform ema on different device from model if set
184 if device:
185 self.ema.to(device)
186 for p in self.ema.parameters():
187 p.requires_grad_(False)
188
189 def update(self, model):
190 # Update EMA parameters
191 with torch.no_grad():
192 self.updates += 1
193 d = self.decay(self.updates)
194
195 msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict
196 for k, v in self.ema.state_dict().items():
197 if v.dtype.is_floating_point:
198 v *= d
199 v += (1. - d) * msd[k].detach()
200
201 def update_attr(self, model):
202 # Update EMA attributes
203 for k, v in model.__dict__.items():
204 if not k.startswith('_') and k != 'module':
205 setattr(self.ema, k, v)
206
[end of utils/torch_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/torch_utils.py b/utils/torch_utils.py
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -201,5 +201,5 @@
def update_attr(self, model):
# Update EMA attributes
for k, v in model.__dict__.items():
- if not k.startswith('_') and k != 'module':
+ if not k.startswith('_') and k not in ["process_group", "reducer"]:
setattr(self.ema, k, v)
| {"golden_diff": "diff --git a/utils/torch_utils.py b/utils/torch_utils.py\n--- a/utils/torch_utils.py\n+++ b/utils/torch_utils.py\n@@ -201,5 +201,5 @@\n def update_attr(self, model):\n # Update EMA attributes\n for k, v in model.__dict__.items():\n- if not k.startswith('_') and k != 'module':\n+ if not k.startswith('_') and k not in [\"process_group\", \"reducer\"]:\n setattr(self.ema, k, v)\n", "issue": "TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects\nHi\uff0c\r\nI meet a problem:\r\n\r\nTraceback (most recent call last):\r\n File \"train.py\", line 394, in <module>\r\n train(hyp)\r\n File \"train.py\", line 331, in train\r\n torch.save(ckpt, last)\r\n File \"/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py\", line 328, in save\r\n _legacy_save(obj, opened_file, pickle_module, pickle_protocol)\r\n File \"/home/yy/anaconda3/lib/python3.6/site-packages/torch/serialization.py\", line 401, in _legacy_save\r\n pickler.dump(obj)\r\n**TypeError: can't pickle torch.distributed.ProcessGroupNCCL objects**\r\n\r\nThanks!\r\n\r\nenvironment:\r\nubuntu 16.04\r\nGPU 2080Ti *4\r\npytorch 1.4.0\r\n\n", "before_files": [{"content": "import math\nimport os\nimport time\nfrom copy import deepcopy\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\n\ndef init_seeds(seed=0):\n torch.manual_seed(seed)\n\n # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html\n if seed == 0: # slower, more reproducible\n cudnn.deterministic = True\n cudnn.benchmark = False\n else: # faster, less reproducible\n cudnn.deterministic = False\n cudnn.benchmark = True\n\n\ndef select_device(device='', apex=False, batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n cpu_request = device.lower() == 'cpu'\n if device and not cpu_request: # if device requested other than 'cpu'\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), 'CUDA unavailable, invalid device %s requested' % device # check availablity\n\n cuda = False if cpu_request else torch.cuda.is_available()\n if cuda:\n c = 1024 ** 2 # bytes to MB\n ng = torch.cuda.device_count()\n if ng > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % ng == 0, 'batch-size %g not multiple of GPU count %g' % (batch_size, ng)\n x = [torch.cuda.get_device_properties(i) for i in range(ng)]\n s = 'Using CUDA ' + ('Apex ' if apex else '') # apex for mixed precision https://github.com/NVIDIA/apex\n for i in range(0, ng):\n if i == 1:\n s = ' ' * len(s)\n print(\"%sdevice%g _CudaDeviceProperties(name='%s', total_memory=%dMB)\" %\n (s, i, x[i].name, x[i].total_memory / c))\n else:\n print('Using CPU')\n\n print('') # skip a line\n return torch.device('cuda:0' if cuda else 'cpu')\n\n\ndef time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()\n\n\ndef is_parallel(model):\n # is model is parallel with DP or DDP\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)\n\n\ndef initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-4\n m.momentum = 0.03\n elif t in [nn.LeakyReLU, nn.ReLU, nn.ReLU6]:\n m.inplace = True\n\n\ndef find_modules(model, mclass=nn.Conv2d):\n # finds layer indices 
matching module class 'mclass'\n return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)]\n\n\ndef fuse_conv_and_bn(conv, bn):\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n with torch.no_grad():\n # init\n fusedconv = torch.nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n bias=True)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\n\n # prepare spatial bias\n if conv.bias is not None:\n b_conv = conv.bias\n else:\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device)\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, verbose=False):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n flops = profile(deepcopy(model), inputs=(torch.zeros(1, 3, 64, 64),), verbose=False)[0] / 1E9 * 2\n fs = ', %.1f GFLOPS' % (flops * 100) # 640x640 FLOPS\n except:\n fs = ''\n\n print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))\n\n\ndef load_classifier(name='resnet101', n=2):\n # Loads a pretrained model reshaped to n-class output\n model = models.__dict__[name](pretrained=True)\n\n # Display model properties\n input_size = [3, 224, 224]\n input_space = 'RGB'\n input_range = [0, 1]\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n for x in [input_size, input_space, input_range, mean, std]:\n print(x + ' =', eval(x))\n\n # Reshape output to n classes\n filters = model.fc.weight.shape[1]\n model.fc.bias = torch.nn.Parameter(torch.zeros(n), requires_grad=True)\n model.fc.weight = torch.nn.Parameter(torch.zeros(n, filters), requires_grad=True)\n model.fc.out_features = n\n return model\n\n\ndef scale_img(img, ratio=1.0, same_shape=False): # img(16,3,256,416), r=ratio\n # scales img(bs,3,y,x) by ratio\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n gs = 32 # (pixels) grid size\n h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)]\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean\n\n\nclass ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n E.g. 
Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use\n RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA\n smoothing of weights to match results. Pay attention to the decay constant you are using\n relative to your update count per epoch.\n To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but\n disable validation of the EMA weights. Validation will have to be done manually in a separate\n process, or after the training stops converging.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, device=''):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model) # FP32 EMA\n self.ema.eval()\n self.updates = 0 # number of EMA updates\n self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n self.device = device # perform ema on different device from model if set\n if device:\n self.ema.to(device)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model):\n # Update EMA attributes\n for k, v in model.__dict__.items():\n if not k.startswith('_') and k != 'module':\n setattr(self.ema, k, v)\n", "path": "utils/torch_utils.py"}]} | 3,584 | 117 |
gh_patches_debug_40082 | rasdani/github-patches | git_diff | huggingface__diffusers-6192 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[IP-Adapter] add a 'unload_ip_adapter' method
fix https://github.com/huggingface/diffusers/issues/6088
Is anyone interested in adding an `unload_ip_adapter` method? It needs to undo everything we did with the `load_ip_adapter` method:
https://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/ip_adapter.py#L46
It needs to do the three things below (a rough sketch follows the list):
1. remove `image_encoder`
2. undo the changes we made to the UNet structure here (https://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/unet.py#L82), i.e.
```python
self.encoder_hid_proj = None
self.config.encoder_hid_dim_type = None
```
3. reset the attention processor
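A rough sketch of what such a method could look like, mirroring what `load_ip_adapter` sets up (names and call sites are illustrative, not a final API):
```python
class IPAdapterMixin:
    ...

    def unload_ip_adapter(self):
        # 1. drop the CLIP image encoder (and the feature extractor registered with it)
        if getattr(self, "image_encoder", None) is not None:
            self.image_encoder = None
            self.register_to_config(image_encoder=[None, None])
        if getattr(self, "feature_extractor", None) is not None:
            self.feature_extractor = None
            self.register_to_config(feature_extractor=[None, None])

        # 2. undo the structural changes made to the UNet by `_load_ip_adapter_weights`
        self.unet.encoder_hid_proj = None
        self.config.encoder_hid_dim_type = None

        # 3. restore the default attention processors
        self.unet.set_default_attn_processor()
```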
</issue>
<code>
[start of src/diffusers/loaders/ip_adapter.py]
1 # Copyright 2023 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Dict, Union
16
17 import torch
18 from huggingface_hub.utils import validate_hf_hub_args
19 from safetensors import safe_open
20
21 from ..utils import (
22 _get_model_file,
23 is_transformers_available,
24 logging,
25 )
26
27
28 if is_transformers_available():
29 from transformers import (
30 CLIPImageProcessor,
31 CLIPVisionModelWithProjection,
32 )
33
34 from ..models.attention_processor import (
35 IPAdapterAttnProcessor,
36 IPAdapterAttnProcessor2_0,
37 )
38
39 logger = logging.get_logger(__name__)
40
41
42 class IPAdapterMixin:
43 """Mixin for handling IP Adapters."""
44
45 @validate_hf_hub_args
46 def load_ip_adapter(
47 self,
48 pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
49 subfolder: str,
50 weight_name: str,
51 **kwargs,
52 ):
53 """
54 Parameters:
55 pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
56 Can be either:
57
58 - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
59 the Hub.
60 - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
61 with [`ModelMixin.save_pretrained`].
62 - A [torch state
63 dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
64
65 cache_dir (`Union[str, os.PathLike]`, *optional*):
66 Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
67 is not used.
68 force_download (`bool`, *optional*, defaults to `False`):
69 Whether or not to force the (re-)download of the model weights and configuration files, overriding the
70 cached versions if they exist.
71 resume_download (`bool`, *optional*, defaults to `False`):
72 Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
73 incompletely downloaded files are deleted.
74 proxies (`Dict[str, str]`, *optional*):
75 A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
76 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
77 local_files_only (`bool`, *optional*, defaults to `False`):
78 Whether to only load local model weights and configuration files or not. If set to `True`, the model
79 won't be downloaded from the Hub.
80 token (`str` or *bool*, *optional*):
81 The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
82 `diffusers-cli login` (stored in `~/.huggingface`) is used.
83 revision (`str`, *optional*, defaults to `"main"`):
84 The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
85 allowed by Git.
86 subfolder (`str`, *optional*, defaults to `""`):
87 The subfolder location of a model file within a larger model repository on the Hub or locally.
88 """
89
90 # Load the main state dict first.
91 cache_dir = kwargs.pop("cache_dir", None)
92 force_download = kwargs.pop("force_download", False)
93 resume_download = kwargs.pop("resume_download", False)
94 proxies = kwargs.pop("proxies", None)
95 local_files_only = kwargs.pop("local_files_only", None)
96 token = kwargs.pop("token", None)
97 revision = kwargs.pop("revision", None)
98
99 user_agent = {
100 "file_type": "attn_procs_weights",
101 "framework": "pytorch",
102 }
103
104 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
105 model_file = _get_model_file(
106 pretrained_model_name_or_path_or_dict,
107 weights_name=weight_name,
108 cache_dir=cache_dir,
109 force_download=force_download,
110 resume_download=resume_download,
111 proxies=proxies,
112 local_files_only=local_files_only,
113 token=token,
114 revision=revision,
115 subfolder=subfolder,
116 user_agent=user_agent,
117 )
118 if weight_name.endswith(".safetensors"):
119 state_dict = {"image_proj": {}, "ip_adapter": {}}
120 with safe_open(model_file, framework="pt", device="cpu") as f:
121 for key in f.keys():
122 if key.startswith("image_proj."):
123 state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
124 elif key.startswith("ip_adapter."):
125 state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
126 else:
127 state_dict = torch.load(model_file, map_location="cpu")
128 else:
129 state_dict = pretrained_model_name_or_path_or_dict
130
131 keys = list(state_dict.keys())
132 if keys != ["image_proj", "ip_adapter"]:
133 raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
134
135 # load CLIP image encoer here if it has not been registered to the pipeline yet
136 if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
137 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
138 logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
139 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
140 pretrained_model_name_or_path_or_dict,
141 subfolder=os.path.join(subfolder, "image_encoder"),
142 ).to(self.device, dtype=self.dtype)
143 self.image_encoder = image_encoder
144 else:
145 raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
146
147 # create feature extractor if it has not been registered to the pipeline yet
148 if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
149 self.feature_extractor = CLIPImageProcessor()
150
151 # load ip-adapter into unet
152 self.unet._load_ip_adapter_weights(state_dict)
153
154 def set_ip_adapter_scale(self, scale):
155 for attn_processor in self.unet.attn_processors.values():
156 if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
157 attn_processor.scale = scale
158
[end of src/diffusers/loaders/ip_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py
--- a/src/diffusers/loaders/ip_adapter.py
+++ b/src/diffusers/loaders/ip_adapter.py
@@ -132,7 +132,7 @@
if keys != ["image_proj", "ip_adapter"]:
raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
- # load CLIP image encoer here if it has not been registered to the pipeline yet
+ # load CLIP image encoder here if it has not been registered to the pipeline yet
if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
if not isinstance(pretrained_model_name_or_path_or_dict, dict):
logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
@@ -141,12 +141,14 @@
subfolder=os.path.join(subfolder, "image_encoder"),
).to(self.device, dtype=self.dtype)
self.image_encoder = image_encoder
+ self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
else:
raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
# create feature extractor if it has not been registered to the pipeline yet
if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
self.feature_extractor = CLIPImageProcessor()
+ self.register_to_config(feature_extractor=["transformers", "CLIPImageProcessor"])
# load ip-adapter into unet
self.unet._load_ip_adapter_weights(state_dict)
@@ -155,3 +157,32 @@
for attn_processor in self.unet.attn_processors.values():
if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
attn_processor.scale = scale
+
+ def unload_ip_adapter(self):
+ """
+ Unloads the IP Adapter weights
+
+ Examples:
+
+ ```python
+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
+ >>> pipeline.unload_ip_adapter()
+ >>> ...
+ ```
+ """
+ # remove CLIP image encoder
+ if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
+ self.image_encoder = None
+ self.register_to_config(image_encoder=[None, None])
+
+ # remove feature extractor
+ if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
+ self.feature_extractor = None
+ self.register_to_config(feature_extractor=[None, None])
+
+ # remove hidden encoder
+ self.unet.encoder_hid_proj = None
+ self.config.encoder_hid_dim_type = None
+
+ # restore original Unet attention processors layers
+ self.unet.set_default_attn_processor()
| {"golden_diff": "diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py\n--- a/src/diffusers/loaders/ip_adapter.py\n+++ b/src/diffusers/loaders/ip_adapter.py\n@@ -132,7 +132,7 @@\n if keys != [\"image_proj\", \"ip_adapter\"]:\n raise ValueError(\"Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.\")\n \n- # load CLIP image encoer here if it has not been registered to the pipeline yet\n+ # load CLIP image encoder here if it has not been registered to the pipeline yet\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is None:\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n@@ -141,12 +141,14 @@\n subfolder=os.path.join(subfolder, \"image_encoder\"),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n+ self.register_to_config(image_encoder=[\"transformers\", \"CLIPVisionModelWithProjection\"])\n else:\n raise ValueError(\"`image_encoder` cannot be None when using IP Adapters.\")\n \n # create feature extractor if it has not been registered to the pipeline yet\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is None:\n self.feature_extractor = CLIPImageProcessor()\n+ self.register_to_config(feature_extractor=[\"transformers\", \"CLIPImageProcessor\"])\n \n # load ip-adapter into unet\n self.unet._load_ip_adapter_weights(state_dict)\n@@ -155,3 +157,32 @@\n for attn_processor in self.unet.attn_processors.values():\n if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):\n attn_processor.scale = scale\n+\n+ def unload_ip_adapter(self):\n+ \"\"\"\n+ Unloads the IP Adapter weights\n+\n+ Examples:\n+\n+ ```python\n+ >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.\n+ >>> pipeline.unload_ip_adapter()\n+ >>> ...\n+ ```\n+ \"\"\"\n+ # remove CLIP image encoder\n+ if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is not None:\n+ self.image_encoder = None\n+ self.register_to_config(image_encoder=[None, None])\n+\n+ # remove feature extractor\n+ if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is not None:\n+ self.feature_extractor = None\n+ self.register_to_config(feature_extractor=[None, None])\n+\n+ # remove hidden encoder\n+ self.unet.encoder_hid_proj = None\n+ self.config.encoder_hid_dim_type = None\n+\n+ # restore original Unet attention processors layers\n+ self.unet.set_default_attn_processor()\n", "issue": "[IP-Adapter] add a 'unload_ip_adapter' method\nfix https://github.com/huggingface/diffusers/issues/6088\r\n\r\nIs anyone interested in adding a `unload_ip_adapter` method? it needs to undo everything we did with `load_ip_adapter` method:\r\nhttps://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/ip_adapter.py#L46\r\n\r\nIt needs to do below 3 things:\r\n1. remove `image_encoder`\r\n2. undo the changes we made to the UNet structure here (https://github.com/huggingface/diffusers/blob/93ea26f272f69a0e27afaebc96b68a2221a7eda0/src/diffusers/loaders/unet.py#L82), i.e. \r\n ```python\r\n self.encoder_hid_proj = None\r\n self.config.encoder_hid_dim_type = None\r\n ```\r\n3. reset the attention processor \n", "before_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Dict, Union\n\nimport torch\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom safetensors import safe_open\n\nfrom ..utils import (\n _get_model_file,\n is_transformers_available,\n logging,\n)\n\n\nif is_transformers_available():\n from transformers import (\n CLIPImageProcessor,\n CLIPVisionModelWithProjection,\n )\n\n from ..models.attention_processor import (\n IPAdapterAttnProcessor,\n IPAdapterAttnProcessor2_0,\n )\n\nlogger = logging.get_logger(__name__)\n\n\nclass IPAdapterMixin:\n \"\"\"Mixin for handling IP Adapters.\"\"\"\n\n @validate_hf_hub_args\n def load_ip_adapter(\n self,\n pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],\n subfolder: str,\n weight_name: str,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`ModelMixin.save_pretrained`].\n - A [torch state\n dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n \"\"\"\n\n # Load the main state dict first.\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", None)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n user_agent = {\n \"file_type\": \"attn_procs_weights\",\n \"framework\": \"pytorch\",\n }\n\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n model_file = _get_model_file(\n pretrained_model_name_or_path_or_dict,\n weights_name=weight_name,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n )\n if weight_name.endswith(\".safetensors\"):\n state_dict = {\"image_proj\": {}, \"ip_adapter\": {}}\n with safe_open(model_file, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n if key.startswith(\"image_proj.\"):\n state_dict[\"image_proj\"][key.replace(\"image_proj.\", \"\")] = f.get_tensor(key)\n elif key.startswith(\"ip_adapter.\"):\n state_dict[\"ip_adapter\"][key.replace(\"ip_adapter.\", \"\")] = f.get_tensor(key)\n else:\n state_dict = torch.load(model_file, map_location=\"cpu\")\n else:\n state_dict = pretrained_model_name_or_path_or_dict\n\n keys = list(state_dict.keys())\n if keys != [\"image_proj\", \"ip_adapter\"]:\n raise ValueError(\"Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.\")\n\n # load CLIP image encoer here if it has not been registered to the pipeline yet\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is None:\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n image_encoder = CLIPVisionModelWithProjection.from_pretrained(\n pretrained_model_name_or_path_or_dict,\n subfolder=os.path.join(subfolder, \"image_encoder\"),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n else:\n raise ValueError(\"`image_encoder` cannot be None when using IP Adapters.\")\n\n # create feature extractor if it has not been registered to the pipeline yet\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is None:\n self.feature_extractor = CLIPImageProcessor()\n\n # load ip-adapter into unet\n self.unet._load_ip_adapter_weights(state_dict)\n\n def set_ip_adapter_scale(self, scale):\n for attn_processor in self.unet.attn_processors.values():\n if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):\n attn_processor.scale = scale\n", "path": "src/diffusers/loaders/ip_adapter.py"}]} | 2,687 | 657 |
gh_patches_debug_2105 | rasdani/github-patches | git_diff | pallets__click-123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automatically dedent help text of options?
```
import click
@click.command()
@click.option('--foo', help="""
heyho
i am
multiline
""")
def cli(foo):
click.echo(foo)
cli()
```
This currently does not remove the leading whitespace from each paragraph in the help text for `--foo`:
```
untitaker@untibox:~/projects/click, branch master
$ python lol.py --help
Usage: lol.py [OPTIONS]
Options:
--foo TEXT
heyho
i am
multiline
--help Show this message and exit.
```
Although I am not sure if help texts for options are even supposed to get that complex.
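One possible fix (sketched here, not necessarily how it will land upstream) is to run the help string through `inspect.cleandoc()` — the same normalisation Python applies to docstrings — when the option is created; `click/decorators.py` already imports `inspect`:
```python
def option(*param_decls, **attrs):
    def decorator(f):
        if 'help' in attrs:
            # strip the common leading indentation, like inspect.getdoc() does for docstrings
            attrs['help'] = inspect.cleandoc(attrs['help'])
        _param_memo(f, Option(param_decls, **attrs))
        return f
    return decorator
```
With that in place, the multiline help in the snippet above would be dedented before it reaches the help formatter.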
</issue>
<code>
[start of click/decorators.py]
1 import sys
2 import inspect
3
4 from functools import update_wrapper
5
6 from ._compat import iteritems
7 from .utils import echo
8
9
10 def pass_context(f):
11 """Marks a callback as wanting to receive the current context
12 object as first argument.
13 """
14 f.__click_pass_context__ = True
15 return f
16
17
18 def pass_obj(f):
19 """Similar to :func:`pass_context`, but only pass the object on the
20 context onwards (:attr:`Context.obj`). This is useful if that object
21 represents the state of a nested system.
22 """
23 @pass_context
24 def new_func(*args, **kwargs):
25 ctx = args[0]
26 return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)
27 return update_wrapper(new_func, f)
28
29
30 def make_pass_decorator(object_type, ensure=False):
31 """Given an object type this creates a decorator that will work
32 similar to :func:`pass_obj` but instead of passing the object of the
33 current context, it will find the innermost context of type
34 :func:`object_type`.
35
36 This generates a decorator that works roughly like this::
37
38 from functools import update_wrapper
39
40 def decorator(f):
41 @pass_context
42 def new_func(ctx, *args, **kwargs):
43 obj = ctx.find_object(object_type)
44 return ctx.invoke(f, obj, *args, **kwargs)
45 return update_wrapper(new_func, f)
46 return decorator
47
48 :param object_type: the type of the object to pass.
49 :param ensure: if set to `True`, a new object will be created and
50 remembered on the context if it's not there yet.
51 """
52 def decorator(f):
53 @pass_context
54 def new_func(*args, **kwargs):
55 ctx = args[0]
56 if ensure:
57 obj = ctx.ensure_object(object_type)
58 else:
59 obj = ctx.find_object(object_type)
60 if obj is None:
61 raise RuntimeError('Managed to invoke callback without a '
62 'context object of type %r existing'
63 % object_type.__name__)
64 return ctx.invoke(f, obj, *args[1:], **kwargs)
65 return update_wrapper(new_func, f)
66 return decorator
67
68
69 def _make_command(f, name, attrs, cls):
70 if isinstance(f, Command):
71 raise TypeError('Attempted to convert a callback into a '
72 'command twice.')
73 try:
74 params = f.__click_params__
75 params.reverse()
76 del f.__click_params__
77 except AttributeError:
78 params = []
79 help = inspect.getdoc(f)
80 if isinstance(help, bytes):
81 help = help.decode('utf-8')
82 attrs.setdefault('help', help)
83 return cls(name=name or f.__name__.lower(),
84 callback=f, params=params, **attrs)
85
86
87 def command(name=None, cls=None, **attrs):
88 """Creates a new :class:`Command` and uses the decorated function as
89 callback. This will also automatically attach all decorated
90 :func:`option`\s and :func:`argument`\s as parameters to the command.
91
92 The name of the command defaults to the name of the function. If you
93 want to change that, you can pass the intended name as the first
94 argument.
95
96 All keyword arguments are forwarded to the underlying command class.
97
98 Once decorated the function turns into a :class:`Command` instance
99 that can be invoked as a command line utility or be attached to a
100 command :class:`Group`.
101
102 :param name: the name of the command. This defaults to the function
103 name.
104 :param cls: the command class to instantiate. This defaults to
105 :class:`Command`.
106 """
107 if cls is None:
108 cls = Command
109 def decorator(f):
110 return _make_command(f, name, attrs, cls)
111 return decorator
112
113
114 def group(name=None, **attrs):
115 """Creates a new :class:`Group` with a function as callback. This
116 works otherwise the same as :func:`command` just that the `cls`
117 parameter is set to :class:`Group`.
118 """
119 attrs.setdefault('cls', Group)
120 return command(name, **attrs)
121
122
123 def _param_memo(f, param):
124 if isinstance(f, Command):
125 f.params.append(param)
126 else:
127 if not hasattr(f, '__click_params__'):
128 f.__click_params__ = []
129 f.__click_params__.append(param)
130
131
132 def argument(*param_decls, **attrs):
133 """Attaches an option to the command. All positional arguments are
134 passed as parameter declarations to :class:`Argument`; all keyword
135 arguments are forwarded unchanged. This is equivalent to creating an
136 :class:`Option` instance manually and attaching it to the
137 :attr:`Command.params` list.
138 """
139 def decorator(f):
140 _param_memo(f, Argument(param_decls, **attrs))
141 return f
142 return decorator
143
144
145 def option(*param_decls, **attrs):
146 """Attaches an option to the command. All positional arguments are
147 passed as parameter declarations to :class:`Option`; all keyword
148 arguments are forwarded unchanged. This is equivalent to creating an
149 :class:`Option` instance manually and attaching it to the
150 :attr:`Command.params` list.
151 """
152 def decorator(f):
153 _param_memo(f, Option(param_decls, **attrs))
154 return f
155 return decorator
156
157
158 def confirmation_option(*param_decls, **attrs):
159 """Shortcut for confirmation prompts that can be ignored by passing
160 ``--yes`` as parameter.
161
162 This is equivalent to decorating a function with :func:`option` with
163 the following parameters::
164
165 def callback(ctx, param, value):
166 if not value:
167 ctx.abort()
168
169 @click.command()
170 @click.option('--yes', is_flag=True, callback=callback,
171 expose_value=False, prompt='Do you want to continue?')
172 def dropdb():
173 pass
174 """
175 def decorator(f):
176 def callback(ctx, param, value):
177 if not value:
178 ctx.abort()
179 attrs.setdefault('is_flag', True)
180 attrs.setdefault('callback', callback)
181 attrs.setdefault('expose_value', False)
182 attrs.setdefault('prompt', 'Do you want to continue?')
183 attrs.setdefault('help', 'Confirm the action without prompting.')
184 return option(*(param_decls or ('--yes',)), **attrs)(f)
185 return decorator
186
187
188 def password_option(*param_decls, **attrs):
189 """Shortcut for password prompts.
190
191 This is equivalent to decorating a function with :func:`option` with
192 the following parameters::
193
194 @click.command()
195 @click.option('--password', prompt=True, confirmation_prompt=True,
196 hide_input=True)
197 def changeadmin(password):
198 pass
199 """
200 def decorator(f):
201 attrs.setdefault('prompt', True)
202 attrs.setdefault('confirmation_prompt', True)
203 attrs.setdefault('hide_input', True)
204 return option(*(param_decls or ('--password',)), **attrs)(f)
205 return decorator
206
207
208 def version_option(version=None, *param_decls, **attrs):
209 """Adds a ``--version`` option which immediately ends the program
210 printing out the version number. This is implemented as an eager
211 option that prints the version and exits the program in the callback.
212
213 :param version: the version number to show. If not provided click
214 attempts an auto discovery via setuptools.
215 :param prog_name: the name of the program (defaults to autodetection)
216 :param message: custom message to show instead of the default
217 (``'%(prog)s, version %(version)s'``)
218 :param others: everything else is forwarded to :func:`option`.
219 """
220 if version is None:
221 module = sys._getframe(1).f_globals.get('__name__')
222 def decorator(f):
223 prog_name = attrs.pop('prog_name', None)
224 message = attrs.pop('message', '%(prog)s, version %(version)s')
225
226 def callback(ctx, param, value):
227 if not value or ctx.resilient_parsing:
228 return
229 prog = prog_name
230 if prog is None:
231 prog = ctx.find_root().info_name
232 ver = version
233 if ver is None:
234 try:
235 import pkg_resources
236 except ImportError:
237 pass
238 else:
239 for dist in pkg_resources.working_set:
240 scripts = dist.get_entry_map().get('console_scripts') or {}
241 for script_name, entry_point in iteritems(scripts):
242 if entry_point.module_name == module:
243 ver = dist.version
244 break
245 if ver is None:
246 raise RuntimeError('Could not determine version')
247 echo(message % {
248 'prog': prog,
249 'version': ver,
250 })
251 ctx.exit()
252
253 attrs.setdefault('is_flag', True)
254 attrs.setdefault('expose_value', False)
255 attrs.setdefault('is_eager', True)
256 attrs.setdefault('help', 'Show the version and exit.')
257 attrs['callback'] = callback
258 return option(*(param_decls or ('--version',)), **attrs)(f)
259 return decorator
260
261
262 def help_option(*param_decls, **attrs):
263 """Adds a ``--help`` option which immediately ends the program
264 printing out the help page. This is usually unnecessary to add as
265 this is added by default to all commands unless suppressed.
266
267 Like :func:`version_option`, this is implemented as eager option that
268 prints in the callback and exits.
269
270 All arguments are forwarded to :func:`option`.
271 """
272 def decorator(f):
273 def callback(ctx, param, value):
274 if value and not ctx.resilient_parsing:
275 echo(ctx.get_help())
276 ctx.exit()
277 attrs.setdefault('is_flag', True)
278 attrs.setdefault('expose_value', False)
279 attrs.setdefault('help', 'Show this message and exit.')
280 attrs.setdefault('is_eager', True)
281 attrs['callback'] = callback
282 return option(*(param_decls or ('--help',)), **attrs)(f)
283 return decorator
284
285
286 # Circular dependencies between core and decorators
287 from .core import Command, Group, Argument, Option
288
[end of click/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/click/decorators.py b/click/decorators.py
--- a/click/decorators.py
+++ b/click/decorators.py
@@ -150,6 +150,8 @@
:attr:`Command.params` list.
"""
def decorator(f):
+ if 'help' in attrs:
+ attrs['help'] = inspect.cleandoc(attrs['help'])
_param_memo(f, Option(param_decls, **attrs))
return f
return decorator
| {"golden_diff": "diff --git a/click/decorators.py b/click/decorators.py\n--- a/click/decorators.py\n+++ b/click/decorators.py\n@@ -150,6 +150,8 @@\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n+ if 'help' in attrs:\n+ attrs['help'] = inspect.cleandoc(attrs['help'])\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n", "issue": "Automatically dedent help text of options?\n```\nimport click\n\[email protected]()\[email protected]('--foo', help=\"\"\"\n heyho\n\n i am\n\n multiline\n\"\"\")\ndef cli(foo):\n click.echo(foo)\n\ncli()\n```\n\nThis currently does not remove the leading whitespace from each paragraph in the help text for `--foo`:\n\n```\nuntitaker@untibox:~/projects/click, branch master\n$ python lol.py --help\nUsage: lol.py [OPTIONS]\n\nOptions:\n --foo TEXT\n heyho\n\n i am\n\n multiline\n --help Show this message and exit.\n```\n\nAlthough i am not sure if help texts for options are even supposed to get that\ncomplex.\n\n", "before_files": [{"content": "import sys\nimport inspect\n\nfrom functools import update_wrapper\n\nfrom ._compat import iteritems\nfrom .utils import echo\n\n\ndef pass_context(f):\n \"\"\"Marks a callback as wanting to receive the current context\n object as first argument.\n \"\"\"\n f.__click_pass_context__ = True\n return f\n\n\ndef pass_obj(f):\n \"\"\"Similar to :func:`pass_context`, but only pass the object on the\n context onwards (:attr:`Context.obj`). This is useful if that object\n represents the state of a nested system.\n \"\"\"\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n return ctx.invoke(f, ctx.obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n\n\ndef make_pass_decorator(object_type, ensure=False):\n \"\"\"Given an object type this creates a decorator that will work\n similar to :func:`pass_obj` but instead of passing the object of the\n current context, it will find the innermost context of type\n :func:`object_type`.\n\n This generates a decorator that works roughly like this::\n\n from functools import update_wrapper\n\n def decorator(f):\n @pass_context\n def new_func(ctx, *args, **kwargs):\n obj = ctx.find_object(object_type)\n return ctx.invoke(f, obj, *args, **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n :param object_type: the type of the object to pass.\n :param ensure: if set to `True`, a new object will be created and\n remembered on the context if it's not there yet.\n \"\"\"\n def decorator(f):\n @pass_context\n def new_func(*args, **kwargs):\n ctx = args[0]\n if ensure:\n obj = ctx.ensure_object(object_type)\n else:\n obj = ctx.find_object(object_type)\n if obj is None:\n raise RuntimeError('Managed to invoke callback without a '\n 'context object of type %r existing'\n % object_type.__name__)\n return ctx.invoke(f, obj, *args[1:], **kwargs)\n return update_wrapper(new_func, f)\n return decorator\n\n\ndef _make_command(f, name, attrs, cls):\n if isinstance(f, Command):\n raise TypeError('Attempted to convert a callback into a '\n 'command twice.')\n try:\n params = f.__click_params__\n params.reverse()\n del f.__click_params__\n except AttributeError:\n params = []\n help = inspect.getdoc(f)\n if isinstance(help, bytes):\n help = help.decode('utf-8')\n attrs.setdefault('help', help)\n return cls(name=name or f.__name__.lower(),\n callback=f, params=params, **attrs)\n\n\ndef command(name=None, cls=None, **attrs):\n \"\"\"Creates a new :class:`Command` and uses the decorated function as\n callback. 
This will also automatically attach all decorated\n :func:`option`\\s and :func:`argument`\\s as parameters to the command.\n\n The name of the command defaults to the name of the function. If you\n want to change that, you can pass the intended name as the first\n argument.\n\n All keyword arguments are forwarded to the underlying command class.\n\n Once decorated the function turns into a :class:`Command` instance\n that can be invoked as a command line utility or be attached to a\n command :class:`Group`.\n\n :param name: the name of the command. This defaults to the function\n name.\n :param cls: the command class to instantiate. This defaults to\n :class:`Command`.\n \"\"\"\n if cls is None:\n cls = Command\n def decorator(f):\n return _make_command(f, name, attrs, cls)\n return decorator\n\n\ndef group(name=None, **attrs):\n \"\"\"Creates a new :class:`Group` with a function as callback. This\n works otherwise the same as :func:`command` just that the `cls`\n parameter is set to :class:`Group`.\n \"\"\"\n attrs.setdefault('cls', Group)\n return command(name, **attrs)\n\n\ndef _param_memo(f, param):\n if isinstance(f, Command):\n f.params.append(param)\n else:\n if not hasattr(f, '__click_params__'):\n f.__click_params__ = []\n f.__click_params__.append(param)\n\n\ndef argument(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Argument`; all keyword\n arguments are forwarded unchanged. This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Argument(param_decls, **attrs))\n return f\n return decorator\n\n\ndef option(*param_decls, **attrs):\n \"\"\"Attaches an option to the command. All positional arguments are\n passed as parameter declarations to :class:`Option`; all keyword\n arguments are forwarded unchanged. 
This is equivalent to creating an\n :class:`Option` instance manually and attaching it to the\n :attr:`Command.params` list.\n \"\"\"\n def decorator(f):\n _param_memo(f, Option(param_decls, **attrs))\n return f\n return decorator\n\n\ndef confirmation_option(*param_decls, **attrs):\n \"\"\"Shortcut for confirmation prompts that can be ignored by passing\n ``--yes`` as parameter.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n\n @click.command()\n @click.option('--yes', is_flag=True, callback=callback,\n expose_value=False, prompt='Do you want to continue?')\n def dropdb():\n pass\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if not value:\n ctx.abort()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('callback', callback)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('prompt', 'Do you want to continue?')\n attrs.setdefault('help', 'Confirm the action without prompting.')\n return option(*(param_decls or ('--yes',)), **attrs)(f)\n return decorator\n\n\ndef password_option(*param_decls, **attrs):\n \"\"\"Shortcut for password prompts.\n\n This is equivalent to decorating a function with :func:`option` with\n the following parameters::\n\n @click.command()\n @click.option('--password', prompt=True, confirmation_prompt=True,\n hide_input=True)\n def changeadmin(password):\n pass\n \"\"\"\n def decorator(f):\n attrs.setdefault('prompt', True)\n attrs.setdefault('confirmation_prompt', True)\n attrs.setdefault('hide_input', True)\n return option(*(param_decls or ('--password',)), **attrs)(f)\n return decorator\n\n\ndef version_option(version=None, *param_decls, **attrs):\n \"\"\"Adds a ``--version`` option which immediately ends the program\n printing out the version number. This is implemented as an eager\n option that prints the version and exits the program in the callback.\n\n :param version: the version number to show. 
If not provided click\n attempts an auto discovery via setuptools.\n :param prog_name: the name of the program (defaults to autodetection)\n :param message: custom message to show instead of the default\n (``'%(prog)s, version %(version)s'``)\n :param others: everything else is forwarded to :func:`option`.\n \"\"\"\n if version is None:\n module = sys._getframe(1).f_globals.get('__name__')\n def decorator(f):\n prog_name = attrs.pop('prog_name', None)\n message = attrs.pop('message', '%(prog)s, version %(version)s')\n\n def callback(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n prog = prog_name\n if prog is None:\n prog = ctx.find_root().info_name\n ver = version\n if ver is None:\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in pkg_resources.working_set:\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for script_name, entry_point in iteritems(scripts):\n if entry_point.module_name == module:\n ver = dist.version\n break\n if ver is None:\n raise RuntimeError('Could not determine version')\n echo(message % {\n 'prog': prog,\n 'version': ver,\n })\n ctx.exit()\n\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('is_eager', True)\n attrs.setdefault('help', 'Show the version and exit.')\n attrs['callback'] = callback\n return option(*(param_decls or ('--version',)), **attrs)(f)\n return decorator\n\n\ndef help_option(*param_decls, **attrs):\n \"\"\"Adds a ``--help`` option which immediately ends the program\n printing out the help page. This is usually unnecessary to add as\n this is added by default to all commands unless suppressed.\n\n Like :func:`version_option`, this is implemented as eager option that\n prints in the callback and exits.\n\n All arguments are forwarded to :func:`option`.\n \"\"\"\n def decorator(f):\n def callback(ctx, param, value):\n if value and not ctx.resilient_parsing:\n echo(ctx.get_help())\n ctx.exit()\n attrs.setdefault('is_flag', True)\n attrs.setdefault('expose_value', False)\n attrs.setdefault('help', 'Show this message and exit.')\n attrs.setdefault('is_eager', True)\n attrs['callback'] = callback\n return option(*(param_decls or ('--help',)), **attrs)(f)\n return decorator\n\n\n# Circular dependencies between core and decorators\nfrom .core import Command, Group, Argument, Option\n", "path": "click/decorators.py"}]} | 3,646 | 114 |
gh_patches_debug_25000 | rasdani/github-patches | git_diff | adfinis__timed-backend-938 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: remaining effort on initial report creation is buggy
## Scenario
Create a report and add an estimated remaining effort to it.
## Expected behavior
It updates the remaining effort of the certain task in the statistics view.
## Actual behavior
After submitting the report, the remaining effort does **not** update in the stats view. But after editing the remaining effort and saving the report again in the timesheet, it will update the values in the stats view.
The attributes are fine in both requests to the backend. I suspect that the `POST` and `PATCH` requests have different handling of the remaining effort attribute. Maybe the `POST` does not properly trigger the update?
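A minimal sketch of how the `pre_save` signal could cover report creation as well, with the shared aggregation pulled into a helper (`update_remaining_effort` is just an illustrative name; the imports are the ones already at the top of `timed/tracking/signals.py`):
```python
@receiver(pre_save, sender=Report)
def update_most_recent_remaining_effort(sender, instance, **kwargs):
    if kwargs.get("raw", False):  # pragma: no cover
        return
    if instance.task.project.remaining_effort_tracking is not True:
        return

    # a brand-new report has no stored value to compare against: always propagate it
    if not instance.pk:
        update_remaining_effort(instance)
        return

    # an edited report only propagates when the remaining effort actually changed
    if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:
        update_remaining_effort(instance)


def update_remaining_effort(report):
    task = report.task
    task.most_recent_remaining_effort = report.remaining_effort
    task.save()

    project = task.project
    project.total_remaining_effort = (
        project.tasks.all()
        .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
        .get("sum_remaining")
    )
    project.save()
```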
</issue>
<code>
[start of timed/tracking/signals.py]
1 from django.db.models import Sum
2 from django.db.models.signals import pre_save
3 from django.dispatch import receiver
4
5 from timed.tracking.models import Report
6
7
8 @receiver(pre_save, sender=Report)
9 def update_rejected_on_reports(sender, instance, **kwargs):
10 """Unreject report when the task changes."""
11 # Check if the report is being created or updated
12 if instance.pk and instance.rejected:
13 report = Report.objects.get(id=instance.id)
14 if report.task_id != instance.task_id:
15 instance.rejected = False
16
17
18 @receiver(pre_save, sender=Report)
19 def update_most_recent_remaining_effort(sender, instance, **kwargs):
20 """Update remaining effort on task, if remaining effort tracking is active.
21
22 Update most_recent_remaining_effort on task and total_remaining_effort on project
23 only if remaining effort on report has changed.
24 Any other change on report should not trigger this signal.
25 """
26 if kwargs.get("raw", False): # pragma: no cover
27 return
28
29 if not instance.pk:
30 return
31 if instance.task.project.remaining_effort_tracking is not True:
32 return
33
34 if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:
35 task = instance.task
36 task.most_recent_remaining_effort = instance.remaining_effort
37 task.save()
38
39 project = task.project
40 total_remaining_effort = (
41 project.tasks.all()
42 .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
43 .get("sum_remaining")
44 )
45 project.total_remaining_effort = total_remaining_effort
46 project.save()
47
[end of timed/tracking/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/timed/tracking/signals.py b/timed/tracking/signals.py
--- a/timed/tracking/signals.py
+++ b/timed/tracking/signals.py
@@ -26,21 +26,30 @@
if kwargs.get("raw", False): # pragma: no cover
return
- if not instance.pk:
- return
if instance.task.project.remaining_effort_tracking is not True:
return
+ # update most_recent_remaining_effort and total_remaining_effort on report creation
+ if not instance.pk:
+ update_remaining_effort(instance)
+ return
+
+ # check if remaining effort has changed on report update
if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:
- task = instance.task
- task.most_recent_remaining_effort = instance.remaining_effort
- task.save()
-
- project = task.project
- total_remaining_effort = (
- project.tasks.all()
- .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
- .get("sum_remaining")
- )
- project.total_remaining_effort = total_remaining_effort
- project.save()
+ update_remaining_effort(instance)
+
+
+def update_remaining_effort(report):
+ task = report.task
+ project = task.project
+
+ task.most_recent_remaining_effort = report.remaining_effort
+ task.save()
+
+ total_remaining_effort = (
+ task.project.tasks.all()
+ .aggregate(sum_remaining=Sum("most_recent_remaining_effort"))
+ .get("sum_remaining")
+ )
+ project.total_remaining_effort = total_remaining_effort
+ project.save()
| {"golden_diff": "diff --git a/timed/tracking/signals.py b/timed/tracking/signals.py\n--- a/timed/tracking/signals.py\n+++ b/timed/tracking/signals.py\n@@ -26,21 +26,30 @@\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n \n- if not instance.pk:\n- return\n if instance.task.project.remaining_effort_tracking is not True:\n return\n \n+ # update most_recent_remaining_effort and total_remaining_effort on report creation\n+ if not instance.pk:\n+ update_remaining_effort(instance)\n+ return\n+\n+ # check if remaining effort has changed on report update\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n- task = instance.task\n- task.most_recent_remaining_effort = instance.remaining_effort\n- task.save()\n-\n- project = task.project\n- total_remaining_effort = (\n- project.tasks.all()\n- .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n- .get(\"sum_remaining\")\n- )\n- project.total_remaining_effort = total_remaining_effort\n- project.save()\n+ update_remaining_effort(instance)\n+\n+\n+def update_remaining_effort(report):\n+ task = report.task\n+ project = task.project\n+\n+ task.most_recent_remaining_effort = report.remaining_effort\n+ task.save()\n+\n+ total_remaining_effort = (\n+ task.project.tasks.all()\n+ .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n+ .get(\"sum_remaining\")\n+ )\n+ project.total_remaining_effort = total_remaining_effort\n+ project.save()\n", "issue": "bug: remaining effort on initial report creation is buggy\n## Scenario\r\nCreate a report and add an estimated remaining effort to it.\r\n\r\n## Expected behavior\r\nIt updates the remaining effort of the certain task in the statistics view.\r\n\r\n## Actual behavior\r\nAfter submitting the report the remaining effort does **not** update in stats view. But after editing the rem. effort and saving the report again in the timesheet, it will update the values in the stats view.\r\n\r\nThe attributes are fine in both requests to the backend. I suspect that a the `POST` and `PATCH` requests have different handling of the remaining effort attribute. 
Maybe the `POST` does not properly trigger the update?\r\n\r\n\n", "before_files": [{"content": "from django.db.models import Sum\nfrom django.db.models.signals import pre_save\nfrom django.dispatch import receiver\n\nfrom timed.tracking.models import Report\n\n\n@receiver(pre_save, sender=Report)\ndef update_rejected_on_reports(sender, instance, **kwargs):\n \"\"\"Unreject report when the task changes.\"\"\"\n # Check if the report is being created or updated\n if instance.pk and instance.rejected:\n report = Report.objects.get(id=instance.id)\n if report.task_id != instance.task_id:\n instance.rejected = False\n\n\n@receiver(pre_save, sender=Report)\ndef update_most_recent_remaining_effort(sender, instance, **kwargs):\n \"\"\"Update remaining effort on task, if remaining effort tracking is active.\n\n Update most_recent_remaining_effort on task and total_remaining_effort on project\n only if remaining effort on report has changed.\n Any other change on report should not trigger this signal.\n \"\"\"\n if kwargs.get(\"raw\", False): # pragma: no cover\n return\n\n if not instance.pk:\n return\n if instance.task.project.remaining_effort_tracking is not True:\n return\n\n if instance.remaining_effort != Report.objects.get(id=instance.id).remaining_effort:\n task = instance.task\n task.most_recent_remaining_effort = instance.remaining_effort\n task.save()\n\n project = task.project\n total_remaining_effort = (\n project.tasks.all()\n .aggregate(sum_remaining=Sum(\"most_recent_remaining_effort\"))\n .get(\"sum_remaining\")\n )\n project.total_remaining_effort = total_remaining_effort\n project.save()\n", "path": "timed/tracking/signals.py"}]} | 1,168 | 376 |
gh_patches_debug_34733 | rasdani/github-patches | git_diff | pre-commit__pre-commit-231 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: base manifest value for 'exclude' is always ignored
I stumbled upon this bug while working on #226: the culprit is [`Repository.hooks`](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/repository.py#L48).
A quick fix for this would be to simply remove the default value from `pre_commit/clientlib/validate_config.py`, but the root cause is that any default value defined for a field in this file will make the corresponding manifest field useless.
Basically here is what happens in `Repository.hooks`:
- all the hooks defined in the current repository are enumerated
- at this stage, a `hook` is a dict closely matching the YAML content of the config file, **plus** default values for fields not defined in the YAML but having a JSON schema 'default'
- when doing the dict merge, **every** (key, value) pair in `hook` overrides the corresponding manifest entry. This includes default config values like `exclude: '^$'` overriding a base manifest value like `exclude: '.bak$'` (illustrated below)
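A small illustration of that merge behaviour (hook id and patterns are made up for the example):
```python
manifest_hook = {"id": "my-hook", "exclude": r"\.bak$"}  # value defined in the hook repo's manifest
config_hook = {"id": "my-hook", "exclude": "^$"}         # '^$' injected by the config schema default
merged = dict(manifest_hook, **config_hook)              # every config key wins over the manifest value
assert merged["exclude"] == "^$"                         # the manifest's exclude pattern is silently lost
```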
Hence I suggest either adding a test ensuring there will never be any 'default' defined in `CONFIG_JSON_SCHEMA`, or improving the merge logic.
</issue>
<code>
[start of pre_commit/clientlib/validate_config.py]
1 from __future__ import unicode_literals
2
3 from pre_commit.clientlib.validate_base import get_run_function
4 from pre_commit.clientlib.validate_base import get_validator
5 from pre_commit.clientlib.validate_base import is_regex_valid
6 from pre_commit.errors import FatalError
7
8
9 _LOCAL_HOOKS_MAGIC_REPO_STRING = 'local'
10
11
12 def is_local_hooks(repo_entry):
13 return repo_entry['repo'] == _LOCAL_HOOKS_MAGIC_REPO_STRING
14
15
16 class InvalidConfigError(FatalError):
17 pass
18
19
20 CONFIG_JSON_SCHEMA = {
21 'type': 'array',
22 'minItems': 1,
23 'items': {
24 'type': 'object',
25 'properties': {
26 'repo': {'type': 'string'},
27 'sha': {'type': 'string'},
28 'hooks': {
29 'type': 'array',
30 'minItems': 1,
31 'items': {
32 'type': 'object',
33 'properties': {
34 'id': {'type': 'string'},
35 'files': {'type': 'string'},
36 'exclude': {'type': 'string', 'default': '^$'},
37 'language_version': {'type': 'string'},
38 'args': {
39 'type': 'array',
40 'items': {'type': 'string'},
41 },
42 },
43 'required': ['id'],
44 }
45 }
46 },
47 'required': ['repo', 'hooks'],
48 }
49 }
50
51
52 def try_regex(repo, hook, value, field_name):
53 if not is_regex_valid(value):
54 raise InvalidConfigError(
55 'Invalid {0} regex at {1}, {2}: {3}'.format(
56 field_name, repo, hook, value,
57 )
58 )
59
60
61 def validate_config_extra(config):
62 for repo in config:
63 if is_local_hooks(repo):
64 if 'sha' in repo:
65 raise InvalidConfigError(
66 '"sha" property provided for local hooks'
67 )
68 elif 'sha' not in repo:
69 raise InvalidConfigError(
70 'Missing "sha" field for repository {0}'.format(repo['repo'])
71 )
72 for hook in repo['hooks']:
73 try_regex(repo, hook['id'], hook.get('files', ''), 'files')
74 try_regex(repo, hook['id'], hook['exclude'], 'exclude')
75
76
77 load_config = get_validator(
78 CONFIG_JSON_SCHEMA,
79 InvalidConfigError,
80 additional_validation_strategy=validate_config_extra,
81 )
82
83
84 run = get_run_function('Config filenames.', load_config, InvalidConfigError)
85
86
87 if __name__ == '__main__':
88 exit(run())
89
[end of pre_commit/clientlib/validate_config.py]
[start of pre_commit/clientlib/validate_manifest.py]
1 from __future__ import unicode_literals
2
3 from pre_commit.clientlib.validate_base import get_run_function
4 from pre_commit.clientlib.validate_base import get_validator
5 from pre_commit.clientlib.validate_base import is_regex_valid
6 from pre_commit.languages.all import all_languages
7
8
9 class InvalidManifestError(ValueError):
10 pass
11
12
13 MANIFEST_JSON_SCHEMA = {
14 'type': 'array',
15 'minItems': 1,
16 'items': {
17 'type': 'object',
18 'properties': {
19 'id': {'type': 'string'},
20 'name': {'type': 'string'},
21 'description': {'type': 'string', 'default': ''},
22 'entry': {'type': 'string'},
23 'language': {'type': 'string'},
24 'language_version': {'type': 'string', 'default': 'default'},
25 'files': {'type': 'string'},
26 'expected_return_value': {'type': 'number', 'default': 0},
27 'args': {
28 'type': 'array',
29 'default': [],
30 'items': {
31 'type': 'string',
32 },
33 },
34 },
35 'required': ['id', 'name', 'entry', 'language', 'files'],
36 },
37 }
38
39
40 def validate_languages(hook_config):
41 if hook_config['language'] not in all_languages:
42 raise InvalidManifestError(
43 'Expected language {0} for {1} to be one of {2!r}'.format(
44 hook_config['id'],
45 hook_config['language'],
46 all_languages,
47 )
48 )
49
50
51 def validate_files(hook_config):
52 if not is_regex_valid(hook_config['files']):
53 raise InvalidManifestError(
54 'Invalid files regex at {0}: {1}'.format(
55 hook_config['id'],
56 hook_config['files'],
57 )
58 )
59
60
61 def additional_manifest_check(obj):
62 for hook_config in obj:
63 validate_languages(hook_config)
64 validate_files(hook_config)
65
66
67 load_manifest = get_validator(
68 MANIFEST_JSON_SCHEMA,
69 InvalidManifestError,
70 additional_manifest_check,
71 )
72
73
74 run = get_run_function(
75 'Manifest filenames.',
76 load_manifest,
77 InvalidManifestError,
78 )
79
80
81 if __name__ == '__main__':
82 exit(run())
83
[end of pre_commit/clientlib/validate_manifest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/clientlib/validate_config.py b/pre_commit/clientlib/validate_config.py
--- a/pre_commit/clientlib/validate_config.py
+++ b/pre_commit/clientlib/validate_config.py
@@ -33,7 +33,7 @@
'properties': {
'id': {'type': 'string'},
'files': {'type': 'string'},
- 'exclude': {'type': 'string', 'default': '^$'},
+ 'exclude': {'type': 'string'},
'language_version': {'type': 'string'},
'args': {
'type': 'array',
@@ -71,7 +71,7 @@
)
for hook in repo['hooks']:
try_regex(repo, hook['id'], hook.get('files', ''), 'files')
- try_regex(repo, hook['id'], hook['exclude'], 'exclude')
+ try_regex(repo, hook['id'], hook.get('exclude', ''), 'exclude')
load_config = get_validator(
diff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py
--- a/pre_commit/clientlib/validate_manifest.py
+++ b/pre_commit/clientlib/validate_manifest.py
@@ -20,6 +20,7 @@
'name': {'type': 'string'},
'description': {'type': 'string', 'default': ''},
'entry': {'type': 'string'},
+ 'exclude': {'type': 'string', 'default': '^$'},
'language': {'type': 'string'},
'language_version': {'type': 'string', 'default': 'default'},
'files': {'type': 'string'},
@@ -52,8 +53,14 @@
if not is_regex_valid(hook_config['files']):
raise InvalidManifestError(
'Invalid files regex at {0}: {1}'.format(
- hook_config['id'],
- hook_config['files'],
+ hook_config['id'], hook_config['files'],
+ )
+ )
+
+ if not is_regex_valid(hook_config.get('exclude', '')):
+ raise InvalidManifestError(
+ 'Invalid exclude regex at {0}: {1}'.format(
+ hook_config['id'], hook_config['exclude'],
)
)
| {"golden_diff": "diff --git a/pre_commit/clientlib/validate_config.py b/pre_commit/clientlib/validate_config.py\n--- a/pre_commit/clientlib/validate_config.py\n+++ b/pre_commit/clientlib/validate_config.py\n@@ -33,7 +33,7 @@\n 'properties': {\n 'id': {'type': 'string'},\n 'files': {'type': 'string'},\n- 'exclude': {'type': 'string', 'default': '^$'},\n+ 'exclude': {'type': 'string'},\n 'language_version': {'type': 'string'},\n 'args': {\n 'type': 'array',\n@@ -71,7 +71,7 @@\n )\n for hook in repo['hooks']:\n try_regex(repo, hook['id'], hook.get('files', ''), 'files')\n- try_regex(repo, hook['id'], hook['exclude'], 'exclude')\n+ try_regex(repo, hook['id'], hook.get('exclude', ''), 'exclude')\n \n \n load_config = get_validator(\ndiff --git a/pre_commit/clientlib/validate_manifest.py b/pre_commit/clientlib/validate_manifest.py\n--- a/pre_commit/clientlib/validate_manifest.py\n+++ b/pre_commit/clientlib/validate_manifest.py\n@@ -20,6 +20,7 @@\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n+ 'exclude': {'type': 'string', 'default': '^$'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n@@ -52,8 +53,14 @@\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n- hook_config['id'],\n- hook_config['files'],\n+ hook_config['id'], hook_config['files'],\n+ )\n+ )\n+\n+ if not is_regex_valid(hook_config.get('exclude', '')):\n+ raise InvalidManifestError(\n+ 'Invalid exclude regex at {0}: {1}'.format(\n+ hook_config['id'], hook_config['exclude'],\n )\n )\n", "issue": "Bug: base manifest value for 'exclude' is always ignored\nI stumbled upon this bug while working on #226: the culprit is [`Repository.hooks`](https://github.com/pre-commit/pre-commit/blob/master/pre_commit/repository.py#L48).\n\nA quick fix for this would be to simply remove the default value from `pre_commit/clientlib/validate_config.py`, but the root cause is that any default value defined for a field in this file will make the corresponding manifest field useless.\n\nBasically here is what happens in `Repository.hooks`:\n- all the hooks defined in the current repository are enumerated\n- at this stage, a `hook` is a dict closely matching the Yaml the config file content, **plus** default values for fields not defined in the Yaml but having a JSON schema 'default'\n- when doing the dict merge, **every** (key,value) pair in `hook` overrides the corresponding manifest entry. 
This includes default config value like `exclude: '$^'` overriding a base manifest value like `exclude: '.bak$'`\n\nHence I suggest either adding a test ensuring there will never be any 'default' defined in `CONFIG_JSON_SCHEMA`, or improving the merge logic.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.errors import FatalError\n\n\n_LOCAL_HOOKS_MAGIC_REPO_STRING = 'local'\n\n\ndef is_local_hooks(repo_entry):\n return repo_entry['repo'] == _LOCAL_HOOKS_MAGIC_REPO_STRING\n\n\nclass InvalidConfigError(FatalError):\n pass\n\n\nCONFIG_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'repo': {'type': 'string'},\n 'sha': {'type': 'string'},\n 'hooks': {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'files': {'type': 'string'},\n 'exclude': {'type': 'string', 'default': '^$'},\n 'language_version': {'type': 'string'},\n 'args': {\n 'type': 'array',\n 'items': {'type': 'string'},\n },\n },\n 'required': ['id'],\n }\n }\n },\n 'required': ['repo', 'hooks'],\n }\n}\n\n\ndef try_regex(repo, hook, value, field_name):\n if not is_regex_valid(value):\n raise InvalidConfigError(\n 'Invalid {0} regex at {1}, {2}: {3}'.format(\n field_name, repo, hook, value,\n )\n )\n\n\ndef validate_config_extra(config):\n for repo in config:\n if is_local_hooks(repo):\n if 'sha' in repo:\n raise InvalidConfigError(\n '\"sha\" property provided for local hooks'\n )\n elif 'sha' not in repo:\n raise InvalidConfigError(\n 'Missing \"sha\" field for repository {0}'.format(repo['repo'])\n )\n for hook in repo['hooks']:\n try_regex(repo, hook['id'], hook.get('files', ''), 'files')\n try_regex(repo, hook['id'], hook['exclude'], 'exclude')\n\n\nload_config = get_validator(\n CONFIG_JSON_SCHEMA,\n InvalidConfigError,\n additional_validation_strategy=validate_config_extra,\n)\n\n\nrun = get_run_function('Config filenames.', load_config, InvalidConfigError)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_config.py"}, {"content": "from __future__ import unicode_literals\n\nfrom pre_commit.clientlib.validate_base import get_run_function\nfrom pre_commit.clientlib.validate_base import get_validator\nfrom pre_commit.clientlib.validate_base import is_regex_valid\nfrom pre_commit.languages.all import all_languages\n\n\nclass InvalidManifestError(ValueError):\n pass\n\n\nMANIFEST_JSON_SCHEMA = {\n 'type': 'array',\n 'minItems': 1,\n 'items': {\n 'type': 'object',\n 'properties': {\n 'id': {'type': 'string'},\n 'name': {'type': 'string'},\n 'description': {'type': 'string', 'default': ''},\n 'entry': {'type': 'string'},\n 'language': {'type': 'string'},\n 'language_version': {'type': 'string', 'default': 'default'},\n 'files': {'type': 'string'},\n 'expected_return_value': {'type': 'number', 'default': 0},\n 'args': {\n 'type': 'array',\n 'default': [],\n 'items': {\n 'type': 'string',\n },\n },\n },\n 'required': ['id', 'name', 'entry', 'language', 'files'],\n },\n}\n\n\ndef validate_languages(hook_config):\n if hook_config['language'] not in all_languages:\n raise InvalidManifestError(\n 'Expected language {0} for {1} to be one of {2!r}'.format(\n hook_config['id'],\n hook_config['language'],\n all_languages,\n )\n )\n\n\ndef 
validate_files(hook_config):\n if not is_regex_valid(hook_config['files']):\n raise InvalidManifestError(\n 'Invalid files regex at {0}: {1}'.format(\n hook_config['id'],\n hook_config['files'],\n )\n )\n\n\ndef additional_manifest_check(obj):\n for hook_config in obj:\n validate_languages(hook_config)\n validate_files(hook_config)\n\n\nload_manifest = get_validator(\n MANIFEST_JSON_SCHEMA,\n InvalidManifestError,\n additional_manifest_check,\n)\n\n\nrun = get_run_function(\n 'Manifest filenames.',\n load_manifest,\n InvalidManifestError,\n)\n\n\nif __name__ == '__main__':\n exit(run())\n", "path": "pre_commit/clientlib/validate_manifest.py"}]} | 2,160 | 492 |
gh_patches_debug_7190 | rasdani/github-patches | git_diff | huggingface__accelerate-13 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't send the values of int to device
My training data looks like:
src_image, target_image, src_camera, target_camera, src_camera_idx, target_camera_idx
Where src_camera_idx, target_camera_idx are integers
When I try to apply accelerate I get the following error:
TypeError: Can't send the values of type <class 'int'> to device cuda:0, only of nested list/tuple/dicts of tensors or objects having a `to` method.
We don't need to send the integers to the device. Perhaps instead of raising an error here, you can simply skip the items that cannot be moved to device? Or at least give me the option to skip them if I know my data has such objects.
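A minimal sketch of the suggested behaviour (this is not accelerate's actual implementation; it only mirrors the recursive structure implied by the error message and passes through objects without a `to` method):
```
def send_to_device(obj, device):
    # Recurse into containers, move anything with a `.to` method, and pass
    # everything else (e.g. plain ints) through unchanged instead of raising.
    if isinstance(obj, (list, tuple)):
        return type(obj)(send_to_device(o, device) for o in obj)
    if isinstance(obj, dict):
        return type(obj)({k: send_to_device(v, device) for k, v in obj.items()})
    if hasattr(obj, "to"):
        return obj.to(device)
    return obj  # src_camera_idx / target_camera_idx would end up here
```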
</issue>
<code>
[start of src/accelerate/utils.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import random
16 from enum import Enum
17 from typing import List, Optional, Union
18
19 import numpy as np
20 import torch
21
22 from .state import AcceleratorState, DistributedType, is_tpu_available
23
24
25 if is_tpu_available():
26 import torch_xla.core.xla_model as xm
27
28
29 class RNGType(Enum):
30 TORCH = "torch"
31 CUDA = "cuda"
32 XLA = "xla"
33 GENERATOR = "generator"
34
35
36 def set_seed(seed: int):
37 """
38 Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.
39
40 Args:
41 seed (:obj:`int`): The seed to set.
42 """
43 random.seed(seed)
44 np.random.seed(seed)
45 torch.manual_seed(seed)
46 torch.cuda.manual_seed_all(seed)
47 # ^^ safe to call this function even if cuda is not available
48 if is_tpu_available():
49 xm.set_rng_state(seed)
50
51
52 def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
53 # Get the proper rng state
54 if rng_type == RNGType.TORCH:
55 rng_state = torch.get_rng_state()
56 elif rng_type == RNGType.CUDA:
57 rng_state = torch.cuda.get_rng_state()
58 elif rng_type == RNGType.XLA:
59 assert is_tpu_available(), "Can't synchronize XLA seeds on an environment without TPUs."
60 rng_state = torch.tensor(xm.get_rng_state())
61 elif rng_type == RNGType.GENERATOR:
62 assert generator is not None, "Need a generator to synchronize its seed."
63 rng_state = generator.get_state()
64
65 # Broadcast the rng state from device 0 to other devices
66 state = AcceleratorState()
67 if state.distributed_type == DistributedType.TPU:
68 rng_state = xm.mesh_reduce("random_seed", rng_state, lambda x: x[0])
69 elif state.distributed_type == DistributedType.MULTI_GPU:
70 rng_state = rng_state.to(state.device)
71 torch.distributed.broadcast(rng_state, 0)
72 rng_state = rng_state.cpu()
73
74 # Set the broadcast rng state
75 if rng_type == RNGType.TORCH:
76 torch.set_rng_state(rng_state)
77 elif rng_type == RNGType.CUDA:
78 torch.cuda.set_rng_state(rng_state)
79 elif rng_type == RNGType.XLA:
80 xm.set_rng_state(rng_state.item())
81 elif rng_type == RNGType.GENERATOR:
82 generator.set_state(rng_state)
83
84
85 def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
86 for rng_type in rng_types:
87 synchronize_rng_state(RNGType(rng_type), generator=generator)
88
89
90 def send_to_device(tensor, device):
91 """
92 Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.
93
94 Args:
95 tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):
96 The data to send to a given device.
97 device (:obj:`torch.device`):
98 The device to send the data to
99
100 Returns:
101 The same data structure as :obj:`tensor` with all tensors sent to the proper device.
102 """
103 if isinstance(tensor, (list, tuple)):
104 return type(tensor)(send_to_device(t, device) for t in tensor)
105 elif isinstance(tensor, dict):
106 return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})
107 elif not hasattr(tensor, "to"):
108 raise TypeError(
109 f"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts "
110 "of tensors or objects having a `to` method."
111 )
112 return tensor.to(device)
113
114
115 def extract_model_from_parallel(model):
116 """
117 Extract a model from its distributed containers.
118
119 Args:
120 model (:obj:`torch.nn.Module`): The model to extract.
121
122 Returns:
123 :obj:`torch.nn.Module`: The extracted model.
124 """
125 while isinstance(model, (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)):
126 model = model.module
127 return model
128
129
130 def _tpu_gather(tensor, name="tensor"):
131 if isinstance(tensor, (list, tuple)):
132 return type(tensor)(_tpu_gather(t, name=f"{name}_{i}") for i, t in enumerate(tensor))
133 elif isinstance(tensor, dict):
134 return type(tensor)({k: _tpu_gather(v, name=f"{name}_{k}") for k, v in tensor.items()})
135 elif not isinstance(tensor, torch.Tensor):
136 raise TypeError(f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.")
137 return xm.mesh_reduce(name, tensor, torch.cat)
138
139
140 def _gpu_gather(tensor):
141 if isinstance(tensor, (list, tuple)):
142 return type(tensor)(_gpu_gather(t) for t in tensor)
143 elif isinstance(tensor, dict):
144 return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})
145 elif not isinstance(tensor, torch.Tensor):
146 raise TypeError(f"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.")
147 output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
148 torch.distributed.all_gather(output_tensors, tensor)
149 return torch.cat(output_tensors, dim=0)
150
151
152 def gather(tensor):
153 """
154     Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices.
155
156 Args:
157 tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):
158 The data to gather.
159
160 Returns:
161 The same data structure as :obj:`tensor` with all tensors sent to the proper device.
162 """
163 if AcceleratorState().distributed_type == DistributedType.TPU:
164 return _tpu_gather(tensor, name="accelerate.utils.gather")
165 elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:
166 return _gpu_gather(tensor)
167 else:
168 return tensor
169
170
171 def wait_for_everyone():
172 """
173 Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
174
175 Warning::
176
177 Make sure all processes will reach this instruction otherwise one of your processes will hang forever.
178 """
179 if AcceleratorState().distributed_type == DistributedType.MULTI_GPU:
180 torch.distributed.barrier()
181 elif AcceleratorState().distributed_type == DistributedType.TPU:
182 xm.rendezvous("accelerate.utils.wait_for_everyone")
183
184
185 def save(obj, f):
186 """
187 Save the data to disk. Use in place of :obj:`torch.save()`.
188
189 Args:
190 obj: The data to save
191 f: The file (or file-like object) to use to save the data
192 """
193 if AcceleratorState().distributed_type == DistributedType.TPU:
194 xm.save(obj, f)
195 elif AcceleratorState().local_process_index == 0:
196 torch.save(obj, f)
197
[end of src/accelerate/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py
--- a/src/accelerate/utils.py
+++ b/src/accelerate/utils.py
@@ -105,10 +105,7 @@
elif isinstance(tensor, dict):
return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})
elif not hasattr(tensor, "to"):
- raise TypeError(
- f"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts "
- "of tensors or objects having a `to` method."
- )
+ return tensor
return tensor.to(device)
| {"golden_diff": "diff --git a/src/accelerate/utils.py b/src/accelerate/utils.py\n--- a/src/accelerate/utils.py\n+++ b/src/accelerate/utils.py\n@@ -105,10 +105,7 @@\n elif isinstance(tensor, dict):\n return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n- raise TypeError(\n- f\"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts \"\n- \"of tensors or objects having a `to` method.\"\n- )\n+ return tensor\n return tensor.to(device)\n", "issue": "Can't send the values of int to device\nMy training data looks like:\r\n\r\nsrc_image, target_image, src_camera, target_camera, src_camera_idx, target_camera_idx\r\n\r\nWhere src_camera_idx, target_camera_idx are integers\r\n\r\nWhen I try to apply accelerate I get the following error:\r\nTypeError: Can't send the values of type <class 'int'> to device cuda:0, only of nested list/tuple/dicts of tensors or objects having a `to` method.\r\n\r\nWe don't need to send the integers to the device. Perhaps instead of raising an error here, you can simply skip the items that cannot be moved to device? Or at least give me the option to skip them if I know my data has such objects.\r\n\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nfrom enum import Enum\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\nclass RNGType(Enum):\n TORCH = \"torch\"\n CUDA = \"cuda\"\n XLA = \"xla\"\n GENERATOR = \"generator\"\n\n\ndef set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``.\n\n Args:\n seed (:obj:`int`): The seed to set.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n # ^^ safe to call this function even if cuda is not available\n if is_tpu_available():\n xm.set_rng_state(seed)\n\n\ndef synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):\n # Get the proper rng state\n if rng_type == RNGType.TORCH:\n rng_state = torch.get_rng_state()\n elif rng_type == RNGType.CUDA:\n rng_state = torch.cuda.get_rng_state()\n elif rng_type == RNGType.XLA:\n assert is_tpu_available(), \"Can't synchronize XLA seeds on an environment without TPUs.\"\n rng_state = torch.tensor(xm.get_rng_state())\n elif rng_type == RNGType.GENERATOR:\n assert generator is not None, \"Need a generator to synchronize its seed.\"\n rng_state = generator.get_state()\n\n # Broadcast the rng state from device 0 to other devices\n state = AcceleratorState()\n if state.distributed_type == DistributedType.TPU:\n rng_state = xm.mesh_reduce(\"random_seed\", rng_state, lambda x: x[0])\n elif state.distributed_type == 
DistributedType.MULTI_GPU:\n rng_state = rng_state.to(state.device)\n torch.distributed.broadcast(rng_state, 0)\n rng_state = rng_state.cpu()\n\n # Set the broadcast rng state\n if rng_type == RNGType.TORCH:\n torch.set_rng_state(rng_state)\n elif rng_type == RNGType.CUDA:\n torch.cuda.set_rng_state(rng_state)\n elif rng_type == RNGType.XLA:\n xm.set_rng_state(rng_state.item())\n elif rng_type == RNGType.GENERATOR:\n generator.set_state(rng_state)\n\n\ndef synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):\n for rng_type in rng_types:\n synchronize_rng_state(RNGType(rng_type), generator=generator)\n\n\ndef send_to_device(tensor, device):\n \"\"\"\n Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to send to a given device.\n device (:obj:`torch.device`):\n The device to send the data to\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(send_to_device(t, device) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: send_to_device(v, device) for k, v in tensor.items()})\n elif not hasattr(tensor, \"to\"):\n raise TypeError(\n f\"Can't send the values of type {type(tensor)} to device {device}, only of nested list/tuple/dicts \"\n \"of tensors or objects having a `to` method.\"\n )\n return tensor.to(device)\n\n\ndef extract_model_from_parallel(model):\n \"\"\"\n Extract a model from its distributed containers.\n\n Args:\n model (:obj:`torch.nn.Module`): The model to extract.\n\n Returns:\n :obj:`torch.nn.Module`: The extracted model.\n \"\"\"\n while isinstance(model, (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)):\n model = model.module\n return model\n\n\ndef _tpu_gather(tensor, name=\"tensor\"):\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(_tpu_gather(t, name=f\"{name}_{i}\") for i, t in enumerate(tensor))\n elif isinstance(tensor, dict):\n return type(tensor)({k: _tpu_gather(v, name=f\"{name}_{k}\") for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n return xm.mesh_reduce(name, tensor, torch.cat)\n\n\ndef _gpu_gather(tensor):\n if isinstance(tensor, (list, tuple)):\n return type(tensor)(_gpu_gather(t) for t in tensor)\n elif isinstance(tensor, dict):\n return type(tensor)({k: _gpu_gather(v) for k, v in tensor.items()})\n elif not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Can't gather the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors.\")\n output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(output_tensors, tensor)\n return torch.cat(output_tensors, dim=0)\n\n\ndef gather(tensor):\n \"\"\"\n Recusrively gather tensor in a nested list/tuple/dictionary of tensors from all devices.\n\n Args:\n tensor (nested list/tuple/dictionary of :obj:`torch.Tensor`):\n The data to gather.\n\n Returns:\n The same data structure as :obj:`tensor` with all tensors sent to the proper device.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n return _tpu_gather(tensor, name=\"accelerate.utils.gather\")\n elif AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n return 
_gpu_gather(tensor)\n else:\n return tensor\n\n\ndef wait_for_everyone():\n \"\"\"\n Introduces a blocking point in the script, making sure all processes have reached this point before continuing.\n\n Warning::\n\n Make sure all processes will reach this instruction otherwise one of your processes will hang forever.\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.MULTI_GPU:\n torch.distributed.barrier()\n elif AcceleratorState().distributed_type == DistributedType.TPU:\n xm.rendezvous(\"accelerate.utils.wait_for_everyone\")\n\n\ndef save(obj, f):\n \"\"\"\n Save the data to disk. Use in place of :obj:`torch.save()`.\n\n Args:\n obj: The data to save\n f: The file (or file-like object) to use to save the data\n \"\"\"\n if AcceleratorState().distributed_type == DistributedType.TPU:\n xm.save(obj, f)\n elif AcceleratorState().local_process_index == 0:\n torch.save(obj, f)\n", "path": "src/accelerate/utils.py"}]} | 2,840 | 152 |
gh_patches_debug_964 | rasdani/github-patches | git_diff | mars-project__mars-1631 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `Cannot find serializable class` raised when calling set_label
When calling `DataFrame.columns = xxxx` in a distributed environment, a KeyError is raised with the message `Cannot find serializable class for type_id 1517314310`.
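A minimal reproduction sketch (assuming a distributed Mars session is already created; the data and column names are made up):
```
import mars.dataframe as md

df = md.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
df.columns = ["x", "y"]   # dispatches the set-label operand to the workers
df.execute()              # fails with "Cannot find serializable class for type_id ..."
```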
</issue>
<code>
[start of mars/dataframe/indexing/__init__.py]
1 # Copyright 1999-2020 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 def _install():
17 from pandas.util import cache_readonly
18 from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE
19 from .at import at
20 from .getitem import dataframe_getitem, series_getitem
21 from .iat import iat
22 from .iloc import iloc, head, tail
23 from .insert import df_insert
24 from .loc import loc
25 from .rename import df_rename, series_rename, index_rename
26 from .reset_index import df_reset_index, series_reset_index
27 from .set_index import set_index
28 from .setitem import dataframe_setitem
29 from .reindex import reindex
30 from .where import mask, where
31
32 for cls in DATAFRAME_TYPE + SERIES_TYPE:
33 setattr(cls, 'iloc', cache_readonly(iloc))
34 setattr(cls, 'loc', cache_readonly(loc))
35 setattr(cls, 'iat', cache_readonly(iat))
36 setattr(cls, 'at', cache_readonly(at))
37 setattr(cls, 'reindex', reindex)
38 setattr(cls, 'head', head)
39 setattr(cls, 'tail', tail)
40 setattr(cls, 'mask', mask)
41 setattr(cls, 'where', where)
42
43 for cls in DATAFRAME_TYPE:
44 setattr(cls, 'set_index', set_index)
45 setattr(cls, '__getitem__', dataframe_getitem)
46 setattr(cls, '__setitem__', dataframe_setitem)
47 setattr(cls, 'insert', df_insert)
48 setattr(cls, 'reset_index', df_reset_index)
49 setattr(cls, 'rename', df_rename)
50
51 for cls in SERIES_TYPE:
52 setattr(cls, '__getitem__', series_getitem)
53 setattr(cls, 'reset_index', series_reset_index)
54 setattr(cls, 'rename', series_rename)
55
56 for cls in INDEX_TYPE:
57 setattr(cls, 'rename', index_rename)
58
59
60 _install()
61 del _install
62
[end of mars/dataframe/indexing/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py
--- a/mars/dataframe/indexing/__init__.py
+++ b/mars/dataframe/indexing/__init__.py
@@ -56,6 +56,10 @@
for cls in INDEX_TYPE:
setattr(cls, 'rename', index_rename)
+ # make sure operand is registered
+ from .set_label import DataFrameSetLabel
+ del DataFrameSetLabel
+
_install()
del _install
| {"golden_diff": "diff --git a/mars/dataframe/indexing/__init__.py b/mars/dataframe/indexing/__init__.py\n--- a/mars/dataframe/indexing/__init__.py\n+++ b/mars/dataframe/indexing/__init__.py\n@@ -56,6 +56,10 @@\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n \n+ # make sure operand is registered\n+ from .set_label import DataFrameSetLabel\n+ del DataFrameSetLabel\n+\n \n _install()\n del _install\n", "issue": "[BUG] `Cannot find serializable class` raised when calling set_label\nWhen calling DataFrame.columns=xxxx in distributed environment, a KeyError is raised with message `Cannot find serializable class for type_id 1517314310`.\n", "before_files": [{"content": "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef _install():\n from pandas.util import cache_readonly\n from ..operands import DATAFRAME_TYPE, SERIES_TYPE, INDEX_TYPE\n from .at import at\n from .getitem import dataframe_getitem, series_getitem\n from .iat import iat\n from .iloc import iloc, head, tail\n from .insert import df_insert\n from .loc import loc\n from .rename import df_rename, series_rename, index_rename\n from .reset_index import df_reset_index, series_reset_index\n from .set_index import set_index\n from .setitem import dataframe_setitem\n from .reindex import reindex\n from .where import mask, where\n\n for cls in DATAFRAME_TYPE + SERIES_TYPE:\n setattr(cls, 'iloc', cache_readonly(iloc))\n setattr(cls, 'loc', cache_readonly(loc))\n setattr(cls, 'iat', cache_readonly(iat))\n setattr(cls, 'at', cache_readonly(at))\n setattr(cls, 'reindex', reindex)\n setattr(cls, 'head', head)\n setattr(cls, 'tail', tail)\n setattr(cls, 'mask', mask)\n setattr(cls, 'where', where)\n\n for cls in DATAFRAME_TYPE:\n setattr(cls, 'set_index', set_index)\n setattr(cls, '__getitem__', dataframe_getitem)\n setattr(cls, '__setitem__', dataframe_setitem)\n setattr(cls, 'insert', df_insert)\n setattr(cls, 'reset_index', df_reset_index)\n setattr(cls, 'rename', df_rename)\n\n for cls in SERIES_TYPE:\n setattr(cls, '__getitem__', series_getitem)\n setattr(cls, 'reset_index', series_reset_index)\n setattr(cls, 'rename', series_rename)\n\n for cls in INDEX_TYPE:\n setattr(cls, 'rename', index_rename)\n\n\n_install()\ndel _install\n", "path": "mars/dataframe/indexing/__init__.py"}]} | 1,250 | 119 |
gh_patches_debug_12109 | rasdani/github-patches | git_diff | Parsl__parsl-2324 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
parsl-visualize creates invalid monitoring.db SQL schema
**Describe the bug**
Hi there, and thanks for your support. I'm using HighThroughputExecutor+SlurmProvider to manage my workflow, and I sometimes encounter an unexpected OperationalError when including a MonitoringHub in my Config, which then breaks the monitoring information and also keeps the main python script hanging. The hanging behavior should be related to https://github.com/Parsl/parsl/pull/1917#discussion_r647487589. More generally, I guess that this issue is related to https://github.com/Parsl/parsl/issues/2123 (and indeed I do sometimes have to stop my scripts with ctrl-c, which may have triggered this behavior).
**To Reproduce**
Unfortunately I don't have a minimal working example to show, since (1) this behavior is not reproducible (sometimes the same script does work correctly, sometimes it doesn't), and (2) parsl only enters in a small part of a more complex script that would be useless to fully describe here. Hopefully some parts of the configuration and the logs (below) can be useful to understand what is going wrong.
To configure parsl, I have some trivial auxiliary functions that define a MonitoringHub, a HighThroughputExecutor and a SlurmProvider, for instance like this one:
```
def define_MonitoringHub(workflow_name=None):
kwargs = dict(
hub_address=address_by_hostname(),
monitoring_debug=True,
resource_monitoring_interval=30,
)
if workflow_name is not None:
kwargs["workflow_name"] = workflow_name
return MonitoringHub(**kwargs)
```
In the main script, these are called as
```
provider = define_SlurmProvider(...)
htex = define_HighThroughputExecutor(provider=provider)
monitoring = define_MonitoringHub(workflow_name="test")
config = Config(executors=[htex], monitoring=monitoring)
parsl.clear()
parsl.load(config)
```
I let my main script run, and it correctly executes a set of `python_app`'s through SLURM (I see them running with `squeue`, and I verify that the output is correct). After all apps executed, the main script remains hanging, and if I check the logs I see in `runinfo/000/database_manager.log` that the script got stuck in an OperationalError loop:
```
$ tail -n 8 runinfo/000/database_manager.log
2022-05-30 10:34:02.599 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked
[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]
[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
2022-05-30 10:34:03.604 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked
[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]
[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]
(Background on this error at: http://sqlalche.me/e/13/e3q8)
```
where it just prints this error message once per second. I attach the full `database_manager.log`, `monitoring_router.log` and `parsl.log`. You see that `parsl.log` is missing the final "DFK cleanup complete" cleanup line (which is present for cases that run smoothly).
[database_manager.log](https://github.com/Parsl/parsl/files/8798140/database_manager.log)
[monitoring_router.log](https://github.com/Parsl/parsl/files/8798141/monitoring_router.log)
[parsl.log](https://github.com/Parsl/parsl/files/8798142/parsl.log)
After this situation takes place, re-running the same script typically leads to the same behavior, even if I remove `monitoring.db` and the `runinfo` folder (which is puzzling.. is there a state of the DFK which somehow remains active after I kill the python script?), although sometimes I recover the expected correct behavior.
Meanwhile, these "wrong" runs cannot be accessed through `parsl-visualize` (when I select a workflow in the web interface, I either get "500 Internal server error" or "Workflow 903cd6fa-cda7-416d-9ada-bdeb09a030d2 does not have any resource usage records. ") , but this is not surprising given that the database is probably corrupted somewhere.
**Expected behavior**
I expected no OperationalError, as the `task_time_invoked` column should be present.
**Environment**
 - OS: Ubuntu 16
- Python version: 3.8.13
- Parsl version: 1.2.0
**Distributed Environment**
- Where are you running the Parsl script from ? Login node
- Where do you need the workers to run ? Compute nodes
(apologies for the non-reproducible issue description, I'll keep trying to make the unexpected behavior more robust)
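If it helps anyone debugging the same symptom, a quick way to confirm that an existing `monitoring.db` was created without the column is to inspect the table schema directly (sketch; adjust the database path to your setup):
```
import sqlite3

conn = sqlite3.connect("monitoring.db")
columns = [row[1] for row in conn.execute("PRAGMA table_info(task)")]
print("task_time_invoked" in columns)  # False if the schema was created without it
conn.close()
```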
</issue>
<code>
[start of parsl/monitoring/visualization/models.py]
1 from flask_sqlalchemy import SQLAlchemy
2
3
4 WORKFLOW = 'workflow' # Workflow table includes workflow metadata
5 TASK = 'task' # Task table includes task metadata
6 STATUS = 'status' # Status table includes task status
7 RESOURCE = 'resource' # Resource table includes task resource utilization
8 NODE = 'node' # Node table include node info
9
10 db = SQLAlchemy()
11
12
13 class Workflow(db.Model):
14 __tablename__ = WORKFLOW
15 run_id = db.Column(db.Text, nullable=False, primary_key=True)
16 workflow_name = db.Column(db.Text, nullable=True)
17 workflow_version = db.Column(db.Text, nullable=True)
18 time_began = db.Column(db.DateTime, nullable=False) # Why not date?
19 time_completed = db.Column(db.DateTime)
20 host = db.Column(db.Text, nullable=False)
21 user = db.Column(db.Text, nullable=False)
22 rundir = db.Column(db.Text, nullable=False)
23 tasks_failed_count = db.Column(db.Integer, nullable=False)
24 tasks_completed_count = db.Column(db.Integer, nullable=False)
25
26
27 class Node(db.Model):
28 __tablename__ = NODE
29 id = db.Column('id', db.Integer, nullable=False, primary_key=True, autoincrement=True)
30 run_id = db.Column('run_id', db.Text, nullable=False)
31 hostname = db.Column('hostname', db.Text, nullable=False)
32 cpu_count = db.Column('cpu_count', db.Integer, nullable=False)
33 total_memory = db.Column('total_memory', db.Integer, nullable=False)
34 active = db.Column('active', db.Boolean, nullable=False)
35 worker_count = db.Column('worker_count', db.Integer, nullable=False)
36 python_v = db.Column('python_v', db.Text, nullable=False)
37 reg_time = db.Column('reg_time', db.DateTime, nullable=False)
38
39
40 # TODO: expand to full set of info
41 class Status(db.Model):
42 __tablename__ = STATUS
43 task_id = db.Column(db.Integer, db.ForeignKey(
44 'task.task_id'), nullable=False)
45 task_status_name = db.Column(db.Text, nullable=False)
46 timestamp = db.Column(db.DateTime, nullable=False)
47 run_id = db.Column(db.Text, db.ForeignKey(
48 'workflow.run_id'), nullable=False)
49 __table_args__ = (
50 db.PrimaryKeyConstraint('task_id', 'run_id',
51 'task_status_name', 'timestamp'),
52 )
53
54
55 class Task(db.Model):
56 __tablename__ = TASK
57 task_id = db.Column('task_id', db.Integer, nullable=False)
58 run_id = db.Column('run_id', db.Text, nullable=False)
59 task_func_name = db.Column('task_func_name', db.Text, nullable=False)
60 task_depends = db.Column('task_depends', db.Text, nullable=True)
61 task_time_returned = db.Column(
62 'task_time_returned', db.DateTime, nullable=True)
63 task_memoize = db.Column('task_memoize', db.Text, nullable=False)
64 task_inputs = db.Column('task_inputs', db.Text, nullable=True)
65 task_outputs = db.Column('task_outputs', db.Text, nullable=True)
66 task_stdin = db.Column('task_stdin', db.Text, nullable=True)
67 task_stdout = db.Column('task_stdout', db.Text, nullable=True)
68 task_stderr = db.Column('task_stderr', db.Text, nullable=True)
69 __table_args__ = (
70 db.PrimaryKeyConstraint('task_id', 'run_id'),
71 )
72
73
74 class Resource(db.Model):
75 __tablename__ = RESOURCE
76 task_id = db.Column('task_id', db.Integer, db.ForeignKey(
77 'task.task_id'), nullable=False)
78 timestamp = db.Column('timestamp', db.DateTime, nullable=False)
79 run_id = db.Column('run_id', db.Text, db.ForeignKey(
80 'workflow.run_id'), nullable=False)
81 resource_monitoring_interval = db.Column(
82 'resource_monitoring_interval', db.Float, nullable=True)
83 psutil_process_pid = db.Column(
84 'psutil_process_pid', db.Integer, nullable=True)
85 psutil_process_memory_percent = db.Column(
86 'psutil_process_memory_percent', db.Float, nullable=True)
87 psutil_process_children_count = db.Column(
88 'psutil_process_children_count', db.Float, nullable=True)
89 psutil_process_time_user = db.Column(
90 'psutil_process_time_user', db.Float, nullable=True)
91 psutil_process_time_system = db.Column(
92 'psutil_process_time_system', db.Float, nullable=True)
93 psutil_process_memory_virtual = db.Column(
94 'psutil_process_memory_virtual', db.Float, nullable=True)
95 psutil_process_memory_resident = db.Column(
96 'psutil_process_memory_resident', db.Float, nullable=True)
97 psutil_process_disk_read = db.Column(
98 'psutil_process_disk_read', db.Float, nullable=True)
99 psutil_process_disk_write = db.Column(
100 'psutil_process_disk_write', db.Float, nullable=True)
101 psutil_process_status = db.Column(
102 'psutil_process_status', db.Text, nullable=True)
103 __table_args__ = (
104 db.PrimaryKeyConstraint('task_id', 'run_id', 'timestamp'),)
105
[end of parsl/monitoring/visualization/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsl/monitoring/visualization/models.py b/parsl/monitoring/visualization/models.py
--- a/parsl/monitoring/visualization/models.py
+++ b/parsl/monitoring/visualization/models.py
@@ -58,6 +58,8 @@
run_id = db.Column('run_id', db.Text, nullable=False)
task_func_name = db.Column('task_func_name', db.Text, nullable=False)
task_depends = db.Column('task_depends', db.Text, nullable=True)
+ task_time_invoked = db.Column(
+ 'task_time_invoked', db.DateTime, nullable=True)
task_time_returned = db.Column(
'task_time_returned', db.DateTime, nullable=True)
task_memoize = db.Column('task_memoize', db.Text, nullable=False)
| {"golden_diff": "diff --git a/parsl/monitoring/visualization/models.py b/parsl/monitoring/visualization/models.py\n--- a/parsl/monitoring/visualization/models.py\n+++ b/parsl/monitoring/visualization/models.py\n@@ -58,6 +58,8 @@\n run_id = db.Column('run_id', db.Text, nullable=False)\n task_func_name = db.Column('task_func_name', db.Text, nullable=False)\n task_depends = db.Column('task_depends', db.Text, nullable=True)\n+ task_time_invoked = db.Column(\n+ 'task_time_invoked', db.DateTime, nullable=True)\n task_time_returned = db.Column(\n 'task_time_returned', db.DateTime, nullable=True)\n task_memoize = db.Column('task_memoize', db.Text, nullable=False)\n", "issue": "parsl-visualize creates invalid monitoring.db SQL schema\n**Describe the bug**\r\nHi there, and thanks for your support. I'm using HighThroughputExecutor+SlurmProvider to manage my workflow, and I sometimes encounter an unexpected OperationalError when including a MonitoringHub in my Config, which then breaks the monitoring information and also keeps the main python script hanging. The hanging behavior should be related to https://github.com/Parsl/parsl/pull/1917#discussion_r647487589. More generally, I guess that this issue is related to https://github.com/Parsl/parsl/issues/2123 (and indeed I do sometimes have to stop my scripts with ctrl-c, which may have triggered this behavior).\r\n\r\n**To Reproduce**\r\nUnfortunately I don't have a minimal working example to show, since (1) this behavior is not reproducible (sometimes the same script does work correctly, sometimes it doesn't), and (2) parsl only enters in a small part of a more complex script that would be useless to fully describe here. Hopefully some parts of the configuration and the logs (below) can be useful to understand what is going wrong.\r\n\r\n\r\nTo configure parsl, I have some trivial auxiliary functions that define a MonitoringHub, a HighThroughputExecutor and a SlurmProvider, for instance like this one:\r\n```\r\ndef define_MonitoringHub(workflow_name=None):\r\n kwargs = dict(\r\n hub_address=address_by_hostname(),\r\n monitoring_debug=True,\r\n resource_monitoring_interval=30,\r\n )\r\n if workflow_name is not None:\r\n kwargs[\"workflow_name\"] = workflow_name\r\n return MonitoringHub(**kwargs)\r\n```\r\nIn the main script, these are called as\r\n```\r\n provider = define_SlurmProvider(...)\r\n htex = define_HighThroughputExecutor(provider=provider)\r\n monitoring = define_MonitoringHub(workflow_name=\"test\")\r\n config = Config(executors=[htex], monitoring=monitoring)\r\n parsl.clear()\r\n parsl.load(config)\r\n```\r\n\r\nI let my main script run, and it correctly executes a set of `python_app`'s through SLURM (I see them running with `squeue`, and I verify that the output is correct). After all apps executed, the main script remains hanging, and if I check the logs I see in `runinfo/000/database_manager.log` that the script got stuck in an OperationalError loop:\r\n```\r\n$ tail -n 8 runinfo/000/database_manager.log \r\n2022-05-30 10:34:02.599 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. 
Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked\r\n[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]\r\n[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]\r\n(Background on this error at: http://sqlalche.me/e/13/e3q8)\r\n2022-05-30 10:34:03.604 database_manager:597 [WARNING] [MainThread 140641839281920] Got a database OperationalError. Ignoring and retrying on the assumption that it is recoverable: (sqlite3.OperationalError) table task has no column named task_time_invoked\r\n[SQL: INSERT INTO task (task_id, run_id, task_depends, task_func_name, task_memoize, task_inputs, task_outputs, task_time_invoked, task_fail_count, task_fail_cost) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)]\r\n[parameters: (0, '903cd6fa-cda7-416d-9ada-bdeb09a030d2', '', 'app_create_zarr_structure', False, 'None', 'None', '2022-05-30 10:28:28.462702', 0, 0.0)]\r\n(Background on this error at: http://sqlalche.me/e/13/e3q8)\r\n```\r\nwhere it just prints this error message once per second. I attach the full `database_manager.log`, `monitoring_router.log` and `parsl.log`. You see that `parsl.log` is missing the final \"DFK cleanup complete\" cleanup line (which is present for cases that run smoothly).\r\n\r\n[database_manager.log](https://github.com/Parsl/parsl/files/8798140/database_manager.log)\r\n[monitoring_router.log](https://github.com/Parsl/parsl/files/8798141/monitoring_router.log)\r\n[parsl.log](https://github.com/Parsl/parsl/files/8798142/parsl.log)\r\n\r\n\r\n\r\nAfter this situation takes place, re-running the same script typically leads to the same behavior, even if I remove `monitoring.db` and the `runinfo` folder (which is puzzling.. is there a state of the DFK which somehow remains active after I kill the python script?), although sometimes I recover the expected correct behavior.\r\nMeanwhile, these \"wrong\" runs cannot be accessed through `parsl-visualize` (when I select a workflow in the web interface, I either get \"500 Internal server error\" or \"Workflow 903cd6fa-cda7-416d-9ada-bdeb09a030d2 does not have any resource usage records. \") , but this is not surprising given that the database is probably corrupted somewhere.\r\n\r\n\r\n**Expected behavior**\r\nI expected no OperationalError, as the `task_time_invoked` column should be present.\r\n\r\n**Environment**\r\n - OS: OS: Ubuntu 16\r\n - Python version: 3.8.13\r\n - Parsl version: 1.2.0\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? Login node\r\n- Where do you need the workers to run ? 
Compute nodes\r\n\r\n(apologies for the non-reproducible issue description, I'll keep trying to make the unexpected behavior more robust)\n", "before_files": [{"content": "from flask_sqlalchemy import SQLAlchemy\n\n\nWORKFLOW = 'workflow' # Workflow table includes workflow metadata\nTASK = 'task' # Task table includes task metadata\nSTATUS = 'status' # Status table includes task status\nRESOURCE = 'resource' # Resource table includes task resource utilization\nNODE = 'node' # Node table include node info\n\ndb = SQLAlchemy()\n\n\nclass Workflow(db.Model):\n __tablename__ = WORKFLOW\n run_id = db.Column(db.Text, nullable=False, primary_key=True)\n workflow_name = db.Column(db.Text, nullable=True)\n workflow_version = db.Column(db.Text, nullable=True)\n time_began = db.Column(db.DateTime, nullable=False) # Why not date?\n time_completed = db.Column(db.DateTime)\n host = db.Column(db.Text, nullable=False)\n user = db.Column(db.Text, nullable=False)\n rundir = db.Column(db.Text, nullable=False)\n tasks_failed_count = db.Column(db.Integer, nullable=False)\n tasks_completed_count = db.Column(db.Integer, nullable=False)\n\n\nclass Node(db.Model):\n __tablename__ = NODE\n id = db.Column('id', db.Integer, nullable=False, primary_key=True, autoincrement=True)\n run_id = db.Column('run_id', db.Text, nullable=False)\n hostname = db.Column('hostname', db.Text, nullable=False)\n cpu_count = db.Column('cpu_count', db.Integer, nullable=False)\n total_memory = db.Column('total_memory', db.Integer, nullable=False)\n active = db.Column('active', db.Boolean, nullable=False)\n worker_count = db.Column('worker_count', db.Integer, nullable=False)\n python_v = db.Column('python_v', db.Text, nullable=False)\n reg_time = db.Column('reg_time', db.DateTime, nullable=False)\n\n\n# TODO: expand to full set of info\nclass Status(db.Model):\n __tablename__ = STATUS\n task_id = db.Column(db.Integer, db.ForeignKey(\n 'task.task_id'), nullable=False)\n task_status_name = db.Column(db.Text, nullable=False)\n timestamp = db.Column(db.DateTime, nullable=False)\n run_id = db.Column(db.Text, db.ForeignKey(\n 'workflow.run_id'), nullable=False)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id',\n 'task_status_name', 'timestamp'),\n )\n\n\nclass Task(db.Model):\n __tablename__ = TASK\n task_id = db.Column('task_id', db.Integer, nullable=False)\n run_id = db.Column('run_id', db.Text, nullable=False)\n task_func_name = db.Column('task_func_name', db.Text, nullable=False)\n task_depends = db.Column('task_depends', db.Text, nullable=True)\n task_time_returned = db.Column(\n 'task_time_returned', db.DateTime, nullable=True)\n task_memoize = db.Column('task_memoize', db.Text, nullable=False)\n task_inputs = db.Column('task_inputs', db.Text, nullable=True)\n task_outputs = db.Column('task_outputs', db.Text, nullable=True)\n task_stdin = db.Column('task_stdin', db.Text, nullable=True)\n task_stdout = db.Column('task_stdout', db.Text, nullable=True)\n task_stderr = db.Column('task_stderr', db.Text, nullable=True)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id'),\n )\n\n\nclass Resource(db.Model):\n __tablename__ = RESOURCE\n task_id = db.Column('task_id', db.Integer, db.ForeignKey(\n 'task.task_id'), nullable=False)\n timestamp = db.Column('timestamp', db.DateTime, nullable=False)\n run_id = db.Column('run_id', db.Text, db.ForeignKey(\n 'workflow.run_id'), nullable=False)\n resource_monitoring_interval = db.Column(\n 'resource_monitoring_interval', db.Float, nullable=True)\n psutil_process_pid = 
db.Column(\n 'psutil_process_pid', db.Integer, nullable=True)\n psutil_process_memory_percent = db.Column(\n 'psutil_process_memory_percent', db.Float, nullable=True)\n psutil_process_children_count = db.Column(\n 'psutil_process_children_count', db.Float, nullable=True)\n psutil_process_time_user = db.Column(\n 'psutil_process_time_user', db.Float, nullable=True)\n psutil_process_time_system = db.Column(\n 'psutil_process_time_system', db.Float, nullable=True)\n psutil_process_memory_virtual = db.Column(\n 'psutil_process_memory_virtual', db.Float, nullable=True)\n psutil_process_memory_resident = db.Column(\n 'psutil_process_memory_resident', db.Float, nullable=True)\n psutil_process_disk_read = db.Column(\n 'psutil_process_disk_read', db.Float, nullable=True)\n psutil_process_disk_write = db.Column(\n 'psutil_process_disk_write', db.Float, nullable=True)\n psutil_process_status = db.Column(\n 'psutil_process_status', db.Text, nullable=True)\n __table_args__ = (\n db.PrimaryKeyConstraint('task_id', 'run_id', 'timestamp'),)\n", "path": "parsl/monitoring/visualization/models.py"}]} | 3,322 | 182 |
gh_patches_debug_60446 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-471 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update lasso example
**Description**
The lasso example references heat.ml.regression.lasso
**To Reproduce**
Steps to reproduce the behavior:
1. Which module/class/function is affected?
examples/lasso/demo.py
2. What are the circumstances under which the bug appears?
run demo.py
3. What is the exact error message / erroneous behaviour?
`Traceback (most recent call last):
File "demo.py", line 10, in <module>
import heat.ml.regression.lasso as lasso
ModuleNotFoundError: No module named 'heat.ml'`
**Expected behavior**
Should import heat.core.regression.lasso
**Version Info**
Which version are you using?
0.2.1
</issue>
<code>
[start of examples/lasso/demo.py]
1 import numpy as np
2 import torch
3 import sys
4
5 sys.path.append("../../")
6
7 import heat as ht
8 from matplotlib import pyplot as plt
9 from sklearn import datasets
10 import heat.ml.regression.lasso as lasso
11 import plotfkt
12
13 # read scikit diabetes data set
14 diabetes = datasets.load_diabetes()
15
16 # load diabetes dataset from hdf5 file
17 X = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="x", split=0)
18 y = ht.load_hdf5("../../heat/datasets/data/diabetes.h5", dataset="y", split=0)
19
20 # normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved
21 X = X / ht.sqrt((ht.mean(X ** 2, axis=0)))
22
23 # HeAT lasso instance
24 estimator = lasso.HeatLasso(max_iter=100)
25
26 # List lasso model parameters
27 theta_list = list()
28
29 # Range of lambda values
30 lamda = np.logspace(0, 4, 10) / 10
31
32 # compute the lasso path
33 for l in lamda:
34 estimator.lam = l
35 estimator.fit(X, y)
36 theta_list.append(estimator.theta.numpy().flatten())
37
38 # Stack estimated model parameters into one numpy array
39 theta_lasso = np.stack(theta_list).T
40
41 # Stack into numpy array
42 theta_lasso = np.stack(theta_list).T[1:, :]
43
44
45 # plot lasso paths
46 plt.subplot(3, 1, 1)
47 plotfkt.plot_lasso_path(
48 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - HeAT implementation"
49 )
50
51 if X.is_distributed():
52 distributed = X.comm.rank
53 else:
54 distributed = False
55
56 # Now the same stuff in numpy
57 X = diabetes.data.astype("float32")
58 y = diabetes.target.astype("float32")
59
60 m, _ = X.shape
61 X = np.concatenate((np.ones((m, 1)).astype("float32"), X), axis=1)
62
63 # normalize dataset
64 X = X / np.sqrt((np.mean(X ** 2, axis=0)))
65
66 # Numpy lasso instance
67 estimator = lasso.NumpyLasso(max_iter=100)
68
69 # List lasso model parameters
70 theta_list = list()
71
72 # Range of lambda values
73 lamda = np.logspace(0, 4, 10) / 10
74
75 # compute the lasso path
76 for l in lamda:
77 estimator.lam = l
78 estimator.fit(X, y)
79 theta_list.append(estimator.theta.flatten())
80
81 # Stack estimated model parameters into one numpy array
82 theta_lasso = np.stack(theta_list).T
83
84 # Stack into numpy array
85 theta_lasso = np.stack(theta_list).T[1:, :]
86
87 # plot lasso paths
88 plt.subplot(3, 1, 2)
89 plotfkt.plot_lasso_path(
90 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - Numpy implementation"
91 )
92
93 # Now the same stuff again in PyTorch
94 X = torch.tensor(X)
95 y = torch.tensor(y)
96
97 # HeAT lasso instance
98 estimator = lasso.PytorchLasso(max_iter=100)
99
100 # List lasso model parameters
101 theta_list = list()
102
103 # Range of lambda values
104 lamda = np.logspace(0, 4, 10) / 10
105
106 # compute the lasso path
107 for l in lamda:
108 estimator.lam = l
109 estimator.fit(X, y)
110 theta_list.append(estimator.theta.numpy().flatten())
111
112 # Stack estimated model parameters into one numpy array
113 theta_lasso = np.stack(theta_list).T
114
115 # Stack into numpy array
116 theta_lasso = np.stack(theta_list).T[1:, :]
117
118 # plot lasso paths
119 plt.subplot(3, 1, 3)
120 plotfkt.plot_lasso_path(
121 lamda, theta_lasso, diabetes.feature_names, title="Lasso Paths - PyTorch implementation"
122 )
123
124 # plot only with first rank
125 if distributed is False:
126 plt.show()
127 elif distributed == 0:
128 plt.show()
129
[end of examples/lasso/demo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/lasso/demo.py b/examples/lasso/demo.py
--- a/examples/lasso/demo.py
+++ b/examples/lasso/demo.py
@@ -7,7 +7,7 @@
import heat as ht
from matplotlib import pyplot as plt
from sklearn import datasets
-import heat.ml.regression.lasso as lasso
+import heat.core.regression.lasso as lasso
import plotfkt
# read scikit diabetes data set
| {"golden_diff": "diff --git a/examples/lasso/demo.py b/examples/lasso/demo.py\n--- a/examples/lasso/demo.py\n+++ b/examples/lasso/demo.py\n@@ -7,7 +7,7 @@\n import heat as ht\n from matplotlib import pyplot as plt\n from sklearn import datasets\n-import heat.ml.regression.lasso as lasso\n+import heat.core.regression.lasso as lasso\n import plotfkt\n \n # read scikit diabetes data set\n", "issue": "Update lasso example\n**Description**\r\nThe lasso example references heat.ml.regression.lasso\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Which module/class/function is affected?\r\n examples/lasso/demo.py\r\n2. What are the circumstances under which the bug appears?\r\n run demo.py\r\n3. What is the exact error-message/errorous behavious?\r\n `Traceback (most recent call last):\r\n File \"demo.py\", line 10, in <module>\r\n import heat.ml.regression.lasso as lasso\r\nModuleNotFoundError: No module named 'heat.ml'`\r\n\r\n**Expected behavior**\r\nShould import heat.core.regression.lasso\r\n\r\n**Version Info**\r\n Which version are you using?\r\n0.2.1\r\n\nUpdate lasso example\n**Description**\r\nThe lasso example references heat.ml.regression.lasso\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Which module/class/function is affected?\r\n examples/lasso/demo.py\r\n2. What are the circumstances under which the bug appears?\r\n run demo.py\r\n3. What is the exact error-message/errorous behavious?\r\n `Traceback (most recent call last):\r\n File \"demo.py\", line 10, in <module>\r\n import heat.ml.regression.lasso as lasso\r\nModuleNotFoundError: No module named 'heat.ml'`\r\n\r\n**Expected behavior**\r\nShould import heat.core.regression.lasso\r\n\r\n**Version Info**\r\n Which version are you using?\r\n0.2.1\r\n\n", "before_files": [{"content": "import numpy as np\nimport torch\nimport sys\n\nsys.path.append(\"../../\")\n\nimport heat as ht\nfrom matplotlib import pyplot as plt\nfrom sklearn import datasets\nimport heat.ml.regression.lasso as lasso\nimport plotfkt\n\n# read scikit diabetes data set\ndiabetes = datasets.load_diabetes()\n\n# load diabetes dataset from hdf5 file\nX = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"x\", split=0)\ny = ht.load_hdf5(\"../../heat/datasets/data/diabetes.h5\", dataset=\"y\", split=0)\n\n# normalize dataset #DoTO this goes into the lasso fit routine soon as issue #106 is solved\nX = X / ht.sqrt((ht.mean(X ** 2, axis=0)))\n\n# HeAT lasso instance\nestimator = lasso.HeatLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n\n# plot lasso paths\nplt.subplot(3, 1, 1)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - HeAT implementation\"\n)\n\nif X.is_distributed():\n distributed = X.comm.rank\nelse:\n distributed = False\n\n# Now the same stuff in numpy\nX = diabetes.data.astype(\"float32\")\ny = diabetes.target.astype(\"float32\")\n\nm, _ = X.shape\nX = np.concatenate((np.ones((m, 1)).astype(\"float32\"), X), axis=1)\n\n# normalize dataset\nX = X / np.sqrt((np.mean(X ** 2, axis=0)))\n\n# Numpy lasso instance\nestimator = lasso.NumpyLasso(max_iter=100)\n\n# List 
lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplt.subplot(3, 1, 2)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - Numpy implementation\"\n)\n\n# Now the same stuff again in PyTorch\nX = torch.tensor(X)\ny = torch.tensor(y)\n\n# HeAT lasso instance\nestimator = lasso.PytorchLasso(max_iter=100)\n\n# List lasso model parameters\ntheta_list = list()\n\n# Range of lambda values\nlamda = np.logspace(0, 4, 10) / 10\n\n# compute the lasso path\nfor l in lamda:\n estimator.lam = l\n estimator.fit(X, y)\n theta_list.append(estimator.theta.numpy().flatten())\n\n# Stack estimated model parameters into one numpy array\ntheta_lasso = np.stack(theta_list).T\n\n# Stack into numpy array\ntheta_lasso = np.stack(theta_list).T[1:, :]\n\n# plot lasso paths\nplt.subplot(3, 1, 3)\nplotfkt.plot_lasso_path(\n lamda, theta_lasso, diabetes.feature_names, title=\"Lasso Paths - PyTorch implementation\"\n)\n\n# plot only with first rank\nif distributed is False:\n plt.show()\nelif distributed == 0:\n plt.show()\n", "path": "examples/lasso/demo.py"}]} | 2,067 | 101 |
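Editor's note on the heat record above: the whole fix is a namespace change — the demo imports `heat.ml.regression.lasso`, which no longer exists, and the golden diff points it at `heat.core.regression.lasso`. For readers who want the demo to run against either layout, a minimal import shim would look like the sketch below; the `try`/`except` fallback is an editorial illustration and is not part of the heat repository or of the golden diff.

```python
# Hypothetical compatibility shim (not in the repository): prefer the new module path,
# fall back to the old one if that is what the installed heat version still provides.
try:
    import heat.core.regression.lasso as lasso   # layout expected by the golden diff
except ModuleNotFoundError:
    import heat.ml.regression.lasso as lasso     # older layout the demo was written against
```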
gh_patches_debug_9807 | rasdani/github-patches | git_diff | Mailu__Mailu-2404 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cant setup forward to short-named email address.
### Environment
- docker-compose
### Versions
```
docker ps -a | grep mailu/admin
f14b60868ade mailu/admin:1.9 "/bin/sh -c /start.py" 7 weeks ago Up 7 weeks (healthy) 80/tcp mailu-admin-1
```
## Description
A user has an email address like [email protected]. I can't set up a forward to this address via the WebUI or the CLI.
## Replication Steps
Set up a forward for a user to an email address like [email protected].
</issue>
<code>
[start of core/admin/mailu/ui/forms.py]
1 from wtforms import validators, fields, widgets
2 from wtforms_components import fields as fields_
3 from flask_babel import lazy_gettext as _
4
5 import flask_login
6 import flask_wtf
7 import re
8
9 LOCALPART_REGEX = "^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*$"
10
11 class DestinationField(fields.SelectMultipleField):
12 """ Allow for multiple emails selection from current user choices and
13 additional email addresses.
14 """
15
16 validator = re.compile(r'^.+@([^.@][^@]+)$', re.IGNORECASE)
17
18 def iter_choices(self):
19 managed = [
20 str(email)
21 for email in flask_login.current_user.get_managed_emails()
22 ]
23 for email in managed:
24 selected = self.data is not None and self.coerce(email) in self.data
25 yield (email, email, selected)
26 for email in self.data or ():
27 if email not in managed:
28 yield (email, email, True)
29
30 def pre_validate(self, form):
31 for item in self.data:
32 if not self.validator.match(item):
33 raise validators.ValidationError(_('Invalid email address.'))
34
35 class MultipleEmailAddressesVerify(object):
36 def __init__(self,message=_('Invalid email address.')):
37 self.message = message
38
39 def __call__(self, form, field):
40 pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$')
41 if not pattern.match(field.data.replace(" ", "")):
42 raise validators.ValidationError(self.message)
43
44 class ConfirmationForm(flask_wtf.FlaskForm):
45 submit = fields.SubmitField(_('Confirm'))
46
47 class DomainForm(flask_wtf.FlaskForm):
48 name = fields.StringField(_('Domain name'), [validators.DataRequired()])
49 max_users = fields_.IntegerField(_('Maximum user count'), [validators.NumberRange(min=-1)], default=10)
50 max_aliases = fields_.IntegerField(_('Maximum alias count'), [validators.NumberRange(min=-1)], default=10)
51 max_quota_bytes = fields_.IntegerSliderField(_('Maximum user quota'), default=0)
52 signup_enabled = fields.BooleanField(_('Enable sign-up'), default=False)
53 comment = fields.StringField(_('Comment'))
54 submit = fields.SubmitField(_('Save'))
55
56
57 class DomainSignupForm(flask_wtf.FlaskForm):
58 name = fields.StringField(_('Domain name'), [validators.DataRequired()])
59 localpart = fields.StringField(_('Initial admin'), [validators.DataRequired()])
60 pw = fields.PasswordField(_('Admin password'), [validators.DataRequired()])
61 pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])
62 captcha = flask_wtf.RecaptchaField()
63 submit = fields.SubmitField(_('Create'))
64
65
66 class AlternativeForm(flask_wtf.FlaskForm):
67 name = fields.StringField(_('Alternative name'), [validators.DataRequired()])
68 submit = fields.SubmitField(_('Save'))
69
70
71 class RelayForm(flask_wtf.FlaskForm):
72 name = fields.StringField(_('Relayed domain name'), [validators.DataRequired()])
73 smtp = fields.StringField(_('Remote host'))
74 comment = fields.StringField(_('Comment'))
75 submit = fields.SubmitField(_('Save'))
76
77
78 class UserForm(flask_wtf.FlaskForm):
79 localpart = fields.StringField(_('E-mail'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])
80 pw = fields.PasswordField(_('Password'))
81 pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])
82 quota_bytes = fields_.IntegerSliderField(_('Quota'), default=10**9)
83 enable_imap = fields.BooleanField(_('Allow IMAP access'), default=True)
84 enable_pop = fields.BooleanField(_('Allow POP3 access'), default=True)
85 displayed_name = fields.StringField(_('Displayed name'))
86 comment = fields.StringField(_('Comment'))
87 enabled = fields.BooleanField(_('Enabled'), default=True)
88 submit = fields.SubmitField(_('Save'))
89
90
91 class UserSignupForm(flask_wtf.FlaskForm):
92 localpart = fields.StringField(_('Email address'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])
93 pw = fields.PasswordField(_('Password'), [validators.DataRequired()])
94 pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])
95 submit = fields.SubmitField(_('Sign up'))
96
97 class UserSignupFormCaptcha(UserSignupForm):
98 captcha = flask_wtf.RecaptchaField()
99
100 class UserSettingsForm(flask_wtf.FlaskForm):
101 displayed_name = fields.StringField(_('Displayed name'))
102 spam_enabled = fields.BooleanField(_('Enable spam filter'))
103 spam_mark_as_read = fields.BooleanField(_('Enable marking spam mails as read'))
104 spam_threshold = fields_.IntegerSliderField(_('Spam filter tolerance'))
105 forward_enabled = fields.BooleanField(_('Enable forwarding'))
106 forward_keep = fields.BooleanField(_('Keep a copy of the emails'))
107 forward_destination = fields.StringField(_('Destination'), [validators.Optional(), MultipleEmailAddressesVerify()])
108 submit = fields.SubmitField(_('Save settings'))
109
110
111 class UserPasswordForm(flask_wtf.FlaskForm):
112 pw = fields.PasswordField(_('Password'), [validators.DataRequired()])
113 pw2 = fields.PasswordField(_('Password check'), [validators.DataRequired()])
114 submit = fields.SubmitField(_('Update password'))
115
116
117 class UserReplyForm(flask_wtf.FlaskForm):
118 reply_enabled = fields.BooleanField(_('Enable automatic reply'))
119 reply_subject = fields.StringField(_('Reply subject'))
120 reply_body = fields.StringField(_('Reply body'),
121 widget=widgets.TextArea())
122 reply_startdate = fields.html5.DateField(_('Start of vacation'))
123 reply_enddate = fields.html5.DateField(_('End of vacation'))
124 submit = fields.SubmitField(_('Update'))
125
126
127 class TokenForm(flask_wtf.FlaskForm):
128 displayed_password = fields.StringField(
129 _('Your token (write it down, as it will never be displayed again)')
130 )
131 raw_password = fields.HiddenField([validators.DataRequired()])
132 comment = fields.StringField(_('Comment'))
133 ip = fields.StringField(
134 _('Authorized IP'), [validators.Optional(), validators.IPAddress(ipv6=True)]
135 )
136 submit = fields.SubmitField(_('Save'))
137
138
139 class AliasForm(flask_wtf.FlaskForm):
140 localpart = fields.StringField(_('Alias'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])
141 wildcard = fields.BooleanField(
142 _('Use SQL LIKE Syntax (e.g. for catch-all aliases)'))
143 destination = DestinationField(_('Destination'))
144 comment = fields.StringField(_('Comment'))
145 submit = fields.SubmitField(_('Save'))
146
147
148 class AdminForm(flask_wtf.FlaskForm):
149 admin = fields.SelectField(_('Admin email'), choices=[])
150 submit = fields.SubmitField(_('Submit'))
151
152
153 class ManagerForm(flask_wtf.FlaskForm):
154 manager = fields.SelectField(_('Manager email'))
155 submit = fields.SubmitField(_('Submit'))
156
157
158 class FetchForm(flask_wtf.FlaskForm):
159 protocol = fields.SelectField(_('Protocol'), choices=[
160 ('imap', 'IMAP'), ('pop3', 'POP3')
161 ])
162 host = fields.StringField(_('Hostname or IP'), [validators.DataRequired()])
163 port = fields.IntegerField(_('TCP port'), [validators.DataRequired(), validators.NumberRange(min=0, max=65535)])
164 tls = fields.BooleanField(_('Enable TLS'))
165 username = fields.StringField(_('Username'), [validators.DataRequired()])
166 password = fields.PasswordField(_('Password'))
167 keep = fields.BooleanField(_('Keep emails on the server'))
168 submit = fields.SubmitField(_('Submit'))
169
170
171 class AnnouncementForm(flask_wtf.FlaskForm):
172 announcement_subject = fields.StringField(_('Announcement subject'),
173 [validators.DataRequired()])
174 announcement_body = fields.StringField(_('Announcement body'),
175 [validators.DataRequired()], widget=widgets.TextArea())
176 submit = fields.SubmitField(_('Send'))
177
[end of core/admin/mailu/ui/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py
--- a/core/admin/mailu/ui/forms.py
+++ b/core/admin/mailu/ui/forms.py
@@ -37,7 +37,7 @@
self.message = message
def __call__(self, form, field):
- pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,}))*$')
+ pattern = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{1,})(,([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,}))*$')
if not pattern.match(field.data.replace(" ", "")):
raise validators.ValidationError(self.message)
| {"golden_diff": "diff --git a/core/admin/mailu/ui/forms.py b/core/admin/mailu/ui/forms.py\n--- a/core/admin/mailu/ui/forms.py\n+++ b/core/admin/mailu/ui/forms.py\n@@ -37,7 +37,7 @@\n self.message = message\n \n def __call__(self, form, field):\n- pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,}))*$')\n+ pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{1,}\\.)*([a-z]{1,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{1,}\\.)*([a-z]{2,}))*$')\n if not pattern.match(field.data.replace(\" \", \"\")):\n raise validators.ValidationError(self.message)\n", "issue": "Cant setup forward to short-named email address. \n### Environment\r\n - docker-compose\r\n\r\n\r\n### Versions\r\n```\r\ndocker ps -a | grep mailu/admin\r\nf14b60868ade mailu/admin:1.9 \"/bin/sh -c /start.py\" 7 weeks ago Up 7 weeks (healthy) 80/tcp mailu-admin-1\r\n```\r\n\r\n## Description\r\nUser have email-address like [email protected] . I cant setup forward to this email via WebUI or CLI\r\n\r\n## Replication Steps\r\nSetup forward for user to email like [email protected] \r\n\n", "before_files": [{"content": "from wtforms import validators, fields, widgets\nfrom wtforms_components import fields as fields_\nfrom flask_babel import lazy_gettext as _\n\nimport flask_login\nimport flask_wtf\nimport re\n\nLOCALPART_REGEX = \"^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-zA-Z0-9!#$%&'*+/=?^_`{|}~-]+)*$\"\n\nclass DestinationField(fields.SelectMultipleField):\n \"\"\" Allow for multiple emails selection from current user choices and\n additional email addresses.\n \"\"\"\n\n validator = re.compile(r'^.+@([^.@][^@]+)$', re.IGNORECASE)\n\n def iter_choices(self):\n managed = [\n str(email)\n for email in flask_login.current_user.get_managed_emails()\n ]\n for email in managed:\n selected = self.data is not None and self.coerce(email) in self.data\n yield (email, email, selected)\n for email in self.data or ():\n if email not in managed:\n yield (email, email, True)\n\n def pre_validate(self, form):\n for item in self.data:\n if not self.validator.match(item):\n raise validators.ValidationError(_('Invalid email address.'))\n\nclass MultipleEmailAddressesVerify(object):\n def __init__(self,message=_('Invalid email address.')):\n self.message = message\n\n def __call__(self, form, field):\n pattern = re.compile(r'^([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,})(,([_a-z0-9\\-]+)(\\.[_a-z0-9\\-]+)*@([a-z0-9\\-]{2,}\\.)*([a-z]{2,}))*$')\n if not pattern.match(field.data.replace(\" \", \"\")):\n raise validators.ValidationError(self.message)\n\nclass ConfirmationForm(flask_wtf.FlaskForm):\n submit = fields.SubmitField(_('Confirm'))\n\nclass DomainForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Domain name'), [validators.DataRequired()])\n max_users = fields_.IntegerField(_('Maximum user count'), [validators.NumberRange(min=-1)], default=10)\n max_aliases = fields_.IntegerField(_('Maximum alias count'), [validators.NumberRange(min=-1)], default=10)\n max_quota_bytes = fields_.IntegerSliderField(_('Maximum user quota'), default=0)\n signup_enabled = fields.BooleanField(_('Enable sign-up'), default=False)\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass DomainSignupForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Domain name'), [validators.DataRequired()])\n localpart = fields.StringField(_('Initial admin'), 
[validators.DataRequired()])\n pw = fields.PasswordField(_('Admin password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n captcha = flask_wtf.RecaptchaField()\n submit = fields.SubmitField(_('Create'))\n\n\nclass AlternativeForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Alternative name'), [validators.DataRequired()])\n submit = fields.SubmitField(_('Save'))\n\n\nclass RelayForm(flask_wtf.FlaskForm):\n name = fields.StringField(_('Relayed domain name'), [validators.DataRequired()])\n smtp = fields.StringField(_('Remote host'))\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass UserForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('E-mail'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n pw = fields.PasswordField(_('Password'))\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n quota_bytes = fields_.IntegerSliderField(_('Quota'), default=10**9)\n enable_imap = fields.BooleanField(_('Allow IMAP access'), default=True)\n enable_pop = fields.BooleanField(_('Allow POP3 access'), default=True)\n displayed_name = fields.StringField(_('Displayed name'))\n comment = fields.StringField(_('Comment'))\n enabled = fields.BooleanField(_('Enabled'), default=True)\n submit = fields.SubmitField(_('Save'))\n\n\nclass UserSignupForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('Email address'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n pw = fields.PasswordField(_('Password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Confirm password'), [validators.EqualTo('pw')])\n submit = fields.SubmitField(_('Sign up'))\n\nclass UserSignupFormCaptcha(UserSignupForm):\n captcha = flask_wtf.RecaptchaField()\n\nclass UserSettingsForm(flask_wtf.FlaskForm):\n displayed_name = fields.StringField(_('Displayed name'))\n spam_enabled = fields.BooleanField(_('Enable spam filter'))\n spam_mark_as_read = fields.BooleanField(_('Enable marking spam mails as read'))\n spam_threshold = fields_.IntegerSliderField(_('Spam filter tolerance'))\n forward_enabled = fields.BooleanField(_('Enable forwarding'))\n forward_keep = fields.BooleanField(_('Keep a copy of the emails'))\n forward_destination = fields.StringField(_('Destination'), [validators.Optional(), MultipleEmailAddressesVerify()])\n submit = fields.SubmitField(_('Save settings'))\n\n\nclass UserPasswordForm(flask_wtf.FlaskForm):\n pw = fields.PasswordField(_('Password'), [validators.DataRequired()])\n pw2 = fields.PasswordField(_('Password check'), [validators.DataRequired()])\n submit = fields.SubmitField(_('Update password'))\n\n\nclass UserReplyForm(flask_wtf.FlaskForm):\n reply_enabled = fields.BooleanField(_('Enable automatic reply'))\n reply_subject = fields.StringField(_('Reply subject'))\n reply_body = fields.StringField(_('Reply body'),\n widget=widgets.TextArea())\n reply_startdate = fields.html5.DateField(_('Start of vacation'))\n reply_enddate = fields.html5.DateField(_('End of vacation'))\n submit = fields.SubmitField(_('Update'))\n\n\nclass TokenForm(flask_wtf.FlaskForm):\n displayed_password = fields.StringField(\n _('Your token (write it down, as it will never be displayed again)')\n )\n raw_password = fields.HiddenField([validators.DataRequired()])\n comment = fields.StringField(_('Comment'))\n ip = fields.StringField(\n _('Authorized IP'), [validators.Optional(), validators.IPAddress(ipv6=True)]\n )\n submit = 
fields.SubmitField(_('Save'))\n\n\nclass AliasForm(flask_wtf.FlaskForm):\n localpart = fields.StringField(_('Alias'), [validators.DataRequired(), validators.Regexp(LOCALPART_REGEX)])\n wildcard = fields.BooleanField(\n _('Use SQL LIKE Syntax (e.g. for catch-all aliases)'))\n destination = DestinationField(_('Destination'))\n comment = fields.StringField(_('Comment'))\n submit = fields.SubmitField(_('Save'))\n\n\nclass AdminForm(flask_wtf.FlaskForm):\n admin = fields.SelectField(_('Admin email'), choices=[])\n submit = fields.SubmitField(_('Submit'))\n\n\nclass ManagerForm(flask_wtf.FlaskForm):\n manager = fields.SelectField(_('Manager email'))\n submit = fields.SubmitField(_('Submit'))\n\n\nclass FetchForm(flask_wtf.FlaskForm):\n protocol = fields.SelectField(_('Protocol'), choices=[\n ('imap', 'IMAP'), ('pop3', 'POP3')\n ])\n host = fields.StringField(_('Hostname or IP'), [validators.DataRequired()])\n port = fields.IntegerField(_('TCP port'), [validators.DataRequired(), validators.NumberRange(min=0, max=65535)])\n tls = fields.BooleanField(_('Enable TLS'))\n username = fields.StringField(_('Username'), [validators.DataRequired()])\n password = fields.PasswordField(_('Password'))\n keep = fields.BooleanField(_('Keep emails on the server'))\n submit = fields.SubmitField(_('Submit'))\n\n\nclass AnnouncementForm(flask_wtf.FlaskForm):\n announcement_subject = fields.StringField(_('Announcement subject'),\n [validators.DataRequired()])\n announcement_body = fields.StringField(_('Announcement body'),\n [validators.DataRequired()], widget=widgets.TextArea())\n submit = fields.SubmitField(_('Send'))\n", "path": "core/admin/mailu/ui/forms.py"}]} | 2,826 | 280 |
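Editor's note on the Mailu record above: the rejection comes from the `{2,}` quantifiers in `MultipleEmailAddressesVerify`, which force every domain label to be at least two characters long, so any forward destination with a one-character host label can never match. The snippet below is a standalone illustration (plain Python `re`, simplified to a single address rather than the comma-separated list the real validator accepts) of the behaviour before and after the golden diff relaxes the quantifier.

```python
import re

# Simplified single-address versions of the validator pattern, for illustration only.
old = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{2,}\.)*([a-z]{2,})$')
new = re.compile(r'^([_a-z0-9\-]+)(\.[_a-z0-9\-]+)*@([a-z0-9\-]{1,}\.)*([a-z]{2,})$')

print(bool(old.match("u@h.com")))  # False - the one-letter label "h" fails the {2,} requirement
print(bool(new.match("u@h.com")))  # True  - {1,} accepts one-character labels, as in the patch
```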
gh_patches_debug_18150 | rasdani/github-patches | git_diff | sunpy__sunpy-4846 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix CROTA in EUVI map source
e.g.
```python
from sunpy.map import Map
smap = Map('https://stereo-ssc.nascom.nasa.gov/pub/beacon/ahead/secchi/img/euvi/20210107/20210107_001615_n7euA.fts')
print(smap.wcs)
```
raises
```
WARNING: FITSFixedWarning: CROTA = -6.39331135705
keyword looks very much like CROTAn but isn't. [astropy.wcs.wcs]
```
It would be good to fix this in the EUVI map source.
</issue>
<code>
[start of sunpy/map/sources/stereo.py]
1 """STEREO Map subclass definitions"""
2
3 __author__ = "Keith Hughitt"
4 __email__ = "[email protected]"
5
6
7 import astropy.units as u
8 from astropy.visualization import PowerStretch
9 from astropy.visualization.mpl_normalize import ImageNormalize
10
11 from sunpy.map import GenericMap
12 from sunpy.map.sources.source_type import source_stretch
13
14 __all__ = ['EUVIMap', 'CORMap', 'HIMap']
15
16
17 class EUVIMap(GenericMap):
18 """STEREO-SECCHI EUVI Image Map
19
20 EUVI is an extreme ultraviolet (EUV) imager. Part of the STEREO-SECCHI
21 suite it observes the Sun from 1 to 1.7 solar radii. It is capable of
22 observing at 304 (He II), 171 (Fe IX), 195 (Fe XII), and 284 (Fe XV)
23 Angstroms.
24
25 References
26 ----------
27 * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_
28 * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_
29 * `Instrument Page <http://secchi.lmsal.com/EUVI/>`_
30 """
31
32 def __init__(self, data, header, **kwargs):
33
34 GenericMap.__init__(self, data, header, **kwargs)
35 self._nickname = "{}-{}".format(self.detector, self.observatory[-1])
36 self.plot_settings['cmap'] = 'sohoeit{wl:d}'.format(wl=int(self.wavelength.value))
37 self.plot_settings['norm'] = ImageNormalize(
38 stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)
39 self.meta['waveunit'] = 'Angstrom'
40
41 # Try to identify when the FITS meta data does not have the correct
42 # date FITS keyword
43 if ('date_obs' in self.meta) and not('date-obs' in self.meta):
44 self.meta['date-obs'] = self.meta['date_obs']
45
46 @property
47 def rsun_arcseconds(self):
48 """
49 Radius of the sun in arcseconds.
50
51 References
52 ----------
53 https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf
54 """
55 return self.meta.get('rsun', None)
56
57 @property
58 def rsun_obs(self):
59 """
60 Radius of the sun in arcseconds as a quantity.
61
62 References
63 ----------
64 https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf
65 """
66 rsun_arcseconds = self.meta.get('rsun', None)
67
68 if rsun_arcseconds is None:
69 rsun_arcseconds = super().rsun_obs
70
71 return u.Quantity(rsun_arcseconds, 'arcsec')
72
73 @classmethod
74 def is_datasource_for(cls, data, header, **kwargs):
75 """Determines if header corresponds to an EUVI image"""
76 return header.get('detector') == 'EUVI'
77
78
79 class CORMap(GenericMap):
80 """STEREO-SECCHI CORonograph Image Map.
81
82 Part of the STEREO-SECCHI suite of remote sensing telescopes,
83 COR is a set of two coronographs (COR1, COR2) onboard STEREO.
84 They are both traditional Lyot coronagraphs.
85
86 The COR1 detectors observes from 1.3 to 4 solar radii while the
87 COR2 detectors observe a range from 2 to 15 solar radii.
88
89 References
90 ----------
91 * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_
92 * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_
93 * `COR1 Instrument Page <https://cor1.gsfc.nasa.gov>`_
94 * `COR2 Instrument Page <http://secchi.nrl.navy.mil/index.php?p=cor2>`_
95 * `COR1 User Guide <https://cor1.gsfc.nasa.gov/guide/>`_
96 """
97
98 def __init__(self, data, header, **kwargs):
99
100 GenericMap.__init__(self, data, header, **kwargs)
101
102 self._nickname = "{}-{}".format(self.detector, self.observatory[-1])
103 self.plot_settings['cmap'] = 'stereocor{det!s}'.format(det=self.detector[-1])
104 self.plot_settings['norm'] = ImageNormalize(
105 stretch=source_stretch(self.meta, PowerStretch(0.5)), clip=False)
106
107 # Try to identify when the FITS meta data does not have the correct
108 # date FITS keyword
109 if ('date_obs' in self.meta) and not('date-obs' in self.meta):
110 self.meta['date-obs'] = self.meta['date_obs']
111
112 @property
113 def measurement(self):
114 """
115 Returns the type of data observed.
116 """
117 # TODO: This needs to do more than white-light. Should give B, pB, etc.
118 return "white-light"
119
120 @classmethod
121 def is_datasource_for(cls, data, header, **kwargs):
122 """Determines if header corresponds to an COR image"""
123 return str(header.get('detector', '')).startswith('COR')
124
125
126 class HIMap(GenericMap):
127 """STEREO-SECCHI Heliospheric Imager (HI) Map.
128
129 The HI is a wide-angle visible-light imaging system
130 for the detection of coronal mass ejection (CME) events
131 in interplanetary space and, in particular, of events
132 directed towards the Earth.
133
134 The Heliospheric imager consists of two instruments, the HI-1 and HI-2.
135 The HI1 observes from 15-80 solar radii while HI2 observes from 80-215
136 solar radii.
137
138 References
139 ----------
140 * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_
141 * `STEREO SECCHI <https://secchi.nrl.navy.mil>`_
142 * `HI Instrument Page <http://www.stereo.rl.ac.uk>`_
143 """
144
145 def __init__(self, data, header, **kwargs):
146
147 GenericMap.__init__(self, data, header, **kwargs)
148 self._nickname = "{}-{}".format(self.detector, self.observatory[-1])
149 self.plot_settings['cmap'] = 'stereohi{det!s}'.format(det=self.detector[-1])
150 self.plot_settings['norm'] = ImageNormalize(
151 stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)
152
153 # Try to identify when the FITS meta data does not have the correct
154 # date FITS keyword
155 if ('date_obs' in self.meta) and not('date-obs' in self.meta):
156 self.meta['date-obs'] = self.meta['date_obs']
157
158 @property
159 def measurement(self):
160 """
161 Returns the type of data observed.
162 """
163 # TODO: This needs to do more than white-light. Should give B, pB, etc.
164 return "white-light"
165
166 @classmethod
167 def is_datasource_for(cls, data, header, **kwargs):
168 """Determines if header corresponds to an COR image"""
169 return str(header.get('detector', '')).startswith('HI')
170
[end of sunpy/map/sources/stereo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/map/sources/stereo.py b/sunpy/map/sources/stereo.py
--- a/sunpy/map/sources/stereo.py
+++ b/sunpy/map/sources/stereo.py
@@ -8,6 +8,7 @@
from astropy.visualization import PowerStretch
from astropy.visualization.mpl_normalize import ImageNormalize
+from sunpy import log
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
@@ -42,6 +43,10 @@
# date FITS keyword
if ('date_obs' in self.meta) and not('date-obs' in self.meta):
self.meta['date-obs'] = self.meta['date_obs']
+ # fix CROTA to CROTAn
+ if "crota" in self.meta and "crota2" not in self.meta:
+ log.debug("EUVIMap: Changing the CROTA keyword to CROTA2")
+ self.meta["crota2"] = self.meta.pop("crota")
@property
def rsun_arcseconds(self):
| {"golden_diff": "diff --git a/sunpy/map/sources/stereo.py b/sunpy/map/sources/stereo.py\n--- a/sunpy/map/sources/stereo.py\n+++ b/sunpy/map/sources/stereo.py\n@@ -8,6 +8,7 @@\n from astropy.visualization import PowerStretch\n from astropy.visualization.mpl_normalize import ImageNormalize\n \n+from sunpy import log\n from sunpy.map import GenericMap\n from sunpy.map.sources.source_type import source_stretch\n \n@@ -42,6 +43,10 @@\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n+ # fix CROTA to CROTAn\n+ if \"crota\" in self.meta and \"crota2\" not in self.meta:\n+ log.debug(\"EUVIMap: Changing the CROTA keyword to CROTA2\")\n+ self.meta[\"crota2\"] = self.meta.pop(\"crota\")\n \n @property\n def rsun_arcseconds(self):\n", "issue": "Fix CROTA in EUVI map source\ne.g. \r\n```python\r\nfrom sunpy.map import Map\r\n\r\nsmap = Map('https://stereo-ssc.nascom.nasa.gov/pub/beacon/ahead/secchi/img/euvi/20210107/20210107_001615_n7euA.fts')\r\nprint(smap.wcs)\r\n```\r\nraises\r\n```\r\nWARNING: FITSFixedWarning: CROTA = -6.39331135705 \r\nkeyword looks very much like CROTAn but isn't. [astropy.wcs.wcs]\r\n```\r\nIt would be good to fix this in the EUVI map source.\n", "before_files": [{"content": "\"\"\"STEREO Map subclass definitions\"\"\"\n\n__author__ = \"Keith Hughitt\"\n__email__ = \"[email protected]\"\n\n\nimport astropy.units as u\nfrom astropy.visualization import PowerStretch\nfrom astropy.visualization.mpl_normalize import ImageNormalize\n\nfrom sunpy.map import GenericMap\nfrom sunpy.map.sources.source_type import source_stretch\n\n__all__ = ['EUVIMap', 'CORMap', 'HIMap']\n\n\nclass EUVIMap(GenericMap):\n \"\"\"STEREO-SECCHI EUVI Image Map\n\n EUVI is an extreme ultraviolet (EUV) imager. Part of the STEREO-SECCHI\n suite it observes the Sun from 1 to 1.7 solar radii. 
It is capable of\n observing at 304 (He II), 171 (Fe IX), 195 (Fe XII), and 284 (Fe XV)\n Angstroms.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_\n * `Instrument Page <http://secchi.lmsal.com/EUVI/>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'sohoeit{wl:d}'.format(wl=int(self.wavelength.value))\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)\n self.meta['waveunit'] = 'Angstrom'\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def rsun_arcseconds(self):\n \"\"\"\n Radius of the sun in arcseconds.\n\n References\n ----------\n https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf\n \"\"\"\n return self.meta.get('rsun', None)\n\n @property\n def rsun_obs(self):\n \"\"\"\n Radius of the sun in arcseconds as a quantity.\n\n References\n ----------\n https://sohowww.nascom.nasa.gov/solarsoft/stereo/secchi/doc/FITS_keywords.pdf\n \"\"\"\n rsun_arcseconds = self.meta.get('rsun', None)\n\n if rsun_arcseconds is None:\n rsun_arcseconds = super().rsun_obs\n\n return u.Quantity(rsun_arcseconds, 'arcsec')\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an EUVI image\"\"\"\n return header.get('detector') == 'EUVI'\n\n\nclass CORMap(GenericMap):\n \"\"\"STEREO-SECCHI CORonograph Image Map.\n\n Part of the STEREO-SECCHI suite of remote sensing telescopes,\n COR is a set of two coronographs (COR1, COR2) onboard STEREO.\n They are both traditional Lyot coronagraphs.\n\n The COR1 detectors observes from 1.3 to 4 solar radii while the\n COR2 detectors observe a range from 2 to 15 solar radii.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <http://secchi.nrl.navy.mil>`_\n * `COR1 Instrument Page <https://cor1.gsfc.nasa.gov>`_\n * `COR2 Instrument Page <http://secchi.nrl.navy.mil/index.php?p=cor2>`_\n * `COR1 User Guide <https://cor1.gsfc.nasa.gov/guide/>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'stereocor{det!s}'.format(det=self.detector[-1])\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.5)), clip=False)\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def measurement(self):\n \"\"\"\n Returns the type of data observed.\n \"\"\"\n # TODO: This needs to do more than white-light. 
Should give B, pB, etc.\n return \"white-light\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an COR image\"\"\"\n return str(header.get('detector', '')).startswith('COR')\n\n\nclass HIMap(GenericMap):\n \"\"\"STEREO-SECCHI Heliospheric Imager (HI) Map.\n\n The HI is a wide-angle visible-light imaging system\n for the detection of coronal mass ejection (CME) events\n in interplanetary space and, in particular, of events\n directed towards the Earth.\n\n The Heliospheric imager consists of two instruments, the HI-1 and HI-2.\n The HI1 observes from 15-80 solar radii while HI2 observes from 80-215\n solar radii.\n\n References\n ----------\n * `STEREO Mission Page <https://stereo.gsfc.nasa.gov/>`_\n * `STEREO SECCHI <https://secchi.nrl.navy.mil>`_\n * `HI Instrument Page <http://www.stereo.rl.ac.uk>`_\n \"\"\"\n\n def __init__(self, data, header, **kwargs):\n\n GenericMap.__init__(self, data, header, **kwargs)\n self._nickname = \"{}-{}\".format(self.detector, self.observatory[-1])\n self.plot_settings['cmap'] = 'stereohi{det!s}'.format(det=self.detector[-1])\n self.plot_settings['norm'] = ImageNormalize(\n stretch=source_stretch(self.meta, PowerStretch(0.25)), clip=False)\n\n # Try to identify when the FITS meta data does not have the correct\n # date FITS keyword\n if ('date_obs' in self.meta) and not('date-obs' in self.meta):\n self.meta['date-obs'] = self.meta['date_obs']\n\n @property\n def measurement(self):\n \"\"\"\n Returns the type of data observed.\n \"\"\"\n # TODO: This needs to do more than white-light. Should give B, pB, etc.\n return \"white-light\"\n\n @classmethod\n def is_datasource_for(cls, data, header, **kwargs):\n \"\"\"Determines if header corresponds to an COR image\"\"\"\n return str(header.get('detector', '')).startswith('HI')\n", "path": "sunpy/map/sources/stereo.py"}]} | 2,741 | 241 |
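Editor's note on the SECCHI/EUVI record above: the un-numbered `CROTA` keyword is not the form astropy's WCS layer recognises — it only understands the axis-numbered `CROTAn` keywords, hence the `FITSFixedWarning` quoted in the issue. The golden diff simply renames the keyword when the map is constructed. Reduced to its essence, the fix is a guarded rename on a dict-like header; the sketch below is illustrative only and reuses the rotation value quoted in the warning.

```python
# Sketch of the keyword normalisation from the golden diff, on a dict-like metadata object.
meta = {"crota": -6.39331135705}          # stand-in for the EUVI FITS header metadata
if "crota" in meta and "crota2" not in meta:
    meta["crota2"] = meta.pop("crota")    # expose the rotation under the standard CROTA2 keyword
print(meta)                               # {'crota2': -6.39331135705}
```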
gh_patches_debug_18021 | rasdani/github-patches | git_diff | dj-stripe__dj-stripe-348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django 1.10 support
https://github.com/kavdev/dj-stripe/blob/master/djstripe/__init__.py#L19
`'1.10'` is less than `'1.7.x'`
</issue>
<code>
[start of djstripe/__init__.py]
1 from __future__ import unicode_literals
2 import warnings
3
4 from django import get_version as get_django_version
5
6 __title__ = "dj-stripe"
7 __summary__ = "Django + Stripe Made Easy"
8 __uri__ = "https://github.com/kavdev/dj-stripe/"
9
10 __version__ = "0.9.0.dev0"
11
12 __author__ = "Alexander Kavanaugh"
13 __email__ = "[email protected]"
14
15 __license__ = "BSD"
16 __license__ = "License :: OSI Approved :: BSD License"
17 __copyright__ = "Copyright 2016 Alexander Kavanaugh"
18
19 if get_django_version() <= '1.7.x':
20 msg = "dj-stripe deprecation notice: Django 1.7 and lower are no longer\n" \
21 "supported. Please upgrade to Django 1.8 or higher.\n" \
22 "Reference: https://github.com/kavdev/dj-stripe/issues/275"
23 warnings.warn(msg)
24
[end of djstripe/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/djstripe/__init__.py b/djstripe/__init__.py
--- a/djstripe/__init__.py
+++ b/djstripe/__init__.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
import warnings
-from django import get_version as get_django_version
+from django import VERSION as django_version
__title__ = "dj-stripe"
__summary__ = "Django + Stripe Made Easy"
@@ -16,7 +16,7 @@
__license__ = "License :: OSI Approved :: BSD License"
__copyright__ = "Copyright 2016 Alexander Kavanaugh"
-if get_django_version() <= '1.7.x':
+if django_version < (1, 8):
msg = "dj-stripe deprecation notice: Django 1.7 and lower are no longer\n" \
"supported. Please upgrade to Django 1.8 or higher.\n" \
"Reference: https://github.com/kavdev/dj-stripe/issues/275"
| {"golden_diff": "diff --git a/djstripe/__init__.py b/djstripe/__init__.py\n--- a/djstripe/__init__.py\n+++ b/djstripe/__init__.py\n@@ -1,7 +1,7 @@\n from __future__ import unicode_literals\n import warnings\n \n-from django import get_version as get_django_version\n+from django import VERSION as django_version\n \n __title__ = \"dj-stripe\"\n __summary__ = \"Django + Stripe Made Easy\"\n@@ -16,7 +16,7 @@\n __license__ = \"License :: OSI Approved :: BSD License\"\n __copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n \n-if get_django_version() <= '1.7.x':\n+if django_version < (1, 8):\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n", "issue": "Django 1.10 support\nhttps://github.com/kavdev/dj-stripe/blob/master/djstripe/__init__.py#L19\n\n`'1.10'` is less than `'1.7.x'`\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nimport warnings\n\nfrom django import get_version as get_django_version\n\n__title__ = \"dj-stripe\"\n__summary__ = \"Django + Stripe Made Easy\"\n__uri__ = \"https://github.com/kavdev/dj-stripe/\"\n\n__version__ = \"0.9.0.dev0\"\n\n__author__ = \"Alexander Kavanaugh\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD\"\n__license__ = \"License :: OSI Approved :: BSD License\"\n__copyright__ = \"Copyright 2016 Alexander Kavanaugh\"\n\nif get_django_version() <= '1.7.x':\n msg = \"dj-stripe deprecation notice: Django 1.7 and lower are no longer\\n\" \\\n \"supported. Please upgrade to Django 1.8 or higher.\\n\" \\\n \"Reference: https://github.com/kavdev/dj-stripe/issues/275\"\n warnings.warn(msg)\n", "path": "djstripe/__init__.py"}]} | 843 | 229 |
gh_patches_debug_4821 | rasdani/github-patches | git_diff | numpy__numpy-13306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Indicate attempted allocation size when constructing an array gives a MemoryError
I've seen multiple questions now of the form "why does this give a MemoryError" when the reason is the user ended up allocating a bigger array than expected.
It would be helpful to include the shape and dtype in the error message.
</issue>
<code>
[start of numpy/core/_exceptions.py]
1 """
2 Various richly-typed exceptions, that also help us deal with string formatting
3 in python where it's easier.
4
5 By putting the formatting in `__str__`, we also avoid paying the cost for
6 users who silence the exceptions.
7 """
8 from numpy.core.overrides import set_module
9
10 def _unpack_tuple(tup):
11 if len(tup) == 1:
12 return tup[0]
13 else:
14 return tup
15
16
17 def _display_as_base(cls):
18 """
19 A decorator that makes an exception class look like its base.
20
21 We use this to hide subclasses that are implementation details - the user
22 should catch the base type, which is what the traceback will show them.
23
24 Classes decorated with this decorator are subject to removal without a
25 deprecation warning.
26 """
27 assert issubclass(cls, Exception)
28 cls.__name__ = cls.__base__.__name__
29 cls.__qualname__ = cls.__base__.__qualname__
30 return cls
31
32
33 class UFuncTypeError(TypeError):
34 """ Base class for all ufunc exceptions """
35 def __init__(self, ufunc):
36 self.ufunc = ufunc
37
38
39 @_display_as_base
40 class _UFuncNoLoopError(UFuncTypeError):
41 """ Thrown when a ufunc loop cannot be found """
42 def __init__(self, ufunc, dtypes):
43 super().__init__(ufunc)
44 self.dtypes = tuple(dtypes)
45
46 def __str__(self):
47 return (
48 "ufunc {!r} did not contain a loop with signature matching types "
49 "{!r} -> {!r}"
50 ).format(
51 self.ufunc.__name__,
52 _unpack_tuple(self.dtypes[:self.ufunc.nin]),
53 _unpack_tuple(self.dtypes[self.ufunc.nin:])
54 )
55
56
57 @_display_as_base
58 class _UFuncCastingError(UFuncTypeError):
59 def __init__(self, ufunc, casting, from_, to):
60 super().__init__(ufunc)
61 self.casting = casting
62 self.from_ = from_
63 self.to = to
64
65
66 @_display_as_base
67 class _UFuncInputCastingError(_UFuncCastingError):
68 """ Thrown when a ufunc input cannot be casted """
69 def __init__(self, ufunc, casting, from_, to, i):
70 super().__init__(ufunc, casting, from_, to)
71 self.in_i = i
72
73 def __str__(self):
74 # only show the number if more than one input exists
75 i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
76 return (
77 "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
78 "rule {!r}"
79 ).format(
80 self.ufunc.__name__, i_str, self.from_, self.to, self.casting
81 )
82
83
84 @_display_as_base
85 class _UFuncOutputCastingError(_UFuncCastingError):
86 """ Thrown when a ufunc output cannot be casted """
87 def __init__(self, ufunc, casting, from_, to, i):
88 super().__init__(ufunc, casting, from_, to)
89 self.out_i = i
90
91 def __str__(self):
92 # only show the number if more than one output exists
93 i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
94 return (
95 "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
96 "rule {!r}"
97 ).format(
98 self.ufunc.__name__, i_str, self.from_, self.to, self.casting
99 )
100
101
102 # Exception used in shares_memory()
103 @set_module('numpy')
104 class TooHardError(RuntimeError):
105 pass
106
107
108 @set_module('numpy')
109 class AxisError(ValueError, IndexError):
110 """ Axis supplied was invalid. """
111 def __init__(self, axis, ndim=None, msg_prefix=None):
112 # single-argument form just delegates to base class
113 if ndim is None and msg_prefix is None:
114 msg = axis
115
116 # do the string formatting here, to save work in the C code
117 else:
118 msg = ("axis {} is out of bounds for array of dimension {}"
119 .format(axis, ndim))
120 if msg_prefix is not None:
121 msg = "{}: {}".format(msg_prefix, msg)
122
123 super(AxisError, self).__init__(msg)
124
[end of numpy/core/_exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py
--- a/numpy/core/_exceptions.py
+++ b/numpy/core/_exceptions.py
@@ -121,3 +121,15 @@
msg = "{}: {}".format(msg_prefix, msg)
super(AxisError, self).__init__(msg)
+
+
+@_display_as_base
+class _ArrayMemoryError(MemoryError):
+ """ Thrown when an array cannot be allocated"""
+ def __init__(self, shape, dtype):
+ self.shape = shape
+ self.dtype = dtype
+
+ def __str__(self):
+ return "Unable to allocate array with shape {} and data type {}".format(self.shape, self.dtype)
+
| {"golden_diff": "diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py\n--- a/numpy/core/_exceptions.py\n+++ b/numpy/core/_exceptions.py\n@@ -121,3 +121,15 @@\n msg = \"{}: {}\".format(msg_prefix, msg)\n \n super(AxisError, self).__init__(msg)\n+\n+\n+@_display_as_base\n+class _ArrayMemoryError(MemoryError):\n+ \"\"\" Thrown when an array cannot be allocated\"\"\"\n+ def __init__(self, shape, dtype):\n+ self.shape = shape\n+ self.dtype = dtype\n+\n+ def __str__(self):\n+ return \"Unable to allocate array with shape {} and data type {}\".format(self.shape, self.dtype)\n+\n", "issue": "Indicate attempted allocation size when constructing an array gives a MemoryError\nI've seen multiple questions now of the form \"why does this give a MemoryError\" when the reason is the user ended up allocating a bigger array than expected.\r\n\r\nIt would be helpful to include the shape and dtype in the error message.\n", "before_files": [{"content": "\"\"\"\nVarious richly-typed exceptions, that also help us deal with string formatting\nin python where it's easier.\n\nBy putting the formatting in `__str__`, we also avoid paying the cost for\nusers who silence the exceptions.\n\"\"\"\nfrom numpy.core.overrides import set_module\n\ndef _unpack_tuple(tup):\n if len(tup) == 1:\n return tup[0]\n else:\n return tup\n\n\ndef _display_as_base(cls):\n \"\"\"\n A decorator that makes an exception class look like its base.\n\n We use this to hide subclasses that are implementation details - the user\n should catch the base type, which is what the traceback will show them.\n\n Classes decorated with this decorator are subject to removal without a\n deprecation warning.\n \"\"\"\n assert issubclass(cls, Exception)\n cls.__name__ = cls.__base__.__name__\n cls.__qualname__ = cls.__base__.__qualname__\n return cls\n\n\nclass UFuncTypeError(TypeError):\n \"\"\" Base class for all ufunc exceptions \"\"\"\n def __init__(self, ufunc):\n self.ufunc = ufunc\n\n\n@_display_as_base\nclass _UFuncNoLoopError(UFuncTypeError):\n \"\"\" Thrown when a ufunc loop cannot be found \"\"\"\n def __init__(self, ufunc, dtypes):\n super().__init__(ufunc)\n self.dtypes = tuple(dtypes)\n\n def __str__(self):\n return (\n \"ufunc {!r} did not contain a loop with signature matching types \"\n \"{!r} -> {!r}\"\n ).format(\n self.ufunc.__name__,\n _unpack_tuple(self.dtypes[:self.ufunc.nin]),\n _unpack_tuple(self.dtypes[self.ufunc.nin:])\n )\n\n\n@_display_as_base\nclass _UFuncCastingError(UFuncTypeError):\n def __init__(self, ufunc, casting, from_, to):\n super().__init__(ufunc)\n self.casting = casting\n self.from_ = from_\n self.to = to\n\n\n@_display_as_base\nclass _UFuncInputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc input cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.in_i = i\n\n def __str__(self):\n # only show the number if more than one input exists\n i_str = \"{} \".format(self.in_i) if self.ufunc.nin != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n@_display_as_base\nclass _UFuncOutputCastingError(_UFuncCastingError):\n \"\"\" Thrown when a ufunc output cannot be casted \"\"\"\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.out_i = i\n\n def __str__(self):\n # only show the number if more than one output exists\n i_str = 
\"{} \".format(self.out_i) if self.ufunc.nout != 1 else \"\"\n return (\n \"Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting \"\n \"rule {!r}\"\n ).format(\n self.ufunc.__name__, i_str, self.from_, self.to, self.casting\n )\n\n\n# Exception used in shares_memory()\n@set_module('numpy')\nclass TooHardError(RuntimeError):\n pass\n\n\n@set_module('numpy')\nclass AxisError(ValueError, IndexError):\n \"\"\" Axis supplied was invalid. \"\"\"\n def __init__(self, axis, ndim=None, msg_prefix=None):\n # single-argument form just delegates to base class\n if ndim is None and msg_prefix is None:\n msg = axis\n\n # do the string formatting here, to save work in the C code\n else:\n msg = (\"axis {} is out of bounds for array of dimension {}\"\n .format(axis, ndim))\n if msg_prefix is not None:\n msg = \"{}: {}\".format(msg_prefix, msg)\n\n super(AxisError, self).__init__(msg)\n", "path": "numpy/core/_exceptions.py"}]} | 1,823 | 165 |
gh_patches_debug_30870 | rasdani/github-patches | git_diff | ManimCommunity__manim-1368 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version checker throws an error when manim is run without internet
## Description of bug / unexpected behavior
Title.
## Expected behavior
Version checker should just log that the version can't be checked, and not produce a traceback.
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
Paste your code here.
```
</details>
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
<details><summary>Images/GIFs</summary>
<!-- PASTE MEDIA HERE -->
</details>
## Logs
<details><summary>Terminal output</summary>
<!-- Add "-v DEBUG" when calling manim to generate more detailed logs -->
```
C:\Users\Daryl\manim-dev\manim>manim -p test.py
Manim Community v0.4.0
1: Become
2: BecomePoints
3: BecomePointsScene
4: BecomeStyle
5: BecomeStyle2
6: Image
Choose number corresponding to desired scene/arguments.
(Use comma separated list for multiple entries)
Choice(s): 6
[04/19/21 10:55:41] INFO scene_file_writer.py:585
File ready at C:\Users\Daryl\manim-dev\manim\media\images\test\Ima
ge_ManimCE_v0.4.0.png
INFO Rendered Image scene.py:199
Played 0 animations
INFO Previewed File at: file_ops.py:98
C:\Users\Daryl\manim-dev\manim\media\images\test\Image_ManimCE_v0.4.0.png
Traceback (most recent call last):
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\Scripts\manim", line 5, in <module>
main()
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 782, in main
rv = self.invoke(ctx)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\core.py", line 610, in invoke
return callback(*args, **kwargs)
File "C:\Users\Daryl\AppData\Local\pypoetry\Cache\virtualenvs\manim-WZ_QM4hs-py3.9\lib\site-packages\click\decorators.py", line 21, in new_func
return f(get_current_context(), *args, **kwargs)
File "C:\Users\Daryl\manim-dev\manim\manim\cli\render\commands.py", line 166, in render
stable = req_info.json()["info"]["version"]
AttributeError: 'dict' object has no attribute 'json'
```
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
</details>
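For context, a minimal sketch of why the traceback above ends in an `AttributeError` (the variable name matches the excerpt of `manim/cli/render/commands.py` shown below; the rest is illustrative):

```py
# req_info starts out as a plain dict; if requests.get() raises and the error is
# caught, it is never reassigned, so the later req_info.json() call hits a dict.
req_info = {}
req_info.json()  # AttributeError: 'dict' object has no attribute 'json'
```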
## System specifications
<details><summary>System Details</summary>
- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):
- RAM:
- Python version (`python/py/python3 --version`):
- Installed modules (provide output from `pip list`):
```
PASTE HERE
```
</details>
<details><summary>LaTeX details</summary>
+ LaTeX distribution (e.g. TeX Live 2020):
+ Installed LaTeX packages:
<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->
</details>
<details><summary>FFMPEG</summary>
Output of `ffmpeg -version`:
```
PASTE HERE
```
</details>
## Additional comments
<!-- Add further context that you think might be relevant for this issue here. -->
</issue>
<code>
[start of manim/cli/render/commands.py]
1 """Manim's default subcommand, render.
2
3 Manim's render subcommand is accessed in the command-line interface via
4 ``manim``, but can be more explicitly accessed with ``manim render``. Here you
5 can specify options, and arguments for the render command.
6
7 """
8 import json
9 import sys
10 from pathlib import Path
11 from textwrap import dedent
12
13 import click
14 import cloup
15 import requests
16
17 from ... import __version__, config, console, logger
18 from ...constants import CONTEXT_SETTINGS, EPILOG
19 from ...utils.exceptions import RerunSceneException
20 from ...utils.module_ops import scene_classes_from_file
21 from .ease_of_access_options import ease_of_access_options
22 from .global_options import global_options
23 from .output_options import output_options
24 from .render_options import render_options
25
26
27 @cloup.command(
28 context_settings=CONTEXT_SETTINGS,
29 epilog=EPILOG,
30 )
31 @click.argument("file", type=Path, required=True)
32 @click.argument("scene_names", required=False, nargs=-1)
33 @global_options
34 @output_options
35 @render_options
36 @ease_of_access_options
37 @click.pass_context
38 def render(
39 ctx,
40 **args,
41 ):
42 """Render SCENE(S) from the input FILE.
43
44 FILE is the file path of the script.
45
46 SCENES is an optional list of scenes in the file.
47 """
48 for scene in args["scene_names"]:
49 if str(scene).startswith("-"):
50 logger.warning(
51 dedent(
52 """\
53 Manim Community has moved to Click for the CLI.
54
55 This means that options in the CLI are provided BEFORE the positional
56 arguments for your FILE and SCENE(s):
57 `manim render [OPTIONS] [FILE] [SCENES]...`
58
59 For example:
60 New way - `manim -p -ql file.py SceneName1 SceneName2 ...`
61 Old way - `manim file.py SceneName1 SceneName2 ... -p -ql`
62
63 To see the help page for the new available options, run:
64 `manim render -h`
65 """
66 )
67 )
68 sys.exit()
69
70 if args["use_opengl_renderer"]:
71 logger.warning(
72 "--use_opengl_renderer is deprecated, please use --renderer=opengl instead!"
73 )
74 renderer = "opengl"
75
76 if args["use_webgl_renderer"]:
77 logger.warning(
78 "--use_webgl_renderer is deprecated, please use --renderer=webgl instead!"
79 )
80 renderer = "webgl"
81
82 if args["use_webgl_renderer"] and args["use_opengl_renderer"]:
83 logger.warning("You may select only one renderer!")
84 sys.exit()
85
86 class ClickArgs:
87 def __init__(self, args):
88 for name in args:
89 setattr(self, name, args[name])
90
91 def _get_kwargs(self):
92 return list(self.__dict__.items())
93
94 def __eq__(self, other):
95 if not isinstance(other, ClickArgs):
96 return NotImplemented
97 return vars(self) == vars(other)
98
99 def __contains__(self, key):
100 return key in self.__dict__
101
102 def __repr__(self):
103 return str(self.__dict__)
104
105 click_args = ClickArgs(args)
106 if args["jupyter"]:
107 return click_args
108
109 config.digest_args(click_args)
110 file = args["file"]
111 if config.renderer == "opengl":
112 from manim.renderer.opengl_renderer import OpenGLRenderer
113
114 for SceneClass in scene_classes_from_file(file):
115 try:
116 renderer = OpenGLRenderer()
117 while True:
118 scene_classes = scene_classes_from_file(file)
119 SceneClass = scene_classes[0]
120 scene = SceneClass(renderer)
121 status = scene.render()
122 if status:
123 continue
124 else:
125 break
126 except Exception:
127 console.print_exception()
128 elif config.renderer == "webgl":
129 try:
130 from manim.grpc.impl import frame_server_impl
131
132 server = frame_server_impl.get(file)
133 server.start()
134 server.wait_for_termination()
135 except ModuleNotFoundError:
136 console.print(
137 "Dependencies for the WebGL render are missing. Run "
138 "pip install manim[webgl_renderer] to install them."
139 )
140 console.print_exception()
141 else:
142 for SceneClass in scene_classes_from_file(file):
143 try:
144 scene = SceneClass()
145 scene.render()
146 except Exception:
147 console.print_exception()
148
149 if config.notify_outdated_version:
150 manim_info_url = "https://pypi.org/pypi/manim/json"
151 warn_prompt = "Cannot check if latest release of manim is installed"
152 req_info = {}
153
154 try:
155 req_info = requests.get(manim_info_url)
156 req_info.raise_for_status()
157 except requests.exceptions.HTTPError:
158 logger.debug(f"HTTP Error: {warn_prompt}")
159 except requests.exceptions.ConnectionError:
160 logger.debug(f"Connection Error: {warn_prompt}")
161 except requests.exceptions.Timeout:
162 logger.debug(f"Timed Out: {warn_prompt}")
163 except Exception:
164 logger.debug(f"Something went wrong: {warn_prompt}")
165
166 try:
167 stable = req_info.json()["info"]["version"]
168
169 if stable != __version__:
170 console.print(
171 f"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available."
172 )
173 console.print(
174 "You should consider upgrading via [yellow]pip install -U manim[/yellow]"
175 )
176 except json.JSONDecodeError:
177 logger.debug(warn_prompt)
178 logger.debug(f"Error decoding JSON from {manim_info_url}")
179
180 return args
181
[end of manim/cli/render/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py
--- a/manim/cli/render/commands.py
+++ b/manim/cli/render/commands.py
@@ -154,18 +154,8 @@
try:
req_info = requests.get(manim_info_url)
req_info.raise_for_status()
- except requests.exceptions.HTTPError:
- logger.debug(f"HTTP Error: {warn_prompt}")
- except requests.exceptions.ConnectionError:
- logger.debug(f"Connection Error: {warn_prompt}")
- except requests.exceptions.Timeout:
- logger.debug(f"Timed Out: {warn_prompt}")
- except Exception:
- logger.debug(f"Something went wrong: {warn_prompt}")
- try:
stable = req_info.json()["info"]["version"]
-
if stable != __version__:
console.print(
f"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available."
@@ -173,8 +163,16 @@
console.print(
"You should consider upgrading via [yellow]pip install -U manim[/yellow]"
)
+ except requests.exceptions.HTTPError:
+ logger.debug(f"HTTP Error: {warn_prompt}")
+ except requests.exceptions.ConnectionError:
+ logger.debug(f"Connection Error: {warn_prompt}")
+ except requests.exceptions.Timeout:
+ logger.debug(f"Timed Out: {warn_prompt}")
except json.JSONDecodeError:
logger.debug(warn_prompt)
logger.debug(f"Error decoding JSON from {manim_info_url}")
+ except Exception:
+ logger.debug(f"Something went wrong: {warn_prompt}")
return args
| {"golden_diff": "diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py\n--- a/manim/cli/render/commands.py\n+++ b/manim/cli/render/commands.py\n@@ -154,18 +154,8 @@\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n- except requests.exceptions.HTTPError:\n- logger.debug(f\"HTTP Error: {warn_prompt}\")\n- except requests.exceptions.ConnectionError:\n- logger.debug(f\"Connection Error: {warn_prompt}\")\n- except requests.exceptions.Timeout:\n- logger.debug(f\"Timed Out: {warn_prompt}\")\n- except Exception:\n- logger.debug(f\"Something went wrong: {warn_prompt}\")\n \n- try:\n stable = req_info.json()[\"info\"][\"version\"]\n-\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\"\n@@ -173,8 +163,16 @@\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\"\n )\n+ except requests.exceptions.HTTPError:\n+ logger.debug(f\"HTTP Error: {warn_prompt}\")\n+ except requests.exceptions.ConnectionError:\n+ logger.debug(f\"Connection Error: {warn_prompt}\")\n+ except requests.exceptions.Timeout:\n+ logger.debug(f\"Timed Out: {warn_prompt}\")\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n+ except Exception:\n+ logger.debug(f\"Something went wrong: {warn_prompt}\")\n \n return args\n", "issue": "Version checker throws an error when manim is run without internet\n## Description of bug / unexpected behavior\r\nTitle.\r\n\r\n\r\n## Expected behavior\r\nVersion checker should just log that the version can't be checked, and not produce a traceback.\r\n\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nPaste your code here.\r\n```\r\n\r\n</details>\r\n\r\n\r\n## Additional media files\r\n<!-- Paste in the files manim produced on rendering the code above. 
-->\r\n\r\n<details><summary>Images/GIFs</summary>\r\n\r\n<!-- PASTE MEDIA HERE -->\r\n\r\n</details>\r\n\r\n\r\n## Logs\r\n<details><summary>Terminal output</summary>\r\n<!-- Add \"-v DEBUG\" when calling manim to generate more detailed logs -->\r\n\r\n```\r\nC:\\Users\\Daryl\\manim-dev\\manim>manim -p test.py\r\nManim Community v0.4.0\r\n\r\n1: Become\r\n2: BecomePoints\r\n3: BecomePointsScene\r\n4: BecomeStyle\r\n5: BecomeStyle2\r\n6: Image\r\n\r\nChoose number corresponding to desired scene/arguments.\r\n(Use comma separated list for multiple entries)\r\nChoice(s): 6\r\n[04/19/21 10:55:41] INFO scene_file_writer.py:585\r\n File ready at C:\\Users\\Daryl\\manim-dev\\manim\\media\\images\\test\\Ima\r\n ge_ManimCE_v0.4.0.png\r\n\r\n INFO Rendered Image scene.py:199\r\n Played 0 animations\r\n INFO Previewed File at: file_ops.py:98\r\n C:\\Users\\Daryl\\manim-dev\\manim\\media\\images\\test\\Image_ManimCE_v0.4.0.png\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\Scripts\\manim\", line 5, in <module>\r\n main()\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"C:\\Users\\Daryl\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\manim-WZ_QM4hs-py3.9\\lib\\site-packages\\click\\decorators.py\", line 21, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"C:\\Users\\Daryl\\manim-dev\\manim\\manim\\cli\\render\\commands.py\", line 166, in render\r\n stable = req_info.json()[\"info\"][\"version\"]\r\nAttributeError: 'dict' object has no attribute 'json'\r\n```\r\n\r\n<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->\r\n\r\n</details>\r\n\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):\r\n- RAM:\r\n- Python version (`python/py/python3 --version`):\r\n- Installed modules (provide output from `pip list`):\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n<details><summary>LaTeX details</summary>\r\n\r\n+ LaTeX distribution (e.g. TeX Live 2020):\r\n+ Installed LaTeX packages:\r\n<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->\r\n</details>\r\n\r\n<details><summary>FFMPEG</summary>\r\n\r\nOutput of `ffmpeg -version`:\r\n\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant for this issue here. 
-->\r\n\n", "before_files": [{"content": "\"\"\"Manim's default subcommand, render.\n\nManim's render subcommand is accessed in the command-line interface via\n``manim``, but can be more explicitly accessed with ``manim render``. Here you\ncan specify options, and arguments for the render command.\n\n\"\"\"\nimport json\nimport sys\nfrom pathlib import Path\nfrom textwrap import dedent\n\nimport click\nimport cloup\nimport requests\n\nfrom ... import __version__, config, console, logger\nfrom ...constants import CONTEXT_SETTINGS, EPILOG\nfrom ...utils.exceptions import RerunSceneException\nfrom ...utils.module_ops import scene_classes_from_file\nfrom .ease_of_access_options import ease_of_access_options\nfrom .global_options import global_options\nfrom .output_options import output_options\nfrom .render_options import render_options\n\n\[email protected](\n context_settings=CONTEXT_SETTINGS,\n epilog=EPILOG,\n)\[email protected](\"file\", type=Path, required=True)\[email protected](\"scene_names\", required=False, nargs=-1)\n@global_options\n@output_options\n@render_options\n@ease_of_access_options\[email protected]_context\ndef render(\n ctx,\n **args,\n):\n \"\"\"Render SCENE(S) from the input FILE.\n\n FILE is the file path of the script.\n\n SCENES is an optional list of scenes in the file.\n \"\"\"\n for scene in args[\"scene_names\"]:\n if str(scene).startswith(\"-\"):\n logger.warning(\n dedent(\n \"\"\"\\\n Manim Community has moved to Click for the CLI.\n\n This means that options in the CLI are provided BEFORE the positional\n arguments for your FILE and SCENE(s):\n `manim render [OPTIONS] [FILE] [SCENES]...`\n\n For example:\n New way - `manim -p -ql file.py SceneName1 SceneName2 ...`\n Old way - `manim file.py SceneName1 SceneName2 ... -p -ql`\n\n To see the help page for the new available options, run:\n `manim render -h`\n \"\"\"\n )\n )\n sys.exit()\n\n if args[\"use_opengl_renderer\"]:\n logger.warning(\n \"--use_opengl_renderer is deprecated, please use --renderer=opengl instead!\"\n )\n renderer = \"opengl\"\n\n if args[\"use_webgl_renderer\"]:\n logger.warning(\n \"--use_webgl_renderer is deprecated, please use --renderer=webgl instead!\"\n )\n renderer = \"webgl\"\n\n if args[\"use_webgl_renderer\"] and args[\"use_opengl_renderer\"]:\n logger.warning(\"You may select only one renderer!\")\n sys.exit()\n\n class ClickArgs:\n def __init__(self, args):\n for name in args:\n setattr(self, name, args[name])\n\n def _get_kwargs(self):\n return list(self.__dict__.items())\n\n def __eq__(self, other):\n if not isinstance(other, ClickArgs):\n return NotImplemented\n return vars(self) == vars(other)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return str(self.__dict__)\n\n click_args = ClickArgs(args)\n if args[\"jupyter\"]:\n return click_args\n\n config.digest_args(click_args)\n file = args[\"file\"]\n if config.renderer == \"opengl\":\n from manim.renderer.opengl_renderer import OpenGLRenderer\n\n for SceneClass in scene_classes_from_file(file):\n try:\n renderer = OpenGLRenderer()\n while True:\n scene_classes = scene_classes_from_file(file)\n SceneClass = scene_classes[0]\n scene = SceneClass(renderer)\n status = scene.render()\n if status:\n continue\n else:\n break\n except Exception:\n console.print_exception()\n elif config.renderer == \"webgl\":\n try:\n from manim.grpc.impl import frame_server_impl\n\n server = frame_server_impl.get(file)\n server.start()\n server.wait_for_termination()\n except ModuleNotFoundError:\n 
console.print(\n \"Dependencies for the WebGL render are missing. Run \"\n \"pip install manim[webgl_renderer] to install them.\"\n )\n console.print_exception()\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n scene = SceneClass()\n scene.render()\n except Exception:\n console.print_exception()\n\n if config.notify_outdated_version:\n manim_info_url = \"https://pypi.org/pypi/manim/json\"\n warn_prompt = \"Cannot check if latest release of manim is installed\"\n req_info = {}\n\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.debug(f\"HTTP Error: {warn_prompt}\")\n except requests.exceptions.ConnectionError:\n logger.debug(f\"Connection Error: {warn_prompt}\")\n except requests.exceptions.Timeout:\n logger.debug(f\"Timed Out: {warn_prompt}\")\n except Exception:\n logger.debug(f\"Something went wrong: {warn_prompt}\")\n\n try:\n stable = req_info.json()[\"info\"][\"version\"]\n\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\"\n )\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\"\n )\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n\n return args\n", "path": "manim/cli/render/commands.py"}]} | 3,292 | 378 |
gh_patches_debug_12594 | rasdani/github-patches | git_diff | optuna__optuna-1600 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Kubernetes example of "simple" to avoid potential errors
https://github.com/optuna/optuna/blob/a5a55a6354b3c60dd7e3a08adcbc3818e90599f0/examples/kubernetes/simple/sklearn_distributed.py#L45-L54
Like https://github.com/optuna/optuna/pull/1536, we should create a study before running the script.
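A minimal sketch of what creating the study up front could look like (run once before the workers start; the credentials are placeholders, while the study name and direction are taken from the example):

```python
import optuna

optuna.create_study(
    study_name="kubernetes",
    direction="maximize",
    storage="postgresql://USER:PASSWORD@postgres:5432/DB",
    load_if_exists=True,
)
```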
</issue>
<code>
[start of examples/kubernetes/simple/sklearn_distributed.py]
1 """
2 Optuna distributed optimization example that optimizes
3 an sklearn classifier for the Iris dataset on Kubernetes.
4
5 This example's code is mostly the same as the sklearn_simple.py example, except for two things:
6
7 1 - It gives a name to the study and sets load_if_exists to True
8 in order to avoid errors when the code is run from multiple workers.
9
10 2 - It sets the storage address to the postgres pod deployed with the workers.
11
12 """
13 import os
14
15 import sklearn.datasets
16 import sklearn.ensemble
17 import sklearn.model_selection
18 import sklearn.svm
19
20 import optuna
21
22
23 # FYI: Objective functions can take additional arguments
24 # (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).
25 def objective(trial):
26 iris = sklearn.datasets.load_iris()
27 x, y = iris.data, iris.target
28
29 classifier_name = trial.suggest_categorical("classifier", ["SVC", "RandomForest"])
30 if classifier_name == "SVC":
31 svc_c = trial.suggest_float("svc_c", 1e-10, 1e10, log=True)
32 classifier_obj = sklearn.svm.SVC(C=svc_c, gamma="auto")
33 else:
34 rf_max_depth = trial.suggest_int("rf_max_depth", 2, 32, log=True)
35 classifier_obj = sklearn.ensemble.RandomForestClassifier(
36 max_depth=rf_max_depth, n_estimators=10
37 )
38
39 score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)
40 accuracy = score.mean()
41 return accuracy
42
43
44 if __name__ == "__main__":
45 study = optuna.create_study(
46 direction="maximize",
47 study_name="kubernetes",
48 storage="postgresql://{}:{}@postgres:5432/{}".format(
49 os.environ["POSTGRES_USER"],
50 os.environ["POSTGRES_PASSWORD"],
51 os.environ["POSTGRES_DB"],
52 ),
53 load_if_exists=True,
54 )
55 study.optimize(objective, n_trials=20)
56 print(study.best_trial)
57
[end of examples/kubernetes/simple/sklearn_distributed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/kubernetes/simple/sklearn_distributed.py b/examples/kubernetes/simple/sklearn_distributed.py
--- a/examples/kubernetes/simple/sklearn_distributed.py
+++ b/examples/kubernetes/simple/sklearn_distributed.py
@@ -42,15 +42,13 @@
if __name__ == "__main__":
- study = optuna.create_study(
- direction="maximize",
+ study = optuna.load_study(
study_name="kubernetes",
storage="postgresql://{}:{}@postgres:5432/{}".format(
os.environ["POSTGRES_USER"],
os.environ["POSTGRES_PASSWORD"],
os.environ["POSTGRES_DB"],
),
- load_if_exists=True,
)
study.optimize(objective, n_trials=20)
print(study.best_trial)
| {"golden_diff": "diff --git a/examples/kubernetes/simple/sklearn_distributed.py b/examples/kubernetes/simple/sklearn_distributed.py\n--- a/examples/kubernetes/simple/sklearn_distributed.py\n+++ b/examples/kubernetes/simple/sklearn_distributed.py\n@@ -42,15 +42,13 @@\n \n \n if __name__ == \"__main__\":\n- study = optuna.create_study(\n- direction=\"maximize\",\n+ study = optuna.load_study(\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n- load_if_exists=True,\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "issue": "Update Kubernetes example of \"simple\" to avoid potential errors\nhttps://github.com/optuna/optuna/blob/a5a55a6354b3c60dd7e3a08adcbc3818e90599f0/examples/kubernetes/simple/sklearn_distributed.py#L45-L54\r\n\r\nLike https://github.com/optuna/optuna/pull/1536, we should create a study before running the script.\n", "before_files": [{"content": "\"\"\"\nOptuna distributed optimization example that optimizes\nan sklearn classifier for the Iris dataset on Kubernetes.\n\nThis example's code is mostly the same as the sklearn_simple.py example, except for two things:\n\n1 - It gives a name to the study and sets load_if_exists to True\nin order to avoid errors when the code is run from multiple workers.\n\n2 - It sets the storage address to the postgres pod deployed with the workers.\n\n\"\"\"\nimport os\n\nimport sklearn.datasets\nimport sklearn.ensemble\nimport sklearn.model_selection\nimport sklearn.svm\n\nimport optuna\n\n\n# FYI: Objective functions can take additional arguments\n# (https://optuna.readthedocs.io/en/stable/faq.html#objective-func-additional-args).\ndef objective(trial):\n iris = sklearn.datasets.load_iris()\n x, y = iris.data, iris.target\n\n classifier_name = trial.suggest_categorical(\"classifier\", [\"SVC\", \"RandomForest\"])\n if classifier_name == \"SVC\":\n svc_c = trial.suggest_float(\"svc_c\", 1e-10, 1e10, log=True)\n classifier_obj = sklearn.svm.SVC(C=svc_c, gamma=\"auto\")\n else:\n rf_max_depth = trial.suggest_int(\"rf_max_depth\", 2, 32, log=True)\n classifier_obj = sklearn.ensemble.RandomForestClassifier(\n max_depth=rf_max_depth, n_estimators=10\n )\n\n score = sklearn.model_selection.cross_val_score(classifier_obj, x, y, n_jobs=-1, cv=3)\n accuracy = score.mean()\n return accuracy\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(\n direction=\"maximize\",\n study_name=\"kubernetes\",\n storage=\"postgresql://{}:{}@postgres:5432/{}\".format(\n os.environ[\"POSTGRES_USER\"],\n os.environ[\"POSTGRES_PASSWORD\"],\n os.environ[\"POSTGRES_DB\"],\n ),\n load_if_exists=True,\n )\n study.optimize(objective, n_trials=20)\n print(study.best_trial)\n", "path": "examples/kubernetes/simple/sklearn_distributed.py"}]} | 1,202 | 175 |
gh_patches_debug_24487 | rasdani/github-patches | git_diff | chainer__chainer-3032 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
inconsistency to use example module
I found some inconsistent behavior when importing classes.
To use SOME_CLASS in chainer/iterators, the following code works.
```importIterator.py
import chainer
some_class = chainer.iterators.SOME_CLASS() # OK
```
However, to use SOME_CLASS in chainer/training/extensions, the following code does not work.
```importExtension.py
import chainer
some_class = chainer.training.extensions.SOME_CLASS() #NG
```
I feel some inconsistency here, because `chainer/__init__.py` contains both `import iterator` and `import iterators`, but `chainer/training/__init__.py` contains only `import extension` and not `import extensions`.
Is there any reason for this inconsistency?
* Conditions
- Chainer version
chainer v2.0.1
- OS/Platform
ubuntu 16.04
* Code to reproduce
```importModule.py
import chainer
iteratorExamples = chainer.iterators # OK
import chainer
extensionExamples = chainer.training.extensions #NG
```
* Error messages
```error.py
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: module 'chainer.training' has no attribute 'extensions'
```
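For reference, a workaround sketch rather than a fix: explicitly importing the submodule binds the attribute on the parent package (standard Python import behavior), so the following should work even without changing `chainer/training/__init__.py`:
```py
import chainer.training.extensions  # explicit submodule import sets the attribute

extensionExamples = chainer.training.extensions  # OK now
```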
</issue>
<code>
[start of chainer/training/__init__.py]
1 from chainer.training import extension # NOQA
2 from chainer.training import trainer # NOQA
3 from chainer.training import trigger # NOQA
4 from chainer.training import updater # NOQA
5
6
7 # import class and function
8 from chainer.training.extension import Extension # NOQA
9 from chainer.training.extension import make_extension # NOQA
10 from chainer.training.extension import PRIORITY_EDITOR # NOQA
11 from chainer.training.extension import PRIORITY_READER # NOQA
12 from chainer.training.extension import PRIORITY_WRITER # NOQA
13 from chainer.training.trainer import Trainer # NOQA
14 from chainer.training.trigger import get_trigger # NOQA
15 from chainer.training.trigger import IntervalTrigger # NOQA
16 from chainer.training.updater import ParallelUpdater # NOQA
17 from chainer.training.updater import StandardUpdater # NOQA
18 from chainer.training.updater import Updater # NOQA
19
[end of chainer/training/__init__.py]
[start of chainer/training/extensions/parameter_statistics.py]
1 import numpy
2 import six
3
4 from chainer import reporter
5 from chainer import training
6 from chainer.training import extension
7
8
9 class ParameterStatistics(extension.Extension):
10 """Trainer extension to report parameter statistics.
11
12 Statistics are collected and reported for a given :class:`~chainer.Link`
13 or an iterable of :class:`~chainer.Link`s. If a link contains child links,
14 the statistics are reported separately for each child.
15
16 Any function that takes a one-dimensional :class:`numpy.ndarray` or a
17 :class:`cupy.ndarray` and outputs a single or multiple real numbers can be
18 registered to handle the collection of statistics, e.g.
19 :meth:`numpy.ndarray.mean`.
20
21 The keys of reported statistics follow the convention of link name
22 followed by parameter name, attribute name and function name, e.g.
23 ``VGG16Layers/conv1_1/W/data/mean``. They are prepended with an optional
24 prefix and appended with integer indices if the statistics generating
25 function return multiple values.
26
27 Args:
28 links (~chainer.Link or iterable of ~chainer.Link): Link(s) containing
29 the parameters to observe. The link is expected to have a ``name``
30 attribute which is used as a part of the report key.
31 statistics (dict): Dictionary with function name to function mappings.
32 The name is a string and is used as a part of the report key. The
33 function is responsible for generating the statistics.
34 report_params (bool): If ``True``, report statistics for parameter
35 values such as weights and biases.
36 report_grads (bool): If ``True``, report statistics for parameter
37 gradients.
38 prefix (str): Optional prefix to prepend to the report keys.
39 trigger: Trigger that decides when to aggregate the results and report
40 the values.
41 """
42 default_name = 'parameter_statistics'
43 priority = extension.PRIORITY_WRITER
44
45 # prefix ends with a '/' and param_name is preceded by a '/'
46 report_key_template = ('{prefix}{link_name}{param_name}/{attr_name}/'
47 '{function_name}')
48
49 default_statistics = {
50 'mean': numpy.mean,
51 'std': numpy.std,
52 'min': numpy.min,
53 'max': numpy.max,
54 'zeros': lambda x: numpy.count_nonzero(x == 0),
55 'percentile': lambda x: numpy.percentile(x, (0.13, 2.28, 15.87,
56 50, 84.13, 97.72,
57 99.87))
58 }
59
60 def __init__(self, links, statistics=default_statistics,
61 report_params=True, report_grads=True, prefix=None,
62 trigger=(1, 'epoch')):
63
64 if not isinstance(links, (list, tuple)):
65 links = links,
66 self._links = links
67
68 self._statistics = statistics
69
70 attrs = []
71 if report_params:
72 attrs.append('data')
73 if report_grads:
74 attrs.append('grad')
75 self._attrs = attrs
76
77 self._prefix = prefix
78 self._trigger = training.trigger.get_trigger(trigger)
79 self._summary = reporter.DictSummary()
80
81 def __call__(self, trainer):
82 """Execute the statistics extension.
83
84 Collect statistics for the current state of parameters.
85
86 Note that this method will merely update its statistic summary, unless
87 the internal trigger is fired. If the trigger is fired, the summary
88 will also be reported and then reset for the next accumulation.
89
90 Args:
91 trainer (~chainer.training.Trainer): Associated trainer that
92 invoked this extension.
93 """
94 statistics = {}
95
96 for link in self._links:
97 link_name = getattr(link, 'name', 'None')
98 for param_name, param in link.namedparams():
99 for attr_name in self._attrs:
100 for function_name, function in \
101 six.iteritems(self._statistics):
102 # Get parameters as a flattend one-dimensional array
103 # since the statistics function should make no
104 # assumption about the axes
105 params = getattr(param, attr_name).ravel()
106 value = function(params)
107 key = self.report_key_template.format(
108 prefix=self._prefix + '/' if self._prefix else '',
109 link_name=link_name,
110 param_name=param_name,
111 attr_name=attr_name,
112 function_name=function_name
113 )
114 if hasattr(value, '__iter__'):
115 # Append integer indices to the keys if the
116 # statistic function return multiple values
117 statistics.update({'{}/{}'.format(key, i): v for
118 i, v in enumerate(value)})
119 else:
120 statistics[key] = value
121
122 self._summary.add(statistics)
123
124 if self._trigger(trainer):
125 reporter.report(self._summary.compute_mean())
126 self._summary = reporter.DictSummary() # Clear summary
127
128 def register_statistics(self, name, function):
129 """Register a function to compute a certain statistic.
130
131 The registered function will be called each time the extension runs and
132 the results will be included in the report.
133
134 Args:
135 name (str): Name of the statistic.
136 function: Function to generate the statistic. Any function that
137 takes a one-dimensional :class:`numpy.ndarray` or a
138 :class:`cupy.ndarray` and outputs a single or multiple real
139 numbers is allowed.
140 """
141 self._statistics[name] = function
142
[end of chainer/training/extensions/parameter_statistics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/training/__init__.py b/chainer/training/__init__.py
--- a/chainer/training/__init__.py
+++ b/chainer/training/__init__.py
@@ -1,7 +1,11 @@
from chainer.training import extension # NOQA
+from chainer.training import extensions # NOQA
from chainer.training import trainer # NOQA
from chainer.training import trigger # NOQA
+from chainer.training import triggers # NOQA
from chainer.training import updater # NOQA
+from chainer.training import updaters # NOQA
+from chainer.training import util # NOQA
# import class and function
diff --git a/chainer/training/extensions/parameter_statistics.py b/chainer/training/extensions/parameter_statistics.py
--- a/chainer/training/extensions/parameter_statistics.py
+++ b/chainer/training/extensions/parameter_statistics.py
@@ -2,8 +2,8 @@
import six
from chainer import reporter
-from chainer import training
from chainer.training import extension
+from chainer.training import trigger as trigger_module
class ParameterStatistics(extension.Extension):
@@ -75,7 +75,7 @@
self._attrs = attrs
self._prefix = prefix
- self._trigger = training.trigger.get_trigger(trigger)
+ self._trigger = trigger_module.get_trigger(trigger)
self._summary = reporter.DictSummary()
def __call__(self, trainer):
| {"golden_diff": "diff --git a/chainer/training/__init__.py b/chainer/training/__init__.py\n--- a/chainer/training/__init__.py\n+++ b/chainer/training/__init__.py\n@@ -1,7 +1,11 @@\n from chainer.training import extension # NOQA\n+from chainer.training import extensions # NOQA\n from chainer.training import trainer # NOQA\n from chainer.training import trigger # NOQA\n+from chainer.training import triggers # NOQA\n from chainer.training import updater # NOQA\n+from chainer.training import updaters # NOQA\n+from chainer.training import util # NOQA\n \n \n # import class and function\ndiff --git a/chainer/training/extensions/parameter_statistics.py b/chainer/training/extensions/parameter_statistics.py\n--- a/chainer/training/extensions/parameter_statistics.py\n+++ b/chainer/training/extensions/parameter_statistics.py\n@@ -2,8 +2,8 @@\n import six\n \n from chainer import reporter\n-from chainer import training\n from chainer.training import extension\n+from chainer.training import trigger as trigger_module\n \n \n class ParameterStatistics(extension.Extension):\n@@ -75,7 +75,7 @@\n self._attrs = attrs\n \n self._prefix = prefix\n- self._trigger = training.trigger.get_trigger(trigger)\n+ self._trigger = trigger_module.get_trigger(trigger)\n self._summary = reporter.DictSummary()\n \n def __call__(self, trainer):\n", "issue": "inconsistency to use example module\nI found some inconsistent behavior to import classes.\r\n\r\nTo use SOME_CLASS in chainer/iterators, the following code works.\r\n```importIterator.py\r\nimport chainer\r\nsome_class = chainer.iterators.SOME_CLASS() # OK\r\n```\r\nHowever, to use SOME_CLASS in chainer/training/extensions, the following code does not work.\r\n```importExtension.py\r\nimport chainer\r\nsome_class = chainer.training.extensions.SOME_CLASS() #NG\r\n```\r\n\r\nI feel some inconsistency here, because `chainer/__init__.py` contains both of `import iterator` and `import iterators` but `chaner/training/__init__.py` contains only `import extension` and not `import extensions`.\r\n\r\nIs there any reason about this inconsistency?\r\n\r\n* Conditions\r\n - Chainer version\r\n chainer v2.0.1\r\n - OS/Platform\r\n ubuntu 16.04\r\n* Code to reproduce\r\n```importModule.py\r\nimport chainer\r\niteratorExamples = chainer.iterators # OK\r\n\r\nimport chainer\r\nextensionExamples = chainer.training.extensions #NG\r\n```\r\n\r\n* Error messages\r\n```error.py\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nAttributeError: module 'chainer.training' has no attribute 'extensions'\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "from chainer.training import extension # NOQA\nfrom chainer.training import trainer # NOQA\nfrom chainer.training import trigger # NOQA\nfrom chainer.training import updater # NOQA\n\n\n# import class and function\nfrom chainer.training.extension import Extension # NOQA\nfrom chainer.training.extension import make_extension # NOQA\nfrom chainer.training.extension import PRIORITY_EDITOR # NOQA\nfrom chainer.training.extension import PRIORITY_READER # NOQA\nfrom chainer.training.extension import PRIORITY_WRITER # NOQA\nfrom chainer.training.trainer import Trainer # NOQA\nfrom chainer.training.trigger import get_trigger # NOQA\nfrom chainer.training.trigger import IntervalTrigger # NOQA\nfrom chainer.training.updater import ParallelUpdater # NOQA\nfrom chainer.training.updater import StandardUpdater # NOQA\nfrom chainer.training.updater import Updater # NOQA\n", "path": "chainer/training/__init__.py"}, {"content": 
"import numpy\nimport six\n\nfrom chainer import reporter\nfrom chainer import training\nfrom chainer.training import extension\n\n\nclass ParameterStatistics(extension.Extension):\n \"\"\"Trainer extension to report parameter statistics.\n\n Statistics are collected and reported for a given :class:`~chainer.Link`\n or an iterable of :class:`~chainer.Link`s. If a link contains child links,\n the statistics are reported separately for each child.\n\n Any function that takes a one-dimensional :class:`numpy.ndarray` or a\n :class:`cupy.ndarray` and outputs a single or multiple real numbers can be\n registered to handle the collection of statistics, e.g.\n :meth:`numpy.ndarray.mean`.\n\n The keys of reported statistics follow the convention of link name\n followed by parameter name, attribute name and function name, e.g.\n ``VGG16Layers/conv1_1/W/data/mean``. They are prepended with an optional\n prefix and appended with integer indices if the statistics generating\n function return multiple values.\n\n Args:\n links (~chainer.Link or iterable of ~chainer.Link): Link(s) containing\n the parameters to observe. The link is expected to have a ``name``\n attribute which is used as a part of the report key.\n statistics (dict): Dictionary with function name to function mappings.\n The name is a string and is used as a part of the report key. The\n function is responsible for generating the statistics.\n report_params (bool): If ``True``, report statistics for parameter\n values such as weights and biases.\n report_grads (bool): If ``True``, report statistics for parameter\n gradients.\n prefix (str): Optional prefix to prepend to the report keys.\n trigger: Trigger that decides when to aggregate the results and report\n the values.\n \"\"\"\n default_name = 'parameter_statistics'\n priority = extension.PRIORITY_WRITER\n\n # prefix ends with a '/' and param_name is preceded by a '/'\n report_key_template = ('{prefix}{link_name}{param_name}/{attr_name}/'\n '{function_name}')\n\n default_statistics = {\n 'mean': numpy.mean,\n 'std': numpy.std,\n 'min': numpy.min,\n 'max': numpy.max,\n 'zeros': lambda x: numpy.count_nonzero(x == 0),\n 'percentile': lambda x: numpy.percentile(x, (0.13, 2.28, 15.87,\n 50, 84.13, 97.72,\n 99.87))\n }\n\n def __init__(self, links, statistics=default_statistics,\n report_params=True, report_grads=True, prefix=None,\n trigger=(1, 'epoch')):\n\n if not isinstance(links, (list, tuple)):\n links = links,\n self._links = links\n\n self._statistics = statistics\n\n attrs = []\n if report_params:\n attrs.append('data')\n if report_grads:\n attrs.append('grad')\n self._attrs = attrs\n\n self._prefix = prefix\n self._trigger = training.trigger.get_trigger(trigger)\n self._summary = reporter.DictSummary()\n\n def __call__(self, trainer):\n \"\"\"Execute the statistics extension.\n\n Collect statistics for the current state of parameters.\n\n Note that this method will merely update its statistic summary, unless\n the internal trigger is fired. 
If the trigger is fired, the summary\n will also be reported and then reset for the next accumulation.\n\n Args:\n trainer (~chainer.training.Trainer): Associated trainer that\n invoked this extension.\n \"\"\"\n statistics = {}\n\n for link in self._links:\n link_name = getattr(link, 'name', 'None')\n for param_name, param in link.namedparams():\n for attr_name in self._attrs:\n for function_name, function in \\\n six.iteritems(self._statistics):\n # Get parameters as a flattend one-dimensional array\n # since the statistics function should make no\n # assumption about the axes\n params = getattr(param, attr_name).ravel()\n value = function(params)\n key = self.report_key_template.format(\n prefix=self._prefix + '/' if self._prefix else '',\n link_name=link_name,\n param_name=param_name,\n attr_name=attr_name,\n function_name=function_name\n )\n if hasattr(value, '__iter__'):\n # Append integer indices to the keys if the\n # statistic function return multiple values\n statistics.update({'{}/{}'.format(key, i): v for\n i, v in enumerate(value)})\n else:\n statistics[key] = value\n\n self._summary.add(statistics)\n\n if self._trigger(trainer):\n reporter.report(self._summary.compute_mean())\n self._summary = reporter.DictSummary() # Clear summary\n\n def register_statistics(self, name, function):\n \"\"\"Register a function to compute a certain statistic.\n\n The registered function will be called each time the extension runs and\n the results will be included in the report.\n\n Args:\n name (str): Name of the statistic.\n function: Function to generate the statistic. Any function that\n takes a one-dimensional :class:`numpy.ndarray` or a\n :class:`cupy.ndarray` and outputs a single or multiple real\n numbers is allowed.\n \"\"\"\n self._statistics[name] = function\n", "path": "chainer/training/extensions/parameter_statistics.py"}]} | 2,565 | 323 |
gh_patches_debug_27870 | rasdani/github-patches | git_diff | Gallopsled__pwntools-2427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Undocumented behaviour of sni kwarg of remote()
In `remote()`, the 'server_hostname' SSL argument can be set like this:
```python
remote('172.22.0.2', 9090, ssl=True, sni='example.com')
```
This behaviour of the `sni` kwarg is undocumented. Currently only the behaviour of passing a bool is documented.
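For comparison, a minimal sketch of the documented and undocumented call forms (the hosts and ports are placeholders; the behaviour follows from the `isinstance(sni, str)` branch in `remote.__init__` shown below):
```python
# documented: sni=True (the default) derives server_hostname from the host argument
r1 = remote('example.com', 443, ssl=True)

# undocumented: a string value is passed through as server_hostname unchanged
r2 = remote('172.22.0.2', 9090, ssl=True, sni='example.com')
```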
</issue>
<code>
[start of pwnlib/tubes/remote.py]
1 from __future__ import absolute_import
2 from __future__ import division
3
4 import socket
5 import socks
6
7 from pwnlib.log import getLogger
8 from pwnlib.timeout import Timeout
9 from pwnlib.tubes.sock import sock
10
11 log = getLogger(__name__)
12
13 class remote(sock):
14 r"""Creates a TCP or UDP-connection to a remote host. It supports
15 both IPv4 and IPv6.
16
17 The returned object supports all the methods from
18 :class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.
19
20 Arguments:
21 host(str): The host to connect to.
22 port(int): The port to connect to.
23 fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
24 typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
25 timeout: A positive number, None or the string "default".
26 ssl(bool): Wrap the socket with SSL
27 ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.
28 sni: Set 'server_hostname' in ssl_args based on the host parameter.
29 sock(socket.socket): Socket to inherit, rather than connecting
30 ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.
31
32 Examples:
33
34 >>> r = remote('google.com', 443, ssl=True)
35 >>> r.send(b'GET /\r\n\r\n')
36 >>> r.recvn(4)
37 b'HTTP'
38
39 If a connection cannot be made, an exception is raised.
40
41 >>> r = remote('127.0.0.1', 1)
42 Traceback (most recent call last):
43 ...
44 PwnlibException: Could not connect to 127.0.0.1 on port 1
45
46 You can also use :meth:`.remote.fromsocket` to wrap an existing socket.
47
48 >>> import socket
49 >>> s = socket.socket()
50 >>> s.connect(('google.com', 80))
51 >>> s.send(b'GET /' + b'\r\n'*2)
52 9
53 >>> r = remote.fromsocket(s)
54 >>> r.recvn(4)
55 b'HTTP'
56 """
57
58 def __init__(self, host, port,
59 fam = "any", typ = "tcp",
60 ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,
61 *args, **kwargs):
62 super(remote, self).__init__(*args, **kwargs)
63
64 # convert port to string for sagemath support
65 self.rport = str(port)
66 self.rhost = host
67
68 if sock:
69 self.family = sock.family
70 self.type = sock.type
71 self.proto = sock.proto
72 self.sock = sock
73
74 else:
75 typ = self._get_type(typ)
76 fam = self._get_family(fam)
77 try:
78 self.sock = self._connect(fam, typ)
79 except socket.gaierror as e:
80 if e.errno != socket.EAI_NONAME:
81 raise
82 self.error('Could not resolve hostname: %r', host)
83 if self.sock:
84 self.settimeout(self.timeout)
85 self.lhost, self.lport = self.sock.getsockname()[:2]
86
87 if ssl:
88 # Deferred import to save startup time
89 import ssl as _ssl
90
91 ssl_args = ssl_args or {}
92 ssl_context = ssl_context or _ssl.SSLContext(_ssl.PROTOCOL_TLSv1_2)
93 if isinstance(sni, str):
94 ssl_args["server_hostname"] = sni
95 elif sni:
96 ssl_args["server_hostname"] = host
97 self.sock = ssl_context.wrap_socket(self.sock,**ssl_args)
98
99 def _connect(self, fam, typ):
100 sock = None
101 timeout = self.timeout
102
103 with self.waitfor('Opening connection to %s on port %s' % (self.rhost, self.rport)) as h:
104 for res in socket.getaddrinfo(self.rhost, self.rport, fam, typ, 0, socket.AI_PASSIVE):
105 self.family, self.type, self.proto, _canonname, sockaddr = res
106
107 if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
108 continue
109
110 h.status("Trying %s", sockaddr[0])
111
112 sock = socket.socket(self.family, self.type, self.proto)
113
114 if timeout is not None and timeout <= 0:
115 sock.setblocking(0)
116 else:
117 sock.setblocking(1)
118 sock.settimeout(timeout)
119
120 try:
121 sock.connect(sockaddr)
122 return sock
123 except socks.ProxyError:
124 raise
125 except socket.error:
126 pass
127 self.error("Could not connect to %s on port %s", self.rhost, self.rport)
128
129 @classmethod
130 def fromsocket(cls, socket):
131 """
132 Helper method to wrap a standard python socket.socket with the
133 tube APIs.
134
135 Arguments:
136 socket: Instance of socket.socket
137
138 Returns:
139 Instance of pwnlib.tubes.remote.remote.
140 """
141 s = socket
142 host, port = s.getpeername()
143 return remote(host, port, fam=s.family, typ=s.type, sock=s)
144
145 class tcp(remote):
146 __doc__ = remote.__doc__
147 def __init__(self, host, port, *a, **kw):
148 return super(tcp, self).__init__(host, port, typ="tcp", *a, **kw)
149
150 class udp(remote):
151 __doc__ = remote.__doc__
152 def __init__(self, host, port, *a, **kw):
153 return super(udp, self).__init__(host, port, typ="udp", *a, **kw)
154
155 class connect(remote):
156 __doc__ = remote.__doc__
157
[end of pwnlib/tubes/remote.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/tubes/remote.py b/pwnlib/tubes/remote.py
--- a/pwnlib/tubes/remote.py
+++ b/pwnlib/tubes/remote.py
@@ -23,11 +23,11 @@
fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
timeout: A positive number, None or the string "default".
+ sock(:class:`socket.socket`): Socket to inherit, rather than connecting
ssl(bool): Wrap the socket with SSL
ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.
- sni: Set 'server_hostname' in ssl_args based on the host parameter.
- sock(socket.socket): Socket to inherit, rather than connecting
- ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.
+ ssl_args(dict): Pass :func:`ssl.wrap_socket` named arguments in a dictionary.
+ sni(str,bool): Set 'server_hostname' in ssl_args. Set to True to set it based on the host argument. Set to False to not provide any value. Default is True.
Examples:
@@ -57,7 +57,7 @@
def __init__(self, host, port,
fam = "any", typ = "tcp",
- ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,
+ sock=None, ssl=False, ssl_context=None, ssl_args=None, sni=True,
*args, **kwargs):
super(remote, self).__init__(*args, **kwargs)
| {"golden_diff": "diff --git a/pwnlib/tubes/remote.py b/pwnlib/tubes/remote.py\n--- a/pwnlib/tubes/remote.py\n+++ b/pwnlib/tubes/remote.py\n@@ -23,11 +23,11 @@\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n timeout: A positive number, None or the string \"default\".\n+ sock(:class:`socket.socket`): Socket to inherit, rather than connecting\n ssl(bool): Wrap the socket with SSL\n ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.\n- sni: Set 'server_hostname' in ssl_args based on the host parameter.\n- sock(socket.socket): Socket to inherit, rather than connecting\n- ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.\n+ ssl_args(dict): Pass :func:`ssl.wrap_socket` named arguments in a dictionary.\n+ sni(str,bool): Set 'server_hostname' in ssl_args. Set to True to set it based on the host argument. Set to False to not provide any value. Default is True.\n \n Examples:\n \n@@ -57,7 +57,7 @@\n \n def __init__(self, host, port,\n fam = \"any\", typ = \"tcp\",\n- ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,\n+ sock=None, ssl=False, ssl_context=None, ssl_args=None, sni=True,\n *args, **kwargs):\n super(remote, self).__init__(*args, **kwargs)\n", "issue": "Undocumented behaviour of sni kwarg of remote()\nIn `remote()` 'server_hostname' ssl arg can be set like this:\r\n```python\r\nremote('172.22.0.2', 9090, ssl=True, sni='example.com')\r\n```\r\nThis behaviour of `sni` kwarg is undocumented. Currently only behaviour of passing a bool in documented.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\n\nimport socket\nimport socks\n\nfrom pwnlib.log import getLogger\nfrom pwnlib.timeout import Timeout\nfrom pwnlib.tubes.sock import sock\n\nlog = getLogger(__name__)\n\nclass remote(sock):\n r\"\"\"Creates a TCP or UDP-connection to a remote host. 
It supports\n both IPv4 and IPv6.\n\n The returned object supports all the methods from\n :class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.\n\n Arguments:\n host(str): The host to connect to.\n port(int): The port to connect to.\n fam: The string \"any\", \"ipv4\" or \"ipv6\" or an integer to pass to :func:`socket.getaddrinfo`.\n typ: The string \"tcp\" or \"udp\" or an integer to pass to :func:`socket.getaddrinfo`.\n timeout: A positive number, None or the string \"default\".\n ssl(bool): Wrap the socket with SSL\n ssl_context(ssl.SSLContext): Specify SSLContext used to wrap the socket.\n sni: Set 'server_hostname' in ssl_args based on the host parameter.\n sock(socket.socket): Socket to inherit, rather than connecting\n ssl_args(dict): Pass ssl.wrap_socket named arguments in a dictionary.\n\n Examples:\n\n >>> r = remote('google.com', 443, ssl=True)\n >>> r.send(b'GET /\\r\\n\\r\\n')\n >>> r.recvn(4)\n b'HTTP'\n\n If a connection cannot be made, an exception is raised.\n\n >>> r = remote('127.0.0.1', 1)\n Traceback (most recent call last):\n ...\n PwnlibException: Could not connect to 127.0.0.1 on port 1\n\n You can also use :meth:`.remote.fromsocket` to wrap an existing socket.\n\n >>> import socket\n >>> s = socket.socket()\n >>> s.connect(('google.com', 80))\n >>> s.send(b'GET /' + b'\\r\\n'*2)\n 9\n >>> r = remote.fromsocket(s)\n >>> r.recvn(4)\n b'HTTP'\n \"\"\"\n\n def __init__(self, host, port,\n fam = \"any\", typ = \"tcp\",\n ssl=False, sock=None, ssl_context=None, ssl_args=None, sni=True,\n *args, **kwargs):\n super(remote, self).__init__(*args, **kwargs)\n\n # convert port to string for sagemath support\n self.rport = str(port)\n self.rhost = host\n\n if sock:\n self.family = sock.family\n self.type = sock.type\n self.proto = sock.proto\n self.sock = sock\n\n else:\n typ = self._get_type(typ)\n fam = self._get_family(fam)\n try:\n self.sock = self._connect(fam, typ)\n except socket.gaierror as e:\n if e.errno != socket.EAI_NONAME:\n raise\n self.error('Could not resolve hostname: %r', host)\n if self.sock:\n self.settimeout(self.timeout)\n self.lhost, self.lport = self.sock.getsockname()[:2]\n\n if ssl:\n # Deferred import to save startup time\n import ssl as _ssl\n\n ssl_args = ssl_args or {}\n ssl_context = ssl_context or _ssl.SSLContext(_ssl.PROTOCOL_TLSv1_2)\n if isinstance(sni, str):\n ssl_args[\"server_hostname\"] = sni\n elif sni:\n ssl_args[\"server_hostname\"] = host\n self.sock = ssl_context.wrap_socket(self.sock,**ssl_args)\n\n def _connect(self, fam, typ):\n sock = None\n timeout = self.timeout\n\n with self.waitfor('Opening connection to %s on port %s' % (self.rhost, self.rport)) as h:\n for res in socket.getaddrinfo(self.rhost, self.rport, fam, typ, 0, socket.AI_PASSIVE):\n self.family, self.type, self.proto, _canonname, sockaddr = res\n\n if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:\n continue\n\n h.status(\"Trying %s\", sockaddr[0])\n\n sock = socket.socket(self.family, self.type, self.proto)\n\n if timeout is not None and timeout <= 0:\n sock.setblocking(0)\n else:\n sock.setblocking(1)\n sock.settimeout(timeout)\n\n try:\n sock.connect(sockaddr)\n return sock\n except socks.ProxyError:\n raise\n except socket.error:\n pass\n self.error(\"Could not connect to %s on port %s\", self.rhost, self.rport)\n\n @classmethod\n def fromsocket(cls, socket):\n \"\"\"\n Helper method to wrap a standard python socket.socket with the\n tube APIs.\n\n Arguments:\n socket: Instance of socket.socket\n\n Returns:\n Instance of 
pwnlib.tubes.remote.remote.\n \"\"\"\n s = socket\n host, port = s.getpeername()\n return remote(host, port, fam=s.family, typ=s.type, sock=s)\n\nclass tcp(remote):\n __doc__ = remote.__doc__\n def __init__(self, host, port, *a, **kw):\n return super(tcp, self).__init__(host, port, typ=\"tcp\", *a, **kw)\n\nclass udp(remote):\n __doc__ = remote.__doc__\n def __init__(self, host, port, *a, **kw):\n return super(udp, self).__init__(host, port, typ=\"udp\", *a, **kw)\n\nclass connect(remote):\n __doc__ = remote.__doc__\n", "path": "pwnlib/tubes/remote.py"}]} | 2,272 | 385 |
gh_patches_debug_32274 | rasdani/github-patches | git_diff | getredash__redash-2069 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The API key for one query may be used to retrieve another query's results
### Issue Summary
A query's API key may be used to obtain another query's results via the REST API when the API key is included in the query string.
### Steps to Reproduce
1. Create one query and execute it to obtain results (call it query A)
2. Create another query and execute it to obtain different results (call it query B)
3. Get the query's API key for query A (A_API_KEY) and the query number for query A (A_QUERY_NUMBER)
4. Get the result number from query B's most recent run (B_RESULT_NUMBER)
5. Execute the below code and you'll see that the API key for query A can get results from query B
```bash
REDASH_DOMAIN='yourdomain.com'
A_QUERY_NUMBER='query number for query A'
A_API_KEY='api key for query A'
B_RESULT_NUMBER='query result number for query b'
# this will download query B's results using query A's access key
wget \
-O query_b_results.csv \
"https://$REDASH_DOMAIN/api/queries/$A_QUERY_NUMBER/results/$B_RESULT_NUMBER.csv?api_key=$A_API_KEY"
```
This is a bug because one query's API key should NOT be able to access another query's results.
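A minimal sketch of the kind of server-side guard that would close this hole, assuming the results handler has both the `query` and the `query_result` objects loaded and that both expose a `query_hash` attribute, is to refuse to serve a result whose hash does not match the query the API key belongs to:

```python
# Sketch only: run inside the results handler once query and query_result are loaded.
if query is not None and query_result is not None and self.current_user.is_api_user():
    # An API key is scoped to one query, so the result must come from that query's text.
    if query.query_hash != query_result.query_hash:
        abort(404, message='No cached result found for this query.')
```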
### Technical details:
* Redash Version: 1.0.3
* Browser/OS: (Command Line) / Linux Mint 18.2
* How did you install Redash: Command line
</issue>
<code>
[start of redash/handlers/query_results.py]
1 import logging
2 import json
3 import time
4
5 import pystache
6 from flask import make_response, request
7 from flask_login import current_user
8 from flask_restful import abort
9 from redash import models, settings, utils
10 from redash.tasks import QueryTask, record_event
11 from redash.permissions import require_permission, not_view_only, has_access, require_access, view_only
12 from redash.handlers.base import BaseResource, get_object_or_404
13 from redash.utils import collect_query_parameters, collect_parameters_from_request, gen_query_hash
14 from redash.tasks.queries import enqueue_query
15
16
17 def error_response(message):
18 return {'job': {'status': 4, 'error': message}}, 400
19
20
21 #
22 # Run a parameterized query synchronously and return the result
23 # DISCLAIMER: Temporary solution to support parameters in queries. Should be
24 # removed once we refactor the query results API endpoints and handling
25 # on the client side. Please don't reuse in other API handlers.
26 #
27 def run_query_sync(data_source, parameter_values, query_text, max_age=0):
28 query_parameters = set(collect_query_parameters(query_text))
29 missing_params = set(query_parameters) - set(parameter_values.keys())
30 if missing_params:
31 raise Exception('Missing parameter value for: {}'.format(", ".join(missing_params)))
32
33 if query_parameters:
34 query_text = pystache.render(query_text, parameter_values)
35
36 if max_age <= 0:
37 query_result = None
38 else:
39 query_result = models.QueryResult.get_latest(data_source, query_text, max_age)
40
41 query_hash = gen_query_hash(query_text)
42
43 if query_result:
44 logging.info("Returning cached result for query %s" % query_hash)
45 return query_result
46
47 try:
48 started_at = time.time()
49 data, error = data_source.query_runner.run_query(query_text, current_user)
50
51 if error:
52 logging.info('got bak error')
53 logging.info(error)
54 return None
55
56 run_time = time.time() - started_at
57 query_result, updated_query_ids = models.QueryResult.store_result(data_source.org, data_source,
58 query_hash, query_text, data,
59 run_time, utils.utcnow())
60
61 models.db.session.commit()
62 return query_result
63 except Exception as e:
64 if max_age > 0:
65 abort(404, message="Unable to get result from the database, and no cached query result found.")
66 else:
67 abort(503, message="Unable to get result from the database.")
68 return None
69
70 def run_query(data_source, parameter_values, query_text, query_id, max_age=0):
71 query_parameters = set(collect_query_parameters(query_text))
72 missing_params = set(query_parameters) - set(parameter_values.keys())
73 if missing_params:
74 return error_response('Missing parameter value for: {}'.format(", ".join(missing_params)))
75
76 if data_source.paused:
77 if data_source.pause_reason:
78 message = '{} is paused ({}). Please try later.'.format(data_source.name, data_source.pause_reason)
79 else:
80 message = '{} is paused. Please try later.'.format(data_source.name)
81
82 return error_response(message)
83
84 if query_parameters:
85 query_text = pystache.render(query_text, parameter_values)
86
87 if max_age == 0:
88 query_result = None
89 else:
90 query_result = models.QueryResult.get_latest(data_source, query_text, max_age)
91
92 if query_result:
93 return {'query_result': query_result.to_dict()}
94 else:
95 job = enqueue_query(query_text, data_source, current_user.id, metadata={"Username": current_user.email, "Query ID": query_id})
96 return {'job': job.to_dict()}
97
98
99 class QueryResultListResource(BaseResource):
100 @require_permission('execute_query')
101 def post(self):
102 """
103 Execute a query (or retrieve recent results).
104
105 :qparam string query: The query text to execute
106 :qparam number query_id: The query object to update with the result (optional)
107 :qparam number max_age: If query results less than `max_age` seconds old are available, return them, otherwise execute the query; if omitted, always execute
108 :qparam number data_source_id: ID of data source to query
109 """
110 params = request.get_json(force=True)
111 parameter_values = collect_parameters_from_request(request.args)
112
113 query = params['query']
114 max_age = int(params.get('max_age', -1))
115 query_id = params.get('query_id', 'adhoc')
116
117 data_source = models.DataSource.get_by_id_and_org(params.get('data_source_id'), self.current_org)
118
119 if not has_access(data_source.groups, self.current_user, not_view_only):
120 return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403
121
122 self.record_event({
123 'action': 'execute_query',
124 'timestamp': int(time.time()),
125 'object_id': data_source.id,
126 'object_type': 'data_source',
127 'query': query
128 })
129 return run_query(data_source, parameter_values, query, query_id, max_age)
130
131
132 ONE_YEAR = 60 * 60 * 24 * 365.25
133
134
135 class QueryResultResource(BaseResource):
136 @staticmethod
137 def add_cors_headers(headers):
138 if 'Origin' in request.headers:
139 origin = request.headers['Origin']
140
141 if set(['*', origin]) & settings.ACCESS_CONTROL_ALLOW_ORIGIN:
142 headers['Access-Control-Allow-Origin'] = origin
143 headers['Access-Control-Allow-Credentials'] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()
144
145 @require_permission('view_query')
146 def options(self, query_id=None, query_result_id=None, filetype='json'):
147 headers = {}
148 self.add_cors_headers(headers)
149
150 if settings.ACCESS_CONTROL_REQUEST_METHOD:
151 headers['Access-Control-Request-Method'] = settings.ACCESS_CONTROL_REQUEST_METHOD
152
153 if settings.ACCESS_CONTROL_ALLOW_HEADERS:
154 headers['Access-Control-Allow-Headers'] = settings.ACCESS_CONTROL_ALLOW_HEADERS
155
156 return make_response("", 200, headers)
157
158 @require_permission('view_query')
159 def get(self, query_id=None, query_result_id=None, filetype='json'):
160 """
161 Retrieve query results.
162
163 :param number query_id: The ID of the query whose results should be fetched
164 :param number query_result_id: the ID of the query result to fetch
165 :param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. Defaults to 'json'.
166
167 :<json number id: Query result ID
168 :<json string query: Query that produced this result
169 :<json string query_hash: Hash code for query text
170 :<json object data: Query output
171 :<json number data_source_id: ID of data source that produced this result
172 :<json number runtime: Length of execution time in seconds
173 :<json string retrieved_at: Query retrieval date/time, in ISO format
174 """
175 # TODO:
176 # This method handles two cases: retrieving result by id & retrieving result by query id.
177 # They need to be split, as they have different logic (for example, retrieving by query id
178 # should check for query parameters and shouldn't cache the result).
179 should_cache = query_result_id is not None
180
181 parameter_values = collect_parameters_from_request(request.args)
182 max_age = int(request.args.get('maxAge', 0))
183
184 query_result = None
185
186 if query_result_id:
187 query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
188 elif query_id is not None:
189 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
190
191 if query is not None:
192 if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:
193 query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)
194 elif query.latest_query_data_id is not None:
195 query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)
196
197 if query_result:
198 require_access(query_result.data_source.groups, self.current_user, view_only)
199
200 if isinstance(self.current_user, models.ApiUser):
201 event = {
202 'user_id': None,
203 'org_id': self.current_org.id,
204 'action': 'api_get',
205 'timestamp': int(time.time()),
206 'api_key': self.current_user.name,
207 'file_type': filetype,
208 'user_agent': request.user_agent.string,
209 'ip': request.remote_addr
210 }
211
212 if query_id:
213 event['object_type'] = 'query'
214 event['object_id'] = query_id
215 else:
216 event['object_type'] = 'query_result'
217 event['object_id'] = query_result_id
218
219 record_event.delay(event)
220
221 if filetype == 'json':
222 response = self.make_json_response(query_result)
223 elif filetype == 'xlsx':
224 response = self.make_excel_response(query_result)
225 else:
226 response = self.make_csv_response(query_result)
227
228 if len(settings.ACCESS_CONTROL_ALLOW_ORIGIN) > 0:
229 self.add_cors_headers(response.headers)
230
231 if should_cache:
232 response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)
233
234 return response
235
236 else:
237 abort(404, message='No cached result found for this query.')
238
239 def make_json_response(self, query_result):
240 data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)
241 headers = {'Content-Type': "application/json"}
242 return make_response(data, 200, headers)
243
244 @staticmethod
245 def make_csv_response(query_result):
246 headers = {'Content-Type': "text/csv; charset=UTF-8"}
247 return make_response(query_result.make_csv_content(), 200, headers)
248
249 @staticmethod
250 def make_excel_response(query_result):
251 headers = {'Content-Type': "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"}
252 return make_response(query_result.make_excel_content(), 200, headers)
253
254
255 class JobResource(BaseResource):
256 def get(self, job_id):
257 """
258 Retrieve info about a running query job.
259 """
260 job = QueryTask(job_id=job_id)
261 return {'job': job.to_dict()}
262
263 def delete(self, job_id):
264 """
265 Cancel a query job in progress.
266 """
267 job = QueryTask(job_id=job_id)
268 job.cancel()
269
[end of redash/handlers/query_results.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py
--- a/redash/handlers/query_results.py
+++ b/redash/handlers/query_results.py
@@ -185,14 +185,19 @@
if query_result_id:
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)
- elif query_id is not None:
+
+ if query_id is not None:
query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
- if query is not None:
+ if query_result is None and query is not None:
if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:
query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)
elif query.latest_query_data_id is not None:
query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)
+
+ if query is not None and query_result is not None and self.current_user.is_api_user():
+ if query.query_hash != query_result.query_hash:
+ abort(404, message='No cached result found for this query.')
if query_result:
require_access(query_result.data_source.groups, self.current_user, view_only)
@@ -229,7 +234,7 @@
self.add_cors_headers(response.headers)
if should_cache:
- response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)
+ response.headers.add_header('Cache-Control', 'private,max-age=%d' % ONE_YEAR)
return response
| {"golden_diff": "diff --git a/redash/handlers/query_results.py b/redash/handlers/query_results.py\n--- a/redash/handlers/query_results.py\n+++ b/redash/handlers/query_results.py\n@@ -185,14 +185,19 @@\n \n if query_result_id:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)\n- elif query_id is not None:\n+\n+ if query_id is not None:\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n \n- if query is not None:\n+ if query_result is None and query is not None:\n if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:\n query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)\n elif query.latest_query_data_id is not None:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)\n+ \n+ if query is not None and query_result is not None and self.current_user.is_api_user():\n+ if query.query_hash != query_result.query_hash:\n+ abort(404, message='No cached result found for this query.')\n \n if query_result:\n require_access(query_result.data_source.groups, self.current_user, view_only)\n@@ -229,7 +234,7 @@\n self.add_cors_headers(response.headers)\n \n if should_cache:\n- response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)\n+ response.headers.add_header('Cache-Control', 'private,max-age=%d' % ONE_YEAR)\n \n return response\n", "issue": "The API key for one query may be used to retrieve another query's results\n### Issue Summary\r\n\r\nA query's API key may be used to obtain another query's results via the REST api when including the API key in the query string.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create one query and execute it to obtain results (call it query A)\r\n2. Create another query and execute it to obtain different results (call it query B)\r\n3. Get the query's API key for query A (A_API_KEY) and the query number for query A (A_QUERY_NUMBER)\r\n4. Get the result number from query B's most recent run (B_RESULT_NUMBER)\r\n5. 
Execute the below code and you'll see that the API key for query A can get results from query B\r\n\r\n```bash\r\nREDASH_DOMAIN='yourdomain.com'\r\nA_QUERY_NUMBER='query number for query A'\r\nA_API_KEY_A='api key for query A'\r\nB_RESULT_NUMBER='query result number for query b'\r\n\r\n# this will download query B's results using query A's access key\r\nwget \\\r\n -O query_b_results.csv \\\r\n \"https://$REDASH_DOMAIN/api/queries/$A_QUERY_NUMBER/results/$B_RESULT_NUMBER.csv?api_key=$A_API_KEY\"\r\n```\r\n\r\nThis is a bug because one query's API key should NOT be able to access another query's results.\r\n\r\n### Technical details:\r\n\r\n* Redash Version: 1.0.3\r\n* Browser/OS: (Command Line) / Linux Mint 18.2\r\n* How did you install Redash: Command line\r\n\n", "before_files": [{"content": "import logging\nimport json\nimport time\n\nimport pystache\nfrom flask import make_response, request\nfrom flask_login import current_user\nfrom flask_restful import abort\nfrom redash import models, settings, utils\nfrom redash.tasks import QueryTask, record_event\nfrom redash.permissions import require_permission, not_view_only, has_access, require_access, view_only\nfrom redash.handlers.base import BaseResource, get_object_or_404\nfrom redash.utils import collect_query_parameters, collect_parameters_from_request, gen_query_hash\nfrom redash.tasks.queries import enqueue_query\n\n\ndef error_response(message):\n return {'job': {'status': 4, 'error': message}}, 400\n\n\n#\n# Run a parameterized query synchronously and return the result\n# DISCLAIMER: Temporary solution to support parameters in queries. Should be\n# removed once we refactor the query results API endpoints and handling\n# on the client side. Please don't reuse in other API handlers.\n#\ndef run_query_sync(data_source, parameter_values, query_text, max_age=0):\n query_parameters = set(collect_query_parameters(query_text))\n missing_params = set(query_parameters) - set(parameter_values.keys())\n if missing_params:\n raise Exception('Missing parameter value for: {}'.format(\", \".join(missing_params)))\n\n if query_parameters:\n query_text = pystache.render(query_text, parameter_values)\n\n if max_age <= 0:\n query_result = None\n else:\n query_result = models.QueryResult.get_latest(data_source, query_text, max_age)\n\n query_hash = gen_query_hash(query_text)\n\n if query_result:\n logging.info(\"Returning cached result for query %s\" % query_hash)\n return query_result\n\n try:\n started_at = time.time()\n data, error = data_source.query_runner.run_query(query_text, current_user)\n\n if error:\n logging.info('got bak error')\n logging.info(error)\n return None\n\n run_time = time.time() - started_at\n query_result, updated_query_ids = models.QueryResult.store_result(data_source.org, data_source,\n query_hash, query_text, data,\n run_time, utils.utcnow())\n\n models.db.session.commit()\n return query_result\n except Exception as e:\n if max_age > 0:\n abort(404, message=\"Unable to get result from the database, and no cached query result found.\")\n else:\n abort(503, message=\"Unable to get result from the database.\")\n return None\n\ndef run_query(data_source, parameter_values, query_text, query_id, max_age=0):\n query_parameters = set(collect_query_parameters(query_text))\n missing_params = set(query_parameters) - set(parameter_values.keys())\n if missing_params:\n return error_response('Missing parameter value for: {}'.format(\", \".join(missing_params)))\n\n if data_source.paused:\n if data_source.pause_reason:\n message = '{} is 
paused ({}). Please try later.'.format(data_source.name, data_source.pause_reason)\n else:\n message = '{} is paused. Please try later.'.format(data_source.name)\n\n return error_response(message)\n\n if query_parameters:\n query_text = pystache.render(query_text, parameter_values)\n\n if max_age == 0:\n query_result = None\n else:\n query_result = models.QueryResult.get_latest(data_source, query_text, max_age)\n\n if query_result:\n return {'query_result': query_result.to_dict()}\n else:\n job = enqueue_query(query_text, data_source, current_user.id, metadata={\"Username\": current_user.email, \"Query ID\": query_id})\n return {'job': job.to_dict()}\n\n\nclass QueryResultListResource(BaseResource):\n @require_permission('execute_query')\n def post(self):\n \"\"\"\n Execute a query (or retrieve recent results).\n\n :qparam string query: The query text to execute\n :qparam number query_id: The query object to update with the result (optional)\n :qparam number max_age: If query results less than `max_age` seconds old are available, return them, otherwise execute the query; if omitted, always execute\n :qparam number data_source_id: ID of data source to query\n \"\"\"\n params = request.get_json(force=True)\n parameter_values = collect_parameters_from_request(request.args)\n\n query = params['query']\n max_age = int(params.get('max_age', -1))\n query_id = params.get('query_id', 'adhoc')\n\n data_source = models.DataSource.get_by_id_and_org(params.get('data_source_id'), self.current_org)\n\n if not has_access(data_source.groups, self.current_user, not_view_only):\n return {'job': {'status': 4, 'error': 'You do not have permission to run queries with this data source.'}}, 403\n\n self.record_event({\n 'action': 'execute_query',\n 'timestamp': int(time.time()),\n 'object_id': data_source.id,\n 'object_type': 'data_source',\n 'query': query\n })\n return run_query(data_source, parameter_values, query, query_id, max_age)\n\n\nONE_YEAR = 60 * 60 * 24 * 365.25\n\n\nclass QueryResultResource(BaseResource):\n @staticmethod\n def add_cors_headers(headers):\n if 'Origin' in request.headers:\n origin = request.headers['Origin']\n\n if set(['*', origin]) & settings.ACCESS_CONTROL_ALLOW_ORIGIN:\n headers['Access-Control-Allow-Origin'] = origin\n headers['Access-Control-Allow-Credentials'] = str(settings.ACCESS_CONTROL_ALLOW_CREDENTIALS).lower()\n\n @require_permission('view_query')\n def options(self, query_id=None, query_result_id=None, filetype='json'):\n headers = {}\n self.add_cors_headers(headers)\n\n if settings.ACCESS_CONTROL_REQUEST_METHOD:\n headers['Access-Control-Request-Method'] = settings.ACCESS_CONTROL_REQUEST_METHOD\n\n if settings.ACCESS_CONTROL_ALLOW_HEADERS:\n headers['Access-Control-Allow-Headers'] = settings.ACCESS_CONTROL_ALLOW_HEADERS\n\n return make_response(\"\", 200, headers)\n\n @require_permission('view_query')\n def get(self, query_id=None, query_result_id=None, filetype='json'):\n \"\"\"\n Retrieve query results.\n\n :param number query_id: The ID of the query whose results should be fetched\n :param number query_result_id: the ID of the query result to fetch\n :param string filetype: Format to return. One of 'json', 'xlsx', or 'csv'. 
Defaults to 'json'.\n\n :<json number id: Query result ID\n :<json string query: Query that produced this result\n :<json string query_hash: Hash code for query text\n :<json object data: Query output\n :<json number data_source_id: ID of data source that produced this result\n :<json number runtime: Length of execution time in seconds\n :<json string retrieved_at: Query retrieval date/time, in ISO format\n \"\"\"\n # TODO:\n # This method handles two cases: retrieving result by id & retrieving result by query id.\n # They need to be split, as they have different logic (for example, retrieving by query id\n # should check for query parameters and shouldn't cache the result).\n should_cache = query_result_id is not None\n\n parameter_values = collect_parameters_from_request(request.args)\n max_age = int(request.args.get('maxAge', 0))\n\n query_result = None\n\n if query_result_id:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query_result_id, self.current_org)\n elif query_id is not None:\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n\n if query is not None:\n if settings.ALLOW_PARAMETERS_IN_EMBEDS and parameter_values:\n query_result = run_query_sync(query.data_source, parameter_values, query.to_dict()['query'], max_age=max_age)\n elif query.latest_query_data_id is not None:\n query_result = get_object_or_404(models.QueryResult.get_by_id_and_org, query.latest_query_data_id, self.current_org)\n\n if query_result:\n require_access(query_result.data_source.groups, self.current_user, view_only)\n\n if isinstance(self.current_user, models.ApiUser):\n event = {\n 'user_id': None,\n 'org_id': self.current_org.id,\n 'action': 'api_get',\n 'timestamp': int(time.time()),\n 'api_key': self.current_user.name,\n 'file_type': filetype,\n 'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n }\n\n if query_id:\n event['object_type'] = 'query'\n event['object_id'] = query_id\n else:\n event['object_type'] = 'query_result'\n event['object_id'] = query_result_id\n\n record_event.delay(event)\n\n if filetype == 'json':\n response = self.make_json_response(query_result)\n elif filetype == 'xlsx':\n response = self.make_excel_response(query_result)\n else:\n response = self.make_csv_response(query_result)\n\n if len(settings.ACCESS_CONTROL_ALLOW_ORIGIN) > 0:\n self.add_cors_headers(response.headers)\n\n if should_cache:\n response.headers.add_header('Cache-Control', 'max-age=%d' % ONE_YEAR)\n\n return response\n\n else:\n abort(404, message='No cached result found for this query.')\n\n def make_json_response(self, query_result):\n data = json.dumps({'query_result': query_result.to_dict()}, cls=utils.JSONEncoder)\n headers = {'Content-Type': \"application/json\"}\n return make_response(data, 200, headers)\n\n @staticmethod\n def make_csv_response(query_result):\n headers = {'Content-Type': \"text/csv; charset=UTF-8\"}\n return make_response(query_result.make_csv_content(), 200, headers)\n\n @staticmethod\n def make_excel_response(query_result):\n headers = {'Content-Type': \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"}\n return make_response(query_result.make_excel_content(), 200, headers)\n\n\nclass JobResource(BaseResource):\n def get(self, job_id):\n \"\"\"\n Retrieve info about a running query job.\n \"\"\"\n job = QueryTask(job_id=job_id)\n return {'job': job.to_dict()}\n\n def delete(self, job_id):\n \"\"\"\n Cancel a query job in progress.\n \"\"\"\n job = QueryTask(job_id=job_id)\n 
job.cancel()\n", "path": "redash/handlers/query_results.py"}]} | 3,874 | 391 |
gh_patches_debug_12685 | rasdani/github-patches | git_diff | sopel-irc__sopel-985 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error in url setup procedure - Bot no longer posts titles of links
Hello. I've actually had this error for a while and tried fixing it a while back to no avail, so I'm hoping someone here could help. The bot no longer seems to show titles of links posted in the channels, and when starting the bot I get:
```
Error in url setup procedure: nothing to repeat (../../../../../lib/python2.7/sre_parse.py:517)
```
Looking at that file, that line points to this:
```
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
```
So I'm not sure what to do here.
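For what it's worth, the same error is easy to reproduce whenever a bare regex quantifier reaches `re.compile` on its own. One plausible way that could happen during setup is the `exclude` setting coming back as a single string and being iterated character by character instead of as a list of patterns, roughly:

```python
import re

exclude = 'example.com/*'   # hypothetical value: a single string rather than a list of patterns
try:
    patterns = [re.compile(s) for s in exclude]   # iterates characters, so '*' is compiled alone
except re.error as e:
    print(e)   # -> nothing to repeat
```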
</issue>
<code>
[start of sopel/modules/url.py]
1 # coding=utf-8
2 """URL title module"""
3 # Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham
4 # Copyright 2012-2013 Elsie Powell
5 # Copyright 2013 Lior Ramati ([email protected])
6 # Copyright © 2014 Elad Alfassa <[email protected]>
7 # Licensed under the Eiffel Forum License 2.
8 from __future__ import unicode_literals, absolute_import, print_function, division
9
10 import re
11 from sopel import web, tools
12 from sopel.module import commands, rule, example
13 from sopel.config.types import ValidatedAttribute, StaticSection
14
15
16 url_finder = None
17 # These are used to clean up the title tag before actually parsing it. Not the
18 # world's best way to do this, but it'll do for now.
19 title_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)
20 quoted_title = re.compile('[\'"]<title>[\'"]', re.IGNORECASE)
21 # This is another regex that presumably does something important.
22 re_dcc = re.compile(r'(?i)dcc\ssend')
23 # This sets the maximum number of bytes that should be read in order to find
24 # the title. We don't want it too high, or a link to a big file/stream will
25 # just keep downloading until there's no more memory. 640k ought to be enough
26 # for anybody.
27 max_bytes = 655360
28
29
30 class UrlSection(StaticSection):
31 # TODO some validation rules maybe?
32 exclude = ValidatedAttribute('exclude')
33 exclusion_char = ValidatedAttribute('exclusion_char', default='!')
34
35
36 def configure(config):
37 config.define_section('url', UrlSection)
38 config.url.configure_setting(
39 'exclude',
40 'Enter regular expressions for each URL you would like to exclude.'
41 )
42 config.url.configure_setting(
43 'exclusion_char',
44 'Enter a character which can be prefixed to suppress URL titling'
45 )
46
47
48 def setup(bot=None):
49 global url_finder
50
51 # TODO figure out why this is needed, and get rid of it, because really?
52 if not bot:
53 return
54 bot.config.define_section('url', UrlSection)
55
56 if bot.config.url.exclude:
57 regexes = [re.compile(s) for s in bot.config.url.exclude]
58 else:
59 regexes = []
60
61 # We're keeping these in their own list, rather than putting then in the
62 # callbacks list because 1, it's easier to deal with modules that are still
63 # using this list, and not the newer callbacks list and 2, having a lambda
64 # just to pass is kinda ugly.
65 if not bot.memory.contains('url_exclude'):
66 bot.memory['url_exclude'] = regexes
67 else:
68 exclude = bot.memory['url_exclude']
69 if regexes:
70 exclude.extend(regexes)
71 bot.memory['url_exclude'] = exclude
72
73 # Ensure that url_callbacks and last_seen_url are in memory
74 if not bot.memory.contains('url_callbacks'):
75 bot.memory['url_callbacks'] = tools.SopelMemory()
76 if not bot.memory.contains('last_seen_url'):
77 bot.memory['last_seen_url'] = tools.SopelMemory()
78
79 url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\S+))' %
80 (bot.config.url.exclusion_char))
81
82
83 @commands('title')
84 @example('.title http://google.com', '[ Google ] - google.com')
85 def title_command(bot, trigger):
86 """
87 Show the title or URL information for the given URL, or the last URL seen
88 in this channel.
89 """
90 if not trigger.group(2):
91 if trigger.sender not in bot.memory['last_seen_url']:
92 return
93 matched = check_callbacks(bot, trigger,
94 bot.memory['last_seen_url'][trigger.sender],
95 True)
96 if matched:
97 return
98 else:
99 urls = [bot.memory['last_seen_url'][trigger.sender]]
100 else:
101 urls = re.findall(url_finder, trigger)
102
103 results = process_urls(bot, trigger, urls)
104 for title, domain in results[:4]:
105 bot.reply('[ %s ] - %s' % (title, domain))
106
107
108 @rule('(?u).*(https?://\S+).*')
109 def title_auto(bot, trigger):
110 """
111 Automatically show titles for URLs. For shortened URLs/redirects, find
112 where the URL redirects to and show the title for that (or call a function
113 from another module to give more information).
114 """
115 if re.match(bot.config.core.prefix + 'title', trigger):
116 return
117
118 # Avoid fetching known malicious links
119 if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:
120 if bot.memory['safety_cache'][trigger]['positives'] > 1:
121 return
122
123 urls = re.findall(url_finder, trigger)
124 results = process_urls(bot, trigger, urls)
125 bot.memory['last_seen_url'][trigger.sender] = urls[-1]
126
127 for title, domain in results[:4]:
128 message = '[ %s ] - %s' % (title, domain)
129 # Guard against responding to other instances of this bot.
130 if message != trigger:
131 bot.say(message)
132
133
134 def process_urls(bot, trigger, urls):
135 """
136 For each URL in the list, ensure that it isn't handled by another module.
137 If not, find where it redirects to, if anywhere. If that redirected URL
138 should be handled by another module, dispatch the callback for it.
139 Return a list of (title, hostname) tuples for each URL which is not handled by
140 another module.
141 """
142
143 results = []
144 for url in urls:
145 if not url.startswith(bot.config.url.exclusion_char):
146 # Magic stuff to account for international domain names
147 try:
148 url = web.iri_to_uri(url)
149 except:
150 pass
151 # First, check that the URL we got doesn't match
152 matched = check_callbacks(bot, trigger, url, False)
153 if matched:
154 continue
155 # Then see if it redirects anywhere
156 new_url = follow_redirects(url)
157 if not new_url:
158 continue
159 # Then see if the final URL matches anything
160 matched = check_callbacks(bot, trigger, new_url, new_url != url)
161 if matched:
162 continue
163 # Finally, actually show the URL
164 title = find_title(url)
165 if title:
166 results.append((title, get_hostname(url)))
167 return results
168
169
170 def follow_redirects(url):
171 """
172 Follow HTTP 3xx redirects, and return the actual URL. Return None if
173 there's a problem.
174 """
175 try:
176 connection = web.get_urllib_object(url, 60)
177 url = connection.geturl() or url
178 connection.close()
179 except:
180 return None
181 return url
182
183
184 def check_callbacks(bot, trigger, url, run=True):
185 """
186 Check the given URL against the callbacks list. If it matches, and ``run``
187 is given as ``True``, run the callback function, otherwise pass. Returns
188 ``True`` if the url matched anything in the callbacks list.
189 """
190 # Check if it matches the exclusion list first
191 matched = any(regex.search(url) for regex in bot.memory['url_exclude'])
192 # Then, check if there's anything in the callback list
193 for regex, function in tools.iteritems(bot.memory['url_callbacks']):
194 match = regex.search(url)
195 if match:
196 if run:
197 function(bot, trigger, match)
198 matched = True
199 return matched
200
201
202 def find_title(url):
203 """Return the title for the given URL."""
204 try:
205 content, headers = web.get(url, return_headers=True, limit_bytes=max_bytes)
206 except UnicodeDecodeError:
207 return # Fail silently when data can't be decoded
208
209 # Some cleanup that I don't really grok, but was in the original, so
210 # we'll keep it (with the compiled regexes made global) for now.
211 content = title_tag_data.sub(r'<\1title>', content)
212 content = quoted_title.sub('', content)
213
214 start = content.find('<title>')
215 end = content.find('</title>')
216 if start == -1 or end == -1:
217 return
218 title = web.decode(content[start + 7:end])
219 title = title.strip()[:200]
220
221 title = ' '.join(title.split()) # cleanly remove multiple spaces
222
223 # More cryptic regex substitutions. This one looks to be myano's invention.
224 title = re_dcc.sub('', title)
225
226 return title or None
227
228
229 def get_hostname(url):
230 idx = 7
231 if url.startswith('https://'):
232 idx = 8
233 elif url.startswith('ftp://'):
234 idx = 6
235 hostname = url[idx:]
236 slash = hostname.find('/')
237 if slash != -1:
238 hostname = hostname[:slash]
239 return hostname
240
241 if __name__ == "__main__":
242 from sopel.test_tools import run_example_tests
243 run_example_tests(__file__)
244
[end of sopel/modules/url.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sopel/modules/url.py b/sopel/modules/url.py
--- a/sopel/modules/url.py
+++ b/sopel/modules/url.py
@@ -10,7 +10,7 @@
import re
from sopel import web, tools
from sopel.module import commands, rule, example
-from sopel.config.types import ValidatedAttribute, StaticSection
+from sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection
url_finder = None
@@ -29,7 +29,7 @@
class UrlSection(StaticSection):
# TODO some validation rules maybe?
- exclude = ValidatedAttribute('exclude')
+ exclude = ListAttribute('exclude')
exclusion_char = ValidatedAttribute('exclusion_char', default='!')
| {"golden_diff": "diff --git a/sopel/modules/url.py b/sopel/modules/url.py\n--- a/sopel/modules/url.py\n+++ b/sopel/modules/url.py\n@@ -10,7 +10,7 @@\n import re\n from sopel import web, tools\n from sopel.module import commands, rule, example\n-from sopel.config.types import ValidatedAttribute, StaticSection\n+from sopel.config.types import ValidatedAttribute, ListAttribute, StaticSection\n \n \n url_finder = None\n@@ -29,7 +29,7 @@\n \n class UrlSection(StaticSection):\n # TODO some validation rules maybe?\n- exclude = ValidatedAttribute('exclude')\n+ exclude = ListAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n", "issue": "Error in url setup procedure - Bot no longer posts titles of links\nHello. I've actually had this error for awhile and tried fixing it awhile back to no avail so I'm hoping someone here could help. The bot no longer seems to show titles of links posted in the channels and when starting the bot I get:\n\n```\nError in url setup procedure: nothing to repeat (../../../../../lib/python2.7/sre_parse.py:517)\n```\n\nWhich looking at that file points to this:\n\n```\n if not item or (_len(item) == 1 and item[0][0] == AT):\n raise error, \"nothing to repeat\"\n```\n\nSo I'm not sure what to do here.\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"URL title module\"\"\"\n# Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham\n# Copyright 2012-2013 Elsie Powell\n# Copyright 2013 Lior Ramati ([email protected])\n# Copyright \u00a9 2014 Elad Alfassa <[email protected]>\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\nfrom sopel import web, tools\nfrom sopel.module import commands, rule, example\nfrom sopel.config.types import ValidatedAttribute, StaticSection\n\n\nurl_finder = None\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 
640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ValidatedAttribute('exclude')\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n\n\ndef configure(config):\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n\n\ndef setup(bot=None):\n global url_finder\n\n # TODO figure out why this is needed, and get rid of it, because really?\n if not bot:\n return\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if not bot.memory.contains('url_exclude'):\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure that url_callbacks and last_seen_url are in memory\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n if not bot.memory.contains('last_seen_url'):\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n url_finder = re.compile(r'(?u)(%s?(?:http|https|ftp)(?:://\\S+))' %\n (bot.config.url.exclusion_char))\n\n\n@commands('title')\n@example('.title http://google.com', '[ Google ] - google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(bot, trigger,\n bot.memory['last_seen_url'][trigger.sender],\n True)\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = re.findall(url_finder, trigger)\n\n results = process_urls(bot, trigger, urls)\n for title, domain in results[:4]:\n bot.reply('[ %s ] - %s' % (title, domain))\n\n\n@rule('(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = re.findall(url_finder, trigger)\n results = process_urls(bot, trigger, urls)\n bot.memory['last_seen_url'][trigger.sender] = urls[-1]\n\n for title, domain in results[:4]:\n message = '[ %s ] - %s' % (title, domain)\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. 
If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled by\n another module.\n \"\"\"\n\n results = []\n for url in urls:\n if not url.startswith(bot.config.url.exclusion_char):\n # Magic stuff to account for international domain names\n try:\n url = web.iri_to_uri(url)\n except:\n pass\n # First, check that the URL we got doesn't match\n matched = check_callbacks(bot, trigger, url, False)\n if matched:\n continue\n # Then see if it redirects anywhere\n new_url = follow_redirects(url)\n if not new_url:\n continue\n # Then see if the final URL matches anything\n matched = check_callbacks(bot, trigger, new_url, new_url != url)\n if matched:\n continue\n # Finally, actually show the URL\n title = find_title(url)\n if title:\n results.append((title, get_hostname(url)))\n return results\n\n\ndef follow_redirects(url):\n \"\"\"\n Follow HTTP 3xx redirects, and return the actual URL. Return None if\n there's a problem.\n \"\"\"\n try:\n connection = web.get_urllib_object(url, 60)\n url = connection.geturl() or url\n connection.close()\n except:\n return None\n return url\n\n\ndef check_callbacks(bot, trigger, url, run=True):\n \"\"\"\n Check the given URL against the callbacks list. If it matches, and ``run``\n is given as ``True``, run the callback function, otherwise pass. Returns\n ``True`` if the url matched anything in the callbacks list.\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n # Then, check if there's anything in the callback list\n for regex, function in tools.iteritems(bot.memory['url_callbacks']):\n match = regex.search(url)\n if match:\n if run:\n function(bot, trigger, match)\n matched = True\n return matched\n\n\ndef find_title(url):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n content, headers = web.get(url, return_headers=True, limit_bytes=max_bytes)\n except UnicodeDecodeError:\n return # Fail silently when data can't be decoded\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.find('<title>')\n end = content.find('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}]} | 3,309 | 168 |
gh_patches_debug_7367 | rasdani/github-patches | git_diff | holoviz__holoviews-5450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Empty Box-Whisker plot with the Matplotlib backend when vdims has NaNs
The following snippet correctly returns a Box-Whisker plot grouped by *carrier* with the Bokeh backend but just returns an empty plot with the matplotlib backend.
```python
import holoviews as hv
from hvplot.sample_data import us_crime, airline_flights
flights = airline_flights.to_dask().persist()
flight_subset = flights[flights.carrier.isin(['AA', 'US', 'OH'])]
ds = hv.Dataset(flight_subset, kdims=['carrier'], vdims=['depdelay'])
hv.BoxWhisker(ds)
```

Note that this has nothing to do with Dask, since adding `flight_subset = flight_subset.compute()` has no effect.
I've done some exploration and found out that the `depdelay` column has `NaN` values; replacing them with `0` (or any float) fixes the issue.
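A workaround sketch along those lines, assuming the same `flight_subset` as above, is to drop the rows whose `depdelay` is missing before building the element:

```python
# Workaround sketch: hand matplotlib only finite values to draw.
clean = flight_subset.dropna(subset=['depdelay'])
hv.BoxWhisker(hv.Dataset(clean, kdims=['carrier'], vdims=['depdelay']))
```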
</issue>
<code>
[start of holoviews/plotting/mpl/stats.py]
1 import param
2 import numpy as np
3
4 from ...core.ndmapping import sorted_context
5 from .chart import AreaPlot, ChartPlot
6 from .path import PolygonPlot
7 from .plot import AdjoinedPlot
8
9
10 class DistributionPlot(AreaPlot):
11 """
12 DistributionPlot visualizes a distribution of values as a KDE.
13 """
14
15 bandwidth = param.Number(default=None, doc="""
16 The bandwidth of the kernel for the density estimate.""")
17
18 cut = param.Number(default=3, doc="""
19 Draw the estimate to cut * bw from the extreme data points.""")
20
21 filled = param.Boolean(default=True, doc="""
22 Whether the bivariate contours should be filled.""")
23
24
25 class BivariatePlot(PolygonPlot):
26 """
27 Bivariate plot visualizes two-dimensional kernel density
28 estimates. Additionally, by enabling the joint option, the
29 marginals distributions can be plotted alongside each axis (does
30 not animate or compose).
31 """
32
33 bandwidth = param.Number(default=None, doc="""
34 The bandwidth of the kernel for the density estimate.""")
35
36 cut = param.Number(default=3, doc="""
37 Draw the estimate to cut * bw from the extreme data points.""")
38
39 filled = param.Boolean(default=False, doc="""
40 Whether the bivariate contours should be filled.""")
41
42 levels = param.ClassSelector(default=10, class_=(list, int), doc="""
43 A list of scalar values used to specify the contour levels.""")
44
45
46 class BoxPlot(ChartPlot):
47 """
48 BoxPlot plots the ErrorBar Element type and supporting
49 both horizontal and vertical error bars via the 'horizontal'
50 plot option.
51 """
52
53 style_opts = ['notch', 'sym', 'whis', 'bootstrap',
54 'conf_intervals', 'widths', 'showmeans',
55 'show_caps', 'showfliers', 'boxprops',
56 'whiskerprops', 'capprops', 'flierprops',
57 'medianprops', 'meanprops', 'meanline']
58
59 _nonvectorized_styles = style_opts
60
61 _plot_methods = dict(single='boxplot')
62
63 def get_extents(self, element, ranges, range_type='combined'):
64 return super().get_extents(
65 element, ranges, range_type, 'categorical', element.vdims[0]
66 )
67
68 def get_data(self, element, ranges, style):
69 if element.kdims:
70 with sorted_context(False):
71 groups = element.groupby(element.kdims).data.items()
72 else:
73 groups = [(element.label, element)]
74
75 data, labels = [], []
76 for key, group in groups:
77 if element.kdims:
78 label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
79 else:
80 label = key
81 data.append(group[group.vdims[0]])
82 labels.append(label)
83 style['labels'] = labels
84 style = {k: v for k, v in style.items()
85 if k not in ['zorder', 'label']}
86 style['vert'] = not self.invert_axes
87 format_kdims = [kd.clone(value_format=None) for kd in element.kdims]
88 return (data,), style, {'dimensions': [format_kdims, element.vdims[0]]}
89
90 def init_artists(self, ax, plot_args, plot_kwargs):
91 artists = ax.boxplot(*plot_args, **plot_kwargs)
92 artists['artist'] = artists['boxes']
93 return artists
94
95 def teardown_handles(self):
96 for g in ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means'):
97 for v in self.handles.get(g, []):
98 v.remove()
99
100
101 class SideBoxPlot(AdjoinedPlot, BoxPlot):
102
103 bgcolor = param.Parameter(default=(1, 1, 1, 0), doc="""
104 Make plot background invisible.""")
105
106 border_size = param.Number(default=0, doc="""
107 The size of the border expressed as a fraction of the main plot.""")
108
109 xaxis = param.ObjectSelector(default='bare',
110 objects=['top', 'bottom', 'bare', 'top-bare',
111 'bottom-bare', None], doc="""
112 Whether and where to display the xaxis, bare options allow suppressing
113 all axis labels including ticks and xlabel. Valid options are 'top',
114 'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
115
116 yaxis = param.ObjectSelector(default='bare',
117 objects=['left', 'right', 'bare', 'left-bare',
118 'right-bare', None], doc="""
119 Whether and where to display the yaxis, bare options allow suppressing
120 all axis labels including ticks and ylabel. Valid options are 'left',
121 'right', 'bare' 'left-bare' and 'right-bare'.""")
122
123 def __init__(self, *args, **kwargs):
124 super().__init__(*args, **kwargs)
125 if self.adjoined:
126 self.invert_axes = not self.invert_axes
127
128
129 class ViolinPlot(BoxPlot):
130 """
131 BoxPlot plots the ErrorBar Element type and supporting
132 both horizontal and vertical error bars via the 'horizontal'
133 plot option.
134 """
135
136 bandwidth = param.Number(default=None, doc="""
137 Allows supplying explicit bandwidth value rather than relying
138 on scott or silverman method.""")
139
140 inner = param.ObjectSelector(objects=['box', 'medians', None],
141 default='box', doc="""
142 Inner visual indicator for distribution values:
143
144 * box - A small box plot
145 * stick - Lines indicating each sample value
146 * quartiles - Indicates first, second and third quartiles
147 """)
148
149 _plot_methods = dict(single='violinplot')
150
151 style_opts = ['showmeans', 'facecolors', 'showextrema', 'bw_method',
152 'widths', 'stats_color', 'box_color', 'alpha', 'edgecolors']
153
154 _nonvectorized_styles = [
155 s for s in style_opts if s not in ('facecolors', 'edgecolors', 'widths')
156 ]
157
158 def init_artists(self, ax, plot_args, plot_kwargs):
159 box_color = plot_kwargs.pop('box_color', 'black')
160 stats_color = plot_kwargs.pop('stats_color', 'black')
161 facecolors = plot_kwargs.pop('facecolors', [])
162 edgecolors = plot_kwargs.pop('edgecolors', 'black')
163 labels = plot_kwargs.pop('labels')
164 alpha = plot_kwargs.pop('alpha', 1.)
165 showmedians = self.inner == 'medians'
166 bw_method = self.bandwidth or 'scott'
167 artists = ax.violinplot(*plot_args, bw_method=bw_method,
168 showmedians=showmedians, **plot_kwargs)
169 if self.inner == 'box':
170 box = ax.boxplot(*plot_args, positions=plot_kwargs['positions'],
171 showfliers=False, showcaps=False, patch_artist=True,
172 boxprops={'facecolor': box_color},
173 medianprops={'color': 'white'}, widths=0.1,
174 labels=labels)
175 artists.update(box)
176 for body, color in zip(artists['bodies'], facecolors):
177 body.set_facecolors(color)
178 body.set_edgecolors(edgecolors)
179 body.set_alpha(alpha)
180 for stat in ['cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars']:
181 if stat in artists:
182 artists[stat].set_edgecolors(stats_color)
183 artists['bodies'] = artists['bodies']
184 return artists
185
186 def get_data(self, element, ranges, style):
187 if element.kdims:
188 with sorted_context(False):
189 groups = element.groupby(element.kdims).data.items()
190 else:
191 groups = [(element.label, element)]
192
193 data, labels, colors = [], [], []
194 elstyle = self.lookup_options(element, 'style')
195 for i, (key, group) in enumerate(groups):
196 if element.kdims:
197 label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
198 else:
199 label = key
200 d = group[group.vdims[0]]
201 data.append(d[np.isfinite(d)])
202 labels.append(label)
203 colors.append(elstyle[i].get('facecolors', 'blue'))
204 style['positions'] = list(range(len(data)))
205 style['labels'] = labels
206 style['facecolors'] = colors
207
208 if element.ndims > 0:
209 element = element.aggregate(function=np.mean)
210 else:
211 element = element.clone([(element.aggregate(function=np.mean),)])
212
213 new_style = self._apply_transforms(element, ranges, style)
214 style = {k: v for k, v in new_style.items()
215 if k not in ['zorder', 'label']}
216 style['vert'] = not self.invert_axes
217 format_kdims = [kd.clone(value_format=None) for kd in element.kdims]
218 ticks = {'yticks' if self.invert_axes else 'xticks': list(enumerate(labels))}
219 return (data,), style, dict(dimensions=[format_kdims, element.vdims[0]], **ticks)
220
221 def teardown_handles(self):
222 box_artists = ('cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars', 'bodies')
223 violin_artists = ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means')
224 for group in box_artists+violin_artists:
225 for v in self.handles.get(group, []):
226 v.remove()
227
[end of holoviews/plotting/mpl/stats.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holoviews/plotting/mpl/stats.py b/holoviews/plotting/mpl/stats.py
--- a/holoviews/plotting/mpl/stats.py
+++ b/holoviews/plotting/mpl/stats.py
@@ -78,7 +78,8 @@
label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])
else:
label = key
- data.append(group[group.vdims[0]])
+ d = group[group.vdims[0]]
+ data.append(d[np.isfinite(d)])
labels.append(label)
style['labels'] = labels
style = {k: v for k, v in style.items()
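
A side note on the fix above (illustration only, not from the repository): the added `np.isfinite` mask is what keeps NaN/inf values out of the data handed to matplotlib's `boxplot`, which is the reported cause of the empty Box-Whisker plot. A minimal sketch of that filtering step:

```python
import numpy as np

# hypothetical vdim column containing missing values, mirroring the issue
d = np.array([3.0, np.nan, 7.5, np.inf, 1.2])

finite = d[np.isfinite(d)]  # keep only finite entries -> array([3. , 7.5, 1.2])
```
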
| {"golden_diff": "diff --git a/holoviews/plotting/mpl/stats.py b/holoviews/plotting/mpl/stats.py\n--- a/holoviews/plotting/mpl/stats.py\n+++ b/holoviews/plotting/mpl/stats.py\n@@ -78,7 +78,8 @@\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n- data.append(group[group.vdims[0]])\n+ d = group[group.vdims[0]]\n+ data.append(d[np.isfinite(d)])\n labels.append(label)\n style['labels'] = labels\n style = {k: v for k, v in style.items()\n", "issue": "Empty Box-Whisker plot with the Matplotlib backend when vdims has NaNs\nThe following snippet correctly returns a Box-Whisker plot grouped by *carrier* with the Bokeh backend but just returns an empty plot with the matplotlib backend.\r\n\r\n```python\r\nimport holoviews as hv\r\nfrom hvplot.sample_data import us_crime, airline_flights\r\n\r\nflights = airline_flights.to_dask().persist()\r\nflight_subset = flights[flights.carrier.isin(['AA', 'US', 'OH'])]\r\n\r\nds = hv.Dataset(flight_subset, kdims=['carrier'], vdims=['depdelay'])\r\nhv.BoxWhisker(ds)\r\n```\r\n\r\n\r\nNote that this has nothing to do with Dask, since adding `flight_subset = flight_subset.compute()` has no effect.\r\n\r\nI've done some exploration and found out that the `depdelay` column has `NaN`s values, replacing them with `0` (or any float) fixes the issue. \n", "before_files": [{"content": "import param\nimport numpy as np\n\nfrom ...core.ndmapping import sorted_context\nfrom .chart import AreaPlot, ChartPlot\nfrom .path import PolygonPlot\nfrom .plot import AdjoinedPlot\n\n\nclass DistributionPlot(AreaPlot):\n \"\"\"\n DistributionPlot visualizes a distribution of values as a KDE.\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=True, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n\nclass BivariatePlot(PolygonPlot):\n \"\"\"\n Bivariate plot visualizes two-dimensional kernel density\n estimates. 
Additionally, by enabling the joint option, the\n marginals distributions can be plotted alongside each axis (does\n not animate or compose).\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n The bandwidth of the kernel for the density estimate.\"\"\")\n\n cut = param.Number(default=3, doc=\"\"\"\n Draw the estimate to cut * bw from the extreme data points.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether the bivariate contours should be filled.\"\"\")\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n\nclass BoxPlot(ChartPlot):\n \"\"\"\n BoxPlot plots the ErrorBar Element type and supporting\n both horizontal and vertical error bars via the 'horizontal'\n plot option.\n \"\"\"\n\n style_opts = ['notch', 'sym', 'whis', 'bootstrap',\n 'conf_intervals', 'widths', 'showmeans',\n 'show_caps', 'showfliers', 'boxprops',\n 'whiskerprops', 'capprops', 'flierprops',\n 'medianprops', 'meanprops', 'meanline']\n\n _nonvectorized_styles = style_opts\n\n _plot_methods = dict(single='boxplot')\n\n def get_extents(self, element, ranges, range_type='combined'):\n return super().get_extents(\n element, ranges, range_type, 'categorical', element.vdims[0]\n )\n\n def get_data(self, element, ranges, style):\n if element.kdims:\n with sorted_context(False):\n groups = element.groupby(element.kdims).data.items()\n else:\n groups = [(element.label, element)]\n\n data, labels = [], []\n for key, group in groups:\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n data.append(group[group.vdims[0]])\n labels.append(label)\n style['labels'] = labels\n style = {k: v for k, v in style.items()\n if k not in ['zorder', 'label']}\n style['vert'] = not self.invert_axes\n format_kdims = [kd.clone(value_format=None) for kd in element.kdims]\n return (data,), style, {'dimensions': [format_kdims, element.vdims[0]]}\n\n def init_artists(self, ax, plot_args, plot_kwargs):\n artists = ax.boxplot(*plot_args, **plot_kwargs)\n artists['artist'] = artists['boxes']\n return artists\n\n def teardown_handles(self):\n for g in ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means'):\n for v in self.handles.get(g, []):\n v.remove()\n\n\nclass SideBoxPlot(AdjoinedPlot, BoxPlot):\n\n bgcolor = param.Parameter(default=(1, 1, 1, 0), doc=\"\"\"\n Make plot background invisible.\"\"\")\n\n border_size = param.Number(default=0, doc=\"\"\"\n The size of the border expressed as a fraction of the main plot.\"\"\")\n\n xaxis = param.ObjectSelector(default='bare',\n objects=['top', 'bottom', 'bare', 'top-bare',\n 'bottom-bare', None], doc=\"\"\"\n Whether and where to display the xaxis, bare options allow suppressing\n all axis labels including ticks and xlabel. Valid options are 'top',\n 'bottom', 'bare', 'top-bare' and 'bottom-bare'.\"\"\")\n\n yaxis = param.ObjectSelector(default='bare',\n objects=['left', 'right', 'bare', 'left-bare',\n 'right-bare', None], doc=\"\"\"\n Whether and where to display the yaxis, bare options allow suppressing\n all axis labels including ticks and ylabel. 
Valid options are 'left',\n 'right', 'bare' 'left-bare' and 'right-bare'.\"\"\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.adjoined:\n self.invert_axes = not self.invert_axes\n\n\nclass ViolinPlot(BoxPlot):\n \"\"\"\n BoxPlot plots the ErrorBar Element type and supporting\n both horizontal and vertical error bars via the 'horizontal'\n plot option.\n \"\"\"\n\n bandwidth = param.Number(default=None, doc=\"\"\"\n Allows supplying explicit bandwidth value rather than relying\n on scott or silverman method.\"\"\")\n\n inner = param.ObjectSelector(objects=['box', 'medians', None],\n default='box', doc=\"\"\"\n Inner visual indicator for distribution values:\n\n * box - A small box plot\n * stick - Lines indicating each sample value\n * quartiles - Indicates first, second and third quartiles\n \"\"\")\n\n _plot_methods = dict(single='violinplot')\n\n style_opts = ['showmeans', 'facecolors', 'showextrema', 'bw_method',\n 'widths', 'stats_color', 'box_color', 'alpha', 'edgecolors']\n\n _nonvectorized_styles = [\n s for s in style_opts if s not in ('facecolors', 'edgecolors', 'widths')\n ]\n\n def init_artists(self, ax, plot_args, plot_kwargs):\n box_color = plot_kwargs.pop('box_color', 'black')\n stats_color = plot_kwargs.pop('stats_color', 'black')\n facecolors = plot_kwargs.pop('facecolors', [])\n edgecolors = plot_kwargs.pop('edgecolors', 'black')\n labels = plot_kwargs.pop('labels')\n alpha = plot_kwargs.pop('alpha', 1.)\n showmedians = self.inner == 'medians'\n bw_method = self.bandwidth or 'scott'\n artists = ax.violinplot(*plot_args, bw_method=bw_method,\n showmedians=showmedians, **plot_kwargs)\n if self.inner == 'box':\n box = ax.boxplot(*plot_args, positions=plot_kwargs['positions'],\n showfliers=False, showcaps=False, patch_artist=True,\n boxprops={'facecolor': box_color},\n medianprops={'color': 'white'}, widths=0.1,\n labels=labels)\n artists.update(box)\n for body, color in zip(artists['bodies'], facecolors):\n body.set_facecolors(color)\n body.set_edgecolors(edgecolors)\n body.set_alpha(alpha)\n for stat in ['cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars']:\n if stat in artists:\n artists[stat].set_edgecolors(stats_color)\n artists['bodies'] = artists['bodies']\n return artists\n\n def get_data(self, element, ranges, style):\n if element.kdims:\n with sorted_context(False):\n groups = element.groupby(element.kdims).data.items()\n else:\n groups = [(element.label, element)]\n\n data, labels, colors = [], [], []\n elstyle = self.lookup_options(element, 'style')\n for i, (key, group) in enumerate(groups):\n if element.kdims:\n label = ','.join([d.pprint_value(v) for d, v in zip(element.kdims, key)])\n else:\n label = key\n d = group[group.vdims[0]]\n data.append(d[np.isfinite(d)])\n labels.append(label)\n colors.append(elstyle[i].get('facecolors', 'blue'))\n style['positions'] = list(range(len(data)))\n style['labels'] = labels\n style['facecolors'] = colors\n\n if element.ndims > 0:\n element = element.aggregate(function=np.mean)\n else:\n element = element.clone([(element.aggregate(function=np.mean),)])\n\n new_style = self._apply_transforms(element, ranges, style)\n style = {k: v for k, v in new_style.items()\n if k not in ['zorder', 'label']}\n style['vert'] = not self.invert_axes\n format_kdims = [kd.clone(value_format=None) for kd in element.kdims]\n ticks = {'yticks' if self.invert_axes else 'xticks': list(enumerate(labels))}\n return (data,), style, dict(dimensions=[format_kdims, element.vdims[0]], **ticks)\n\n def 
teardown_handles(self):\n box_artists = ('cmedians', 'cmeans', 'cmaxes', 'cmins', 'cbars', 'bodies')\n violin_artists = ('whiskers', 'fliers', 'medians', 'boxes', 'caps', 'means')\n for group in box_artists+violin_artists:\n for v in self.handles.get(group, []):\n v.remove()\n", "path": "holoviews/plotting/mpl/stats.py"}]} | 3,457 | 164 |
gh_patches_debug_1285 | rasdani/github-patches | git_diff | openai__gym-2633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug Report] Empty print version warning
**Describe the bug**
When I import gym, there's an empty line printed.
It's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30
Either it's a bug, because `notice` shouldn't be an empty string, or the check should be `if notice:`, which is false for both `None` and `""` (the empty string).
At best this is cluttering the logs; at worst it is masking some other issue.
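
For illustration (this snippet is not from the gym codebase, it just demonstrates the truthiness point):

```python
notice = ""             # apparently what the notices lookup can return here

if notice is not None:  # current check: passes for "", so a blank line is printed
    print(notice)

if notice:              # suggested check: skipped for both None and ""
    print(notice)
```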
**Code example**
```python
import gym
```
**System Info**
Describe the characteristic of your environment:
Latest gym installed from pip, Ubuntu 20.04, Python 3.9.7
### Checklist
- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
</issue>
<code>
[start of gym/__init__.py]
1 from gym import error
2 from gym.version import VERSION as __version__
3
4 from gym.core import (
5 Env,
6 Wrapper,
7 ObservationWrapper,
8 ActionWrapper,
9 RewardWrapper,
10 )
11 from gym.spaces import Space
12 from gym.envs import make, spec, register
13 from gym import logger
14 from gym import vector
15 from gym import wrappers
16 import os
17
18
19 __all__ = ["Env", "Space", "Wrapper", "make", "spec", "register"]
20
21 os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
22
23 try:
24 import gym_notices.notices as notices
25 import sys
26
27 # print version warning if necessary
28 notice = notices.notices.get(__version__)
29 if notice is not None:
30 print(notice, file=sys.stderr)
31
32 except Exception: # nosec
33 pass
34
[end of gym/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/__init__.py b/gym/__init__.py
--- a/gym/__init__.py
+++ b/gym/__init__.py
@@ -26,7 +26,7 @@
# print version warning if necessary
notice = notices.notices.get(__version__)
- if notice is not None:
+ if notice:
print(notice, file=sys.stderr)
except Exception: # nosec
| {"golden_diff": "diff --git a/gym/__init__.py b/gym/__init__.py\n--- a/gym/__init__.py\n+++ b/gym/__init__.py\n@@ -26,7 +26,7 @@\n \n # print version warning if necessary\n notice = notices.notices.get(__version__)\n- if notice is not None:\n+ if notice:\n print(notice, file=sys.stderr)\n \n except Exception: # nosec\n", "issue": "[Bug Report] Empty print version warning \n**Describe the bug**\r\nWhen I import gym, there's an empty line printed.\r\nIt's because of this line: https://github.com/openai/gym/blob/master/gym/__init__.py#L30\r\n\r\nEither it's a bug, because `notice` shouldn't be an empty string, or the check should be `if notice:` which is false for both `None` and `\"\"` (empty string).\r\n\r\nCurrently it's cluttering the logs at best, or masking some other issue.\r\n\r\n**Code example**\r\n```python\r\nimport gym\r\n```\r\n\r\n**System Info**\r\nDescribe the characteristic of your environment:\r\nLatest gym installed from pip, Ubuntu 20.04, Python 3.9.7\r\n\r\n\r\n### Checklist\r\n\r\n- [x] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "from gym import error\nfrom gym.version import VERSION as __version__\n\nfrom gym.core import (\n Env,\n Wrapper,\n ObservationWrapper,\n ActionWrapper,\n RewardWrapper,\n)\nfrom gym.spaces import Space\nfrom gym.envs import make, spec, register\nfrom gym import logger\nfrom gym import vector\nfrom gym import wrappers\nimport os\n\n\n__all__ = [\"Env\", \"Space\", \"Wrapper\", \"make\", \"spec\", \"register\"]\n\nos.environ[\"PYGAME_HIDE_SUPPORT_PROMPT\"] = \"hide\"\n\ntry:\n import gym_notices.notices as notices\n import sys\n\n # print version warning if necessary\n notice = notices.notices.get(__version__)\n if notice is not None:\n print(notice, file=sys.stderr)\n\nexcept Exception: # nosec\n pass\n", "path": "gym/__init__.py"}]} | 966 | 98 |
gh_patches_debug_14124 | rasdani/github-patches | git_diff | pypa__pipenv-5628 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pipenv check looks at installed packages, not Pipfile.lock
## Use Case
I would like to run `pipenv check` as a separate job from the build/test job inside a CI pipeline without rebuilding environment. I discovered that I must actually install all packages to a `pipenv` environment before using `pipenv check`. Ideally, I should be able to scan the dependencies inside `Pipfile.lock` without actually installing the whole environment.
I believe it's misleading that right now `pipenv` is just acting as a "proxy" to `safety`, and by default checks an environment that may not match `Pipfile.lock`. By using `pipenv check`, the assumption should be that it is checking the environment specified in `Pipfile.lock`; if you need to check an environment that deviates from it, you use `safety` directly.
I've traced the behavior down to these lines:
https://github.com/pypa/pipenv/blob/8939c863464b23b5503569669d1c3f9ad31a498f/pipenv/core.py#L2900-L2902
Instead of generating the temp `requirements.txt` file from the current environment using `pip list`, can we generate the temp `requirements.txt` from `Pipfile.lock` instead? Something like
```python
# this command should also respect the wishes of the --dev argument, if provided. Unsure on specifics of implementation
target_venv_packages = run_command(
_cmd + ["-m", "pipenv", "requirements"], is_verbose=project.s.is_verbose()
)
```
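
One possible way to honour that `--dev` wish — hedged sketch only: the `dev` flag is an assumed parameter (it is not part of the current `do_check` signature), and `_cmd`, `run_command` and `project` are the same names used in the snippet above:

```python
# hypothetical sketch: export the lockfile as requirements, optionally including dev packages
requirements_cmd = _cmd + ["-m", "pipenv", "requirements"]
if dev:  # `dev` is an assumption; it would need to be plumbed through do_check's arguments
    requirements_cmd.append("--dev")
target_venv_packages = run_command(requirements_cmd, is_verbose=project.s.is_verbose())
```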
## Workaround
I'm currently using the following workaround in my CI job, but would like to go through `pipenv` directly.
```bash
pipenv requirements --dev | safety check --stdin
```
</issue>
<code>
[start of pipenv/routines/check.py]
1 import io
2 import json as simplejson
3 import os
4 import sys
5 import tempfile
6 from pathlib import Path
7
8 from pipenv import exceptions, pep508checker
9 from pipenv.utils.processes import run_command
10 from pipenv.utils.project import ensure_project
11 from pipenv.utils.shell import cmd_list_to_shell, project_python
12 from pipenv.vendor import click, plette
13
14
15 def do_check(
16 project,
17 python=False,
18 system=False,
19 db=None,
20 ignore=None,
21 output="screen",
22 key=None,
23 quiet=False,
24 exit_code=True,
25 policy_file="",
26 save_json="",
27 audit_and_monitor=True,
28 safety_project=None,
29 pypi_mirror=None,
30 use_installed=False,
31 categories="",
32 ):
33 import json
34
35 if not system:
36 # Ensure that virtualenv is available.
37 ensure_project(
38 project,
39 python=python,
40 validate=False,
41 warn=False,
42 pypi_mirror=pypi_mirror,
43 )
44 if not quiet and not project.s.is_quiet():
45 click.secho("Checking PEP 508 requirements...", bold=True)
46 pep508checker_path = pep508checker.__file__.rstrip("cdo")
47 safety_path = os.path.join(
48 os.path.dirname(os.path.abspath(__file__)), "patched", "safety"
49 )
50 _cmd = [project_python(project, system=system)]
51 # Run the PEP 508 checker in the virtualenv.
52 cmd = _cmd + [Path(pep508checker_path).as_posix()]
53 c = run_command(cmd, is_verbose=project.s.is_verbose())
54 results = []
55 if c.returncode is not None:
56 try:
57 results = simplejson.loads(c.stdout.strip())
58 except json.JSONDecodeError:
59 click.echo(
60 "{}\n{}\n{}".format(
61 click.style(
62 "Failed parsing pep508 results: ",
63 fg="white",
64 bold=True,
65 ),
66 c.stdout.strip(),
67 c.stderr.strip(),
68 )
69 )
70 sys.exit(1)
71 # Load the pipfile.
72 p = plette.Pipfile.load(open(project.pipfile_location))
73 p = plette.Lockfile.with_meta_from(p)
74 failed = False
75 # Assert each specified requirement.
76 for marker, specifier in p._data["_meta"]["requires"].items():
77 if marker in results:
78 try:
79 assert results[marker] == specifier
80 except AssertionError:
81 failed = True
82 click.echo(
83 "Specifier {} does not match {} ({})."
84 "".format(
85 click.style(marker, fg="green"),
86 click.style(specifier, fg="cyan"),
87 click.style(results[marker], fg="yellow"),
88 ),
89 err=True,
90 )
91 if failed:
92 click.secho("Failed!", fg="red", err=True)
93 sys.exit(1)
94 else:
95 if not quiet and not project.s.is_quiet():
96 click.secho("Passed!", fg="green")
97 if not quiet and not project.s.is_quiet():
98 click.secho(
99 "Checking installed packages for vulnerabilities...",
100 bold=True,
101 )
102 if ignore:
103 if not isinstance(ignore, (tuple, list)):
104 ignore = [ignore]
105 ignored = [["--ignore", cve] for cve in ignore]
106 if not quiet and not project.s.is_quiet():
107 click.echo(
108 "Notice: Ignoring Vulnerabilit{} {}".format(
109 "ies" if len(ignored) > 1 else "y",
110 click.style(", ".join(ignore), fg="yellow"),
111 ),
112 err=True,
113 )
114 else:
115 ignored = []
116
117 options = [
118 "--audit-and-monitor" if audit_and_monitor else "--disable-audit-and-monitor",
119 "--exit-code" if exit_code else "--continue-on-error",
120 ]
121
122 if output == "full-report":
123 options.append("--full-report")
124 elif output == "minimal":
125 options.append("--json")
126 elif output not in ["screen", "default"]:
127 options.append(f"--output={output}")
128
129 if save_json:
130 options.append(f"--save-json={save_json}")
131
132 if policy_file:
133 options.append(f"--policy-file={policy_file}")
134
135 if safety_project:
136 options.append(f"--project={safety_project}")
137
138 if use_installed:
139 target_venv_packages = run_command(
140 _cmd + ["-m", "pip", "list", "--format=freeze"],
141 is_verbose=project.s.is_verbose(),
142 )
143 elif categories:
144 target_venv_packages = run_command(
145 ["pipenv", "requirements", "--categories", categories],
146 is_verbose=project.s.is_verbose(),
147 )
148 else:
149 target_venv_packages = run_command(
150 ["pipenv", "requirements"], is_verbose=project.s.is_verbose()
151 )
152
153 temp_requirements = tempfile.NamedTemporaryFile(
154 mode="w+",
155 prefix=f"{project.virtualenv_name}",
156 suffix="_requirements.txt",
157 delete=False,
158 )
159 temp_requirements.write(target_venv_packages.stdout.strip())
160 temp_requirements.close()
161
162 options.extend(["--file", temp_requirements.name])
163
164 cmd = _cmd + [safety_path, "check"] + options
165
166 if db:
167 if not quiet and not project.s.is_quiet():
168 click.echo(f"Using {db} database")
169 cmd.append(f"--db={db}")
170 elif key or project.s.PIPENV_PYUP_API_KEY:
171 cmd = cmd + [f"--key={key or project.s.PIPENV_PYUP_API_KEY}"]
172 else:
173 PIPENV_SAFETY_DB = (
174 "https://d2qjmgddvqvu75.cloudfront.net/aws/safety/pipenv/1.0.0/"
175 )
176 os.environ["SAFETY_ANNOUNCEMENTS_URL"] = f"{PIPENV_SAFETY_DB}announcements.json"
177 cmd.append(f"--db={PIPENV_SAFETY_DB}")
178
179 if ignored:
180 for cve in ignored:
181 cmd += cve
182
183 os.environ["SAFETY_CUSTOM_INTEGRATION"] = "True"
184 os.environ["SAFETY_SOURCE"] = "pipenv"
185 os.environ["SAFETY_PURE_YAML"] = "True"
186
187 from pipenv.patched.safety.cli import cli
188
189 sys.argv = cmd[1:]
190
191 if output == "minimal":
192 from contextlib import redirect_stderr, redirect_stdout
193
194 code = 0
195
196 with redirect_stdout(io.StringIO()) as out, redirect_stderr(io.StringIO()) as err:
197 try:
198 cli(prog_name="pipenv")
199 except SystemExit as exit_signal:
200 code = exit_signal.code
201
202 report = out.getvalue()
203 error = err.getvalue()
204
205 try:
206 json_report = simplejson.loads(report)
207 except Exception:
208 raise exceptions.PipenvCmdError(
209 cmd_list_to_shell(cmd), report, error, exit_code=code
210 )
211 meta = json_report.get("report_meta")
212 vulnerabilities_found = meta.get("vulnerabilities_found")
213
214 fg = "green"
215 message = "All good!"
216 db_type = "commercial" if meta.get("api_key", False) else "free"
217
218 if vulnerabilities_found >= 0:
219 fg = "red"
220 message = (
221 f"Scan was complete using Safety’s {db_type} vulnerability database."
222 )
223
224 click.echo()
225 click.secho(f"{vulnerabilities_found} vulnerabilities found.", fg=fg)
226 click.echo()
227
228 vulnerabilities = json_report.get("vulnerabilities", [])
229
230 for vuln in vulnerabilities:
231 click.echo(
232 "{}: {} {} open to vulnerability {} ({}). More info: {}".format(
233 click.style(vuln["vulnerability_id"], bold=True, fg="red"),
234 click.style(vuln["package_name"], fg="green"),
235 click.style(vuln["analyzed_version"], fg="yellow", bold=True),
236 click.style(vuln["vulnerability_id"], bold=True),
237 click.style(vuln["vulnerable_spec"], fg="yellow", bold=False),
238 click.style(vuln["more_info_url"], bold=True),
239 )
240 )
241 click.echo(f"{vuln['advisory']}")
242 click.echo()
243
244 click.secho(message, fg="white", bold=True)
245 sys.exit(code)
246
247 cli(prog_name="pipenv")
248
249 temp_requirements.remove()
250
[end of pipenv/routines/check.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pipenv/routines/check.py b/pipenv/routines/check.py
--- a/pipenv/routines/check.py
+++ b/pipenv/routines/check.py
@@ -95,10 +95,16 @@
if not quiet and not project.s.is_quiet():
click.secho("Passed!", fg="green")
if not quiet and not project.s.is_quiet():
- click.secho(
- "Checking installed packages for vulnerabilities...",
- bold=True,
- )
+ if use_installed:
+ click.secho(
+ "Checking installed packages for vulnerabilities...",
+ bold=True,
+ )
+ else:
+ click.secho(
+ "Checking Pipfile.lock packages for vulnerabilities...",
+ bold=True,
+ )
if ignore:
if not isinstance(ignore, (tuple, list)):
ignore = [ignore]
| {"golden_diff": "diff --git a/pipenv/routines/check.py b/pipenv/routines/check.py\n--- a/pipenv/routines/check.py\n+++ b/pipenv/routines/check.py\n@@ -95,10 +95,16 @@\n if not quiet and not project.s.is_quiet():\n click.secho(\"Passed!\", fg=\"green\")\n if not quiet and not project.s.is_quiet():\n- click.secho(\n- \"Checking installed packages for vulnerabilities...\",\n- bold=True,\n- )\n+ if use_installed:\n+ click.secho(\n+ \"Checking installed packages for vulnerabilities...\",\n+ bold=True,\n+ )\n+ else:\n+ click.secho(\n+ \"Checking Pipfile.lock packages for vulnerabilities...\",\n+ bold=True,\n+ )\n if ignore:\n if not isinstance(ignore, (tuple, list)):\n ignore = [ignore]\n", "issue": "pipenv check looks at installed packages, not Pipfile.lock\n## Use Case\r\n\r\nI would like to run `pipenv check` as a separate job from the build/test job inside a CI pipeline without rebuilding environment. I discovered that I must actually install all packages to a `pipenv` environment before using `pipenv check`. Ideally, I should be able to scan the dependencies inside `Pipfile.lock` without actually installing the whole environment. \r\n\r\nI believe its misleading that right now `pipenv` is just acting as a \"proxy\" to `safety`, and by default checks an environment that may not match `Pipfile.lock`. By using `pipenv check` the assumption should be that it is checking the environment specified in `Pipfile.lock` and if you need to check an environment that deviates, you use `safety` directly.\r\n\r\nI've traced the behavior down to these lines:\r\nhttps://github.com/pypa/pipenv/blob/8939c863464b23b5503569669d1c3f9ad31a498f/pipenv/core.py#L2900-L2902\r\n\r\nInstead of generating the temp `requirements.txt` file from the current environment using `pip list`, can we instead generate the temp `requirements.txt` from `Pipfile.lock`? Something like\r\n\r\n```python\r\n# this command should also respect the wishes of the --dev argument, if provided. Unsure on specifics of implementation\r\ntarget_venv_packages = run_command(\r\n _cmd + [\"-m\", \"pipenv\", \"requirements\"], is_verbose=project.s.is_verbose()\r\n )\r\n```\r\n\r\n## Workaround\r\n\r\nI'm currently using the following workaround in my CI job, but would like to go through `pipenv` directly. 
\r\n\r\n```bash\r\npipenv requirements --dev | safety check --stdin\r\n```\n", "before_files": [{"content": "import io\nimport json as simplejson\nimport os\nimport sys\nimport tempfile\nfrom pathlib import Path\n\nfrom pipenv import exceptions, pep508checker\nfrom pipenv.utils.processes import run_command\nfrom pipenv.utils.project import ensure_project\nfrom pipenv.utils.shell import cmd_list_to_shell, project_python\nfrom pipenv.vendor import click, plette\n\n\ndef do_check(\n project,\n python=False,\n system=False,\n db=None,\n ignore=None,\n output=\"screen\",\n key=None,\n quiet=False,\n exit_code=True,\n policy_file=\"\",\n save_json=\"\",\n audit_and_monitor=True,\n safety_project=None,\n pypi_mirror=None,\n use_installed=False,\n categories=\"\",\n):\n import json\n\n if not system:\n # Ensure that virtualenv is available.\n ensure_project(\n project,\n python=python,\n validate=False,\n warn=False,\n pypi_mirror=pypi_mirror,\n )\n if not quiet and not project.s.is_quiet():\n click.secho(\"Checking PEP 508 requirements...\", bold=True)\n pep508checker_path = pep508checker.__file__.rstrip(\"cdo\")\n safety_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"patched\", \"safety\"\n )\n _cmd = [project_python(project, system=system)]\n # Run the PEP 508 checker in the virtualenv.\n cmd = _cmd + [Path(pep508checker_path).as_posix()]\n c = run_command(cmd, is_verbose=project.s.is_verbose())\n results = []\n if c.returncode is not None:\n try:\n results = simplejson.loads(c.stdout.strip())\n except json.JSONDecodeError:\n click.echo(\n \"{}\\n{}\\n{}\".format(\n click.style(\n \"Failed parsing pep508 results: \",\n fg=\"white\",\n bold=True,\n ),\n c.stdout.strip(),\n c.stderr.strip(),\n )\n )\n sys.exit(1)\n # Load the pipfile.\n p = plette.Pipfile.load(open(project.pipfile_location))\n p = plette.Lockfile.with_meta_from(p)\n failed = False\n # Assert each specified requirement.\n for marker, specifier in p._data[\"_meta\"][\"requires\"].items():\n if marker in results:\n try:\n assert results[marker] == specifier\n except AssertionError:\n failed = True\n click.echo(\n \"Specifier {} does not match {} ({}).\"\n \"\".format(\n click.style(marker, fg=\"green\"),\n click.style(specifier, fg=\"cyan\"),\n click.style(results[marker], fg=\"yellow\"),\n ),\n err=True,\n )\n if failed:\n click.secho(\"Failed!\", fg=\"red\", err=True)\n sys.exit(1)\n else:\n if not quiet and not project.s.is_quiet():\n click.secho(\"Passed!\", fg=\"green\")\n if not quiet and not project.s.is_quiet():\n click.secho(\n \"Checking installed packages for vulnerabilities...\",\n bold=True,\n )\n if ignore:\n if not isinstance(ignore, (tuple, list)):\n ignore = [ignore]\n ignored = [[\"--ignore\", cve] for cve in ignore]\n if not quiet and not project.s.is_quiet():\n click.echo(\n \"Notice: Ignoring Vulnerabilit{} {}\".format(\n \"ies\" if len(ignored) > 1 else \"y\",\n click.style(\", \".join(ignore), fg=\"yellow\"),\n ),\n err=True,\n )\n else:\n ignored = []\n\n options = [\n \"--audit-and-monitor\" if audit_and_monitor else \"--disable-audit-and-monitor\",\n \"--exit-code\" if exit_code else \"--continue-on-error\",\n ]\n\n if output == \"full-report\":\n options.append(\"--full-report\")\n elif output == \"minimal\":\n options.append(\"--json\")\n elif output not in [\"screen\", \"default\"]:\n options.append(f\"--output={output}\")\n\n if save_json:\n options.append(f\"--save-json={save_json}\")\n\n if policy_file:\n options.append(f\"--policy-file={policy_file}\")\n\n if safety_project:\n 
options.append(f\"--project={safety_project}\")\n\n if use_installed:\n target_venv_packages = run_command(\n _cmd + [\"-m\", \"pip\", \"list\", \"--format=freeze\"],\n is_verbose=project.s.is_verbose(),\n )\n elif categories:\n target_venv_packages = run_command(\n [\"pipenv\", \"requirements\", \"--categories\", categories],\n is_verbose=project.s.is_verbose(),\n )\n else:\n target_venv_packages = run_command(\n [\"pipenv\", \"requirements\"], is_verbose=project.s.is_verbose()\n )\n\n temp_requirements = tempfile.NamedTemporaryFile(\n mode=\"w+\",\n prefix=f\"{project.virtualenv_name}\",\n suffix=\"_requirements.txt\",\n delete=False,\n )\n temp_requirements.write(target_venv_packages.stdout.strip())\n temp_requirements.close()\n\n options.extend([\"--file\", temp_requirements.name])\n\n cmd = _cmd + [safety_path, \"check\"] + options\n\n if db:\n if not quiet and not project.s.is_quiet():\n click.echo(f\"Using {db} database\")\n cmd.append(f\"--db={db}\")\n elif key or project.s.PIPENV_PYUP_API_KEY:\n cmd = cmd + [f\"--key={key or project.s.PIPENV_PYUP_API_KEY}\"]\n else:\n PIPENV_SAFETY_DB = (\n \"https://d2qjmgddvqvu75.cloudfront.net/aws/safety/pipenv/1.0.0/\"\n )\n os.environ[\"SAFETY_ANNOUNCEMENTS_URL\"] = f\"{PIPENV_SAFETY_DB}announcements.json\"\n cmd.append(f\"--db={PIPENV_SAFETY_DB}\")\n\n if ignored:\n for cve in ignored:\n cmd += cve\n\n os.environ[\"SAFETY_CUSTOM_INTEGRATION\"] = \"True\"\n os.environ[\"SAFETY_SOURCE\"] = \"pipenv\"\n os.environ[\"SAFETY_PURE_YAML\"] = \"True\"\n\n from pipenv.patched.safety.cli import cli\n\n sys.argv = cmd[1:]\n\n if output == \"minimal\":\n from contextlib import redirect_stderr, redirect_stdout\n\n code = 0\n\n with redirect_stdout(io.StringIO()) as out, redirect_stderr(io.StringIO()) as err:\n try:\n cli(prog_name=\"pipenv\")\n except SystemExit as exit_signal:\n code = exit_signal.code\n\n report = out.getvalue()\n error = err.getvalue()\n\n try:\n json_report = simplejson.loads(report)\n except Exception:\n raise exceptions.PipenvCmdError(\n cmd_list_to_shell(cmd), report, error, exit_code=code\n )\n meta = json_report.get(\"report_meta\")\n vulnerabilities_found = meta.get(\"vulnerabilities_found\")\n\n fg = \"green\"\n message = \"All good!\"\n db_type = \"commercial\" if meta.get(\"api_key\", False) else \"free\"\n\n if vulnerabilities_found >= 0:\n fg = \"red\"\n message = (\n f\"Scan was complete using Safety\u2019s {db_type} vulnerability database.\"\n )\n\n click.echo()\n click.secho(f\"{vulnerabilities_found} vulnerabilities found.\", fg=fg)\n click.echo()\n\n vulnerabilities = json_report.get(\"vulnerabilities\", [])\n\n for vuln in vulnerabilities:\n click.echo(\n \"{}: {} {} open to vulnerability {} ({}). More info: {}\".format(\n click.style(vuln[\"vulnerability_id\"], bold=True, fg=\"red\"),\n click.style(vuln[\"package_name\"], fg=\"green\"),\n click.style(vuln[\"analyzed_version\"], fg=\"yellow\", bold=True),\n click.style(vuln[\"vulnerability_id\"], bold=True),\n click.style(vuln[\"vulnerable_spec\"], fg=\"yellow\", bold=False),\n click.style(vuln[\"more_info_url\"], bold=True),\n )\n )\n click.echo(f\"{vuln['advisory']}\")\n click.echo()\n\n click.secho(message, fg=\"white\", bold=True)\n sys.exit(code)\n\n cli(prog_name=\"pipenv\")\n\n temp_requirements.remove()\n", "path": "pipenv/routines/check.py"}]} | 3,396 | 191 |
gh_patches_debug_67195 | rasdani/github-patches | git_diff | flairNLP__flair-3123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Avg Pooling in the Entity Linker
### Describe the bug
A runtime error is raised upon prediction when using "average" as the pooling operation in the Entity Linker
### To Reproduce
```python
from flair.data import Corpus
from flair.datasets import NEL_ENGLISH_TWEEKI
from flair.embeddings import TransformerWordEmbeddings
from flair.models import EntityLinker
corpus: Corpus = NEL_ENGLISH_TWEEKI(sample_missing_splits=False)
embeddings = TransformerWordEmbeddings(
model="distilbert-base-uncased",
fine_tune=True,
)
entity_linker = EntityLinker(
embeddings=embeddings,
label_dictionary=corpus.make_label_dictionary(label_type="nel"),
label_type="nel",
pooling_operation="average",
)
entity_linker.predict(corpus.train[0])
```
### Expected behavior
The Entity Linker should be able to perform average pooling without any issues, like in the case of other pooling options.
### Logs and Stack traces
```
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In [1], line 20
8 embeddings = TransformerWordEmbeddings(
9 model="distilbert-base-uncased",
10 fine_tune=True,
11 )
13 entity_linker = EntityLinker(
14 embeddings=embeddings,
15 label_dictionary=corpus.make_label_dictionary(label_type="nel"),
16 label_type="nel",
17 pooling_operation="average",
18 )
---> 20 entity_linker.predict(corpus.train[0])
File ~/projects/flair_forked/flair/nn/model.py:826, in DefaultClassifier.predict(self, sentences, mini_batch_size, return_probabilities_for_all_classes, verbose, label_name, return_loss, embedding_storage_mode)
824 # pass data points through network and decode
825 data_point_tensor = self._encode_data_points(batch, data_points)
--> 826 scores = self.decoder(data_point_tensor)
827 scores = self._mask_scores(scores, data_points)
829 # if anything could possibly be predicted
File ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x4 and 768x650)
```
### Screenshots
_No response_
### Additional Context
_No response_
### Environment
#### Versions:
##### Flair
0.11.3
##### Pytorch
1.13.0
##### Transformers
4.24.0
#### GPU
False
</issue>
<code>
[start of flair/models/entity_linker_model.py]
1 import logging
2 import re
3 from functools import lru_cache
4 from pathlib import Path
5 from typing import Any, Callable, Dict, List, Optional, Set, Union
6 from unicodedata import category
7
8 import torch
9
10 import flair.embeddings
11 import flair.nn
12 from flair.data import Dictionary, Sentence, Span
13 from flair.file_utils import cached_path
14
15 log = logging.getLogger("flair")
16
17
18 class CandidateGenerator:
19 """
20 Given a string, the CandidateGenerator returns possible target classes as candidates.
21 """
22
23 def __init__(self, candidates: Union[str, Dict], backoff: bool = True):
24 # internal candidate lists of generator
25 self.mention_to_candidates_map: Dict = {}
26
27 # load Zelda candidates if so passed
28 if isinstance(candidates, str) and candidates.lower() == "zelda":
29 zelda_path: str = "https://flair.informatik.hu-berlin.de/resources/datasets/zelda"
30 zelda_candidates = cached_path(f"{zelda_path}/zelda_mention_entities_counter.pickle", cache_dir="datasets")
31 import pickle
32
33 with open(zelda_candidates, "rb") as handle:
34 mention_entities_counter = pickle.load(handle)
35
36 # create candidate lists
37 candidate_lists = {}
38 for mention in mention_entities_counter:
39 candidate_lists[mention] = list(mention_entities_counter[mention].keys())
40
41 self.mention_to_candidates_map = candidate_lists
42
43 elif isinstance(candidates, Dict):
44 self.mention_to_candidates_map = candidates
45
46 # if lower casing is enabled, create candidate lists of lower cased versions
47 self.backoff = backoff
48 if self.backoff:
49 # create a new dictionary for lower cased mentions
50 lowercased_mention_to_candidates_map: Dict = {}
51
52 # go through each mention and its candidates
53 for mention, candidates in self.mention_to_candidates_map.items():
54 backoff_mention = self._make_backoff_string(mention)
55 # check if backoff mention already seen. If so, add candidates. Else, create new entry.
56 if backoff_mention in lowercased_mention_to_candidates_map:
57 current_candidates = lowercased_mention_to_candidates_map[backoff_mention]
58 lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(candidates)
59 else:
60 lowercased_mention_to_candidates_map[backoff_mention] = candidates
61
62 # set lowercased version as map
63 self.mention_to_candidates_map = lowercased_mention_to_candidates_map
64
65 @lru_cache(maxsize=50000)
66 def _make_backoff_string(self, mention: str) -> str:
67 backoff_mention = mention.lower()
68 backoff_mention = "".join(ch for ch in backoff_mention if category(ch)[0] not in "P")
69 backoff_mention = re.sub(" +", " ", backoff_mention)
70 return backoff_mention
71
72 def get_candidates(self, mention: str) -> Set[str]:
73 """Given a mention, this method returns a set of candidate classes"""
74 if self.backoff:
75 mention = self._make_backoff_string(mention)
76
77 return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()
78
79
80 class EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):
81 """
82 Entity Linking Model
83 The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.
84 To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.
85 The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.
86 """
87
88 def __init__(
89 self,
90 embeddings: flair.embeddings.TokenEmbeddings,
91 label_dictionary: Dictionary,
92 pooling_operation: str = "first_last",
93 label_type: str = "nel",
94 candidates: Optional[CandidateGenerator] = None,
95 **classifierargs,
96 ):
97 """
98 Initializes an EntityLinker
99 :param embeddings: embeddings used to embed the words/sentences
100 :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>
101 :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.
102 E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 'first&last' concatenates
103 the embedding of the first and the embedding of the last word.
104 :param label_type: name of the label you use.
105 """
106
107 super(EntityLinker, self).__init__(
108 embeddings=embeddings,
109 label_dictionary=label_dictionary,
110 final_embedding_size=embeddings.embedding_length * 2
111 if pooling_operation == "first_last"
112 else embeddings.embedding_length,
113 **classifierargs,
114 )
115
116 self.pooling_operation = pooling_operation
117 self._label_type = label_type
118
119 cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {
120 "average": self.emb_mean,
121 "first": self.emb_first,
122 "last": self.emb_last,
123 "first_last": self.emb_firstAndLast,
124 }
125
126 if pooling_operation not in cases:
127 raise KeyError('pooling_operation has to be one of "average", "first", "last" or "first_last"')
128
129 self.aggregated_embedding = cases[pooling_operation]
130
131 self.candidates = candidates
132
133 self.to(flair.device)
134
135 def emb_first(self, span: Span, embedding_names):
136 return span.tokens[0].get_embedding(embedding_names)
137
138 def emb_last(self, span: Span, embedding_names):
139 return span.tokens[-1].get_embedding(embedding_names)
140
141 def emb_firstAndLast(self, span: Span, embedding_names):
142 return torch.cat(
143 (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0
144 )
145
146 def emb_mean(self, span, embedding_names):
147 return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)
148
149 def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:
150 return sentence.get_spans(self.label_type)
151
152 def _filter_data_point(self, data_point: Sentence) -> bool:
153 return bool(data_point.get_labels(self.label_type))
154
155 def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:
156 return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())
157
158 def _get_state_dict(self):
159 model_state = {
160 **super()._get_state_dict(),
161 "word_embeddings": self.embeddings.save_embeddings(use_state_dict=False),
162 "label_type": self.label_type,
163 "label_dictionary": self.label_dictionary,
164 "pooling_operation": self.pooling_operation,
165 "loss_weights": self.weight_dict,
166 "candidates": self.candidates,
167 }
168 return model_state
169
170 def _print_predictions(self, batch, gold_label_type):
171 lines = []
172 for datapoint in batch:
173 eval_line = f"\n{datapoint.to_original_text()}\n"
174
175 for span in datapoint.get_spans(gold_label_type):
176 symbol = "✓" if span.get_label(gold_label_type).value == span.get_label("predicted").value else "❌"
177 eval_line += (
178 f' - "{span.text}" / {span.get_label(gold_label_type).value}'
179 f' --> {span.get_label("predicted").value} ({symbol})\n'
180 )
181
182 lines.append(eval_line)
183 return lines
184
185 @classmethod
186 def _init_model_with_state_dict(cls, state, **kwargs):
187 # remap state dict for models serialized with Flair <= 0.11.3
188 import re
189
190 state_dict = state["state_dict"]
191 for key in list(state_dict.keys()):
192 state_dict[re.sub("^word_embeddings\\.", "embeddings.", key)] = state_dict.pop(key)
193
194 return super()._init_model_with_state_dict(
195 state,
196 embeddings=state.get("word_embeddings"),
197 label_dictionary=state.get("label_dictionary"),
198 label_type=state.get("label_type"),
199 pooling_operation=state.get("pooling_operation"),
200 loss_weights=state.get("loss_weights", {"<unk>": 0.3}),
201 candidates=state.get("candidates", None),
202 **kwargs,
203 )
204
205 @property
206 def label_type(self):
207 return self._label_type
208
209 def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):
210 if not self.candidates:
211 return scores
212
213 masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)
214
215 for idx, span in enumerate(data_points):
216 # get the candidates
217 candidate_set = self.candidates.get_candidates(span.text)
218 # during training, add the gold value as candidate
219 if self.training:
220 candidate_set.add(span.get_label(self.label_type).value)
221 candidate_set.add("<unk>")
222 indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]
223 masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]
224
225 return masked_scores
226
227 @classmethod
228 def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> "EntityLinker":
229 from typing import cast
230
231 return cast("EntityLinker", super().load(model_path=model_path))
232
[end of flair/models/entity_linker_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flair/models/entity_linker_model.py b/flair/models/entity_linker_model.py
--- a/flair/models/entity_linker_model.py
+++ b/flair/models/entity_linker_model.py
@@ -144,7 +144,7 @@
)
def emb_mean(self, span, embedding_names):
- return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)
+ return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)
def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:
return sentence.get_spans(self.label_type)
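
A quick illustration (not from the repository) of why the patch swaps `torch.cat` for `torch.stack` in the mean pooling, assuming a 768-dim embedding and a 4-token span as suggested by the reported `1x4` / `768x650` shapes:

```python
import torch

tokens = [torch.randn(768) for _ in range(4)]       # per-token embeddings of one span

cat_mean = torch.mean(torch.cat(tokens, 0), 0)      # cat -> (3072,), mean over dim 0 -> scalar
stack_mean = torch.mean(torch.stack(tokens, 0), 0)  # stack -> (4, 768), mean over dim 0 -> (768,)

print(cat_mean.shape, stack_mean.shape)             # torch.Size([]) vs torch.Size([768])
```

With `cat`, every span collapses to a single scalar, so the decoder's linear layer (the `768x650` weight in the traceback) receives the wrong feature size; `stack` preserves the embedding dimension that the layer expects.
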
| {"golden_diff": "diff --git a/flair/models/entity_linker_model.py b/flair/models/entity_linker_model.py\n--- a/flair/models/entity_linker_model.py\n+++ b/flair/models/entity_linker_model.py\n@@ -144,7 +144,7 @@\n )\n \n def emb_mean(self, span, embedding_names):\n- return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)\n+ return torch.mean(torch.stack([token.get_embedding(embedding_names) for token in span], 0), 0)\n \n def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:\n return sentence.get_spans(self.label_type)\n", "issue": "[Bug]: Avg Pooling in the Entity Linker\n### Describe the bug\r\n\r\nA runtime error is raised upon prediction when using \"average\" as the pooling operation in the Entity Linker\r\n\r\n### To Reproduce\r\n\r\n```python\r\nfrom flair.data import Corpus\r\nfrom flair.datasets import NEL_ENGLISH_TWEEKI\r\nfrom flair.embeddings import TransformerWordEmbeddings\r\nfrom flair.models import EntityLinker\r\n\r\ncorpus: Corpus = NEL_ENGLISH_TWEEKI(sample_missing_splits=False)\r\n\r\nembeddings = TransformerWordEmbeddings(\r\n model=\"distilbert-base-uncased\",\r\n fine_tune=True,\r\n)\r\n\r\nentity_linker = EntityLinker(\r\n embeddings=embeddings,\r\n label_dictionary=corpus.make_label_dictionary(label_type=\"nel\"),\r\n label_type=\"nel\",\r\n pooling_operation=\"average\",\r\n)\r\n\r\nentity_linker.predict(corpus.train[0])\r\n```\r\n\r\n\r\n### Expected behaivor\r\n\r\nThe Entity Linker should be able to perform average pooling without any issues, like in the case of other pooling options.\r\n\r\n### Logs and Stack traces\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nRuntimeError Traceback (most recent call last)\r\nCell In [1], line 20\r\n 8 embeddings = TransformerWordEmbeddings(\r\n 9 model=\"distilbert-base-uncased\",\r\n 10 fine_tune=True,\r\n 11 )\r\n 13 entity_linker = EntityLinker(\r\n 14 embeddings=embeddings,\r\n 15 label_dictionary=corpus.make_label_dictionary(label_type=\"nel\"),\r\n 16 label_type=\"nel\",\r\n 17 pooling_operation=\"average\",\r\n 18 )\r\n---> 20 entity_linker.predict(corpus.train[0])\r\n\r\nFile ~/projects/flair_forked/flair/nn/model.py:826, in DefaultClassifier.predict(self, sentences, mini_batch_size, return_probabilities_for_all_classes, verbose, label_name, return_loss, embedding_storage_mode)\r\n 824 # pass data points through network and decode\r\n 825 data_point_tensor = self._encode_data_points(batch, data_points)\r\n--> 826 scores = self.decoder(data_point_tensor)\r\n 827 scores = self._mask_scores(scores, data_points)\r\n 829 # if anything could possibly be predicted\r\n\r\nFile ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)\r\n 1186 # If we don't have any hooks, we want to skip the rest of the logic in\r\n 1187 # this function, and just call forward.\r\n 1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\r\n 1189 or _global_forward_hooks or _global_forward_pre_hooks):\r\n-> 1190 return forward_call(*input, **kwargs)\r\n 1191 # Do not call functions when jit is used\r\n 1192 full_backward_hooks, non_full_backward_hooks = [], []\r\n\r\nFile ~/miniforge3/envs/flair/lib/python3.9/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)\r\n 113 def forward(self, input: Tensor) -> Tensor:\r\n--> 114 return F.linear(input, self.weight, self.bias)\r\n\r\nRuntimeError: 
mat1 and mat2 shapes cannot be multiplied (1x4 and 768x650)\r\n```\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n#### Versions:\r\n##### Flair\r\n0.11.3\r\n##### Pytorch\r\n1.13.0\r\n##### Transformers\r\n4.24.0\r\n#### GPU\r\nFalse\n", "before_files": [{"content": "import logging\nimport re\nfrom functools import lru_cache\nfrom pathlib import Path\nfrom typing import Any, Callable, Dict, List, Optional, Set, Union\nfrom unicodedata import category\n\nimport torch\n\nimport flair.embeddings\nimport flair.nn\nfrom flair.data import Dictionary, Sentence, Span\nfrom flair.file_utils import cached_path\n\nlog = logging.getLogger(\"flair\")\n\n\nclass CandidateGenerator:\n \"\"\"\n Given a string, the CandidateGenerator returns possible target classes as candidates.\n \"\"\"\n\n def __init__(self, candidates: Union[str, Dict], backoff: bool = True):\n # internal candidate lists of generator\n self.mention_to_candidates_map: Dict = {}\n\n # load Zelda candidates if so passed\n if isinstance(candidates, str) and candidates.lower() == \"zelda\":\n zelda_path: str = \"https://flair.informatik.hu-berlin.de/resources/datasets/zelda\"\n zelda_candidates = cached_path(f\"{zelda_path}/zelda_mention_entities_counter.pickle\", cache_dir=\"datasets\")\n import pickle\n\n with open(zelda_candidates, \"rb\") as handle:\n mention_entities_counter = pickle.load(handle)\n\n # create candidate lists\n candidate_lists = {}\n for mention in mention_entities_counter:\n candidate_lists[mention] = list(mention_entities_counter[mention].keys())\n\n self.mention_to_candidates_map = candidate_lists\n\n elif isinstance(candidates, Dict):\n self.mention_to_candidates_map = candidates\n\n # if lower casing is enabled, create candidate lists of lower cased versions\n self.backoff = backoff\n if self.backoff:\n # create a new dictionary for lower cased mentions\n lowercased_mention_to_candidates_map: Dict = {}\n\n # go through each mention and its candidates\n for mention, candidates in self.mention_to_candidates_map.items():\n backoff_mention = self._make_backoff_string(mention)\n # check if backoff mention already seen. If so, add candidates. 
Else, create new entry.\n if backoff_mention in lowercased_mention_to_candidates_map:\n current_candidates = lowercased_mention_to_candidates_map[backoff_mention]\n lowercased_mention_to_candidates_map[backoff_mention] = set(current_candidates).union(candidates)\n else:\n lowercased_mention_to_candidates_map[backoff_mention] = candidates\n\n # set lowercased version as map\n self.mention_to_candidates_map = lowercased_mention_to_candidates_map\n\n @lru_cache(maxsize=50000)\n def _make_backoff_string(self, mention: str) -> str:\n backoff_mention = mention.lower()\n backoff_mention = \"\".join(ch for ch in backoff_mention if category(ch)[0] not in \"P\")\n backoff_mention = re.sub(\" +\", \" \", backoff_mention)\n return backoff_mention\n\n def get_candidates(self, mention: str) -> Set[str]:\n \"\"\"Given a mention, this method returns a set of candidate classes\"\"\"\n if self.backoff:\n mention = self._make_backoff_string(mention)\n\n return set(self.mention_to_candidates_map[mention]) if mention in self.mention_to_candidates_map else set()\n\n\nclass EntityLinker(flair.nn.DefaultClassifier[Sentence, Span]):\n \"\"\"\n Entity Linking Model\n The model expects text/sentences with annotated entity mentions and predicts entities to these mentions.\n To this end a word embedding is used to embed the sentences and the embedding of the entity mention goes through a linear layer to get the actual class label.\n The model is able to predict '<unk>' for entity mentions that the model can not confidently match to any of the known labels.\n \"\"\"\n\n def __init__(\n self,\n embeddings: flair.embeddings.TokenEmbeddings,\n label_dictionary: Dictionary,\n pooling_operation: str = \"first_last\",\n label_type: str = \"nel\",\n candidates: Optional[CandidateGenerator] = None,\n **classifierargs,\n ):\n \"\"\"\n Initializes an EntityLinker\n :param embeddings: embeddings used to embed the words/sentences\n :param label_dictionary: dictionary that gives ids to all classes. Should contain <unk>\n :param pooling_operation: either 'average', 'first', 'last' or 'first&last'. Specifies the way of how text representations of entity mentions (with more than one word) are handled.\n E.g. 'average' means that as text representation we take the average of the embeddings of the words in the mention. 
'first&last' concatenates\n the embedding of the first and the embedding of the last word.\n :param label_type: name of the label you use.\n \"\"\"\n\n super(EntityLinker, self).__init__(\n embeddings=embeddings,\n label_dictionary=label_dictionary,\n final_embedding_size=embeddings.embedding_length * 2\n if pooling_operation == \"first_last\"\n else embeddings.embedding_length,\n **classifierargs,\n )\n\n self.pooling_operation = pooling_operation\n self._label_type = label_type\n\n cases: Dict[str, Callable[[Span, List[str]], torch.Tensor]] = {\n \"average\": self.emb_mean,\n \"first\": self.emb_first,\n \"last\": self.emb_last,\n \"first_last\": self.emb_firstAndLast,\n }\n\n if pooling_operation not in cases:\n raise KeyError('pooling_operation has to be one of \"average\", \"first\", \"last\" or \"first_last\"')\n\n self.aggregated_embedding = cases[pooling_operation]\n\n self.candidates = candidates\n\n self.to(flair.device)\n\n def emb_first(self, span: Span, embedding_names):\n return span.tokens[0].get_embedding(embedding_names)\n\n def emb_last(self, span: Span, embedding_names):\n return span.tokens[-1].get_embedding(embedding_names)\n\n def emb_firstAndLast(self, span: Span, embedding_names):\n return torch.cat(\n (span.tokens[0].get_embedding(embedding_names), span.tokens[-1].get_embedding(embedding_names)), 0\n )\n\n def emb_mean(self, span, embedding_names):\n return torch.mean(torch.cat([token.get_embedding(embedding_names) for token in span], 0), 0)\n\n def _get_data_points_from_sentence(self, sentence: Sentence) -> List[Span]:\n return sentence.get_spans(self.label_type)\n\n def _filter_data_point(self, data_point: Sentence) -> bool:\n return bool(data_point.get_labels(self.label_type))\n\n def _get_embedding_for_data_point(self, prediction_data_point: Span) -> torch.Tensor:\n return self.aggregated_embedding(prediction_data_point, self.embeddings.get_names())\n\n def _get_state_dict(self):\n model_state = {\n **super()._get_state_dict(),\n \"word_embeddings\": self.embeddings.save_embeddings(use_state_dict=False),\n \"label_type\": self.label_type,\n \"label_dictionary\": self.label_dictionary,\n \"pooling_operation\": self.pooling_operation,\n \"loss_weights\": self.weight_dict,\n \"candidates\": self.candidates,\n }\n return model_state\n\n def _print_predictions(self, batch, gold_label_type):\n lines = []\n for datapoint in batch:\n eval_line = f\"\\n{datapoint.to_original_text()}\\n\"\n\n for span in datapoint.get_spans(gold_label_type):\n symbol = \"\u2713\" if span.get_label(gold_label_type).value == span.get_label(\"predicted\").value else \"\u274c\"\n eval_line += (\n f' - \"{span.text}\" / {span.get_label(gold_label_type).value}'\n f' --> {span.get_label(\"predicted\").value} ({symbol})\\n'\n )\n\n lines.append(eval_line)\n return lines\n\n @classmethod\n def _init_model_with_state_dict(cls, state, **kwargs):\n # remap state dict for models serialized with Flair <= 0.11.3\n import re\n\n state_dict = state[\"state_dict\"]\n for key in list(state_dict.keys()):\n state_dict[re.sub(\"^word_embeddings\\\\.\", \"embeddings.\", key)] = state_dict.pop(key)\n\n return super()._init_model_with_state_dict(\n state,\n embeddings=state.get(\"word_embeddings\"),\n label_dictionary=state.get(\"label_dictionary\"),\n label_type=state.get(\"label_type\"),\n pooling_operation=state.get(\"pooling_operation\"),\n loss_weights=state.get(\"loss_weights\", {\"<unk>\": 0.3}),\n candidates=state.get(\"candidates\", None),\n **kwargs,\n )\n\n @property\n def label_type(self):\n return 
self._label_type\n\n def _mask_scores(self, scores: torch.Tensor, data_points: List[Span]):\n if not self.candidates:\n return scores\n\n masked_scores = -torch.inf * torch.ones(scores.size(), requires_grad=True, device=flair.device)\n\n for idx, span in enumerate(data_points):\n # get the candidates\n candidate_set = self.candidates.get_candidates(span.text)\n # during training, add the gold value as candidate\n if self.training:\n candidate_set.add(span.get_label(self.label_type).value)\n candidate_set.add(\"<unk>\")\n indices_of_candidates = [self.label_dictionary.get_idx_for_item(candidate) for candidate in candidate_set]\n masked_scores[idx, indices_of_candidates] = scores[idx, indices_of_candidates]\n\n return masked_scores\n\n @classmethod\n def load(cls, model_path: Union[str, Path, Dict[str, Any]]) -> \"EntityLinker\":\n from typing import cast\n\n return cast(\"EntityLinker\", super().load(model_path=model_path))\n", "path": "flair/models/entity_linker_model.py"}]} | 4,051 | 154 |
gh_patches_debug_23023 | rasdani/github-patches | git_diff | conda__conda-9464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
conda raises bogus NotWritableError exception for a bogus attempt to chown to root
This isn't a dupe (well it is a dupe of #7109) but the issue still persists.
#7180 fixed the problem in one section of code, but the same problematic snippet is present here:
https://github.com/conda/conda/blob/master/conda/gateways/disk/update.py#L121
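For illustration, a minimal, self-contained sketch of how that snippet turns a successful write into a bogus `NotWritableError` when conda is invoked through `sudo -u` (the path and invocation below are assumptions, not taken from the conda code base):

```python
# Run as:  sudo -u some_unprivileged_user python repro.py
import os

path = "/tmp/conda_touch_demo"   # any path the target user can write to

with open(path, "a"):            # the actual touch/write succeeds
    pass

if os.environ.get("SUDO_UID") is not None:
    uid = int(os.environ["SUDO_UID"])           # uid of the account that invoked sudo
    gid = int(os.environ.get("SUDO_GID", -1))
    # An unprivileged process may not chown a file to a different owner, so this
    # raises PermissionError (EPERM); the except clause in touch() then re-raises
    # it as NotWritableError even though the file itself was written fine.
    os.chown(path, uid, gid)
```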
</issue>
<code>
[start of conda/gateways/disk/update.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4 from __future__ import absolute_import, division, print_function, unicode_literals
5
6 from errno import EINVAL, EXDEV, EPERM
7 from logging import getLogger
8 import os
9 from os.path import dirname, isdir, split, basename, join, exists
10 import re
11 from shutil import move
12 from subprocess import Popen, PIPE
13
14 from . import exp_backoff_fn, mkdir_p, mkdir_p_sudo_safe
15 from .delete import rm_rf
16 from .link import lexists
17 from ...base.context import context
18 from ...common.compat import on_win
19 from ...common.path import expand
20 from ...exceptions import NotWritableError
21
22 log = getLogger(__name__)
23
24 SHEBANG_REGEX = re.compile(br'^(#!((?:\\ |[^ \n\r])+)(.*))')
25
26
27 class CancelOperation(Exception):
28 pass
29
30
31 def update_file_in_place_as_binary(file_full_path, callback):
32 # callback should be a callable that takes one positional argument, which is the
33 # content of the file before updating
34 # this method updates the file in-place, without releasing the file lock
35 fh = None
36 try:
37 fh = exp_backoff_fn(open, file_full_path, 'rb+')
38 log.trace("in-place update path locked for %s", file_full_path)
39 data = fh.read()
40 fh.seek(0)
41 try:
42 fh.write(callback(data))
43 fh.truncate()
44 except CancelOperation:
45 pass # NOQA
46 finally:
47 if fh:
48 fh.close()
49
50
51 def rename(source_path, destination_path, force=False):
52 if lexists(destination_path) and force:
53 rm_rf(destination_path)
54 if lexists(source_path):
55 log.trace("renaming %s => %s", source_path, destination_path)
56 try:
57 os.rename(source_path, destination_path)
58 except EnvironmentError as e:
59 if (on_win and dirname(source_path) == dirname(destination_path)
60 and os.path.isfile(source_path)):
61 condabin_dir = join(context.conda_prefix, "condabin")
62 rename_script = join(condabin_dir, 'rename_tmp.bat')
63 if exists(rename_script):
64 _dirname, _src_fn = split(source_path)
65 _dest_fn = basename(destination_path)
66 p = Popen(['cmd.exe', '/C', rename_script, _dirname,
67 _src_fn, _dest_fn], stdout=PIPE, stderr=PIPE)
68 stdout, stderr = p.communicate()
69 else:
70 log.debug("{} is missing. Conda was not installed correctly or has been "
71 "corrupted. Please file an issue on the conda github repo."
72 .format(rename_script))
73 elif e.errno in (EINVAL, EXDEV, EPERM):
74 # https://github.com/conda/conda/issues/6811
75 # https://github.com/conda/conda/issues/6711
76 log.trace("Could not rename %s => %s due to errno [%s]. Falling back"
77 " to copy/unlink", source_path, destination_path, e.errno)
78 # https://github.com/moby/moby/issues/25409#issuecomment-238537855
79 # shutil.move() falls back to copy+unlink
80 move(source_path, destination_path)
81 else:
82 raise
83 else:
84 log.trace("cannot rename; source path does not exist '%s'", source_path)
85
86
87 def backoff_rename(source_path, destination_path, force=False):
88 exp_backoff_fn(rename, source_path, destination_path, force)
89
90
91 def touch(path, mkdir=False, sudo_safe=False):
92 # sudo_safe: use any time `path` is within the user's home directory
93 # returns:
94 # True if the file did not exist but was created
95 # False if the file already existed
96 # raises: NotWritableError, which is also an OSError having attached errno
97 try:
98 path = expand(path)
99 log.trace("touching path %s", path)
100 if lexists(path):
101 os.utime(path, None)
102 return True
103 else:
104 dirpath = dirname(path)
105 if not isdir(dirpath) and mkdir:
106 if sudo_safe:
107 mkdir_p_sudo_safe(dirpath)
108 else:
109 mkdir_p(dirpath)
110 else:
111 assert isdir(dirname(path))
112 try:
113 fh = open(path, 'a')
114 except:
115 raise
116 else:
117 fh.close()
118 if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:
119 uid = int(os.environ['SUDO_UID'])
120 gid = int(os.environ.get('SUDO_GID', -1))
121 log.trace("chowning %s:%s %s", uid, gid, path)
122 os.chown(path, uid, gid)
123 return False
124 except (IOError, OSError) as e:
125 raise NotWritableError(path, e.errno, caused_by=e)
126
[end of conda/gateways/disk/update.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda/gateways/disk/update.py b/conda/gateways/disk/update.py
--- a/conda/gateways/disk/update.py
+++ b/conda/gateways/disk/update.py
@@ -109,17 +109,17 @@
mkdir_p(dirpath)
else:
assert isdir(dirname(path))
- try:
- fh = open(path, 'a')
- except:
- raise
- else:
- fh.close()
- if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:
- uid = int(os.environ['SUDO_UID'])
- gid = int(os.environ.get('SUDO_GID', -1))
- log.trace("chowning %s:%s %s", uid, gid, path)
- os.chown(path, uid, gid)
- return False
+ with open(path, 'a'):
+ pass
+ # This chown call causes a false positive PermissionError to be
+ # raised (similar to #7109) when called in an environment which
+ # comes from sudo -u.
+ #
+ # if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:
+ # uid = int(os.environ['SUDO_UID'])
+ # gid = int(os.environ.get('SUDO_GID', -1))
+ # log.trace("chowning %s:%s %s", uid, gid, path)
+ # os.chown(path, uid, gid)
+ return False
except (IOError, OSError) as e:
raise NotWritableError(path, e.errno, caused_by=e)
| {"golden_diff": "diff --git a/conda/gateways/disk/update.py b/conda/gateways/disk/update.py\n--- a/conda/gateways/disk/update.py\n+++ b/conda/gateways/disk/update.py\n@@ -109,17 +109,17 @@\n mkdir_p(dirpath)\n else:\n assert isdir(dirname(path))\n- try:\n- fh = open(path, 'a')\n- except:\n- raise\n- else:\n- fh.close()\n- if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n- uid = int(os.environ['SUDO_UID'])\n- gid = int(os.environ.get('SUDO_GID', -1))\n- log.trace(\"chowning %s:%s %s\", uid, gid, path)\n- os.chown(path, uid, gid)\n- return False\n+ with open(path, 'a'):\n+ pass\n+ # This chown call causes a false positive PermissionError to be\n+ # raised (similar to #7109) when called in an environment which\n+ # comes from sudo -u.\n+ #\n+ # if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n+ # uid = int(os.environ['SUDO_UID'])\n+ # gid = int(os.environ.get('SUDO_GID', -1))\n+ # log.trace(\"chowning %s:%s %s\", uid, gid, path)\n+ # os.chown(path, uid, gid)\n+ return False\n except (IOError, OSError) as e:\n raise NotWritableError(path, e.errno, caused_by=e)\n", "issue": "conda raises bogus NotWritableError exception for a bogus attempt to chown to root\nThis isn't a dupe (well it is a dupe of #7109) but the issue still persists.\r\n\r\n#7180 fixed the problem in one section of code, but the same problematic snippet is present here:\r\n\r\nhttps://github.com/conda/conda/blob/master/conda/gateways/disk/update.py#L121\nconda raises bogus NotWritableError exception for a bogus attempt to chown to root\nThis isn't a dupe (well it is a dupe of #7109) but the issue still persists.\r\n\r\n#7180 fixed the problem in one section of code, but the same problematic snippet is present here:\r\n\r\nhttps://github.com/conda/conda/blob/master/conda/gateways/disk/update.py#L121\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom errno import EINVAL, EXDEV, EPERM\nfrom logging import getLogger\nimport os\nfrom os.path import dirname, isdir, split, basename, join, exists\nimport re\nfrom shutil import move\nfrom subprocess import Popen, PIPE\n\nfrom . 
import exp_backoff_fn, mkdir_p, mkdir_p_sudo_safe\nfrom .delete import rm_rf\nfrom .link import lexists\nfrom ...base.context import context\nfrom ...common.compat import on_win\nfrom ...common.path import expand\nfrom ...exceptions import NotWritableError\n\nlog = getLogger(__name__)\n\nSHEBANG_REGEX = re.compile(br'^(#!((?:\\\\ |[^ \\n\\r])+)(.*))')\n\n\nclass CancelOperation(Exception):\n pass\n\n\ndef update_file_in_place_as_binary(file_full_path, callback):\n # callback should be a callable that takes one positional argument, which is the\n # content of the file before updating\n # this method updates the file in-place, without releasing the file lock\n fh = None\n try:\n fh = exp_backoff_fn(open, file_full_path, 'rb+')\n log.trace(\"in-place update path locked for %s\", file_full_path)\n data = fh.read()\n fh.seek(0)\n try:\n fh.write(callback(data))\n fh.truncate()\n except CancelOperation:\n pass # NOQA\n finally:\n if fh:\n fh.close()\n\n\ndef rename(source_path, destination_path, force=False):\n if lexists(destination_path) and force:\n rm_rf(destination_path)\n if lexists(source_path):\n log.trace(\"renaming %s => %s\", source_path, destination_path)\n try:\n os.rename(source_path, destination_path)\n except EnvironmentError as e:\n if (on_win and dirname(source_path) == dirname(destination_path)\n and os.path.isfile(source_path)):\n condabin_dir = join(context.conda_prefix, \"condabin\")\n rename_script = join(condabin_dir, 'rename_tmp.bat')\n if exists(rename_script):\n _dirname, _src_fn = split(source_path)\n _dest_fn = basename(destination_path)\n p = Popen(['cmd.exe', '/C', rename_script, _dirname,\n _src_fn, _dest_fn], stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate()\n else:\n log.debug(\"{} is missing. Conda was not installed correctly or has been \"\n \"corrupted. Please file an issue on the conda github repo.\"\n .format(rename_script))\n elif e.errno in (EINVAL, EXDEV, EPERM):\n # https://github.com/conda/conda/issues/6811\n # https://github.com/conda/conda/issues/6711\n log.trace(\"Could not rename %s => %s due to errno [%s]. 
Falling back\"\n \" to copy/unlink\", source_path, destination_path, e.errno)\n # https://github.com/moby/moby/issues/25409#issuecomment-238537855\n # shutil.move() falls back to copy+unlink\n move(source_path, destination_path)\n else:\n raise\n else:\n log.trace(\"cannot rename; source path does not exist '%s'\", source_path)\n\n\ndef backoff_rename(source_path, destination_path, force=False):\n exp_backoff_fn(rename, source_path, destination_path, force)\n\n\ndef touch(path, mkdir=False, sudo_safe=False):\n # sudo_safe: use any time `path` is within the user's home directory\n # returns:\n # True if the file did not exist but was created\n # False if the file already existed\n # raises: NotWritableError, which is also an OSError having attached errno\n try:\n path = expand(path)\n log.trace(\"touching path %s\", path)\n if lexists(path):\n os.utime(path, None)\n return True\n else:\n dirpath = dirname(path)\n if not isdir(dirpath) and mkdir:\n if sudo_safe:\n mkdir_p_sudo_safe(dirpath)\n else:\n mkdir_p(dirpath)\n else:\n assert isdir(dirname(path))\n try:\n fh = open(path, 'a')\n except:\n raise\n else:\n fh.close()\n if sudo_safe and not on_win and os.environ.get('SUDO_UID') is not None:\n uid = int(os.environ['SUDO_UID'])\n gid = int(os.environ.get('SUDO_GID', -1))\n log.trace(\"chowning %s:%s %s\", uid, gid, path)\n os.chown(path, uid, gid)\n return False\n except (IOError, OSError) as e:\n raise NotWritableError(path, e.errno, caused_by=e)\n", "path": "conda/gateways/disk/update.py"}]} | 2,090 | 383 |
gh_patches_debug_21705 | rasdani/github-patches | git_diff | crytic__slither-373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
slither-flat does not handle cycle
Example:
```
$ echo a.sol
import './b.sol';
contract A{
B b;
}
$ echo b.sol
import './a.sol';
contract B{
A a;
}
```
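With these two files the export never terminates: `_export_contract` only adds a contract to `exported` after all of its dependencies have been exported, so `A` and `B` keep re-entering each other. A condensed sketch of the pattern in plain Python (names simplified, not the real Slither API):

```python
def export_contract(contract, exported, out, deps):
    """deps maps each contract to the contracts it references, e.g. A -> {B}, B -> {A}."""
    if contract in exported:
        return
    for dep in deps[contract]:
        export_contract(dep, exported, out, deps)  # A -> B -> A -> B -> ... forever
    exported.add(contract)  # reached too late; marking before the recursion is one way to break the cycle
    out.append(contract)
```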
</issue>
<code>
[start of slither/tools/flattening/flattening.py]
1 from pathlib import Path
2 import re
3 import logging
4 from slither.exceptions import SlitherException
5 from slither.core.solidity_types.user_defined_type import UserDefinedType
6 from slither.core.declarations.structure import Structure
7 from slither.core.declarations.enum import Enum
8 from slither.core.declarations.contract import Contract
9 from slither.slithir.operations import NewContract, TypeConversion
10
11 logger = logging.getLogger("Slither-flattening")
12
13 class Flattening:
14
15 DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')
16
17 def __init__(self, slither, external_to_public=False):
18 self._source_codes = {}
19 self._slither = slither
20 self._external_to_public = external_to_public
21 self._use_abi_encoder_v2 = False
22
23 self._check_abi_encoder_v2()
24
25 for contract in slither.contracts:
26 self._get_source_code(contract)
27
28 def _check_abi_encoder_v2(self):
29 for p in self._slither.pragma_directives:
30 if 'ABIEncoderV2' in str(p.directive):
31 self._use_abi_encoder_v2 = True
32 return
33
34 def _get_source_code(self, contract):
35 src_mapping = contract.source_mapping
36 content = self._slither.source_code[src_mapping['filename_absolute']]
37 start = src_mapping['start']
38 end = src_mapping['start'] + src_mapping['length']
39
40 # interface must use external
41 if self._external_to_public and contract.contract_kind != "interface":
42 # to_patch is a list of (index, bool). The bool indicates
43 # if the index is for external -> public (true)
44 # or a calldata -> memory (false)
45 to_patch = []
46 for f in contract.functions_declared:
47 # fallback must be external
48 if f.is_fallback or f.is_constructor_variables:
49 continue
50 if f.visibility == 'external':
51 attributes_start = (f.parameters_src.source_mapping['start'] +
52 f.parameters_src.source_mapping['length'])
53 attributes_end = f.returns_src.source_mapping['start']
54 attributes = content[attributes_start:attributes_end]
55 regex = re.search(r'((\sexternal)\s+)|(\sexternal)$|(\)external)$', attributes)
56 if regex:
57 to_patch.append((attributes_start + regex.span()[0] + 1, True))
58 else:
59 raise SlitherException(f'External keyword not found {f.name} {attributes}')
60
61 for var in f.parameters:
62 if var.location == "calldata":
63 calldata_start = var.source_mapping['start']
64 calldata_end = calldata_start + var.source_mapping['length']
65 calldata_idx = content[calldata_start:calldata_end].find(' calldata ')
66 to_patch.append((calldata_start + calldata_idx + 1, False))
67
68 to_patch.sort(key=lambda x:x[0], reverse=True)
69
70 content = content[start:end]
71 for (index, is_external) in to_patch:
72 index = index - start
73 if is_external:
74 content = content[:index] + 'public' + content[index + len('external'):]
75 else:
76 content = content[:index] + 'memory' + content[index + len('calldata'):]
77 else:
78 content = content[start:end]
79
80 self._source_codes[contract] = content
81
82
83 def _export_from_type(self, t, contract, exported, list_contract):
84 if isinstance(t, UserDefinedType):
85 if isinstance(t.type, (Enum, Structure)):
86 if t.type.contract != contract and not t.type.contract in exported:
87 self._export_contract(t.type.contract, exported, list_contract)
88 else:
89 assert isinstance(t.type, Contract)
90 if t.type != contract and not t.type in exported:
91 self._export_contract(t.type, exported, list_contract)
92
93 def _export_contract(self, contract, exported, list_contract):
94 if contract.name in exported:
95 return
96 for inherited in contract.inheritance:
97 self._export_contract(inherited, exported, list_contract)
98
99 # Find all the external contracts called
100 externals = contract.all_library_calls + contract.all_high_level_calls
101 # externals is a list of (contract, function)
102 # We also filter call to itself to avoid infilite loop
103 externals = list(set([e[0] for e in externals if e[0] != contract]))
104
105 for inherited in externals:
106 self._export_contract(inherited, exported, list_contract)
107
108 # Find all the external contracts use as a base type
109 local_vars = []
110 for f in contract.functions_declared:
111 local_vars += f.variables
112
113 for v in contract.variables + local_vars:
114 self._export_from_type(v.type, contract, exported, list_contract)
115
116 # Find all convert and "new" operation that can lead to use an external contract
117 for f in contract.functions_declared:
118 for ir in f.slithir_operations:
119 if isinstance(ir, NewContract):
120 if ir.contract_created != contract and not ir.contract_created in exported:
121 self._export_contract(ir.contract_created, exported, list_contract)
122 if isinstance(ir, TypeConversion):
123 self._export_from_type(ir.type, contract, exported, list_contract)
124 if contract.name in exported:
125 return
126 exported.add(contract.name)
127 list_contract.append(self._source_codes[contract])
128
129 def _export(self, contract, ret):
130 self._export_contract(contract, set(), ret)
131 path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')
132 logger.info(f'Export {path}')
133 with open(path, 'w') as f:
134 if self._slither.solc_version:
135 f.write(f'pragma solidity {self._slither.solc_version};\n')
136 if self._use_abi_encoder_v2:
137 f.write('pragma experimental ABIEncoderV2;\n')
138 f.write('\n'.join(ret))
139 f.write('\n')
140
141 def export(self, target=None):
142
143 if not self.DEFAULT_EXPORT_PATH.exists():
144 self.DEFAULT_EXPORT_PATH.mkdir(parents=True)
145
146 if target is None:
147 for contract in self._slither.contracts_derived:
148 ret = []
149 self._export(contract, ret)
150 else:
151 contract = self._slither.get_contract_from_name(target)
152 if contract is None:
153 logger.error(f'{target} not found')
154 else:
155 ret = []
156 self._export(contract, ret)
157
158
[end of slither/tools/flattening/flattening.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py
--- a/slither/tools/flattening/flattening.py
+++ b/slither/tools/flattening/flattening.py
@@ -93,6 +93,7 @@
def _export_contract(self, contract, exported, list_contract):
if contract.name in exported:
return
+ exported.add(contract.name)
for inherited in contract.inheritance:
self._export_contract(inherited, exported, list_contract)
@@ -121,9 +122,6 @@
self._export_contract(ir.contract_created, exported, list_contract)
if isinstance(ir, TypeConversion):
self._export_from_type(ir.type, contract, exported, list_contract)
- if contract.name in exported:
- return
- exported.add(contract.name)
list_contract.append(self._source_codes[contract])
def _export(self, contract, ret):
| {"golden_diff": "diff --git a/slither/tools/flattening/flattening.py b/slither/tools/flattening/flattening.py\n--- a/slither/tools/flattening/flattening.py\n+++ b/slither/tools/flattening/flattening.py\n@@ -93,6 +93,7 @@\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n+ exported.add(contract.name)\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n \n@@ -121,9 +122,6 @@\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n- if contract.name in exported:\n- return\n- exported.add(contract.name)\n list_contract.append(self._source_codes[contract])\n \n def _export(self, contract, ret):\n", "issue": "slither-flat does not handle cycle \nExample:\r\n```\r\n$ echo a.sol\r\nimport './b.sol';\r\n\r\ncontract A{\r\n\r\n B b;\r\n\r\n}\r\n$ echo b.sol\r\nimport './a.sol';\r\n\r\ncontract B{\r\n\r\n A a;\r\n\r\n}\r\n```\nslither-flat does not handle cycle \nExample:\r\n```\r\n$ echo a.sol\r\nimport './b.sol';\r\n\r\ncontract A{\r\n\r\n B b;\r\n\r\n}\r\n$ echo b.sol\r\nimport './a.sol';\r\n\r\ncontract B{\r\n\r\n A a;\r\n\r\n}\r\n```\n", "before_files": [{"content": "from pathlib import Path\nimport re\nimport logging\nfrom slither.exceptions import SlitherException\nfrom slither.core.solidity_types.user_defined_type import UserDefinedType\nfrom slither.core.declarations.structure import Structure\nfrom slither.core.declarations.enum import Enum\nfrom slither.core.declarations.contract import Contract\nfrom slither.slithir.operations import NewContract, TypeConversion\n\nlogger = logging.getLogger(\"Slither-flattening\")\n\nclass Flattening:\n\n DEFAULT_EXPORT_PATH = Path('crytic-export/flattening')\n\n def __init__(self, slither, external_to_public=False):\n self._source_codes = {}\n self._slither = slither\n self._external_to_public = external_to_public\n self._use_abi_encoder_v2 = False\n\n self._check_abi_encoder_v2()\n\n for contract in slither.contracts:\n self._get_source_code(contract)\n\n def _check_abi_encoder_v2(self):\n for p in self._slither.pragma_directives:\n if 'ABIEncoderV2' in str(p.directive):\n self._use_abi_encoder_v2 = True\n return\n\n def _get_source_code(self, contract):\n src_mapping = contract.source_mapping\n content = self._slither.source_code[src_mapping['filename_absolute']]\n start = src_mapping['start']\n end = src_mapping['start'] + src_mapping['length']\n\n # interface must use external\n if self._external_to_public and contract.contract_kind != \"interface\":\n # to_patch is a list of (index, bool). 
The bool indicates\n # if the index is for external -> public (true)\n # or a calldata -> memory (false)\n to_patch = []\n for f in contract.functions_declared:\n # fallback must be external\n if f.is_fallback or f.is_constructor_variables:\n continue\n if f.visibility == 'external':\n attributes_start = (f.parameters_src.source_mapping['start'] +\n f.parameters_src.source_mapping['length'])\n attributes_end = f.returns_src.source_mapping['start']\n attributes = content[attributes_start:attributes_end]\n regex = re.search(r'((\\sexternal)\\s+)|(\\sexternal)$|(\\)external)$', attributes)\n if regex:\n to_patch.append((attributes_start + regex.span()[0] + 1, True))\n else:\n raise SlitherException(f'External keyword not found {f.name} {attributes}')\n\n for var in f.parameters:\n if var.location == \"calldata\":\n calldata_start = var.source_mapping['start']\n calldata_end = calldata_start + var.source_mapping['length']\n calldata_idx = content[calldata_start:calldata_end].find(' calldata ')\n to_patch.append((calldata_start + calldata_idx + 1, False))\n\n to_patch.sort(key=lambda x:x[0], reverse=True)\n\n content = content[start:end]\n for (index, is_external) in to_patch:\n index = index - start\n if is_external:\n content = content[:index] + 'public' + content[index + len('external'):]\n else:\n content = content[:index] + 'memory' + content[index + len('calldata'):]\n else:\n content = content[start:end]\n\n self._source_codes[contract] = content\n\n\n def _export_from_type(self, t, contract, exported, list_contract):\n if isinstance(t, UserDefinedType):\n if isinstance(t.type, (Enum, Structure)):\n if t.type.contract != contract and not t.type.contract in exported:\n self._export_contract(t.type.contract, exported, list_contract)\n else:\n assert isinstance(t.type, Contract)\n if t.type != contract and not t.type in exported:\n self._export_contract(t.type, exported, list_contract)\n\n def _export_contract(self, contract, exported, list_contract):\n if contract.name in exported:\n return\n for inherited in contract.inheritance:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts called\n externals = contract.all_library_calls + contract.all_high_level_calls\n # externals is a list of (contract, function)\n # We also filter call to itself to avoid infilite loop\n externals = list(set([e[0] for e in externals if e[0] != contract]))\n\n for inherited in externals:\n self._export_contract(inherited, exported, list_contract)\n\n # Find all the external contracts use as a base type\n local_vars = []\n for f in contract.functions_declared:\n local_vars += f.variables\n\n for v in contract.variables + local_vars:\n self._export_from_type(v.type, contract, exported, list_contract)\n\n # Find all convert and \"new\" operation that can lead to use an external contract\n for f in contract.functions_declared:\n for ir in f.slithir_operations:\n if isinstance(ir, NewContract):\n if ir.contract_created != contract and not ir.contract_created in exported:\n self._export_contract(ir.contract_created, exported, list_contract)\n if isinstance(ir, TypeConversion):\n self._export_from_type(ir.type, contract, exported, list_contract)\n if contract.name in exported:\n return\n exported.add(contract.name)\n list_contract.append(self._source_codes[contract])\n\n def _export(self, contract, ret):\n self._export_contract(contract, set(), ret)\n path = Path(self.DEFAULT_EXPORT_PATH, f'{contract.name}.sol')\n logger.info(f'Export {path}')\n with open(path, 'w') as f:\n 
if self._slither.solc_version:\n f.write(f'pragma solidity {self._slither.solc_version};\\n')\n if self._use_abi_encoder_v2:\n f.write('pragma experimental ABIEncoderV2;\\n')\n f.write('\\n'.join(ret))\n f.write('\\n')\n\n def export(self, target=None):\n\n if not self.DEFAULT_EXPORT_PATH.exists():\n self.DEFAULT_EXPORT_PATH.mkdir(parents=True)\n\n if target is None:\n for contract in self._slither.contracts_derived:\n ret = []\n self._export(contract, ret)\n else:\n contract = self._slither.get_contract_from_name(target)\n if contract is None:\n logger.error(f'{target} not found')\n else:\n ret = []\n self._export(contract, ret)\n\n", "path": "slither/tools/flattening/flattening.py"}]} | 2,413 | 210 |
gh_patches_debug_16882 | rasdani/github-patches | git_diff | learningequality__kolibri-1535 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Learn's Search is broken
## Summary
* Search within Learn is currently broken on Develop.
* See [triggerSearch()](https://github.com/learningequality/kolibri/blob/develop/kolibri/plugins/learn/assets/src/state/actions.js#L377)
## Exception
`Exception Value: 'list' object has no attribute 'values_list'`
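The error comes from `get_progress_fractions` in `kolibri/content/serializers.py` (shown below): it assumes `nodes` supports `.values_list()`, but the search endpoint hands it a plain Python list of content nodes. A minimal sketch of the kind of guard that avoids the AttributeError (illustrative only, not necessarily the final fix):

```python
from django.db.models.query import RawQuerySet

def leaf_content_ids(nodes):
    # search results arrive as a plain list, which has no .values_list()
    if isinstance(nodes, (RawQuerySet, list)):
        return [node.content_id for node in nodes]
    return nodes.values_list("content_id", flat=True)
```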
## How to reproduce
1. Search for something within Learn
## Real-life consequences
:rage4:
</issue>
<code>
[start of kolibri/content/serializers.py]
1 from django.db.models import Manager
2 from django.db.models.query import RawQuerySet
3 from kolibri.content.models import AssessmentMetaData, ChannelMetadataCache, ContentNode, File
4 from rest_framework import serializers
5
6 from .content_db_router import default_database_is_attached, get_active_content_database
7
8
9 class ChannelMetadataCacheSerializer(serializers.ModelSerializer):
10
11 class Meta:
12 model = ChannelMetadataCache
13 fields = ('root_pk', 'id', 'name', 'description', 'author')
14
15
16 class FileSerializer(serializers.ModelSerializer):
17 storage_url = serializers.SerializerMethodField()
18 preset = serializers.SerializerMethodField()
19 download_url = serializers.SerializerMethodField()
20
21 def get_storage_url(self, target_node):
22 return target_node.get_storage_url()
23
24 def get_preset(self, target_node):
25 return target_node.get_preset()
26
27 def get_download_url(self, target_node):
28 return target_node.get_download_url()
29
30 class Meta:
31 model = File
32 fields = ('storage_url', 'id', 'priority', 'checksum', 'available', 'file_size', 'extension', 'preset', 'lang',
33 'supplementary', 'thumbnail', 'download_url')
34
35
36 class AssessmentMetaDataSerializer(serializers.ModelSerializer):
37
38 assessment_item_ids = serializers.JSONField(default='[]')
39 mastery_model = serializers.JSONField(default='{}')
40
41 class Meta:
42 model = AssessmentMetaData
43 fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )
44
45
46 def get_progress_fraction(content_id, user):
47 from kolibri.logger.models import ContentSummaryLog
48 try:
49 # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress
50 overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content_id).progress
51 except ContentSummaryLog.DoesNotExist:
52 return None
53 return round(overall_progress, 4)
54
55
56 def get_progress_fractions(nodes, user):
57 from kolibri.logger.models import ContentSummaryLog
58 if isinstance(nodes, RawQuerySet):
59 leaf_ids = [datum.content_id for datum in nodes]
60 else:
61 leaf_ids = nodes.values_list("content_id", flat=True)
62
63 # get all summary logs for the current user that correspond to the descendant content nodes
64 if default_database_is_attached(): # if possible, do a direct join between the content and default databases
65 channel_alias = get_active_content_database()
66 summary_logs = ContentSummaryLog.objects.using(channel_alias).filter(user=user, content_id__in=leaf_ids)
67 else: # otherwise, convert the leaf queryset into a flat list of ids and use that
68 summary_logs = ContentSummaryLog.objects.filter(user=user, content_id__in=list(leaf_ids))
69
70 # make a lookup dict for all logs to allow mapping from content_id to current progress
71 overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}
72 return overall_progress
73
74
75 class ContentNodeListSerializer(serializers.ListSerializer):
76
77 def to_representation(self, data):
78
79 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
80 progress_dict = {}
81 else:
82 user = self.context["request"].user
83 progress_dict = get_progress_fractions(data, user)
84
85 # Dealing with nested relationships, data can be a Manager,
86 # so, first get a queryset from the Manager if needed
87 iterable = data.all() if isinstance(data, Manager) else data
88
89 return [
90 self.child.to_representation(item, progress_dict.get(item.content_id)) for item in iterable
91 ]
92
93
94 class ContentNodeSerializer(serializers.ModelSerializer):
95 parent = serializers.PrimaryKeyRelatedField(read_only=True)
96 files = FileSerializer(many=True, read_only=True)
97 assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)
98 license = serializers.StringRelatedField(many=False)
99 license_description = serializers.SerializerMethodField()
100
101 def __init__(self, *args, **kwargs):
102 # Instantiate the superclass normally
103 super(ContentNodeSerializer, self).__init__(*args, **kwargs)
104
105 # enable dynamic fields specification!
106 if 'request' in self.context and self.context['request'].GET.get('fields', None):
107 fields = self.context['request'].GET['fields'].split(',')
108 # Drop any fields that are not specified in the `fields` argument.
109 allowed = set(fields)
110 existing = set(self.fields.keys())
111 for field_name in existing - allowed:
112 self.fields.pop(field_name)
113
114 def to_representation(self, instance, progress_fraction=None):
115 if progress_fraction is None:
116 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
117 progress_fraction = 0
118 else:
119 user = self.context["request"].user
120 progress_fraction = get_progress_fraction(instance.content_id, user)
121 value = super(ContentNodeSerializer, self).to_representation(instance)
122 value['progress_fraction'] = progress_fraction
123 return value
124
125 def get_license_description(self, target_node):
126 if target_node.license_id:
127 return target_node.license.license_description
128 return ''
129
130 class Meta:
131 model = ContentNode
132 fields = (
133 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',
134 'license', 'license_description', 'files', 'parent', 'author',
135 'assessmentmetadata',
136 )
137
138 list_serializer_class = ContentNodeListSerializer
139
[end of kolibri/content/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/content/serializers.py b/kolibri/content/serializers.py
--- a/kolibri/content/serializers.py
+++ b/kolibri/content/serializers.py
@@ -55,7 +55,7 @@
def get_progress_fractions(nodes, user):
from kolibri.logger.models import ContentSummaryLog
- if isinstance(nodes, RawQuerySet):
+ if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):
leaf_ids = [datum.content_id for datum in nodes]
else:
leaf_ids = nodes.values_list("content_id", flat=True)
@@ -76,6 +76,9 @@
def to_representation(self, data):
+ if not data:
+ return data
+
if 'request' not in self.context or not self.context['request'].user.is_facility_user:
progress_dict = {}
else:
| {"golden_diff": "diff --git a/kolibri/content/serializers.py b/kolibri/content/serializers.py\n--- a/kolibri/content/serializers.py\n+++ b/kolibri/content/serializers.py\n@@ -55,7 +55,7 @@\n \n def get_progress_fractions(nodes, user):\n from kolibri.logger.models import ContentSummaryLog\n- if isinstance(nodes, RawQuerySet):\n+ if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.values_list(\"content_id\", flat=True)\n@@ -76,6 +76,9 @@\n \n def to_representation(self, data):\n \n+ if not data:\n+ return data\n+\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n", "issue": "Learn's Search is broken\n## Summary\r\n\r\n* Search within Learn is currently broken on Develop.\r\n* See [triggerSearch()](https://github.com/learningequality/kolibri/blob/develop/kolibri/plugins/learn/assets/src/state/actions.js#L377)\r\n\r\n## Exception\r\n\r\n`Exception Value: 'list' object has no attribute 'values_list'`\r\n\r\n## How to reproduce\r\n\r\n1. Search for something within Learn\r\n\r\n## Real-life consequences\r\n\r\n:rage4: \n", "before_files": [{"content": "from django.db.models import Manager\nfrom django.db.models.query import RawQuerySet\nfrom kolibri.content.models import AssessmentMetaData, ChannelMetadataCache, ContentNode, File\nfrom rest_framework import serializers\n\nfrom .content_db_router import default_database_is_attached, get_active_content_database\n\n\nclass ChannelMetadataCacheSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = ChannelMetadataCache\n fields = ('root_pk', 'id', 'name', 'description', 'author')\n\n\nclass FileSerializer(serializers.ModelSerializer):\n storage_url = serializers.SerializerMethodField()\n preset = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n\n def get_storage_url(self, target_node):\n return target_node.get_storage_url()\n\n def get_preset(self, target_node):\n return target_node.get_preset()\n\n def get_download_url(self, target_node):\n return target_node.get_download_url()\n\n class Meta:\n model = File\n fields = ('storage_url', 'id', 'priority', 'checksum', 'available', 'file_size', 'extension', 'preset', 'lang',\n 'supplementary', 'thumbnail', 'download_url')\n\n\nclass AssessmentMetaDataSerializer(serializers.ModelSerializer):\n\n assessment_item_ids = serializers.JSONField(default='[]')\n mastery_model = serializers.JSONField(default='{}')\n\n class Meta:\n model = AssessmentMetaData\n fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )\n\n\ndef get_progress_fraction(content_id, user):\n from kolibri.logger.models import ContentSummaryLog\n try:\n # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress\n overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content_id).progress\n except ContentSummaryLog.DoesNotExist:\n return None\n return round(overall_progress, 4)\n\n\ndef get_progress_fractions(nodes, user):\n from kolibri.logger.models import ContentSummaryLog\n if isinstance(nodes, RawQuerySet):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.values_list(\"content_id\", flat=True)\n\n # get all summary logs for the current user that correspond to the descendant content nodes\n if default_database_is_attached(): # if possible, do a direct join between the content and 
default databases\n channel_alias = get_active_content_database()\n summary_logs = ContentSummaryLog.objects.using(channel_alias).filter(user=user, content_id__in=leaf_ids)\n else: # otherwise, convert the leaf queryset into a flat list of ids and use that\n summary_logs = ContentSummaryLog.objects.filter(user=user, content_id__in=list(leaf_ids))\n\n # make a lookup dict for all logs to allow mapping from content_id to current progress\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n return overall_progress\n\n\nclass ContentNodeListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n progress_dict = get_progress_fractions(data, user)\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data.all() if isinstance(data, Manager) else data\n\n return [\n self.child.to_representation(item, progress_dict.get(item.content_id)) for item in iterable\n ]\n\n\nclass ContentNodeSerializer(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n files = FileSerializer(many=True, read_only=True)\n assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)\n license = serializers.StringRelatedField(many=False)\n license_description = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n # Instantiate the superclass normally\n super(ContentNodeSerializer, self).__init__(*args, **kwargs)\n\n # enable dynamic fields specification!\n if 'request' in self.context and self.context['request'].GET.get('fields', None):\n fields = self.context['request'].GET['fields'].split(',')\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n def to_representation(self, instance, progress_fraction=None):\n if progress_fraction is None:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_fraction = 0\n else:\n user = self.context[\"request\"].user\n progress_fraction = get_progress_fraction(instance.content_id, user)\n value = super(ContentNodeSerializer, self).to_representation(instance)\n value['progress_fraction'] = progress_fraction\n return value\n\n def get_license_description(self, target_node):\n if target_node.license_id:\n return target_node.license.license_description\n return ''\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',\n 'license', 'license_description', 'files', 'parent', 'author',\n 'assessmentmetadata',\n )\n\n list_serializer_class = ContentNodeListSerializer\n", "path": "kolibri/content/serializers.py"}]} | 2,146 | 198 |
gh_patches_debug_15758 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-3133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mail_password form crashes if user is not found
Visiting /mali_password without providing a userid will simply crash. And if mail_password_form is submitted and a user is not found, or the user doesn't have an email configured, it will crash as well.
</issue>
<code>
[start of Products/CMFPlone/browser/login/mail_password.py]
1 # -*- coding: utf-8 -*-
2 from AccessControl import Unauthorized
3 from Products.CMFPlone import PloneMessageFactory as _
4 from Products.Five import BrowserView
5 from Products.statusmessages.interfaces import IStatusMessage
6
7
8 class MailPasswordView(BrowserView):
9
10 def __call__(self):
11 try:
12 response = self.context.portal_registration.mailPassword(
13 self.request.form.get('userid', ''),
14 self.request,
15 )
16 except ValueError as e:
17 try:
18 msg = _(str(e))
19 except Unauthorized:
20 # If we are not allowed to tell the user, what is wrong, he
21 # should get an error message and contact the admins
22 raise e
23 IStatusMessage(self.request).add(msg)
24 self.request.response.redirect(
25 self.context.absolute_url() + 'mail_password_form'
26 )
27 return response
28
[end of Products/CMFPlone/browser/login/mail_password.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Products/CMFPlone/browser/login/mail_password.py b/Products/CMFPlone/browser/login/mail_password.py
--- a/Products/CMFPlone/browser/login/mail_password.py
+++ b/Products/CMFPlone/browser/login/mail_password.py
@@ -8,6 +8,7 @@
class MailPasswordView(BrowserView):
def __call__(self):
+ response = None
try:
response = self.context.portal_registration.mailPassword(
self.request.form.get('userid', ''),
@@ -22,6 +23,6 @@
raise e
IStatusMessage(self.request).add(msg)
self.request.response.redirect(
- self.context.absolute_url() + 'mail_password_form'
+ self.context.absolute_url() + '/mail_password_form'
)
return response
| {"golden_diff": "diff --git a/Products/CMFPlone/browser/login/mail_password.py b/Products/CMFPlone/browser/login/mail_password.py\n--- a/Products/CMFPlone/browser/login/mail_password.py\n+++ b/Products/CMFPlone/browser/login/mail_password.py\n@@ -8,6 +8,7 @@\n class MailPasswordView(BrowserView):\n \n def __call__(self):\n+ response = None\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n@@ -22,6 +23,6 @@\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n- self.context.absolute_url() + 'mail_password_form'\n+ self.context.absolute_url() + '/mail_password_form'\n )\n return response\n", "issue": "mail_password form crashes if user is not found\nVisiting /mali_password without providing a userid will simply crash. And if mail_password_form is submitted and a user is not found, or the user doesn't have an email configured, it will crash as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import Unauthorized\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.Five import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\n\n\nclass MailPasswordView(BrowserView):\n\n def __call__(self):\n try:\n response = self.context.portal_registration.mailPassword(\n self.request.form.get('userid', ''),\n self.request,\n )\n except ValueError as e:\n try:\n msg = _(str(e))\n except Unauthorized:\n # If we are not allowed to tell the user, what is wrong, he\n # should get an error message and contact the admins\n raise e\n IStatusMessage(self.request).add(msg)\n self.request.response.redirect(\n self.context.absolute_url() + 'mail_password_form'\n )\n return response\n", "path": "Products/CMFPlone/browser/login/mail_password.py"}]} | 827 | 178 |
gh_patches_debug_368 | rasdani/github-patches | git_diff | frappe__frappe-17020 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Share doesn't disappear
## Description of the issue
When the read permission of a Share is removed by de-selecting the checkbox, the corresponding DocShare is removed in the backend, but the checkbox is automatically re-selected in the frontend. After a refresh, the share entry is gone.
## Context information (for bug reports)
**Output of `bench version`**
```
frappe 14.x.x-develop
```
## Steps to reproduce the issue
1. Open an arbitrary Document
2. Add a share with read permissions
3. Remove the read permission by clicking the checkbox
### Observed result
The checkbox is automatically re-selected
### Expected result
The share entry disappears
## Additional information
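For illustration, the same round trip can be reproduced from a bench console with the helpers in `frappe/share.py` shown below (the doctype, document name and user are assumptions and must exist on the site):

```python
from frappe.share import add, set_permission, get_users

add("ToDo", "TODO-0001", user="test@example.com", read=1)

# roughly what un-ticking "Can Read" in the Share dialog triggers on the server
share = set_permission("ToDo", "TODO-0001", "test@example.com", "read", value=0)

print(share)                           # {}  -> the DocShare row was deleted
print(get_users("ToDo", "TODO-0001"))  # []  -> the backend no longer has the share
# ...yet the Share dialog in the browser immediately re-ticks the checkbox
```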
</issue>
<code>
[start of frappe/share.py]
1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
2 # License: MIT. See LICENSE
3
4 import frappe
5 from frappe import _
6 from frappe.desk.doctype.notification_log.notification_log import (
7 enqueue_create_notification,
8 get_title,
9 get_title_html,
10 )
11 from frappe.desk.form.document_follow import follow_document
12 from frappe.utils import cint
13
14
15 @frappe.whitelist()
16 def add(
17 doctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0
18 ):
19 """Share the given document with a user."""
20 if not user:
21 user = frappe.session.user
22
23 if not (flags or {}).get("ignore_share_permission"):
24 check_share_permission(doctype, name)
25
26 share_name = get_share_name(doctype, name, user, everyone)
27
28 if share_name:
29 doc = frappe.get_doc("DocShare", share_name)
30 else:
31 doc = frappe.new_doc("DocShare")
32 doc.update(
33 {"user": user, "share_doctype": doctype, "share_name": name, "everyone": cint(everyone)}
34 )
35
36 if flags:
37 doc.flags.update(flags)
38
39 doc.update(
40 {
41 # always add read, since you are adding!
42 "read": 1,
43 "write": cint(write),
44 "submit": cint(submit),
45 "share": cint(share),
46 }
47 )
48
49 doc.save(ignore_permissions=True)
50 notify_assignment(user, doctype, name, everyone, notify=notify)
51
52 if frappe.get_cached_value("User", user, "follow_shared_documents"):
53 follow_document(doctype, name, user)
54
55 return doc
56
57
58 def remove(doctype, name, user, flags=None):
59 share_name = frappe.db.get_value(
60 "DocShare", {"user": user, "share_name": name, "share_doctype": doctype}
61 )
62
63 if share_name:
64 frappe.delete_doc("DocShare", share_name, flags=flags)
65
66
67 @frappe.whitelist()
68 def set_permission(doctype, name, user, permission_to, value=1, everyone=0):
69 """Set share permission."""
70 check_share_permission(doctype, name)
71
72 share_name = get_share_name(doctype, name, user, everyone)
73 value = int(value)
74
75 if not share_name:
76 if value:
77 share = add(doctype, name, user, everyone=everyone, **{permission_to: 1})
78 else:
79 # no share found, nothing to remove
80 share = {}
81 pass
82 else:
83 share = frappe.get_doc("DocShare", share_name)
84 share.flags.ignore_permissions = True
85 share.set(permission_to, value)
86
87 if not value:
88 # un-set higher-order permissions too
89 if permission_to == "read":
90 share.read = share.write = share.submit = share.share = 0
91
92 share.save()
93
94 if not (share.read or share.write or share.submit or share.share):
95 share.delete()
96 share = {}
97
98 return share
99
100
101 @frappe.whitelist()
102 def get_users(doctype, name):
103 """Get list of users with which this document is shared"""
104 return frappe.db.get_all(
105 "DocShare",
106 fields=[
107 "`name`",
108 "`user`",
109 "`read`",
110 "`write`",
111 "`submit`",
112 "`share`",
113 "everyone",
114 "owner",
115 "creation",
116 ],
117 filters=dict(share_doctype=doctype, share_name=name),
118 )
119
120
121 def get_shared(doctype, user=None, rights=None):
122 """Get list of shared document names for given user and DocType.
123
124 :param doctype: DocType of which shared names are queried.
125 :param user: User for which shared names are queried.
126 :param rights: List of rights for which the document is shared. List of `read`, `write`, `share`"""
127
128 if not user:
129 user = frappe.session.user
130
131 if not rights:
132 rights = ["read"]
133
134 filters = [[right, "=", 1] for right in rights]
135 filters += [["share_doctype", "=", doctype]]
136 or_filters = [["user", "=", user]]
137 if user != "Guest":
138 or_filters += [["everyone", "=", 1]]
139
140 shared_docs = frappe.db.get_all(
141 "DocShare", fields=["share_name"], filters=filters, or_filters=or_filters
142 )
143
144 return [doc.share_name for doc in shared_docs]
145
146
147 def get_shared_doctypes(user=None):
148 """Return list of doctypes in which documents are shared for the given user."""
149 if not user:
150 user = frappe.session.user
151 table = frappe.qb.DocType("DocShare")
152 query = (
153 frappe.qb.from_(table)
154 .where((table.user == user) | (table.everyone == 1))
155 .select(table.share_doctype)
156 .distinct()
157 )
158 return query.run(pluck=True)
159
160
161 def get_share_name(doctype, name, user, everyone):
162 if cint(everyone):
163 share_name = frappe.db.get_value(
164 "DocShare", {"everyone": 1, "share_name": name, "share_doctype": doctype}
165 )
166 else:
167 share_name = frappe.db.get_value(
168 "DocShare", {"user": user, "share_name": name, "share_doctype": doctype}
169 )
170
171 return share_name
172
173
174 def check_share_permission(doctype, name):
175 """Check if the user can share with other users"""
176 if not frappe.has_permission(doctype, ptype="share", doc=name):
177 frappe.throw(
178 _("No permission to {0} {1} {2}").format("share", doctype, name), frappe.PermissionError
179 )
180
181
182 def notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):
183
184 if not (shared_by and doctype and doc_name) or everyone or not notify:
185 return
186
187 from frappe.utils import get_fullname
188
189 title = get_title(doctype, doc_name)
190
191 reference_user = get_fullname(frappe.session.user)
192 notification_message = _("{0} shared a document {1} {2} with you").format(
193 frappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)
194 )
195
196 notification_doc = {
197 "type": "Share",
198 "document_type": doctype,
199 "subject": notification_message,
200 "document_name": doc_name,
201 "from_user": frappe.session.user,
202 }
203
204 enqueue_create_notification(shared_by, notification_doc)
205
[end of frappe/share.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/share.py b/frappe/share.py
--- a/frappe/share.py
+++ b/frappe/share.py
@@ -93,7 +93,7 @@
if not (share.read or share.write or share.submit or share.share):
share.delete()
- share = {}
+ share = None
return share
| {"golden_diff": "diff --git a/frappe/share.py b/frappe/share.py\n--- a/frappe/share.py\n+++ b/frappe/share.py\n@@ -93,7 +93,7 @@\n \n \t\tif not (share.read or share.write or share.submit or share.share):\n \t\t\tshare.delete()\n-\t\t\tshare = {}\n+\t\t\tshare = None\n \n \treturn share\n", "issue": "Remove Share doesn't disappear \n## Description of the issue\r\nWhen the read Permission of a Share is removed by de-selecting the checkbox, then the corresponding DocShare is removed in the Backend, but the checkbox is automatically re-selected in the frontend. After a refresh, the share \r\n\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\nfrappe 14.x.x-develop\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Open an arbitrary Document\r\n2. Add a share with read permissions\r\n3. Remove the read permission by clicking the checkbox\r\n\r\n### Observed result\r\nThe checkbox is automatically re-selected\r\n\r\n### Expected result\r\nThe share entry disappears\r\n\r\n## Additional information\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe import _\nfrom frappe.desk.doctype.notification_log.notification_log import (\n\tenqueue_create_notification,\n\tget_title,\n\tget_title_html,\n)\nfrom frappe.desk.form.document_follow import follow_document\nfrom frappe.utils import cint\n\n\[email protected]()\ndef add(\n\tdoctype, name, user=None, read=1, write=0, submit=0, share=0, everyone=0, flags=None, notify=0\n):\n\t\"\"\"Share the given document with a user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not (flags or {}).get(\"ignore_share_permission\"):\n\t\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\n\tif share_name:\n\t\tdoc = frappe.get_doc(\"DocShare\", share_name)\n\telse:\n\t\tdoc = frappe.new_doc(\"DocShare\")\n\t\tdoc.update(\n\t\t\t{\"user\": user, \"share_doctype\": doctype, \"share_name\": name, \"everyone\": cint(everyone)}\n\t\t)\n\n\tif flags:\n\t\tdoc.flags.update(flags)\n\n\tdoc.update(\n\t\t{\n\t\t\t# always add read, since you are adding!\n\t\t\t\"read\": 1,\n\t\t\t\"write\": cint(write),\n\t\t\t\"submit\": cint(submit),\n\t\t\t\"share\": cint(share),\n\t\t}\n\t)\n\n\tdoc.save(ignore_permissions=True)\n\tnotify_assignment(user, doctype, name, everyone, notify=notify)\n\n\tif frappe.get_cached_value(\"User\", user, \"follow_shared_documents\"):\n\t\tfollow_document(doctype, name, user)\n\n\treturn doc\n\n\ndef remove(doctype, name, user, flags=None):\n\tshare_name = frappe.db.get_value(\n\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t)\n\n\tif share_name:\n\t\tfrappe.delete_doc(\"DocShare\", share_name, flags=flags)\n\n\[email protected]()\ndef set_permission(doctype, name, user, permission_to, value=1, everyone=0):\n\t\"\"\"Set share permission.\"\"\"\n\tcheck_share_permission(doctype, name)\n\n\tshare_name = get_share_name(doctype, name, user, everyone)\n\tvalue = int(value)\n\n\tif not share_name:\n\t\tif value:\n\t\t\tshare = add(doctype, name, user, everyone=everyone, **{permission_to: 1})\n\t\telse:\n\t\t\t# no share found, nothing to remove\n\t\t\tshare = {}\n\t\t\tpass\n\telse:\n\t\tshare = frappe.get_doc(\"DocShare\", share_name)\n\t\tshare.flags.ignore_permissions = True\n\t\tshare.set(permission_to, value)\n\n\t\tif not value:\n\t\t\t# un-set higher-order permissions too\n\t\t\tif 
permission_to == \"read\":\n\t\t\t\tshare.read = share.write = share.submit = share.share = 0\n\n\t\tshare.save()\n\n\t\tif not (share.read or share.write or share.submit or share.share):\n\t\t\tshare.delete()\n\t\t\tshare = {}\n\n\treturn share\n\n\[email protected]()\ndef get_users(doctype, name):\n\t\"\"\"Get list of users with which this document is shared\"\"\"\n\treturn frappe.db.get_all(\n\t\t\"DocShare\",\n\t\tfields=[\n\t\t\t\"`name`\",\n\t\t\t\"`user`\",\n\t\t\t\"`read`\",\n\t\t\t\"`write`\",\n\t\t\t\"`submit`\",\n\t\t\t\"`share`\",\n\t\t\t\"everyone\",\n\t\t\t\"owner\",\n\t\t\t\"creation\",\n\t\t],\n\t\tfilters=dict(share_doctype=doctype, share_name=name),\n\t)\n\n\ndef get_shared(doctype, user=None, rights=None):\n\t\"\"\"Get list of shared document names for given user and DocType.\n\n\t:param doctype: DocType of which shared names are queried.\n\t:param user: User for which shared names are queried.\n\t:param rights: List of rights for which the document is shared. List of `read`, `write`, `share`\"\"\"\n\n\tif not user:\n\t\tuser = frappe.session.user\n\n\tif not rights:\n\t\trights = [\"read\"]\n\n\tfilters = [[right, \"=\", 1] for right in rights]\n\tfilters += [[\"share_doctype\", \"=\", doctype]]\n\tor_filters = [[\"user\", \"=\", user]]\n\tif user != \"Guest\":\n\t\tor_filters += [[\"everyone\", \"=\", 1]]\n\n\tshared_docs = frappe.db.get_all(\n\t\t\"DocShare\", fields=[\"share_name\"], filters=filters, or_filters=or_filters\n\t)\n\n\treturn [doc.share_name for doc in shared_docs]\n\n\ndef get_shared_doctypes(user=None):\n\t\"\"\"Return list of doctypes in which documents are shared for the given user.\"\"\"\n\tif not user:\n\t\tuser = frappe.session.user\n\ttable = frappe.qb.DocType(\"DocShare\")\n\tquery = (\n\t\tfrappe.qb.from_(table)\n\t\t.where((table.user == user) | (table.everyone == 1))\n\t\t.select(table.share_doctype)\n\t\t.distinct()\n\t)\n\treturn query.run(pluck=True)\n\n\ndef get_share_name(doctype, name, user, everyone):\n\tif cint(everyone):\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"everyone\": 1, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\telse:\n\t\tshare_name = frappe.db.get_value(\n\t\t\t\"DocShare\", {\"user\": user, \"share_name\": name, \"share_doctype\": doctype}\n\t\t)\n\n\treturn share_name\n\n\ndef check_share_permission(doctype, name):\n\t\"\"\"Check if the user can share with other users\"\"\"\n\tif not frappe.has_permission(doctype, ptype=\"share\", doc=name):\n\t\tfrappe.throw(\n\t\t\t_(\"No permission to {0} {1} {2}\").format(\"share\", doctype, name), frappe.PermissionError\n\t\t)\n\n\ndef notify_assignment(shared_by, doctype, doc_name, everyone, notify=0):\n\n\tif not (shared_by and doctype and doc_name) or everyone or not notify:\n\t\treturn\n\n\tfrom frappe.utils import get_fullname\n\n\ttitle = get_title(doctype, doc_name)\n\n\treference_user = get_fullname(frappe.session.user)\n\tnotification_message = _(\"{0} shared a document {1} {2} with you\").format(\n\t\tfrappe.bold(reference_user), frappe.bold(doctype), get_title_html(title)\n\t)\n\n\tnotification_doc = {\n\t\t\"type\": \"Share\",\n\t\t\"document_type\": doctype,\n\t\t\"subject\": notification_message,\n\t\t\"document_name\": doc_name,\n\t\t\"from_user\": frappe.session.user,\n\t}\n\n\tenqueue_create_notification(shared_by, notification_doc)\n", "path": "frappe/share.py"}]} | 2,745 | 79 |
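The essence of the frappe fix above is returning `None` rather than an empty dict once the DocShare has been deleted. Below is a small, self-contained sketch of why that distinction can matter to a JavaScript client; the claim that the desk UI keys off the truthiness of the serialized response is an assumption, not something stated in the issue.

```python
import json

# An empty dict serializes to "{}", which is a truthy object once parsed in the
# browser, so a client that checks `if (share) ...` still believes a share exists.
# None serializes to "null", which is falsy, letting the checkbox stay cleared.
print(json.dumps({}))    # {}
print(json.dumps(None))  # null
```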
gh_patches_debug_13309 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1524 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
note should print when verbosity is set to at least verbose
We have the `note` function that prints its argument only on the final example, but it should probably print it for *every* example when `verbosity >= Verbosity.verbose`, so that people can actually see the tests as they run.
</issue>
<code>
[start of hypothesis-python/src/hypothesis/control.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import traceback
21
22 from hypothesis.errors import CleanupFailed, InvalidArgument, \
23 UnsatisfiedAssumption
24 from hypothesis.reporting import report
25 from hypothesis.utils.dynamicvariables import DynamicVariable
26
27 if False:
28 from typing import Any, AnyStr # noqa
29
30
31 def reject():
32 raise UnsatisfiedAssumption()
33
34
35 def assume(condition):
36 # type: (Any) -> bool
37 """Calling ``assume`` is like an :ref:`assert <python:assert>` that marks
38 the example as bad, rather than failing the test.
39
40 This allows you to specify properties that you *assume* will be
41 true, and let Hypothesis try to avoid similar examples in future.
42 """
43 if not condition:
44 raise UnsatisfiedAssumption()
45 return True
46
47
48 _current_build_context = DynamicVariable(None)
49
50
51 def current_build_context():
52 context = _current_build_context.value
53 if context is None:
54 raise InvalidArgument(
55 u'No build context registered')
56 return context
57
58
59 class BuildContext(object):
60
61 def __init__(self, data, is_final=False, close_on_capture=True):
62 self.data = data
63 self.tasks = []
64 self.is_final = is_final
65 self.close_on_capture = close_on_capture
66 self.close_on_del = False
67 self.notes = []
68
69 def __enter__(self):
70 self.assign_variable = _current_build_context.with_value(self)
71 self.assign_variable.__enter__()
72 return self
73
74 def __exit__(self, exc_type, exc_value, tb):
75 self.assign_variable.__exit__(exc_type, exc_value, tb)
76 if self.close() and exc_type is None:
77 raise CleanupFailed()
78
79 def local(self):
80 return _current_build_context.with_value(self)
81
82 def close(self):
83 any_failed = False
84 for task in self.tasks:
85 try:
86 task()
87 except BaseException:
88 any_failed = True
89 report(traceback.format_exc())
90 return any_failed
91
92
93 def cleanup(teardown):
94 """Register a function to be called when the current test has finished
95 executing. Any exceptions thrown in teardown will be printed but not
96 rethrown.
97
98 Inside a test this isn't very interesting, because you can just use
99 a finally block, but note that you can use this inside map, flatmap,
100 etc. in order to e.g. insist that a value is closed at the end.
101 """
102 context = _current_build_context.value
103 if context is None:
104 raise InvalidArgument(
105 u'Cannot register cleanup outside of build context')
106 context.tasks.append(teardown)
107
108
109 def note(value):
110 # type: (AnyStr) -> None
111 """Report this value in the final execution."""
112 context = _current_build_context.value
113 if context is None:
114 raise InvalidArgument(
115 'Cannot make notes outside of a test')
116 context.notes.append(value)
117 if context.is_final:
118 report(value)
119
120
121 def event(value):
122 # type: (AnyStr) -> None
123 """Record an event that occurred this test. Statistics on number of test
124 runs with each event will be reported at the end if you run Hypothesis in
125 statistics reporting mode.
126
127 Events should be strings or convertible to them.
128 """
129 context = _current_build_context.value
130 if context is None:
131 raise InvalidArgument(
132 'Cannot make record events outside of a test')
133
134 if context.data is not None:
135 context.data.note_event(value)
136
[end of hypothesis-python/src/hypothesis/control.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py
--- a/hypothesis-python/src/hypothesis/control.py
+++ b/hypothesis-python/src/hypothesis/control.py
@@ -19,6 +19,7 @@
import traceback
+from hypothesis import Verbosity, settings
from hypothesis.errors import CleanupFailed, InvalidArgument, \
UnsatisfiedAssumption
from hypothesis.reporting import report
@@ -114,7 +115,7 @@
raise InvalidArgument(
'Cannot make notes outside of a test')
context.notes.append(value)
- if context.is_final:
+ if context.is_final or settings.default.verbosity >= Verbosity.verbose:
report(value)
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py\n--- a/hypothesis-python/src/hypothesis/control.py\n+++ b/hypothesis-python/src/hypothesis/control.py\n@@ -19,6 +19,7 @@\n \n import traceback\n \n+from hypothesis import Verbosity, settings\n from hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\n from hypothesis.reporting import report\n@@ -114,7 +115,7 @@\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n- if context.is_final:\n+ if context.is_final or settings.default.verbosity >= Verbosity.verbose:\n report(value)\n", "issue": "note should print when verbosity is set to at least verbose \nWe have the `note` function that prints its argument only on the final example, but actually it should probably print it for *every* example when `verbosity >= Verbosity.verbose`, so that people can actually see the tests as they run. \n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport traceback\n\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\nfrom hypothesis.reporting import report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\nif False:\n from typing import Any, AnyStr # noqa\n\n\ndef reject():\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition):\n # type: (Any) -> bool\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'No build context registered')\n return context\n\n\nclass BuildContext(object):\n\n def __init__(self, data, is_final=False, close_on_capture=True):\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n self.notes = []\n\n def __enter__(self):\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def local(self):\n return _current_build_context.with_value(self)\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function 
to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'Cannot register cleanup outside of build context')\n context.tasks.append(teardown)\n\n\ndef note(value):\n # type: (AnyStr) -> None\n \"\"\"Report this value in the final execution.\"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n if context.is_final:\n report(value)\n\n\ndef event(value):\n # type: (AnyStr) -> None\n \"\"\"Record an event that occurred this test. Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make record events outside of a test')\n\n if context.data is not None:\n context.data.note_event(value)\n", "path": "hypothesis-python/src/hypothesis/control.py"}]} | 1,846 | 162 |
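With the one-line change above in place, a test run under `Verbosity.verbose` prints notes for every generated example rather than only the final one. A hypothetical usage sketch follows; the test name and message are made up for illustration.

```python
from hypothesis import Verbosity, given, note, settings, strategies as st

@settings(verbosity=Verbosity.verbose)
@given(st.integers())
def test_notes_are_shown_while_running(x):
    # After the patch, this note is reported for each example when running
    # at Verbosity.verbose (or higher), not just on the final example.
    note("trying x={}".format(x))
    assert x == x
```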
gh_patches_debug_342 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5236 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
qa_loader.py uses a fixed random seed every run
## Description
Always using the same seed makes it impossible to run `qa_loader.py` multiple times with the same database, as supposedly random values aren't, causing unique constraint violations.
## Steps to Reproduce
- Run the dev server with `make dev`
- Start a shell in the container with `docker exec -it securedrop-dev-0 bash`
- In that shell, run `./qa_loader --journalist-count 1 --source-count 1`
- Run the same command a second time.
## Expected Behavior
That you could keep adding random journalists and sources to the database.
## Actual Behavior
You get `sqlalchemy.exc.IntegrityError: (sqlite3.IntegrityError) UNIQUE constraint failed: journalists.username` because [`random.seed` is always called with the same value](https://github.com/freedomofpress/securedrop/blob/ec2220c3c2b9120d029b616d3a07647b175bc6ab/securedrop/qa_loader.py#L22).
</issue>
<code>
[start of securedrop/qa_loader.py]
1 #!/opt/venvs/securedrop-app-code/bin/python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import random
6 import string
7 import sys
8 from argparse import ArgumentParser
9 from datetime import datetime
10 from itertools import cycle
11 from os import path
12
13 from flask import current_app
14
15 from crypto_util import DICEWARE_SAFE_CHARS
16 from db import db
17 from journalist_app import create_app
18 from models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission
19 from sdconfig import config as sdconfig
20
21
22 random.seed("~(=^–^)") # mrow?
23
24
25 def random_bool():
26 return bool(random.getrandbits(1))
27
28
29 def random_chars(len, nullable, chars=string.ascii_letters):
30 if nullable and random_bool():
31 return None
32 else:
33 return "".join([random.choice(chars) for _ in range(len)])
34
35
36 def bool_or_none():
37 return random.choice([True, False, None])
38
39
40 def random_datetime(nullable):
41 if nullable and random_bool():
42 return None
43 else:
44 now = datetime.now()
45 return datetime(
46 year=random.randint(2013, now.year),
47 month=random.randint(1, now.month),
48 day=random.randint(1, now.day),
49 hour=random.randint(0, 23),
50 minute=random.randint(0, 59),
51 second=random.randint(0, 59),
52 microsecond=random.randint(0, 1000),
53 )
54
55
56 def positive_int(s):
57 i = int(s)
58 if i < 1:
59 raise ValueError("{} is not >= 1".format(s))
60 return i
61
62
63 def fraction(s):
64 f = float(s)
65 if 0 <= f <= 1:
66 return f
67 raise ValueError("{} should be a float between 0 and 1".format(s))
68
69
70 submissions = cycle(
71 [
72 "This is a test submission without markup!",
73 'This is a test submission with markup and characters such as \, \\, \', " and ". '
74 + "<strong>This text should not be bold</strong>!", # noqa: W605, E501
75 ]
76 )
77
78
79 replies = cycle(
80 [
81 "This is a test reply without markup!",
82 'This is a test reply with markup and characters such as \, \\, \', " and ". '
83 + "<strong>This text should not be bold</strong>!", # noqa: W605, E501
84 ]
85 )
86
87
88 class QaLoader(object):
89 def __init__(
90 self,
91 config,
92 journalist_count=10,
93 source_count=50,
94 submissions_per_source=1,
95 replies_per_source=1,
96 source_star_fraction=0.1,
97 source_reply_fraction=0.5,
98 ):
99 """
100 source_star_fraction and source_reply_fraction are simply the
101 fraction of sources starred or replied to.
102 """
103 self.config = config
104 self.app = create_app(config)
105
106 self.journalist_count = journalist_count
107 self.source_count = source_count
108 self.submissions_per_source = submissions_per_source
109 self.replies_per_source = replies_per_source
110 self.source_star_fraction = source_star_fraction
111 self.source_reply_fraction = source_reply_fraction
112
113 self.journalists = []
114 self.sources = []
115
116 def new_journalist(self):
117 # Make a diceware-like password
118 pw = " ".join(
119 [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]
120 )
121 journalist = Journalist(
122 username=random_chars(random.randint(3, 32), nullable=False),
123 password=pw,
124 is_admin=random_bool(),
125 )
126 if random_bool():
127 # to add legacy passwords back in
128 journalist.passphrase_hash = None
129 journalist.pw_salt = random_chars(32, nullable=False).encode("utf-8")
130 journalist.pw_hash = random_chars(64, nullable=False).encode("utf-8")
131
132 journalist.is_admin = bool_or_none()
133
134 journalist.is_totp = bool_or_none()
135 journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None
136 journalist.created_on = random_datetime(nullable=True)
137 journalist.last_access = random_datetime(nullable=True)
138
139 db.session.add(journalist)
140 db.session.flush()
141 self.journalists.append(journalist.id)
142
143 def new_source(self):
144 codename = current_app.crypto_util.genrandomid()
145 filesystem_id = current_app.crypto_util.hash_codename(codename)
146 journalist_designation = current_app.crypto_util.display_id()
147 source = Source(filesystem_id, journalist_designation)
148 db.session.add(source)
149 db.session.flush()
150
151 # Generate submissions directory and generate source key
152 os.mkdir(current_app.storage.path(source.filesystem_id))
153 current_app.crypto_util.genkeypair(source.filesystem_id, codename)
154
155 self.sources.append(source.id)
156
157 def new_submission(self, source_id):
158 source = Source.query.get(source_id)
159
160 source.interaction_count += 1
161 fpath = current_app.storage.save_message_submission(
162 source.filesystem_id,
163 source.interaction_count,
164 source.journalist_filename,
165 next(submissions),
166 )
167 submission = Submission(source, fpath)
168 db.session.add(submission)
169
170 source.pending = False
171 source.last_updated = datetime.utcnow()
172
173 db.session.flush()
174
175 def new_source_star(self, source_id):
176 source = Source.query.get(source_id)
177 star = SourceStar(source, bool_or_none())
178 db.session.add(star)
179
180 def new_reply(self, journalist_id, source_id):
181 source = Source.query.get(source_id)
182
183 journalist = Journalist.query.get(journalist_id)
184
185 source.interaction_count += 1
186 source.last_updated = datetime.utcnow()
187
188 fname = "{}-{}-reply.gpg".format(source.interaction_count, source.journalist_filename)
189 current_app.crypto_util.encrypt(
190 next(replies),
191 [
192 current_app.crypto_util.get_fingerprint(source.filesystem_id),
193 sdconfig.JOURNALIST_KEY
194 ],
195 current_app.storage.path(source.filesystem_id, fname),
196 )
197
198 reply = Reply(journalist, source, fname)
199 db.session.add(reply)
200 db.session.flush()
201
202 def new_journalist_login_attempt(self, journalist_id):
203 journalist = Journalist.query.get(journalist_id)
204 attempt = JournalistLoginAttempt(journalist)
205 attempt.timestamp = random_datetime(nullable=True)
206 db.session.add(attempt)
207
208 def load(self):
209 with self.app.app_context():
210 print("Creating {:d} journalists...".format(self.journalist_count))
211 for i in range(1, self.journalist_count + 1):
212 self.new_journalist()
213 if i % min(10, max(1, int(self.journalist_count / 10))) == 0:
214 sys.stdout.write("{}\r{}".format(" " * len(str(self.journalist_count + 1)), i))
215 print("\n")
216 db.session.commit()
217
218 print("Creating {:d} sources...".format(self.source_count))
219 for i in range(1, self.source_count + 1):
220 self.new_source()
221 if i % min(10, max(1, int(self.source_count / 10))) == 0:
222 sys.stdout.write("{}\r{}".format(" " * len(str(self.source_count + 1)), i))
223 print("\n")
224 db.session.commit()
225
226 print(
227 "Creating submissions ({:d} each) for each source...".format(
228 self.submissions_per_source
229 )
230 )
231 for sid in self.sources:
232 for _ in range(1, self.submissions_per_source + 1):
233 self.new_submission(sid)
234 db.session.commit()
235
236 print("Starring {:.2f}% of all sources...".format(self.source_star_fraction * 100))
237 for sid in random.sample(
238 self.sources, int(self.source_count * self.source_star_fraction)
239 ):
240 self.new_source_star(sid)
241 db.session.commit()
242
243 print(
244 "Creating replies ({:d} each) for {:.2f}% of sources...".format(
245 self.replies_per_source, self.source_reply_fraction * 100
246 )
247 )
248 for sid in random.sample(
249 self.sources, int(self.source_count * self.source_reply_fraction)
250 ):
251 jid = random.choice(self.journalists)
252 for _ in range(self.replies_per_source):
253 self.new_reply(jid, sid)
254 db.session.commit()
255
256 for jid in self.journalists:
257 self.new_journalist_login_attempt(jid)
258 db.session.commit()
259
260
261 def arg_parser():
262 parser = ArgumentParser(
263 path.basename(__file__), description="Loads data into the database for testing upgrades"
264 )
265 parser.add_argument(
266 "--journalist-count",
267 type=positive_int,
268 default=10,
269 help=("Number of journalists to create"),
270 )
271 parser.add_argument(
272 "--source-count", type=positive_int, default=50, help=("Number of sources to create")
273 )
274 parser.add_argument(
275 "--submissions-per-source",
276 type=positive_int,
277 default=1,
278 help=("Number of submissions to create for each source"),
279 )
280 parser.add_argument(
281 "--replies-per-source",
282 type=positive_int,
283 default=1,
284 help=("Number of replies to create for each source"),
285 )
286 parser.add_argument(
287 "--source-star-fraction",
288 type=fraction,
289 default=0.1,
290 help=("Fraction of sources to star"),
291 )
292 parser.add_argument(
293 "--source-reply-fraction",
294 type=fraction,
295 default=0.5,
296 help=("Fraction of sources to reply to"),
297 )
298 return parser
299
300
301 def main():
302 args = arg_parser().parse_args()
303 print("Loading data. This may take a while.")
304 QaLoader(
305 sdconfig,
306 args.journalist_count,
307 args.source_count,
308 args.submissions_per_source,
309 args.replies_per_source,
310 args.source_star_fraction,
311 args.source_reply_fraction,
312 ).load()
313
314
315 if __name__ == "__main__":
316 try:
317 main()
318 except KeyboardInterrupt:
319 print("") # for prompt on a newline
320 sys.exit(1)
321
[end of securedrop/qa_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/qa_loader.py b/securedrop/qa_loader.py
--- a/securedrop/qa_loader.py
+++ b/securedrop/qa_loader.py
@@ -19,9 +19,6 @@
from sdconfig import config as sdconfig
-random.seed("~(=^–^)") # mrow?
-
-
def random_bool():
return bool(random.getrandbits(1))
| {"golden_diff": "diff --git a/securedrop/qa_loader.py b/securedrop/qa_loader.py\n--- a/securedrop/qa_loader.py\n+++ b/securedrop/qa_loader.py\n@@ -19,9 +19,6 @@\n from sdconfig import config as sdconfig\n \n \n-random.seed(\"~(=^\u2013^)\") # mrow?\n-\n-\n def random_bool():\n return bool(random.getrandbits(1))\n", "issue": "qa_loader.py uses a fixed random seed every run\n## Description\r\n\r\nAlways using the same seed makes it impossible to run `qa_loader.py` multiple times with the same database, as supposedly random values aren't, causing unique constraint violations.\r\n\r\n## Steps to Reproduce\r\n\r\n- Run the dev server with `make dev`\r\n- Start a shell in the container with `docker exec -it securedrop-dev-0 bash`\r\n- In that shell, run `./qa_loader --journalist-count 1 --source-count 1`\r\n- Run the same command a second time.\r\n\r\n## Expected Behavior\r\n\r\nThat you could keep adding random journalists and sources to the database.\r\n\r\n## Actual Behavior\r\n\r\nYou get `sqlalchemy.exc.IntegrityError: (sqlite3.IntegrityError) UNIQUE constraint failed: journalists.username` because [`random.seed` is always called with the same value](https://github.com/freedomofpress/securedrop/blob/ec2220c3c2b9120d029b616d3a07647b175bc6ab/securedrop/qa_loader.py#L22).\n", "before_files": [{"content": "#!/opt/venvs/securedrop-app-code/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport random\nimport string\nimport sys\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom itertools import cycle\nfrom os import path\n\nfrom flask import current_app\n\nfrom crypto_util import DICEWARE_SAFE_CHARS\nfrom db import db\nfrom journalist_app import create_app\nfrom models import Journalist, JournalistLoginAttempt, Reply, Source, SourceStar, Submission\nfrom sdconfig import config as sdconfig\n\n\nrandom.seed(\"~(=^\u2013^)\") # mrow?\n\n\ndef random_bool():\n return bool(random.getrandbits(1))\n\n\ndef random_chars(len, nullable, chars=string.ascii_letters):\n if nullable and random_bool():\n return None\n else:\n return \"\".join([random.choice(chars) for _ in range(len)])\n\n\ndef bool_or_none():\n return random.choice([True, False, None])\n\n\ndef random_datetime(nullable):\n if nullable and random_bool():\n return None\n else:\n now = datetime.now()\n return datetime(\n year=random.randint(2013, now.year),\n month=random.randint(1, now.month),\n day=random.randint(1, now.day),\n hour=random.randint(0, 23),\n minute=random.randint(0, 59),\n second=random.randint(0, 59),\n microsecond=random.randint(0, 1000),\n )\n\n\ndef positive_int(s):\n i = int(s)\n if i < 1:\n raise ValueError(\"{} is not >= 1\".format(s))\n return i\n\n\ndef fraction(s):\n f = float(s)\n if 0 <= f <= 1:\n return f\n raise ValueError(\"{} should be a float between 0 and 1\".format(s))\n\n\nsubmissions = cycle(\n [\n \"This is a test submission without markup!\",\n 'This is a test submission with markup and characters such as \\, \\\\, \\', \" and \". '\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nreplies = cycle(\n [\n \"This is a test reply without markup!\",\n 'This is a test reply with markup and characters such as \\, \\\\, \\', \" and \". 
'\n + \"<strong>This text should not be bold</strong>!\", # noqa: W605, E501\n ]\n)\n\n\nclass QaLoader(object):\n def __init__(\n self,\n config,\n journalist_count=10,\n source_count=50,\n submissions_per_source=1,\n replies_per_source=1,\n source_star_fraction=0.1,\n source_reply_fraction=0.5,\n ):\n \"\"\"\n source_star_fraction and source_reply_fraction are simply the\n fraction of sources starred or replied to.\n \"\"\"\n self.config = config\n self.app = create_app(config)\n\n self.journalist_count = journalist_count\n self.source_count = source_count\n self.submissions_per_source = submissions_per_source\n self.replies_per_source = replies_per_source\n self.source_star_fraction = source_star_fraction\n self.source_reply_fraction = source_reply_fraction\n\n self.journalists = []\n self.sources = []\n\n def new_journalist(self):\n # Make a diceware-like password\n pw = \" \".join(\n [random_chars(3, nullable=False, chars=DICEWARE_SAFE_CHARS) for _ in range(7)]\n )\n journalist = Journalist(\n username=random_chars(random.randint(3, 32), nullable=False),\n password=pw,\n is_admin=random_bool(),\n )\n if random_bool():\n # to add legacy passwords back in\n journalist.passphrase_hash = None\n journalist.pw_salt = random_chars(32, nullable=False).encode(\"utf-8\")\n journalist.pw_hash = random_chars(64, nullable=False).encode(\"utf-8\")\n\n journalist.is_admin = bool_or_none()\n\n journalist.is_totp = bool_or_none()\n journalist.hotp_counter = random.randint(-1000, 1000) if random_bool() else None\n journalist.created_on = random_datetime(nullable=True)\n journalist.last_access = random_datetime(nullable=True)\n\n db.session.add(journalist)\n db.session.flush()\n self.journalists.append(journalist.id)\n\n def new_source(self):\n codename = current_app.crypto_util.genrandomid()\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n journalist_designation = current_app.crypto_util.display_id()\n source = Source(filesystem_id, journalist_designation)\n db.session.add(source)\n db.session.flush()\n\n # Generate submissions directory and generate source key\n os.mkdir(current_app.storage.path(source.filesystem_id))\n current_app.crypto_util.genkeypair(source.filesystem_id, codename)\n\n self.sources.append(source.id)\n\n def new_submission(self, source_id):\n source = Source.query.get(source_id)\n\n source.interaction_count += 1\n fpath = current_app.storage.save_message_submission(\n source.filesystem_id,\n source.interaction_count,\n source.journalist_filename,\n next(submissions),\n )\n submission = Submission(source, fpath)\n db.session.add(submission)\n\n source.pending = False\n source.last_updated = datetime.utcnow()\n\n db.session.flush()\n\n def new_source_star(self, source_id):\n source = Source.query.get(source_id)\n star = SourceStar(source, bool_or_none())\n db.session.add(star)\n\n def new_reply(self, journalist_id, source_id):\n source = Source.query.get(source_id)\n\n journalist = Journalist.query.get(journalist_id)\n\n source.interaction_count += 1\n source.last_updated = datetime.utcnow()\n\n fname = \"{}-{}-reply.gpg\".format(source.interaction_count, source.journalist_filename)\n current_app.crypto_util.encrypt(\n next(replies),\n [\n current_app.crypto_util.get_fingerprint(source.filesystem_id),\n sdconfig.JOURNALIST_KEY\n ],\n current_app.storage.path(source.filesystem_id, fname),\n )\n\n reply = Reply(journalist, source, fname)\n db.session.add(reply)\n db.session.flush()\n\n def new_journalist_login_attempt(self, journalist_id):\n journalist = 
Journalist.query.get(journalist_id)\n attempt = JournalistLoginAttempt(journalist)\n attempt.timestamp = random_datetime(nullable=True)\n db.session.add(attempt)\n\n def load(self):\n with self.app.app_context():\n print(\"Creating {:d} journalists...\".format(self.journalist_count))\n for i in range(1, self.journalist_count + 1):\n self.new_journalist()\n if i % min(10, max(1, int(self.journalist_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.journalist_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\"Creating {:d} sources...\".format(self.source_count))\n for i in range(1, self.source_count + 1):\n self.new_source()\n if i % min(10, max(1, int(self.source_count / 10))) == 0:\n sys.stdout.write(\"{}\\r{}\".format(\" \" * len(str(self.source_count + 1)), i))\n print(\"\\n\")\n db.session.commit()\n\n print(\n \"Creating submissions ({:d} each) for each source...\".format(\n self.submissions_per_source\n )\n )\n for sid in self.sources:\n for _ in range(1, self.submissions_per_source + 1):\n self.new_submission(sid)\n db.session.commit()\n\n print(\"Starring {:.2f}% of all sources...\".format(self.source_star_fraction * 100))\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_star_fraction)\n ):\n self.new_source_star(sid)\n db.session.commit()\n\n print(\n \"Creating replies ({:d} each) for {:.2f}% of sources...\".format(\n self.replies_per_source, self.source_reply_fraction * 100\n )\n )\n for sid in random.sample(\n self.sources, int(self.source_count * self.source_reply_fraction)\n ):\n jid = random.choice(self.journalists)\n for _ in range(self.replies_per_source):\n self.new_reply(jid, sid)\n db.session.commit()\n\n for jid in self.journalists:\n self.new_journalist_login_attempt(jid)\n db.session.commit()\n\n\ndef arg_parser():\n parser = ArgumentParser(\n path.basename(__file__), description=\"Loads data into the database for testing upgrades\"\n )\n parser.add_argument(\n \"--journalist-count\",\n type=positive_int,\n default=10,\n help=(\"Number of journalists to create\"),\n )\n parser.add_argument(\n \"--source-count\", type=positive_int, default=50, help=(\"Number of sources to create\")\n )\n parser.add_argument(\n \"--submissions-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of submissions to create for each source\"),\n )\n parser.add_argument(\n \"--replies-per-source\",\n type=positive_int,\n default=1,\n help=(\"Number of replies to create for each source\"),\n )\n parser.add_argument(\n \"--source-star-fraction\",\n type=fraction,\n default=0.1,\n help=(\"Fraction of sources to star\"),\n )\n parser.add_argument(\n \"--source-reply-fraction\",\n type=fraction,\n default=0.5,\n help=(\"Fraction of sources to reply to\"),\n )\n return parser\n\n\ndef main():\n args = arg_parser().parse_args()\n print(\"Loading data. This may take a while.\")\n QaLoader(\n sdconfig,\n args.journalist_count,\n args.source_count,\n args.submissions_per_source,\n args.replies_per_source,\n args.source_star_fraction,\n args.source_reply_fraction,\n ).load()\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\") # for prompt on a newline\n sys.exit(1)\n", "path": "securedrop/qa_loader.py"}]} | 3,885 | 92 |
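A minimal, standalone demonstration of the failure mode described in that issue; this is not SecureDrop code, just generic Python showing that two runs seeded with the same constant reproduce the same "random" usernames, which is what violates the unique constraint on a second invocation of the loader.

```python
import random
import string

def random_username(rng, length=8):
    return "".join(rng.choice(string.ascii_letters) for _ in range(length))

first_run = random.Random("fixed seed")   # stands in for the hard-coded random.seed(...)
second_run = random.Random("fixed seed")  # a later invocation of the loader

# Identical seeds -> identical usernames -> UNIQUE constraint failure on insert.
assert random_username(first_run) == random_username(second_run)
print("both runs generated:", random_username(random.Random("fixed seed")))
```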
gh_patches_debug_40821 | rasdani/github-patches | git_diff | falconry__falcon-2026 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement FloatConverter
Implement `FloatConverter` along the lines of [`IntConverter`](https://falcon.readthedocs.io/en/stable/api/routing.html#falcon.routing.IntConverter). Draw inspiration from `IntConverter`, or even find an efficient way to share code between the two!
Add the new converter to the list of [Built-in Converters](https://falcon.readthedocs.io/en/stable/api/routing.html#built-in-converters) under the `float` identifier.
Open questions: should we support converting `nan`, `inf` & `-inf` from path?
</issue>
<code>
[start of falcon/routing/converters.py]
1 # Copyright 2017 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import abc
16 from datetime import datetime
17 import uuid
18
19 __all__ = (
20 'BaseConverter',
21 'IntConverter',
22 'DateTimeConverter',
23 'UUIDConverter',
24 )
25
26
27 # PERF(kgriffs): Avoid an extra namespace lookup when using this function
28 strptime = datetime.strptime
29
30
31 class BaseConverter(metaclass=abc.ABCMeta):
32 """Abstract base class for URI template field converters."""
33
34 CONSUME_MULTIPLE_SEGMENTS = False
35 """When set to ``True`` it indicates that this converter will consume
36 multiple URL path segments. Currently a converter with
37 ``CONSUME_MULTIPLE_SEGMENTS=True`` must be at the end of the URL template
38 effectively meaning that it will consume all of the remaining URL path
39 segments.
40 """
41
42 @abc.abstractmethod # pragma: no cover
43 def convert(self, value):
44 """Convert a URI template field value to another format or type.
45
46 Args:
47 value (str or List[str]): Original string to convert.
48 If ``CONSUME_MULTIPLE_SEGMENTS=True`` this value is a
49 list of strings containing the path segments matched by
50 the converter.
51
52 Returns:
53 object: Converted field value, or ``None`` if the field
54 can not be converted.
55 """
56
57
58 def _consumes_multiple_segments(converter):
59 return getattr(converter, 'CONSUME_MULTIPLE_SEGMENTS', False)
60
61
62 class IntConverter(BaseConverter):
63 """Converts a field value to an int.
64
65 Identifier: `int`
66
67 Keyword Args:
68 num_digits (int): Require the value to have the given
69 number of digits.
70 min (int): Reject the value if it is less than this number.
71 max (int): Reject the value if it is greater than this number.
72 """
73
74 __slots__ = ('_num_digits', '_min', '_max')
75
76 def __init__(self, num_digits=None, min=None, max=None):
77 if num_digits is not None and num_digits < 1:
78 raise ValueError('num_digits must be at least 1')
79
80 self._num_digits = num_digits
81 self._min = min
82 self._max = max
83
84 def convert(self, value):
85 if self._num_digits is not None and len(value) != self._num_digits:
86 return None
87
88 # NOTE(kgriffs): int() will accept numbers with preceding or
89 # trailing whitespace, so we need to do our own check. Using
90 # strip() is faster than either a regex or a series of or'd
91 # membership checks via "in", esp. as the length of contiguous
92 # numbers in the value grows.
93 if value.strip() != value:
94 return None
95
96 try:
97 value = int(value)
98 except ValueError:
99 return None
100
101 if self._min is not None and value < self._min:
102 return None
103
104 if self._max is not None and value > self._max:
105 return None
106
107 return value
108
109
110 class DateTimeConverter(BaseConverter):
111 """Converts a field value to a datetime.
112
113 Identifier: `dt`
114
115 Keyword Args:
116 format_string (str): String used to parse the field value
117 into a datetime. Any format recognized by strptime() is
118 supported (default ``'%Y-%m-%dT%H:%M:%SZ'``).
119 """
120
121 __slots__ = ('_format_string',)
122
123 def __init__(self, format_string='%Y-%m-%dT%H:%M:%SZ'):
124 self._format_string = format_string
125
126 def convert(self, value):
127 try:
128 return strptime(value, self._format_string)
129 except ValueError:
130 return None
131
132
133 class UUIDConverter(BaseConverter):
134 """Converts a field value to a uuid.UUID.
135
136 Identifier: `uuid`
137
138 In order to be converted, the field value must consist of a
139 string of 32 hexadecimal digits, as defined in RFC 4122, Section 3.
140 Note, however, that hyphens and the URN prefix are optional.
141 """
142
143 def convert(self, value):
144 try:
145 return uuid.UUID(value)
146 except ValueError:
147 return None
148
149
150 class PathConverter(BaseConverter):
151 """Field converted used to match the rest of the path.
152
153 This field converter matches the remainder of the URL path,
154 returning it as a string.
155
156 This converter is currently supported only when used at the
157 end of the URL template.
158
159 The classic routing rules of falcon apply also to this converter:
160 considering the template ``'/foo/bar/{matched_path:path}'``, the path
161 ``'/foo/bar'`` will *not* match the route; ``'/foo/bar/'`` will
162 match, producing ``matched_path=''``, when
163 :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is ``False``
164 (the default), while it will *not* match when that option is ``True``.
165
166 (See also: :ref:`trailing_slash_in_path`)
167 """
168
169 CONSUME_MULTIPLE_SEGMENTS = True
170
171 def convert(self, value):
172 return '/'.join(value)
173
174
175 BUILTIN = (
176 ('int', IntConverter),
177 ('dt', DateTimeConverter),
178 ('uuid', UUIDConverter),
179 ('path', PathConverter),
180 )
181
[end of falcon/routing/converters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/falcon/routing/converters.py b/falcon/routing/converters.py
--- a/falcon/routing/converters.py
+++ b/falcon/routing/converters.py
@@ -14,6 +14,7 @@
import abc
from datetime import datetime
+from math import isfinite
import uuid
__all__ = (
@@ -21,6 +22,7 @@
'IntConverter',
'DateTimeConverter',
'UUIDConverter',
+ 'FloatConverter',
)
@@ -76,7 +78,6 @@
def __init__(self, num_digits=None, min=None, max=None):
if num_digits is not None and num_digits < 1:
raise ValueError('num_digits must be at least 1')
-
self._num_digits = num_digits
self._min = min
self._max = max
@@ -86,10 +87,10 @@
return None
# NOTE(kgriffs): int() will accept numbers with preceding or
- # trailing whitespace, so we need to do our own check. Using
- # strip() is faster than either a regex or a series of or'd
- # membership checks via "in", esp. as the length of contiguous
- # numbers in the value grows.
+ # trailing whitespace, so we need to do our own check. Using
+ # strip() is faster than either a regex or a series of or'd
+ # membership checks via "in", esp. as the length of contiguous
+ # numbers in the value grows.
if value.strip() != value:
return None
@@ -98,15 +99,52 @@
except ValueError:
return None
+ return self._validate_min_max_value(value)
+
+ def _validate_min_max_value(self, value):
if self._min is not None and value < self._min:
return None
-
if self._max is not None and value > self._max:
return None
return value
+class FloatConverter(IntConverter):
+ """Converts a field value to an float.
+
+ Identifier: `float`
+ Keyword Args:
+ min (float): Reject the value if it is less than this number.
+ max (float): Reject the value if it is greater than this number.
+ finite (bool) : Determines whether or not to only match ordinary
+ finite numbers (default: ``True``). Set to ``False`` to match
+ nan, inf, and -inf in addition to finite numbers.
+ """
+
+ __slots__ = '_finite'
+
+ def __init__(self, min: float = None, max: float = None, finite: bool = True):
+ self._min = min
+ self._max = max
+ self._finite = finite if finite is not None else True
+
+ def convert(self, value: str):
+ if value.strip() != value:
+ return None
+
+ try:
+ value = float(value)
+
+ if self._finite and not isfinite(value):
+ return None
+
+ except ValueError:
+ return None
+
+ return self._validate_min_max_value(value)
+
+
class DateTimeConverter(BaseConverter):
"""Converts a field value to a datetime.
@@ -176,5 +214,6 @@
('int', IntConverter),
('dt', DateTimeConverter),
('uuid', UUIDConverter),
+ ('float', FloatConverter),
('path', PathConverter),
)
| {"golden_diff": "diff --git a/falcon/routing/converters.py b/falcon/routing/converters.py\n--- a/falcon/routing/converters.py\n+++ b/falcon/routing/converters.py\n@@ -14,6 +14,7 @@\n \n import abc\n from datetime import datetime\n+from math import isfinite\n import uuid\n \n __all__ = (\n@@ -21,6 +22,7 @@\n 'IntConverter',\n 'DateTimeConverter',\n 'UUIDConverter',\n+ 'FloatConverter',\n )\n \n \n@@ -76,7 +78,6 @@\n def __init__(self, num_digits=None, min=None, max=None):\n if num_digits is not None and num_digits < 1:\n raise ValueError('num_digits must be at least 1')\n-\n self._num_digits = num_digits\n self._min = min\n self._max = max\n@@ -86,10 +87,10 @@\n return None\n \n # NOTE(kgriffs): int() will accept numbers with preceding or\n- # trailing whitespace, so we need to do our own check. Using\n- # strip() is faster than either a regex or a series of or'd\n- # membership checks via \"in\", esp. as the length of contiguous\n- # numbers in the value grows.\n+ # trailing whitespace, so we need to do our own check. Using\n+ # strip() is faster than either a regex or a series of or'd\n+ # membership checks via \"in\", esp. as the length of contiguous\n+ # numbers in the value grows.\n if value.strip() != value:\n return None\n \n@@ -98,15 +99,52 @@\n except ValueError:\n return None\n \n+ return self._validate_min_max_value(value)\n+\n+ def _validate_min_max_value(self, value):\n if self._min is not None and value < self._min:\n return None\n-\n if self._max is not None and value > self._max:\n return None\n \n return value\n \n \n+class FloatConverter(IntConverter):\n+ \"\"\"Converts a field value to an float.\n+\n+ Identifier: `float`\n+ Keyword Args:\n+ min (float): Reject the value if it is less than this number.\n+ max (float): Reject the value if it is greater than this number.\n+ finite (bool) : Determines whether or not to only match ordinary\n+ finite numbers (default: ``True``). Set to ``False`` to match\n+ nan, inf, and -inf in addition to finite numbers.\n+ \"\"\"\n+\n+ __slots__ = '_finite'\n+\n+ def __init__(self, min: float = None, max: float = None, finite: bool = True):\n+ self._min = min\n+ self._max = max\n+ self._finite = finite if finite is not None else True\n+\n+ def convert(self, value: str):\n+ if value.strip() != value:\n+ return None\n+\n+ try:\n+ value = float(value)\n+\n+ if self._finite and not isfinite(value):\n+ return None\n+\n+ except ValueError:\n+ return None\n+\n+ return self._validate_min_max_value(value)\n+\n+\n class DateTimeConverter(BaseConverter):\n \"\"\"Converts a field value to a datetime.\n \n@@ -176,5 +214,6 @@\n ('int', IntConverter),\n ('dt', DateTimeConverter),\n ('uuid', UUIDConverter),\n+ ('float', FloatConverter),\n ('path', PathConverter),\n )\n", "issue": "Implement FloatConverter\nImplement `FloatConverter` along the lines of [`IntConverter`](https://falcon.readthedocs.io/en/stable/api/routing.html#falcon.routing.IntConverter). 
Draw inspiration from `IntConverter`, or even find an efficient way to share code between the two!\r\n\r\nAdd the new converter to the list of [Built-in Converters](https://falcon.readthedocs.io/en/stable/api/routing.html#built-in-converters) under the `float` identifier.\r\n\r\nOpen questions: should we support converting `nan`, `inf` & `-inf` from path?\n", "before_files": [{"content": "# Copyright 2017 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport abc\nfrom datetime import datetime\nimport uuid\n\n__all__ = (\n 'BaseConverter',\n 'IntConverter',\n 'DateTimeConverter',\n 'UUIDConverter',\n)\n\n\n# PERF(kgriffs): Avoid an extra namespace lookup when using this function\nstrptime = datetime.strptime\n\n\nclass BaseConverter(metaclass=abc.ABCMeta):\n \"\"\"Abstract base class for URI template field converters.\"\"\"\n\n CONSUME_MULTIPLE_SEGMENTS = False\n \"\"\"When set to ``True`` it indicates that this converter will consume\n multiple URL path segments. Currently a converter with\n ``CONSUME_MULTIPLE_SEGMENTS=True`` must be at the end of the URL template\n effectively meaning that it will consume all of the remaining URL path\n segments.\n \"\"\"\n\n @abc.abstractmethod # pragma: no cover\n def convert(self, value):\n \"\"\"Convert a URI template field value to another format or type.\n\n Args:\n value (str or List[str]): Original string to convert.\n If ``CONSUME_MULTIPLE_SEGMENTS=True`` this value is a\n list of strings containing the path segments matched by\n the converter.\n\n Returns:\n object: Converted field value, or ``None`` if the field\n can not be converted.\n \"\"\"\n\n\ndef _consumes_multiple_segments(converter):\n return getattr(converter, 'CONSUME_MULTIPLE_SEGMENTS', False)\n\n\nclass IntConverter(BaseConverter):\n \"\"\"Converts a field value to an int.\n\n Identifier: `int`\n\n Keyword Args:\n num_digits (int): Require the value to have the given\n number of digits.\n min (int): Reject the value if it is less than this number.\n max (int): Reject the value if it is greater than this number.\n \"\"\"\n\n __slots__ = ('_num_digits', '_min', '_max')\n\n def __init__(self, num_digits=None, min=None, max=None):\n if num_digits is not None and num_digits < 1:\n raise ValueError('num_digits must be at least 1')\n\n self._num_digits = num_digits\n self._min = min\n self._max = max\n\n def convert(self, value):\n if self._num_digits is not None and len(value) != self._num_digits:\n return None\n\n # NOTE(kgriffs): int() will accept numbers with preceding or\n # trailing whitespace, so we need to do our own check. Using\n # strip() is faster than either a regex or a series of or'd\n # membership checks via \"in\", esp. 
as the length of contiguous\n # numbers in the value grows.\n if value.strip() != value:\n return None\n\n try:\n value = int(value)\n except ValueError:\n return None\n\n if self._min is not None and value < self._min:\n return None\n\n if self._max is not None and value > self._max:\n return None\n\n return value\n\n\nclass DateTimeConverter(BaseConverter):\n \"\"\"Converts a field value to a datetime.\n\n Identifier: `dt`\n\n Keyword Args:\n format_string (str): String used to parse the field value\n into a datetime. Any format recognized by strptime() is\n supported (default ``'%Y-%m-%dT%H:%M:%SZ'``).\n \"\"\"\n\n __slots__ = ('_format_string',)\n\n def __init__(self, format_string='%Y-%m-%dT%H:%M:%SZ'):\n self._format_string = format_string\n\n def convert(self, value):\n try:\n return strptime(value, self._format_string)\n except ValueError:\n return None\n\n\nclass UUIDConverter(BaseConverter):\n \"\"\"Converts a field value to a uuid.UUID.\n\n Identifier: `uuid`\n\n In order to be converted, the field value must consist of a\n string of 32 hexadecimal digits, as defined in RFC 4122, Section 3.\n Note, however, that hyphens and the URN prefix are optional.\n \"\"\"\n\n def convert(self, value):\n try:\n return uuid.UUID(value)\n except ValueError:\n return None\n\n\nclass PathConverter(BaseConverter):\n \"\"\"Field converted used to match the rest of the path.\n\n This field converter matches the remainder of the URL path,\n returning it as a string.\n\n This converter is currently supported only when used at the\n end of the URL template.\n\n The classic routing rules of falcon apply also to this converter:\n considering the template ``'/foo/bar/{matched_path:path}'``, the path\n ``'/foo/bar'`` will *not* match the route; ``'/foo/bar/'`` will\n match, producing ``matched_path=''``, when\n :attr:`~falcon.RequestOptions.strip_url_path_trailing_slash` is ``False``\n (the default), while it will *not* match when that option is ``True``.\n\n (See also: :ref:`trailing_slash_in_path`)\n \"\"\"\n\n CONSUME_MULTIPLE_SEGMENTS = True\n\n def convert(self, value):\n return '/'.join(value)\n\n\nBUILTIN = (\n ('int', IntConverter),\n ('dt', DateTimeConverter),\n ('uuid', UUIDConverter),\n ('path', PathConverter),\n)\n", "path": "falcon/routing/converters.py"}]} | 2,398 | 793 |
gh_patches_debug_3172 | rasdani/github-patches | git_diff | napalm-automation__napalm-1985 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Linting issue with napalm/base/validate.py:165:10 E721 do not compare types, for exact checks use `is` / `is not`, for instance checks use `isinstance()` [pycodestyle]
Last working build
```
Collecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))
Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)
```
failing build
```
Collecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))
Obtaining dependency information for pycodestyle from https://files.pythonhosted.org/packages/31/c2/e1508ed4395793f69e40fd8c6b5a690e1d568e649aae9492076a7b6befb4/pycodestyle-2.11.0-py2.py3-none-any.whl.metadata
Downloading pycodestyle-2.11.0-py2.py3-none-any.whl.metadata (4.5 kB)
```
Guessing we have a new linting rule in pycodestyle 2.11
_Originally posted by @bewing in https://github.com/napalm-automation/napalm/issues/1983#issuecomment-1660467649_
Will provide a fix shortly - please assign this one to me.
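For reference, the change E721 asks for is mechanical: replace exact `type(...) == type(...)` comparisons with `isinstance()` checks. A minimal sketch of the pattern (the actual fix lands in `compare()` in `napalm/base/validate.py`; note that `isinstance` also accepts list subclasses, which is acceptable here):

```python
def both_are_lists(src, dst):
    # Old style, flagged by pycodestyle 2.11 as E721:
    #     return type(src) == type(dst) == list
    # Lint-clean equivalent:
    return isinstance(src, list) and isinstance(dst, list)
```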
</issue>
<code>
[start of napalm/base/validate.py]
1 """
2 Validation methods for the NAPALM base.
3
4 See: https://napalm.readthedocs.io/en/latest/validate.html
5 """
6 import yaml
7 import copy
8 import re
9 from typing import Dict, List, Union, TypeVar, Optional, TYPE_CHECKING
10
11 if TYPE_CHECKING:
12 from napalm.base import NetworkDriver
13 from napalm.base.exceptions import ValidationException
14 from napalm.base import models
15
16
17 # We put it here to compile it only once
18 numeric_compare_regex = re.compile(r"^(<|>|<=|>=|==|!=)(\d+(\.\d+){0,1})$")
19
20
21 def _get_validation_file(validation_file: str) -> Dict[str, Dict]:
22 try:
23 with open(validation_file, "r") as stream:
24 try:
25 validation_source = yaml.safe_load(stream)
26 except yaml.YAMLError as exc:
27 raise ValidationException(exc)
28 except IOError:
29 raise ValidationException("File {0} not found.".format(validation_file))
30 return validation_source
31
32
33 def _mode(mode_string: str) -> Dict[str, bool]:
34 mode = {"strict": False}
35
36 for m in mode_string.split():
37 if m not in mode.keys():
38 raise ValidationException("mode '{}' not recognized".format(m))
39 mode[m] = True
40 return mode
41
42
43 def _compare_getter_list(
44 src: List, dst: List, mode: Dict[str, bool]
45 ) -> models.ListValidationResult:
46 result: models.ListValidationResult = {
47 "complies": True,
48 "present": [],
49 "missing": [],
50 "extra": [],
51 }
52 for src_element in src:
53 found = False
54
55 i = 0
56 while True:
57 try:
58 intermediate_match = compare(src_element, dst[i])
59 if (
60 isinstance(intermediate_match, dict)
61 and intermediate_match["complies"]
62 or not isinstance(intermediate_match, dict)
63 and intermediate_match
64 ):
65 found = True
66 result["present"].append(src_element)
67 dst.pop(i)
68 break
69 else:
70 i += 1
71 except IndexError:
72 break
73
74 if not found:
75 result["complies"] = False
76 result["missing"].append(src_element)
77
78 if mode["strict"] and dst:
79 result["extra"] = dst
80 result["complies"] = False
81
82 return result
83
84
85 def _compare_getter_dict(
86 src: Dict[str, List], dst: Dict[str, List], mode: Dict[str, bool]
87 ) -> models.DictValidationResult:
88 result: models.DictValidationResult = {
89 "complies": True,
90 "present": {},
91 "missing": [],
92 "extra": [],
93 }
94 dst = copy.deepcopy(dst) # Otherwise we are going to modify a "live" object
95
96 for key, src_element in src.items():
97 try:
98 dst_element = dst.pop(key)
99 result["present"][key] = {}
100 intermediate_result = compare(src_element, dst_element)
101
102 if isinstance(intermediate_result, dict):
103 nested = True
104
105 complies = intermediate_result["complies"]
106
107 if not complies:
108 result["present"][key]["diff"] = intermediate_result
109 else:
110 complies = intermediate_result
111 nested = False
112 if not complies:
113 result["present"][key]["expected_value"] = src_element
114 result["present"][key]["actual_value"] = dst_element
115
116 if not complies:
117 result["complies"] = False
118
119 result["present"][key]["complies"] = complies
120 result["present"][key]["nested"] = nested
121 except KeyError:
122 result["missing"].append(key)
123 result["complies"] = False
124
125 if mode["strict"] and dst:
126 result["extra"] = list(dst.keys())
127 result["complies"] = False
128
129 return result
130
131
132 CompareInput = TypeVar("CompareInput", str, Dict, List)
133
134
135 def compare(
136 src: CompareInput, dst: CompareInput
137 ) -> Union[bool, models.DictValidationResult, models.ListValidationResult]:
138 if isinstance(src, str):
139 src = str(src)
140
141 if isinstance(src, dict):
142 mode = _mode(src.pop("_mode", ""))
143 if "list" in src.keys():
144 if not isinstance(dst, list):
145 # This can happen with nested lists
146 return False
147
148 return _compare_getter_list(src["list"], dst, mode)
149 return _compare_getter_dict(src, dst, mode)
150
151 elif isinstance(src, str):
152 if src.startswith("<") or src.startswith(">"):
153 cmp_result = _compare_numeric(src, dst)
154 return cmp_result
155 elif "<->" in src and len(src.split("<->")) == 2:
156 cmp_result = _compare_range(src, dst)
157 return cmp_result
158 else:
159 m = re.search(src, str(dst))
160 if m:
161 return bool(m)
162 else:
163 return src == dst
164
165 elif type(src) == type(dst) == list:
166 pairs = zip(src, dst)
167 diff_lists = [
168 [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]
169 for x, y in pairs
170 if x != y
171 ]
172 return empty_tree(diff_lists)
173
174 else:
175 return src == dst
176
177
178 def _compare_numeric(src_num: str, dst_num: str) -> bool:
179 """Compare numerical values. You can use '<%d','>%d'."""
180 dst_num = float(dst_num)
181
182 match = numeric_compare_regex.match(src_num)
183 if not match:
184 error = "Failed numeric comparison. Collected: {}. Expected: {}".format(
185 dst_num, src_num
186 )
187 raise ValueError(error)
188
189 operand = {
190 "<": "__lt__",
191 ">": "__gt__",
192 ">=": "__ge__",
193 "<=": "__le__",
194 "==": "__eq__",
195 "!=": "__ne__",
196 }
197 return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))
198
199
200 def _compare_range(src_num: str, dst_num: str) -> bool:
201 """Compare value against a range of values. You can use '%d<->%d'."""
202 dst_num = float(dst_num)
203
204 match = src_num.split("<->")
205 if len(match) != 2:
206 error = "Failed range comparison. Collected: {}. Expected: {}".format(
207 dst_num, src_num
208 )
209 raise ValueError(error)
210
211 if float(match[0]) <= dst_num <= float(match[1]):
212 return True
213 else:
214 return False
215
216
217 def empty_tree(input_list: List) -> bool:
218 """Recursively iterate through values in nested lists."""
219 for item in input_list:
220 if not isinstance(item, list) or not empty_tree(item):
221 return False
222 return True
223
224
225 def compliance_report(
226 cls: "NetworkDriver",
227 validation_file: Optional[str] = None,
228 validation_source: Optional[str] = None,
229 ) -> models.ReportResult:
230 report: models.ReportResult = {} # type: ignore
231 if validation_file:
232 validation_source = _get_validation_file(validation_file) # type: ignore
233
234 # Otherwise we are going to modify a "live" object
235 validation_source = copy.deepcopy(validation_source)
236
237 assert isinstance(validation_source, list), validation_source
238
239 for validation_check in validation_source:
240 for getter, expected_results in validation_check.items():
241 if getter == "get_config":
242 # TBD
243 pass
244 else:
245 key = expected_results.pop("_name", "") or getter
246
247 try:
248 kwargs = expected_results.pop("_kwargs", {})
249 actual_results = getattr(cls, getter)(**kwargs)
250 report[key] = compare(expected_results, actual_results)
251 except NotImplementedError:
252 report[key] = {"skipped": True, "reason": "NotImplemented"}
253
254 complies = all([e.get("complies", True) for e in report.values()])
255 report["skipped"] = [k for k, v in report.items() if v.get("skipped", False)]
256 report["complies"] = complies
257 return report
258
[end of napalm/base/validate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napalm/base/validate.py b/napalm/base/validate.py
--- a/napalm/base/validate.py
+++ b/napalm/base/validate.py
@@ -162,7 +162,7 @@
else:
return src == dst
- elif type(src) == type(dst) == list:
+ elif isinstance(src, list) and isinstance(dst, list):
pairs = zip(src, dst)
diff_lists = [
[(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]
| {"golden_diff": "diff --git a/napalm/base/validate.py b/napalm/base/validate.py\n--- a/napalm/base/validate.py\n+++ b/napalm/base/validate.py\n@@ -162,7 +162,7 @@\n else:\n return src == dst\n \n- elif type(src) == type(dst) == list:\n+ elif isinstance(src, list) and isinstance(dst, list):\n pairs = zip(src, dst)\n diff_lists = [\n [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]\n", "issue": "Linting issue with napalm/base/validate.py:165:10 E721 do not compare types, for exact checks use `is` / `is not`, for instance checks use `isinstance()` [pycodestyle]\nLast working build\r\n```\r\nCollecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))\r\n Downloading pycodestyle-2.10.0-py2.py3-none-any.whl (41 kB)\r\n```\r\n\r\nfailing build\r\n```\r\nCollecting pycodestyle (from flake8-import-order==0.18.2->-r requirements-dev.txt (line 4))\r\n Obtaining dependency information for pycodestyle from https://files.pythonhosted.org/packages/31/c2/e1508ed4395793f69e40fd8c6b5a690e1d568e649aae9492076a7b6befb4/pycodestyle-2.11.0-py2.py3-none-any.whl.metadata\r\n Downloading pycodestyle-2.11.0-py2.py3-none-any.whl.metadata (4.5 kB)\r\n```\r\nGuessing we have a new linting rule in pycodestyle 2.11\r\n\r\n_Originally posted by @bewing in https://github.com/napalm-automation/napalm/issues/1983#issuecomment-1660467649_\r\n\r\nWill provide a fix shortly - please assign this one to me.\n", "before_files": [{"content": "\"\"\"\nValidation methods for the NAPALM base.\n\nSee: https://napalm.readthedocs.io/en/latest/validate.html\n\"\"\"\nimport yaml\nimport copy\nimport re\nfrom typing import Dict, List, Union, TypeVar, Optional, TYPE_CHECKING\n\nif TYPE_CHECKING:\n from napalm.base import NetworkDriver\nfrom napalm.base.exceptions import ValidationException\nfrom napalm.base import models\n\n\n# We put it here to compile it only once\nnumeric_compare_regex = re.compile(r\"^(<|>|<=|>=|==|!=)(\\d+(\\.\\d+){0,1})$\")\n\n\ndef _get_validation_file(validation_file: str) -> Dict[str, Dict]:\n try:\n with open(validation_file, \"r\") as stream:\n try:\n validation_source = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise ValidationException(exc)\n except IOError:\n raise ValidationException(\"File {0} not found.\".format(validation_file))\n return validation_source\n\n\ndef _mode(mode_string: str) -> Dict[str, bool]:\n mode = {\"strict\": False}\n\n for m in mode_string.split():\n if m not in mode.keys():\n raise ValidationException(\"mode '{}' not recognized\".format(m))\n mode[m] = True\n return mode\n\n\ndef _compare_getter_list(\n src: List, dst: List, mode: Dict[str, bool]\n) -> models.ListValidationResult:\n result: models.ListValidationResult = {\n \"complies\": True,\n \"present\": [],\n \"missing\": [],\n \"extra\": [],\n }\n for src_element in src:\n found = False\n\n i = 0\n while True:\n try:\n intermediate_match = compare(src_element, dst[i])\n if (\n isinstance(intermediate_match, dict)\n and intermediate_match[\"complies\"]\n or not isinstance(intermediate_match, dict)\n and intermediate_match\n ):\n found = True\n result[\"present\"].append(src_element)\n dst.pop(i)\n break\n else:\n i += 1\n except IndexError:\n break\n\n if not found:\n result[\"complies\"] = False\n result[\"missing\"].append(src_element)\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = dst\n result[\"complies\"] = False\n\n return result\n\n\ndef _compare_getter_dict(\n src: Dict[str, List], dst: Dict[str, List], mode: Dict[str, bool]\n) -> 
models.DictValidationResult:\n result: models.DictValidationResult = {\n \"complies\": True,\n \"present\": {},\n \"missing\": [],\n \"extra\": [],\n }\n dst = copy.deepcopy(dst) # Otherwise we are going to modify a \"live\" object\n\n for key, src_element in src.items():\n try:\n dst_element = dst.pop(key)\n result[\"present\"][key] = {}\n intermediate_result = compare(src_element, dst_element)\n\n if isinstance(intermediate_result, dict):\n nested = True\n\n complies = intermediate_result[\"complies\"]\n\n if not complies:\n result[\"present\"][key][\"diff\"] = intermediate_result\n else:\n complies = intermediate_result\n nested = False\n if not complies:\n result[\"present\"][key][\"expected_value\"] = src_element\n result[\"present\"][key][\"actual_value\"] = dst_element\n\n if not complies:\n result[\"complies\"] = False\n\n result[\"present\"][key][\"complies\"] = complies\n result[\"present\"][key][\"nested\"] = nested\n except KeyError:\n result[\"missing\"].append(key)\n result[\"complies\"] = False\n\n if mode[\"strict\"] and dst:\n result[\"extra\"] = list(dst.keys())\n result[\"complies\"] = False\n\n return result\n\n\nCompareInput = TypeVar(\"CompareInput\", str, Dict, List)\n\n\ndef compare(\n src: CompareInput, dst: CompareInput\n) -> Union[bool, models.DictValidationResult, models.ListValidationResult]:\n if isinstance(src, str):\n src = str(src)\n\n if isinstance(src, dict):\n mode = _mode(src.pop(\"_mode\", \"\"))\n if \"list\" in src.keys():\n if not isinstance(dst, list):\n # This can happen with nested lists\n return False\n\n return _compare_getter_list(src[\"list\"], dst, mode)\n return _compare_getter_dict(src, dst, mode)\n\n elif isinstance(src, str):\n if src.startswith(\"<\") or src.startswith(\">\"):\n cmp_result = _compare_numeric(src, dst)\n return cmp_result\n elif \"<->\" in src and len(src.split(\"<->\")) == 2:\n cmp_result = _compare_range(src, dst)\n return cmp_result\n else:\n m = re.search(src, str(dst))\n if m:\n return bool(m)\n else:\n return src == dst\n\n elif type(src) == type(dst) == list:\n pairs = zip(src, dst)\n diff_lists = [\n [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])]\n for x, y in pairs\n if x != y\n ]\n return empty_tree(diff_lists)\n\n else:\n return src == dst\n\n\ndef _compare_numeric(src_num: str, dst_num: str) -> bool:\n \"\"\"Compare numerical values. You can use '<%d','>%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = numeric_compare_regex.match(src_num)\n if not match:\n error = \"Failed numeric comparison. Collected: {}. Expected: {}\".format(\n dst_num, src_num\n )\n raise ValueError(error)\n\n operand = {\n \"<\": \"__lt__\",\n \">\": \"__gt__\",\n \">=\": \"__ge__\",\n \"<=\": \"__le__\",\n \"==\": \"__eq__\",\n \"!=\": \"__ne__\",\n }\n return getattr(dst_num, operand[match.group(1)])(float(match.group(2)))\n\n\ndef _compare_range(src_num: str, dst_num: str) -> bool:\n \"\"\"Compare value against a range of values. You can use '%d<->%d'.\"\"\"\n dst_num = float(dst_num)\n\n match = src_num.split(\"<->\")\n if len(match) != 2:\n error = \"Failed range comparison. Collected: {}. 
Expected: {}\".format(\n dst_num, src_num\n )\n raise ValueError(error)\n\n if float(match[0]) <= dst_num <= float(match[1]):\n return True\n else:\n return False\n\n\ndef empty_tree(input_list: List) -> bool:\n \"\"\"Recursively iterate through values in nested lists.\"\"\"\n for item in input_list:\n if not isinstance(item, list) or not empty_tree(item):\n return False\n return True\n\n\ndef compliance_report(\n cls: \"NetworkDriver\",\n validation_file: Optional[str] = None,\n validation_source: Optional[str] = None,\n) -> models.ReportResult:\n report: models.ReportResult = {} # type: ignore\n if validation_file:\n validation_source = _get_validation_file(validation_file) # type: ignore\n\n # Otherwise we are going to modify a \"live\" object\n validation_source = copy.deepcopy(validation_source)\n\n assert isinstance(validation_source, list), validation_source\n\n for validation_check in validation_source:\n for getter, expected_results in validation_check.items():\n if getter == \"get_config\":\n # TBD\n pass\n else:\n key = expected_results.pop(\"_name\", \"\") or getter\n\n try:\n kwargs = expected_results.pop(\"_kwargs\", {})\n actual_results = getattr(cls, getter)(**kwargs)\n report[key] = compare(expected_results, actual_results)\n except NotImplementedError:\n report[key] = {\"skipped\": True, \"reason\": \"NotImplemented\"}\n\n complies = all([e.get(\"complies\", True) for e in report.values()])\n report[\"skipped\"] = [k for k, v in report.items() if v.get(\"skipped\", False)]\n report[\"complies\"] = complies\n return report\n", "path": "napalm/base/validate.py"}]} | 3,353 | 127 |
gh_patches_debug_29035 | rasdani/github-patches | git_diff | streamlit__streamlit-3495 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auto-Complete Password with Password Manager
### Summary
When filling a username/password combo from a password manager, it only fills the username, not the password, because the HTML password field has the property `autocomplete="new-password"`. Removing this property results in the expected behaviour.
### Steps to reproduce
Code snippet:
```python
import streamlit as st
sender_email = st.text_input('username', value='user', key='email')
sender_password = st.text_input('password', type='password', key='password')
if st.button('Login'):
st.success('Logged in')
else:
st.write('Not logged in')
```
1. Run the code
2. Notice that 1Password 7 offers the saved password for the user field, but only suggests a new password for the password field
3. Fill saved password from the user field. Notice that the password field does not get filled.
4. Reload the page.
5. Remove the `autocomplete="new-password"` property with the web inspector.
6. Fill the password from the user field. Notice that both fields get filled (as expected).
**Expected behavior:**
The password manager fills both fields
**Actual behavior:**
The password manager only fills the user field.
### Is this a regression?
Don't know.
### Debug info
- Streamlit version: 0.79.0
- Python version: 3.9.2
- Using Conda? PipEnv? PyEnv? Pex? => Conda
- OS version: macOS 10.15.7
- Browser version: Tested with Safari 14.0.3 and Chrome 89.0.4389.90
Is there a workaround to remove this HTML property? Or does it require a code change within Streamlit?
For my tool the user needs to provide API credentials for the backend (username/password), which are stored in a password manager (1Password 7 in this case). It is inconvenient having to copy the password manually.
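For what it's worth, the direction ultimately taken (see the accepted patch below) is to expose the attribute as a new `autocomplete` keyword on `st.text_input`, so a login form can opt into the standard browser hints. The `username` and `current-password` tokens are standard HTML autocomplete values; the call below is only a sketch of how the new argument might be used, not existing API at the time of this report:

```python
import streamlit as st

sender_email = st.text_input('username', value='user', key='email',
                             autocomplete='username')
sender_password = st.text_input('password', type='password', key='password',
                                autocomplete='current-password')
```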
Thank you!
</issue>
<code>
[start of lib/streamlit/elements/text_widgets.py]
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import cast
16
17 import streamlit
18 from streamlit.errors import StreamlitAPIException
19 from streamlit.proto.TextArea_pb2 import TextArea as TextAreaProto
20 from streamlit.proto.TextInput_pb2 import TextInput as TextInputProto
21 from streamlit.state.widgets import register_widget
22 from .form import current_form_id
23 from .utils import check_callback_rules, check_session_state_rules
24
25
26 class TextWidgetsMixin:
27 def text_input(
28 self,
29 label,
30 value="",
31 max_chars=None,
32 key=None,
33 type="default",
34 help=None,
35 on_change=None,
36 args=None,
37 kwargs=None,
38 ):
39 """Display a single-line text input widget.
40
41 Parameters
42 ----------
43 label : str
44 A short label explaining to the user what this input is for.
45 value : any
46 The text value of this widget when it first renders. This will be
47 cast to str internally.
48 max_chars : int or None
49 Max number of characters allowed in text input.
50 key : str
51 An optional string to use as the unique key for the widget.
52 If this is omitted, a key will be generated for the widget
53 based on its content. Multiple widgets of the same type may
54 not share the same key.
55 type : str
56 The type of the text input. This can be either "default" (for
57 a regular text input), or "password" (for a text input that
58 masks the user's typed value). Defaults to "default".
59 help : str
60 An optional tooltip that gets displayed next to the input.
61 on_change : callable
62 An optional callback invoked when this text_input's value changes.
63 args : tuple
64 An optional tuple of args to pass to the callback.
65 kwargs : dict
66 An optional dict of kwargs to pass to the callback.
67
68 Returns
69 -------
70 str
71 The current value of the text input widget.
72
73 Example
74 -------
75 >>> title = st.text_input('Movie title', 'Life of Brian')
76 >>> st.write('The current movie title is', title)
77
78 """
79 check_callback_rules(self.dg, on_change)
80 check_session_state_rules(default_value=None if value == "" else value, key=key)
81
82 text_input_proto = TextInputProto()
83 text_input_proto.label = label
84 text_input_proto.default = str(value)
85 text_input_proto.form_id = current_form_id(self.dg)
86 if help is not None:
87 text_input_proto.help = help
88
89 if max_chars is not None:
90 text_input_proto.max_chars = max_chars
91
92 if type == "default":
93 text_input_proto.type = TextInputProto.DEFAULT
94 elif type == "password":
95 text_input_proto.type = TextInputProto.PASSWORD
96 else:
97 raise StreamlitAPIException(
98 "'%s' is not a valid text_input type. Valid types are 'default' and 'password'."
99 % type
100 )
101
102 def deserialize_text_input(ui_value) -> str:
103 return str(ui_value if ui_value is not None else value)
104
105 current_value, set_frontend_value = register_widget(
106 "text_input",
107 text_input_proto,
108 user_key=key,
109 on_change_handler=on_change,
110 args=args,
111 kwargs=kwargs,
112 deserializer=deserialize_text_input,
113 serializer=lambda x: x,
114 )
115
116 if set_frontend_value:
117 text_input_proto.value = current_value
118 text_input_proto.set_value = True
119
120 self.dg._enqueue("text_input", text_input_proto)
121 return current_value
122
123 def text_area(
124 self,
125 label,
126 value="",
127 height=None,
128 max_chars=None,
129 key=None,
130 help=None,
131 on_change=None,
132 args=None,
133 kwargs=None,
134 ):
135 """Display a multi-line text input widget.
136
137 Parameters
138 ----------
139 label : str
140 A short label explaining to the user what this input is for.
141 value : any
142 The text value of this widget when it first renders. This will be
143 cast to str internally.
144 height : int or None
145 Desired height of the UI element expressed in pixels. If None, a
146 default height is used.
147 max_chars : int or None
148 Maximum number of characters allowed in text area.
149 key : str
150 An optional string to use as the unique key for the widget.
151 If this is omitted, a key will be generated for the widget
152 based on its content. Multiple widgets of the same type may
153 not share the same key.
154 help : str
155 An optional tooltip that gets displayed next to the textarea.
156 on_change : callable
157 An optional callback invoked when this text_area's value changes.
158 args : tuple
159 An optional tuple of args to pass to the callback.
160 kwargs : dict
161 An optional dict of kwargs to pass to the callback.
162
163 Returns
164 -------
165 str
166 The current value of the text input widget.
167
168 Example
169 -------
170 >>> txt = st.text_area('Text to analyze', '''
171 ... It was the best of times, it was the worst of times, it was
172 ... the age of wisdom, it was the age of foolishness, it was
173 ... the epoch of belief, it was the epoch of incredulity, it
174 ... was the season of Light, it was the season of Darkness, it
175 ... was the spring of hope, it was the winter of despair, (...)
176 ... ''')
177 >>> st.write('Sentiment:', run_sentiment_analysis(txt))
178
179 """
180 check_callback_rules(self.dg, on_change)
181 check_session_state_rules(default_value=None if value == "" else value, key=key)
182
183 text_area_proto = TextAreaProto()
184 text_area_proto.label = label
185 text_area_proto.default = str(value)
186 text_area_proto.form_id = current_form_id(self.dg)
187 if help is not None:
188 text_area_proto.help = help
189
190 if height is not None:
191 text_area_proto.height = height
192
193 if max_chars is not None:
194 text_area_proto.max_chars = max_chars
195
196 def deserialize_text_area(ui_value) -> str:
197 return str(ui_value if ui_value is not None else value)
198
199 current_value, set_frontend_value = register_widget(
200 "text_area",
201 text_area_proto,
202 user_key=key,
203 on_change_handler=on_change,
204 args=args,
205 kwargs=kwargs,
206 deserializer=deserialize_text_area,
207 serializer=lambda x: x,
208 )
209
210 if set_frontend_value:
211 text_area_proto.value = current_value
212 text_area_proto.set_value = True
213
214 self.dg._enqueue("text_area", text_area_proto)
215 return current_value
216
217 @property
218 def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
219 """Get our DeltaGenerator."""
220 return cast("streamlit.delta_generator.DeltaGenerator", self)
221
[end of lib/streamlit/elements/text_widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/elements/text_widgets.py b/lib/streamlit/elements/text_widgets.py
--- a/lib/streamlit/elements/text_widgets.py
+++ b/lib/streamlit/elements/text_widgets.py
@@ -32,6 +32,7 @@
key=None,
type="default",
help=None,
+ autocomplete=None,
on_change=None,
args=None,
kwargs=None,
@@ -58,6 +59,11 @@
masks the user's typed value). Defaults to "default".
help : str
An optional tooltip that gets displayed next to the input.
+ autocomplete : str
+ An optional value that will be passed to the <input> element's
+ autocomplete property. If unspecified, this value will be set to
+ "new-password" for "password" inputs, and the empty string for
+ "default" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete
on_change : callable
An optional callback invoked when this text_input's value changes.
args : tuple
@@ -99,6 +105,12 @@
% type
)
+ # Marshall the autocomplete param. If unspecified, this will be
+ # set to "new-password" for password inputs.
+ if autocomplete is None:
+ autocomplete = "new-password" if type == "password" else ""
+ text_input_proto.autocomplete = autocomplete
+
def deserialize_text_input(ui_value) -> str:
return str(ui_value if ui_value is not None else value)
| {"golden_diff": "diff --git a/lib/streamlit/elements/text_widgets.py b/lib/streamlit/elements/text_widgets.py\n--- a/lib/streamlit/elements/text_widgets.py\n+++ b/lib/streamlit/elements/text_widgets.py\n@@ -32,6 +32,7 @@\n key=None,\n type=\"default\",\n help=None,\n+ autocomplete=None,\n on_change=None,\n args=None,\n kwargs=None,\n@@ -58,6 +59,11 @@\n masks the user's typed value). Defaults to \"default\".\n help : str\n An optional tooltip that gets displayed next to the input.\n+ autocomplete : str\n+ An optional value that will be passed to the <input> element's\n+ autocomplete property. If unspecified, this value will be set to\n+ \"new-password\" for \"password\" inputs, and the empty string for\n+ \"default\" inputs. For more details, see https://developer.mozilla.org/en-US/docs/Web/HTML/Attributes/autocomplete\n on_change : callable\n An optional callback invoked when this text_input's value changes.\n args : tuple\n@@ -99,6 +105,12 @@\n % type\n )\n \n+ # Marshall the autocomplete param. If unspecified, this will be\n+ # set to \"new-password\" for password inputs.\n+ if autocomplete is None:\n+ autocomplete = \"new-password\" if type == \"password\" else \"\"\n+ text_input_proto.autocomplete = autocomplete\n+\n def deserialize_text_input(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n", "issue": "Auto-Complete Password with Password Manager\n### Summary\r\n\r\nWhen filling a username/password combo from a password manager, it only fills the username, not the password, because the HTML password field has the property `autocomplete=\"new-password\"`. Removing this property results in the expected behaviour.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```python\r\nimport streamlit as st\r\n\r\nsender_email = st.text_input('username', value='user', key='email')\r\nsender_password = st.text_input('password', type='password', key='password')\r\nif st.button('Login'):\r\n st.success('Logged in')\r\nelse:\r\n st.write('Not logged in')\r\n```\r\n\r\n1. Run the code\r\n2. Notice that 1Password 7 displays for the user field the saved password and for the password field only suggests a new password\r\n3. Fill saved password from the user field. Notice that the password field does not get filled.\r\n4. Reload the page.\r\n5. Remove the `autocomplete=\"new-password\"` property with the web inspector.\r\n6. Fill the password from the user field. Notice that both fields get filled (as expected).\r\n\r\n**Expected behavior:**\r\n\r\nThe password manager fills both fields\r\n\r\n**Actual behavior:**\r\n\r\nThe password manager only fills the user field.\r\n\r\n### Is this a regression?\r\n\r\nDon't know.\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 0.79.0\r\n- Python version: 3.9.2\r\n- Using Conda? PipEnv? PyEnv? Pex? => Conda\r\n- OS version: macOS 10.15.7\r\n- Browser version: Tested with Safari 14.0.3 and Chrome 89.0.4389.90\r\n\r\n\r\n\r\nIs there a workaround to remove this HTML property? Or does it required a code change within Streamlit?\r\nFor my tool the user needs to provide a API authentication for the backend (username/password), which will be stored in a password manager (1Password 7) in this case. 
It is inconvenient having to copy the password manually.\r\n\r\nThank you!\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.TextArea_pb2 import TextArea as TextAreaProto\nfrom streamlit.proto.TextInput_pb2 import TextInput as TextInputProto\nfrom streamlit.state.widgets import register_widget\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass TextWidgetsMixin:\n def text_input(\n self,\n label,\n value=\"\",\n max_chars=None,\n key=None,\n type=\"default\",\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a single-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n max_chars : int or None\n Max number of characters allowed in text input.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n type : str\n The type of the text input. This can be either \"default\" (for\n a regular text input), or \"password\" (for a text input that\n masks the user's typed value). Defaults to \"default\".\n help : str\n An optional tooltip that gets displayed next to the input.\n on_change : callable\n An optional callback invoked when this text_input's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> title = st.text_input('Movie title', 'Life of Brian')\n >>> st.write('The current movie title is', title)\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if value == \"\" else value, key=key)\n\n text_input_proto = TextInputProto()\n text_input_proto.label = label\n text_input_proto.default = str(value)\n text_input_proto.form_id = current_form_id(self.dg)\n if help is not None:\n text_input_proto.help = help\n\n if max_chars is not None:\n text_input_proto.max_chars = max_chars\n\n if type == \"default\":\n text_input_proto.type = TextInputProto.DEFAULT\n elif type == \"password\":\n text_input_proto.type = TextInputProto.PASSWORD\n else:\n raise StreamlitAPIException(\n \"'%s' is not a valid text_input type. 
Valid types are 'default' and 'password'.\"\n % type\n )\n\n def deserialize_text_input(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n\n current_value, set_frontend_value = register_widget(\n \"text_input\",\n text_input_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_text_input,\n serializer=lambda x: x,\n )\n\n if set_frontend_value:\n text_input_proto.value = current_value\n text_input_proto.set_value = True\n\n self.dg._enqueue(\"text_input\", text_input_proto)\n return current_value\n\n def text_area(\n self,\n label,\n value=\"\",\n height=None,\n max_chars=None,\n key=None,\n help=None,\n on_change=None,\n args=None,\n kwargs=None,\n ):\n \"\"\"Display a multi-line text input widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this input is for.\n value : any\n The text value of this widget when it first renders. This will be\n cast to str internally.\n height : int or None\n Desired height of the UI element expressed in pixels. If None, a\n default height is used.\n max_chars : int or None\n Maximum number of characters allowed in text area.\n key : str\n An optional string to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the textarea.\n on_change : callable\n An optional callback invoked when this text_area's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n\n Returns\n -------\n str\n The current value of the text input widget.\n\n Example\n -------\n >>> txt = st.text_area('Text to analyze', '''\n ... It was the best of times, it was the worst of times, it was\n ... the age of wisdom, it was the age of foolishness, it was\n ... the epoch of belief, it was the epoch of incredulity, it\n ... was the season of Light, it was the season of Darkness, it\n ... was the spring of hope, it was the winter of despair, (...)\n ... ''')\n >>> st.write('Sentiment:', run_sentiment_analysis(txt))\n\n \"\"\"\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if value == \"\" else value, key=key)\n\n text_area_proto = TextAreaProto()\n text_area_proto.label = label\n text_area_proto.default = str(value)\n text_area_proto.form_id = current_form_id(self.dg)\n if help is not None:\n text_area_proto.help = help\n\n if height is not None:\n text_area_proto.height = height\n\n if max_chars is not None:\n text_area_proto.max_chars = max_chars\n\n def deserialize_text_area(ui_value) -> str:\n return str(ui_value if ui_value is not None else value)\n\n current_value, set_frontend_value = register_widget(\n \"text_area\",\n text_area_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_text_area,\n serializer=lambda x: x,\n )\n\n if set_frontend_value:\n text_area_proto.value = current_value\n text_area_proto.set_value = True\n\n self.dg._enqueue(\"text_area\", text_area_proto)\n return current_value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/text_widgets.py"}]} | 3,147 | 346 |
gh_patches_debug_29113 | rasdani/github-patches | git_diff | oppia__oppia-8108 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CircleCi tests are failing unexpectedly while downloading node modules
Screenshot:

Link:
https://circleci.com/gh/oppia/oppia/22306?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link
</issue>
<code>
[start of scripts/setup.py]
1 # Copyright 2019 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the 'License');
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an 'AS-IS' BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Python execution environent set up for all scripts."""
16
17 from __future__ import absolute_import # pylint: disable=import-only-modules
18 from __future__ import unicode_literals # pylint: disable=import-only-modules
19
20 import argparse
21 import os
22 import sys
23 import tarfile
24
25 import python_utils
26
27 from . import clean
28 from . import common
29
30 _PARSER = argparse.ArgumentParser(description="""
31 Python execution environment set up for all scripts.
32 """)
33
34
35 def create_directory(directory_path):
36 """Creates a new directory. Does not do anything if directory already
37 exists.
38
39 Args:
40 directory_path: str. Directory path to be created.
41 """
42 if os.path.exists(directory_path):
43 return
44 os.makedirs(directory_path)
45
46
47 # This function takes a command for python as its only input.
48 # It checks this input for a specific version of python and returns false
49 # if it does not match the expected prefix.
50 def test_python_version():
51 running_python_version = '{0[0]}.{0[1]}'.format(sys.version_info)
52 if running_python_version != '2.7':
53 python_utils.PRINT('Please use Python2.7. Exiting...')
54 # If OS is Windows, print helpful error message about adding Python to
55 # path.
56 os_info = os.uname()
57 if os_info[0] != 'Darwin' and os_info[0] != 'Linux':
58 common.print_each_string_after_two_new_lines([
59 'It looks like you are using Windows. If you have Python '
60 'installed,',
61 'make sure it is in your PATH and that PYTHONPATH is set.',
62 'If you have two versions of Python (ie, Python 2.7 and 3), '
63 'specify 2.7 before other versions of Python when setting the '
64 'PATH.',
65 'Here are some helpful articles:',
66 'http://docs.python-guide.org/en/latest/starting/install/win/',
67 'https://stackoverflow.com/questions/3701646/how-to-add-to-the-'
68 'pythonpath-in-windows-7'])
69 # Exit when no suitable Python environment can be found.
70 raise Exception
71
72
73 def download_and_install_package(url_to_retrieve, filename):
74 """Downloads and installs package in Oppia tools directory.
75
76 Args:
77 url_to_retrieve: string. The url from which package is to be
78 downloaded.
79 filename: string. The name of the tar file.
80 """
81 python_utils.url_retrieve(url_to_retrieve, filename=filename)
82 tar = tarfile.open(name=filename)
83 tar.extractall(path=common.OPPIA_TOOLS_DIR)
84 tar.close()
85 os.remove(filename)
86
87
88 def main(args=None):
89 """Runs the script to setup Oppia."""
90 unused_parsed_args = _PARSER.parse_args(args=args)
91 test_python_version()
92
93 # The second option allows this script to also be run from deployment
94 # folders.
95 if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(
96 'deploy-'):
97 python_utils.PRINT('')
98 python_utils.PRINT(
99 'WARNING This script should be run from the oppia/ root folder.')
100 python_utils.PRINT('')
101 raise Exception
102
103 # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR. This
104     # is necessary because COMMON_DIR (or subsequent variables which refer to it)
105 # may use it in a situation where relative paths won't work as expected(such
106 # as $PYTHONPATH).
107 create_directory(common.OPPIA_TOOLS_DIR)
108 create_directory(common.THIRD_PARTY_DIR)
109 create_directory(common.NODE_MODULES_PATH)
110
111 os_info = os.uname()
112 if os_info[0] != 'Darwin' and os_info[0] != 'Linux':
113 # Node is a requirement for all installation scripts. Here, we check if
114 # the OS supports node.js installation; if not, we exit with an error.
115 common.print_each_string_after_two_new_lines([
116 'WARNING: Unsupported OS for installation of node.js.',
117 'If you are running this script on Windows, see the instructions',
118 'here regarding installation of node.js:',
119 'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Windows'
120 '%29',
121 'STATUS: Installation completed except for node.js. Exiting.'])
122 raise Exception
123
124 # Download and install node.js.
125 python_utils.PRINT(
126 'Checking if node.js is installed in %s' % common.OPPIA_TOOLS_DIR)
127 if not os.path.exists(common.NODE_PATH):
128 python_utils.PRINT('Installing Node.js')
129 if os_info[0] == 'Darwin':
130 if os_info[4] == 'x86_64':
131 node_file_name = 'node-v10.15.3-darwin-x64'
132 else:
133 node_file_name = 'node-v10.15.3-darwin-x86'
134 elif os_info[0] == 'Linux':
135 if os_info[4] == 'x86_64':
136 node_file_name = 'node-v10.15.3-linux-x64'
137 else:
138 node_file_name = 'node-v10.15.3-linux-x86'
139
140 download_and_install_package(
141 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,
142 'node-download.tgz')
143 os.rename(
144 os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),
145 common.NODE_PATH)
146
147 # Change ownership of node_modules.
148 # Note: on some machines, these commands seem to take quite a long time.
149 common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)
150 common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)
151
152 # Download and install yarn.
153 python_utils.PRINT(
154 'Checking if yarn is installed in %s' % common.OPPIA_TOOLS_DIR)
155 if not os.path.exists(common.YARN_PATH):
156 python_utils.PRINT('Removing package-lock.json')
157 clean.delete_file('package-lock.json')
158 common.print_each_string_after_two_new_lines([
159 'Installing yarn',
160 'WARNING: Please note that Oppia uses Yarn to manage node packages',
161 'do *NOT* use npm. For more information on how to use yarn,',
162 'visit https://yarnpkg.com/en/docs/usage.'])
163
164 # NB: Update .yarnrc if the yarn version below is changed.
165 yarn_version = 'v1.17.3'
166 yarn_file_name = 'yarn-%s.tar.gz' % yarn_version
167 download_and_install_package(
168 'https://github.com/yarnpkg/yarn/releases/download/%s/%s'
169 % (yarn_version, yarn_file_name), yarn_file_name)
170
171 # Adjust path to support the default Chrome locations for Unix, Windows and
172 # Mac OS.
173 if os.environ.get('TRAVIS'):
174 chrome_bin = '/usr/bin/chromium-browser'
175 elif os.path.isfile('/usr/bin/google-chrome'):
176 # Unix.
177 chrome_bin = '/usr/bin/google-chrome'
178 elif os.path.isfile('/usr/bin/chromium-browser'):
179 # Unix.
180 chrome_bin = '/usr/bin/chromium-browser'
181 elif os.path.isfile(
182 '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
183 # Windows.
184 chrome_bin = (
185 '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
186 elif os.path.isfile(
187 '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):
188 # WSL.
189 chrome_bin = (
190 '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')
191 elif os.path.isfile(
192 '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):
193 # Mac OS.
194 chrome_bin = (
195 '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')
196 else:
197 python_utils.PRINT('Chrome is not found, stopping ...')
198 raise Exception
199
200 os.environ['CHROME_BIN'] = chrome_bin
201 python_utils.PRINT('Environment setup completed.')
202
203
204 # The 'no coverage' pragma is used as this line is un-testable. This is because
205 # it will only be called when setup.py is used as a script.
206 if __name__ == '__main__': # pragma: no cover
207 main()
208
[end of scripts/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/setup.py b/scripts/setup.py
--- a/scripts/setup.py
+++ b/scripts/setup.py
@@ -19,6 +19,7 @@
import argparse
import os
+import subprocess
import sys
import tarfile
@@ -130,12 +131,12 @@
if os_info[4] == 'x86_64':
node_file_name = 'node-v10.15.3-darwin-x64'
else:
- node_file_name = 'node-v10.15.3-darwin-x86'
+ node_file_name = 'node-v10.15.3'
elif os_info[0] == 'Linux':
if os_info[4] == 'x86_64':
node_file_name = 'node-v10.15.3-linux-x64'
else:
- node_file_name = 'node-v10.15.3-linux-x86'
+ node_file_name = 'node-v10.15.3'
download_and_install_package(
'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,
@@ -144,6 +145,11 @@
os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),
common.NODE_PATH)
+ if node_file_name == 'node-v10.15.3':
+ with common.CD(common.NODE_PATH):
+ subprocess.check_call(['./configure'])
+ subprocess.check_call(['make'])
+
# Change ownership of node_modules.
# Note: on some machines, these commands seem to take quite a long time.
common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)
| {"golden_diff": "diff --git a/scripts/setup.py b/scripts/setup.py\n--- a/scripts/setup.py\n+++ b/scripts/setup.py\n@@ -19,6 +19,7 @@\n \n import argparse\n import os\n+import subprocess\n import sys\n import tarfile\n \n@@ -130,12 +131,12 @@\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-darwin-x64'\n else:\n- node_file_name = 'node-v10.15.3-darwin-x86'\n+ node_file_name = 'node-v10.15.3'\n elif os_info[0] == 'Linux':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-linux-x64'\n else:\n- node_file_name = 'node-v10.15.3-linux-x86'\n+ node_file_name = 'node-v10.15.3'\n \n download_and_install_package(\n 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,\n@@ -144,6 +145,11 @@\n os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),\n common.NODE_PATH)\n \n+ if node_file_name == 'node-v10.15.3':\n+ with common.CD(common.NODE_PATH):\n+ subprocess.check_call(['./configure'])\n+ subprocess.check_call(['make'])\n+\n # Change ownership of node_modules.\n # Note: on some machines, these commands seem to take quite a long time.\n common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)\n", "issue": "CircleCi tests are failing unexpectedly while downloading node modules\nScreenshot:\r\n\r\n\r\nLink:\r\nhttps://circleci.com/gh/oppia/oppia/22306?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link\r\n\nCircleCi tests are failing unexpectedly while downloading node modules\nScreenshot:\r\n\r\n\r\nLink:\r\nhttps://circleci.com/gh/oppia/oppia/22306?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-link\r\n\n", "before_files": [{"content": "# Copyright 2019 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the 'License');\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an 'AS-IS' BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Python execution environent set up for all scripts.\"\"\"\n\nfrom __future__ import absolute_import # pylint: disable=import-only-modules\nfrom __future__ import unicode_literals # pylint: disable=import-only-modules\n\nimport argparse\nimport os\nimport sys\nimport tarfile\n\nimport python_utils\n\nfrom . import clean\nfrom . import common\n\n_PARSER = argparse.ArgumentParser(description=\"\"\"\nPython execution environent set up for all scripts.\n\"\"\")\n\n\ndef create_directory(directory_path):\n \"\"\"Creates a new directory. Does not do anything if directory already\n exists.\n\n Args:\n directory_path: str. Directory path to be created.\n \"\"\"\n if os.path.exists(directory_path):\n return\n os.makedirs(directory_path)\n\n\n# This function takes a command for python as its only input.\n# It checks this input for a specific version of python and returns false\n# if it does not match the expected prefix.\ndef test_python_version():\n running_python_version = '{0[0]}.{0[1]}'.format(sys.version_info)\n if running_python_version != '2.7':\n python_utils.PRINT('Please use Python2.7. 
Exiting...')\n # If OS is Windows, print helpful error message about adding Python to\n # path.\n os_info = os.uname()\n if os_info[0] != 'Darwin' and os_info[0] != 'Linux':\n common.print_each_string_after_two_new_lines([\n 'It looks like you are using Windows. If you have Python '\n 'installed,',\n 'make sure it is in your PATH and that PYTHONPATH is set.',\n 'If you have two versions of Python (ie, Python 2.7 and 3), '\n 'specify 2.7 before other versions of Python when setting the '\n 'PATH.',\n 'Here are some helpful articles:',\n 'http://docs.python-guide.org/en/latest/starting/install/win/',\n 'https://stackoverflow.com/questions/3701646/how-to-add-to-the-'\n 'pythonpath-in-windows-7'])\n # Exit when no suitable Python environment can be found.\n raise Exception\n\n\ndef download_and_install_package(url_to_retrieve, filename):\n \"\"\"Downloads and installs package in Oppia tools directory.\n\n Args:\n url_to_retrieve: string. The url from which package is to be\n downloaded.\n filename: string. The name of the tar file.\n \"\"\"\n python_utils.url_retrieve(url_to_retrieve, filename=filename)\n tar = tarfile.open(name=filename)\n tar.extractall(path=common.OPPIA_TOOLS_DIR)\n tar.close()\n os.remove(filename)\n\n\ndef main(args=None):\n \"\"\"Runs the script to setup Oppia.\"\"\"\n unused_parsed_args = _PARSER.parse_args(args=args)\n test_python_version()\n\n # The second option allows this script to also be run from deployment\n # folders.\n if not os.getcwd().endswith('oppia') and not os.getcwd().endswith(\n 'deploy-'):\n python_utils.PRINT('')\n python_utils.PRINT(\n 'WARNING This script should be run from the oppia/ root folder.')\n python_utils.PRINT('')\n raise Exception\n\n # Set COMMON_DIR to the absolute path of the directory above OPPIA_DIR. This\n # is necessary becaue COMMON_DIR (or subsequent variables which refer to it)\n # may use it in a situation where relative paths won't work as expected(such\n # as $PYTHONPATH).\n create_directory(common.OPPIA_TOOLS_DIR)\n create_directory(common.THIRD_PARTY_DIR)\n create_directory(common.NODE_MODULES_PATH)\n\n os_info = os.uname()\n if os_info[0] != 'Darwin' and os_info[0] != 'Linux':\n # Node is a requirement for all installation scripts. Here, we check if\n # the OS supports node.js installation; if not, we exit with an error.\n common.print_each_string_after_two_new_lines([\n 'WARNING: Unsupported OS for installation of node.js.',\n 'If you are running this script on Windows, see the instructions',\n 'here regarding installation of node.js:',\n 'https://github.com/oppia/oppia/wiki/Installing-Oppia-%28Windows'\n '%29',\n 'STATUS: Installation completed except for node.js. 
Exiting.'])\n raise Exception\n\n # Download and install node.js.\n python_utils.PRINT(\n 'Checking if node.js is installed in %s' % common.OPPIA_TOOLS_DIR)\n if not os.path.exists(common.NODE_PATH):\n python_utils.PRINT('Installing Node.js')\n if os_info[0] == 'Darwin':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-darwin-x64'\n else:\n node_file_name = 'node-v10.15.3-darwin-x86'\n elif os_info[0] == 'Linux':\n if os_info[4] == 'x86_64':\n node_file_name = 'node-v10.15.3-linux-x64'\n else:\n node_file_name = 'node-v10.15.3-linux-x86'\n\n download_and_install_package(\n 'https://nodejs.org/dist/v10.15.3/%s.tar.gz' % node_file_name,\n 'node-download.tgz')\n os.rename(\n os.path.join(common.OPPIA_TOOLS_DIR, node_file_name),\n common.NODE_PATH)\n\n # Change ownership of node_modules.\n # Note: on some machines, these commands seem to take quite a long time.\n common.recursive_chown(common.NODE_MODULES_PATH, os.getuid(), -1)\n common.recursive_chmod(common.NODE_MODULES_PATH, 0o744)\n\n # Download and install yarn.\n python_utils.PRINT(\n 'Checking if yarn is installed in %s' % common.OPPIA_TOOLS_DIR)\n if not os.path.exists(common.YARN_PATH):\n python_utils.PRINT('Removing package-lock.json')\n clean.delete_file('package-lock.json')\n common.print_each_string_after_two_new_lines([\n 'Installing yarn',\n 'WARNING: Please note that Oppia uses Yarn to manage node packages',\n 'do *NOT* use npm. For more information on how to use yarn,',\n 'visit https://yarnpkg.com/en/docs/usage.'])\n\n # NB: Update .yarnrc if the yarn version below is changed.\n yarn_version = 'v1.17.3'\n yarn_file_name = 'yarn-%s.tar.gz' % yarn_version\n download_and_install_package(\n 'https://github.com/yarnpkg/yarn/releases/download/%s/%s'\n % (yarn_version, yarn_file_name), yarn_file_name)\n\n # Adjust path to support the default Chrome locations for Unix, Windows and\n # Mac OS.\n if os.environ.get('TRAVIS'):\n chrome_bin = '/usr/bin/chromium-browser'\n elif os.path.isfile('/usr/bin/google-chrome'):\n # Unix.\n chrome_bin = '/usr/bin/google-chrome'\n elif os.path.isfile('/usr/bin/chromium-browser'):\n # Unix.\n chrome_bin = '/usr/bin/chromium-browser'\n elif os.path.isfile(\n '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):\n # Windows.\n chrome_bin = (\n '/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')\n elif os.path.isfile(\n '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe'):\n # WSL.\n chrome_bin = (\n '/mnt/c/Program Files (x86)/Google/Chrome/Application/chrome.exe')\n elif os.path.isfile(\n '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'):\n # Mac OS.\n chrome_bin = (\n '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome')\n else:\n python_utils.PRINT('Chrome is not found, stopping ...')\n raise Exception\n\n os.environ['CHROME_BIN'] = chrome_bin\n python_utils.PRINT('Environment setup completed.')\n\n\n# The 'no coverage' pragma is used as this line is un-testable. This is because\n# it will only be called when setup.py is used as a script.\nif __name__ == '__main__': # pragma: no cover\n main()\n", "path": "scripts/setup.py"}]} | 3,291 | 399 |
gh_patches_debug_3537 | rasdani/github-patches | git_diff | ethereum__web3.py-1334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Python `collections` Deprecation warnings
* Python: 3.7 and below
### What was wrong?
Python 3.8 is changing the way imports from `collections` are being handled. The following Deprecation warning describes the issue:
`DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working`
### How can it be fixed?
Go through the codebase and swap out any `collections` imports for `collections.abc`
</issue>
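As a hedged illustration of the change the issue asks for (not code taken from web3.py itself), the swap amounts to importing the abstract base classes from `collections.abc` instead of `collections`:

```python
# Old spelling: emits the quoted DeprecationWarning on newer Python 3.x
# and was eventually removed from the collections namespace.
#     from collections import Iterable, Mapping
# Preferred spelling, valid on every modern interpreter:
from collections.abc import Iterable, Mapping


def classify(value):
    """Tiny demo of the ABCs in use."""
    if isinstance(value, Mapping):
        return 'mapping'
    if isinstance(value, Iterable) and not isinstance(value, (str, bytes)):
        return 'iterable'
    return 'scalar'


assert classify({'a': 1}) == 'mapping'
assert classify([1, 2, 3]) == 'iterable'
assert classify(42) == 'scalar'
```

The two spellings behave identically at runtime; only the import location changes.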
<code>
[start of web3/_utils/formatters.py]
1 from collections import (
2 Iterable,
3 Mapping,
4 )
5
6 from eth_utils import (
7 is_dict,
8 is_list_like,
9 is_string,
10 to_dict,
11 to_list,
12 )
13
14 from web3._utils.decorators import (
15 reject_recursive_repeats,
16 )
17 from web3._utils.toolz import (
18 compose,
19 curry,
20 dissoc,
21 )
22
23
24 def hex_to_integer(value):
25 return int(value, 16)
26
27
28 integer_to_hex = hex
29
30
31 @curry
32 @to_list
33 def apply_formatter_at_index(formatter, at_index, value):
34 if at_index + 1 > len(value):
35 raise IndexError(
36 "Not enough values in iterable to apply formatter. Got: {0}. "
37 "Need: {1}".format(len(value), at_index + 1)
38 )
39 for index, item in enumerate(value):
40 if index == at_index:
41 yield formatter(item)
42 else:
43 yield item
44
45
46 def apply_formatters_to_args(*formatters):
47 return compose(*(
48 apply_formatter_at_index(formatter, index)
49 for index, formatter
50 in enumerate(formatters)
51 ))
52
53
54 @curry
55 def apply_formatter_if(condition, formatter, value):
56 if condition(value):
57 return formatter(value)
58 else:
59 return value
60
61
62 @curry
63 @to_dict
64 def apply_formatters_to_dict(formatters, value):
65 for key, item in value.items():
66 if key in formatters:
67 try:
68 yield key, formatters[key](item)
69 except (TypeError, ValueError) as exc:
70 raise type(exc)("Could not format value %r as field %r" % (item, key)) from exc
71 else:
72 yield key, item
73
74
75 @curry
76 @to_list
77 def apply_formatter_to_array(formatter, value):
78 for item in value:
79 yield formatter(item)
80
81
82 @curry
83 def apply_one_of_formatters(formatter_condition_pairs, value):
84 for formatter, condition in formatter_condition_pairs:
85 if condition(value):
86 return formatter(value)
87 else:
88 raise ValueError("The provided value did not satisfy any of the formatter conditions")
89
90
91 def map_collection(func, collection):
92 """
93 Apply func to each element of a collection, or value of a dictionary.
94 If the value is not a collection, return it unmodified
95 """
96 datatype = type(collection)
97 if isinstance(collection, Mapping):
98 return datatype((key, func(val)) for key, val in collection.items())
99 if is_string(collection):
100 return collection
101 elif isinstance(collection, Iterable):
102 return datatype(map(func, collection))
103 else:
104 return collection
105
106
107 @reject_recursive_repeats
108 def recursive_map(func, data):
109 """
110 Apply func to data, and any collection items inside data (using map_collection).
111 Define func so that it only applies to the type of value that you want it to apply to.
112 """
113 def recurse(item):
114 return recursive_map(func, item)
115 items_mapped = map_collection(recurse, data)
116 return func(items_mapped)
117
118
119 def static_return(value):
120 def inner(*args, **kwargs):
121 return value
122 return inner
123
124
125 def static_result(value):
126 def inner(*args, **kwargs):
127 return {'result': value}
128 return inner
129
130
131 @curry
132 @to_dict
133 def apply_key_map(key_mappings, value):
134 for key, item in value.items():
135 if key in key_mappings:
136 yield key_mappings[key], item
137 else:
138 yield key, item
139
140
141 def is_array_of_strings(value):
142 if not is_list_like(value):
143 return False
144 return all((is_string(item) for item in value))
145
146
147 def is_array_of_dicts(value):
148 if not is_list_like(value):
149 return False
150 return all((is_dict(item) for item in value))
151
152
153 @curry
154 def remove_key_if(key, remove_if, input_dict):
155 if key in input_dict and remove_if(input_dict):
156 return dissoc(input_dict, key)
157 else:
158 return input_dict
159
[end of web3/_utils/formatters.py]
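A short usage sketch of the helpers defined above may help; the import path is the one shown in the listing (it assumes a web3.py checkout where the module still lives there), and the doubling function and sample data are illustrative only:

```python
from web3._utils.formatters import recursive_map


def double_ints(value):
    # Applied at every level; non-int values pass through unchanged.
    return value * 2 if isinstance(value, int) else value


nested = {'fees': [1, 2, {'tip': 3}], 'note': 'unchanged'}
print(recursive_map(double_ints, nested))
# expected: {'fees': [2, 4, {'tip': 6}], 'note': 'unchanged'}
```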
[start of web3/datastructures.py]
1 from collections import (
2 Hashable,
3 Mapping,
4 MutableMapping,
5 OrderedDict,
6 Sequence,
7 )
8
9 from eth_utils import (
10 is_integer,
11 )
12
13 from web3._utils.formatters import (
14 recursive_map,
15 )
16
17 # Hashable must be immutable:
18 # "the implementation of hashable collections requires that a key's hash value is immutable"
19 # https://docs.python.org/3/reference/datamodel.html#object.__hash__
20
21
22 class ReadableAttributeDict(Mapping):
23 """
24 The read attributes for the AttributeDict types
25 """
26
27 def __init__(self, dictionary, *args, **kwargs):
28 self.__dict__ = dict(dictionary)
29 self.__dict__.update(dict(*args, **kwargs))
30
31 def __getitem__(self, key):
32 return self.__dict__[key]
33
34 def __iter__(self):
35 return iter(self.__dict__)
36
37 def __len__(self):
38 return len(self.__dict__)
39
40 def __repr__(self):
41 return self.__class__.__name__ + "(%r)" % self.__dict__
42
43 def _repr_pretty_(self, builder, cycle):
44 """
45 Custom pretty output for the IPython console
46 """
47 builder.text(self.__class__.__name__ + "(")
48 if cycle:
49 builder.text("<cycle>")
50 else:
51 builder.pretty(self.__dict__)
52 builder.text(")")
53
54 @classmethod
55 def _apply_if_mapping(cls, value):
56 if isinstance(value, Mapping):
57 return cls(value)
58 else:
59 return value
60
61 @classmethod
62 def recursive(cls, value):
63 return recursive_map(cls._apply_if_mapping, value)
64
65
66 class MutableAttributeDict(MutableMapping, ReadableAttributeDict):
67
68 def __setitem__(self, key, val):
69 self.__dict__[key] = val
70
71 def __delitem__(self, key):
72 del self.__dict__[key]
73
74
75 class AttributeDict(ReadableAttributeDict, Hashable):
76 """
77 This provides superficial immutability, someone could hack around it
78 """
79
80 def __setattr__(self, attr, val):
81 if attr == '__dict__':
82 super().__setattr__(attr, val)
83 else:
84 raise TypeError('This data is immutable -- create a copy instead of modifying')
85
86 def __delattr__(self, key):
87 raise TypeError('This data is immutable -- create a copy instead of modifying')
88
89 def __hash__(self):
90 return hash(tuple(sorted(self.items())))
91
92 def __eq__(self, other):
93 if isinstance(other, Mapping):
94 return self.__dict__ == dict(other)
95 else:
96 return False
97
98
99 class NamedElementOnion(Mapping):
100 """
101 Add layers to an onion-shaped structure. Optionally, inject to a specific layer.
102 This structure is iterable, where the outermost layer is first, and innermost is last.
103 """
104
105 def __init__(self, init_elements, valid_element=callable):
106 self._queue = OrderedDict()
107 for element in reversed(init_elements):
108 if valid_element(element):
109 self.add(element)
110 else:
111 self.add(*element)
112
113 def add(self, element, name=None):
114 if name is None:
115 name = element
116
117 if name in self._queue:
118 if name is element:
119 raise ValueError("You can't add the same un-named instance twice")
120 else:
121 raise ValueError("You can't add the same name again, use replace instead")
122
123 self._queue[name] = element
124
125 def inject(self, element, name=None, layer=None):
126 """
127 Inject a named element to an arbitrary layer in the onion.
128
129 The current implementation only supports insertion at the innermost layer,
130 or at the outermost layer. Note that inserting to the outermost is equivalent
131 to calling :meth:`add` .
132 """
133 if not is_integer(layer):
134 raise TypeError("The layer for insertion must be an int.")
135 elif layer != 0 and layer != len(self._queue):
136 raise NotImplementedError(
137 "You can only insert to the beginning or end of a %s, currently. "
138 "You tried to insert to %d, but only 0 and %d are permitted. " % (
139 type(self),
140 layer,
141 len(self._queue),
142 )
143 )
144
145 self.add(element, name=name)
146
147 if layer == 0:
148 if name is None:
149 name = element
150 self._queue.move_to_end(name, last=False)
151 elif layer == len(self._queue):
152 return
153 else:
154 raise AssertionError("Impossible to reach: earlier validation raises an error")
155
156 def clear(self):
157 self._queue.clear()
158
159 def replace(self, old, new):
160 if old not in self._queue:
161 raise ValueError("You can't replace unless one already exists, use add instead")
162 to_be_replaced = self._queue[old]
163 if to_be_replaced is old:
164 # re-insert with new name in old slot
165 self._replace_with_new_name(old, new)
166 else:
167 self._queue[old] = new
168 return to_be_replaced
169
170 def remove(self, old):
171 if old not in self._queue:
172 raise ValueError("You can only remove something that has been added")
173 del self._queue[old]
174
175 def _replace_with_new_name(self, old, new):
176 self._queue[new] = new
177 found_old = False
178 for key in list(self._queue.keys()):
179 if not found_old:
180 if key == old:
181 found_old = True
182 continue
183 elif key != new:
184 self._queue.move_to_end(key)
185 del self._queue[old]
186
187 def __iter__(self):
188 elements = self._queue.values()
189 if not isinstance(elements, Sequence):
190 elements = list(elements)
191 return iter(reversed(elements))
192
193 def __add__(self, other):
194 if not isinstance(other, NamedElementOnion):
195 raise NotImplementedError("You can only combine with another NamedElementOnion")
196 combined = self._queue.copy()
197 combined.update(other._queue)
198 return NamedElementOnion(combined.items())
199
200 def __contains__(self, element):
201 return element in self._queue
202
203 def __getitem__(self, element):
204 return self._queue[element]
205
206 def __len__(self):
207 return len(self._queue)
208
209 def __reversed__(self):
210 elements = self._queue.values()
211 if not isinstance(elements, Sequence):
212 elements = list(elements)
213 return iter(elements)
214
[end of web3/datastructures.py]
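Purely as an illustration (again assuming the module above is importable), the behaviour these classes provide looks like this:

```python
from web3.datastructures import AttributeDict

block = AttributeDict.recursive({'number': 1, 'miner': {'address': '0xabc'}})
print(block.number)          # 1; keys are readable as attributes
print(block.miner.address)   # 0xabc; nested mappings are wrapped as well

try:
    block.number = 2         # the "superficial immutability" described above
except TypeError as exc:
    print(exc)
```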
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/_utils/formatters.py b/web3/_utils/formatters.py
--- a/web3/_utils/formatters.py
+++ b/web3/_utils/formatters.py
@@ -1,4 +1,4 @@
-from collections import (
+from collections.abc import (
Iterable,
Mapping,
)
diff --git a/web3/datastructures.py b/web3/datastructures.py
--- a/web3/datastructures.py
+++ b/web3/datastructures.py
@@ -1,8 +1,10 @@
from collections import (
+ OrderedDict,
+)
+from collections.abc import (
Hashable,
Mapping,
MutableMapping,
- OrderedDict,
Sequence,
)
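A rough, hedged way to confirm that a change of this shape silences the warning is to import the patched module in a fresh interpreter with `DeprecationWarning` promoted to an error; the probe below assumes web3 is installed and that no unrelated deprecation fires first:

```python
import subprocess
import sys

probe = "import warnings; warnings.simplefilter('error', DeprecationWarning); import web3.datastructures"
result = subprocess.run([sys.executable, "-c", probe], capture_output=True, text=True)
if result.returncode == 0:
    print("no DeprecationWarning raised at import time")
else:
    print(result.stderr.strip().splitlines()[-1])  # the warning turned into an error
```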
| {"golden_diff": "diff --git a/web3/_utils/formatters.py b/web3/_utils/formatters.py\n--- a/web3/_utils/formatters.py\n+++ b/web3/_utils/formatters.py\n@@ -1,4 +1,4 @@\n-from collections import (\n+from collections.abc import (\n Iterable,\n Mapping,\n )\ndiff --git a/web3/datastructures.py b/web3/datastructures.py\n--- a/web3/datastructures.py\n+++ b/web3/datastructures.py\n@@ -1,8 +1,10 @@\n from collections import (\n+ OrderedDict,\n+)\n+from collections.abc import (\n Hashable,\n Mapping,\n MutableMapping,\n- OrderedDict,\n Sequence,\n )\n", "issue": "Remove Python `collections` Deprecation warnings\n* Python: 3.7 and below\r\n\r\n### What was wrong?\r\nPython 3.8 is changing the way imports from `collections` are being handled. The following Deprecation warning describes the issue: \r\n`DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working`\r\n\r\n### How can it be fixed?\r\nGo through the codebase and swap out any `collections` imports for `collections.abc`\n", "before_files": [{"content": "from collections import (\n Iterable,\n Mapping,\n)\n\nfrom eth_utils import (\n is_dict,\n is_list_like,\n is_string,\n to_dict,\n to_list,\n)\n\nfrom web3._utils.decorators import (\n reject_recursive_repeats,\n)\nfrom web3._utils.toolz import (\n compose,\n curry,\n dissoc,\n)\n\n\ndef hex_to_integer(value):\n return int(value, 16)\n\n\ninteger_to_hex = hex\n\n\n@curry\n@to_list\ndef apply_formatter_at_index(formatter, at_index, value):\n if at_index + 1 > len(value):\n raise IndexError(\n \"Not enough values in iterable to apply formatter. Got: {0}. \"\n \"Need: {1}\".format(len(value), at_index + 1)\n )\n for index, item in enumerate(value):\n if index == at_index:\n yield formatter(item)\n else:\n yield item\n\n\ndef apply_formatters_to_args(*formatters):\n return compose(*(\n apply_formatter_at_index(formatter, index)\n for index, formatter\n in enumerate(formatters)\n ))\n\n\n@curry\ndef apply_formatter_if(condition, formatter, value):\n if condition(value):\n return formatter(value)\n else:\n return value\n\n\n@curry\n@to_dict\ndef apply_formatters_to_dict(formatters, value):\n for key, item in value.items():\n if key in formatters:\n try:\n yield key, formatters[key](item)\n except (TypeError, ValueError) as exc:\n raise type(exc)(\"Could not format value %r as field %r\" % (item, key)) from exc\n else:\n yield key, item\n\n\n@curry\n@to_list\ndef apply_formatter_to_array(formatter, value):\n for item in value:\n yield formatter(item)\n\n\n@curry\ndef apply_one_of_formatters(formatter_condition_pairs, value):\n for formatter, condition in formatter_condition_pairs:\n if condition(value):\n return formatter(value)\n else:\n raise ValueError(\"The provided value did not satisfy any of the formatter conditions\")\n\n\ndef map_collection(func, collection):\n \"\"\"\n Apply func to each element of a collection, or value of a dictionary.\n If the value is not a collection, return it unmodified\n \"\"\"\n datatype = type(collection)\n if isinstance(collection, Mapping):\n return datatype((key, func(val)) for key, val in collection.items())\n if is_string(collection):\n return collection\n elif isinstance(collection, Iterable):\n return datatype(map(func, collection))\n else:\n return collection\n\n\n@reject_recursive_repeats\ndef recursive_map(func, data):\n \"\"\"\n Apply func to data, and any collection items inside data (using map_collection).\n Define func so that it only applies to the type of 
value that you want it to apply to.\n \"\"\"\n def recurse(item):\n return recursive_map(func, item)\n items_mapped = map_collection(recurse, data)\n return func(items_mapped)\n\n\ndef static_return(value):\n def inner(*args, **kwargs):\n return value\n return inner\n\n\ndef static_result(value):\n def inner(*args, **kwargs):\n return {'result': value}\n return inner\n\n\n@curry\n@to_dict\ndef apply_key_map(key_mappings, value):\n for key, item in value.items():\n if key in key_mappings:\n yield key_mappings[key], item\n else:\n yield key, item\n\n\ndef is_array_of_strings(value):\n if not is_list_like(value):\n return False\n return all((is_string(item) for item in value))\n\n\ndef is_array_of_dicts(value):\n if not is_list_like(value):\n return False\n return all((is_dict(item) for item in value))\n\n\n@curry\ndef remove_key_if(key, remove_if, input_dict):\n if key in input_dict and remove_if(input_dict):\n return dissoc(input_dict, key)\n else:\n return input_dict\n", "path": "web3/_utils/formatters.py"}, {"content": "from collections import (\n Hashable,\n Mapping,\n MutableMapping,\n OrderedDict,\n Sequence,\n)\n\nfrom eth_utils import (\n is_integer,\n)\n\nfrom web3._utils.formatters import (\n recursive_map,\n)\n\n# Hashable must be immutable:\n# \"the implementation of hashable collections requires that a key's hash value is immutable\"\n# https://docs.python.org/3/reference/datamodel.html#object.__hash__\n\n\nclass ReadableAttributeDict(Mapping):\n \"\"\"\n The read attributes for the AttributeDict types\n \"\"\"\n\n def __init__(self, dictionary, *args, **kwargs):\n self.__dict__ = dict(dictionary)\n self.__dict__.update(dict(*args, **kwargs))\n\n def __getitem__(self, key):\n return self.__dict__[key]\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def __len__(self):\n return len(self.__dict__)\n\n def __repr__(self):\n return self.__class__.__name__ + \"(%r)\" % self.__dict__\n\n def _repr_pretty_(self, builder, cycle):\n \"\"\"\n Custom pretty output for the IPython console\n \"\"\"\n builder.text(self.__class__.__name__ + \"(\")\n if cycle:\n builder.text(\"<cycle>\")\n else:\n builder.pretty(self.__dict__)\n builder.text(\")\")\n\n @classmethod\n def _apply_if_mapping(cls, value):\n if isinstance(value, Mapping):\n return cls(value)\n else:\n return value\n\n @classmethod\n def recursive(cls, value):\n return recursive_map(cls._apply_if_mapping, value)\n\n\nclass MutableAttributeDict(MutableMapping, ReadableAttributeDict):\n\n def __setitem__(self, key, val):\n self.__dict__[key] = val\n\n def __delitem__(self, key):\n del self.__dict__[key]\n\n\nclass AttributeDict(ReadableAttributeDict, Hashable):\n \"\"\"\n This provides superficial immutability, someone could hack around it\n \"\"\"\n\n def __setattr__(self, attr, val):\n if attr == '__dict__':\n super().__setattr__(attr, val)\n else:\n raise TypeError('This data is immutable -- create a copy instead of modifying')\n\n def __delattr__(self, key):\n raise TypeError('This data is immutable -- create a copy instead of modifying')\n\n def __hash__(self):\n return hash(tuple(sorted(self.items())))\n\n def __eq__(self, other):\n if isinstance(other, Mapping):\n return self.__dict__ == dict(other)\n else:\n return False\n\n\nclass NamedElementOnion(Mapping):\n \"\"\"\n Add layers to an onion-shaped structure. 
Optionally, inject to a specific layer.\n This structure is iterable, where the outermost layer is first, and innermost is last.\n \"\"\"\n\n def __init__(self, init_elements, valid_element=callable):\n self._queue = OrderedDict()\n for element in reversed(init_elements):\n if valid_element(element):\n self.add(element)\n else:\n self.add(*element)\n\n def add(self, element, name=None):\n if name is None:\n name = element\n\n if name in self._queue:\n if name is element:\n raise ValueError(\"You can't add the same un-named instance twice\")\n else:\n raise ValueError(\"You can't add the same name again, use replace instead\")\n\n self._queue[name] = element\n\n def inject(self, element, name=None, layer=None):\n \"\"\"\n Inject a named element to an arbitrary layer in the onion.\n\n The current implementation only supports insertion at the innermost layer,\n or at the outermost layer. Note that inserting to the outermost is equivalent\n to calling :meth:`add` .\n \"\"\"\n if not is_integer(layer):\n raise TypeError(\"The layer for insertion must be an int.\")\n elif layer != 0 and layer != len(self._queue):\n raise NotImplementedError(\n \"You can only insert to the beginning or end of a %s, currently. \"\n \"You tried to insert to %d, but only 0 and %d are permitted. \" % (\n type(self),\n layer,\n len(self._queue),\n )\n )\n\n self.add(element, name=name)\n\n if layer == 0:\n if name is None:\n name = element\n self._queue.move_to_end(name, last=False)\n elif layer == len(self._queue):\n return\n else:\n raise AssertionError(\"Impossible to reach: earlier validation raises an error\")\n\n def clear(self):\n self._queue.clear()\n\n def replace(self, old, new):\n if old not in self._queue:\n raise ValueError(\"You can't replace unless one already exists, use add instead\")\n to_be_replaced = self._queue[old]\n if to_be_replaced is old:\n # re-insert with new name in old slot\n self._replace_with_new_name(old, new)\n else:\n self._queue[old] = new\n return to_be_replaced\n\n def remove(self, old):\n if old not in self._queue:\n raise ValueError(\"You can only remove something that has been added\")\n del self._queue[old]\n\n def _replace_with_new_name(self, old, new):\n self._queue[new] = new\n found_old = False\n for key in list(self._queue.keys()):\n if not found_old:\n if key == old:\n found_old = True\n continue\n elif key != new:\n self._queue.move_to_end(key)\n del self._queue[old]\n\n def __iter__(self):\n elements = self._queue.values()\n if not isinstance(elements, Sequence):\n elements = list(elements)\n return iter(reversed(elements))\n\n def __add__(self, other):\n if not isinstance(other, NamedElementOnion):\n raise NotImplementedError(\"You can only combine with another NamedElementOnion\")\n combined = self._queue.copy()\n combined.update(other._queue)\n return NamedElementOnion(combined.items())\n\n def __contains__(self, element):\n return element in self._queue\n\n def __getitem__(self, element):\n return self._queue[element]\n\n def __len__(self):\n return len(self._queue)\n\n def __reversed__(self):\n elements = self._queue.values()\n if not isinstance(elements, Sequence):\n elements = list(elements)\n return iter(elements)\n", "path": "web3/datastructures.py"}]} | 3,847 | 147 |
gh_patches_debug_37508 | rasdani/github-patches | git_diff | optuna__optuna-4973 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TensorBoard integration fails if None is suggested
### Expected behavior
`Optuna` with `TensorBoardCallback` should allow `None` as a suggestion.
### Environment
- Optuna version:3.2.0
- Python version:3.9.13
- OS:Linux-5.4.228-132.418.amzn2.x86_64-x86_64-with-glibc2.31
### Error messages, stack traces, or logs
```shell
File /opt/python3.9/site-packages/tensorboard/plugins/hparams/summary_v2.py:507, in Discrete.__init__(self, values, dtype)
505 raise ValueError("Empty domain with no dtype specified")
506 if dtype not in (int, float, bool, str):
--> 507 raise ValueError("Unknown dtype: %r" % (dtype,))
508 self._dtype = dtype
509 for value in self._values:
ValueError: Unknown dtype: <class 'NoneType'>
```
```
### Steps to reproduce
1. If I run this
```python
import optuna
from optuna.integration.tensorboard import TensorBoardCallback
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
X, y = make_classification(n_samples=1000, n_features=4,
n_informative=2, n_redundant=0,
random_state=0, shuffle=False)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=0)
def objective(trial):
params = {
'max_leaf_nodes': trial.suggest_categorical('max_leaf_nodes', [None, 2, 5, 10, 20]),
}
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_val_pred = clf.predict(X_val)
f1 = f1_score(y_val, y_val_pred)
return f1
tensorboard_callback = TensorBoardCallback("logs/", metric_name="f1")
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])
```
It would return an error above.
2. If I don't have `None` in the suggestion or not use `TensorBoardCallback` then it would work fine.
### Additional context (optional)
_No response_
</issue>
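To make the reported failure concrete: `hp.Discrete` infers its dtype from the first choice, so a `None` entry produces exactly the traceback quoted above. A hedged reproduction, assuming the `tensorboard` package is installed and using the attribute names from its `summary_v2` module:

```python
from tensorboard.plugins.hparams import api as hp

ok = hp.HParam('max_leaf_nodes', hp.Discrete([2, 5, 10]))        # homogeneous ints: accepted
print(ok.domain.dtype)                                            # expected: <class 'int'>

try:
    hp.HParam('max_leaf_nodes', hp.Discrete([None, 2, 5, 10]))    # dtype inferred as NoneType
except ValueError as exc:
    print(exc)                                                     # Unknown dtype: <class 'NoneType'>
```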
<code>
[start of optuna/integration/tensorboard.py]
1 import os
2 from typing import Dict
3
4 import optuna
5 from optuna._experimental import experimental_class
6 from optuna._imports import try_import
7
8
9 with try_import() as _imports:
10 from tensorboard.plugins.hparams import api as hp
11 import tensorflow as tf
12
13
14 @experimental_class("2.0.0")
15 class TensorBoardCallback:
16 """Callback to track Optuna trials with TensorBoard.
17
18 This callback adds relevant information that is tracked by Optuna to TensorBoard.
19
20 See `the example <https://github.com/optuna/optuna-examples/blob/main/
21 tensorboard/tensorboard_simple.py>`_.
22
23 Args:
24 dirname:
25 Directory to store TensorBoard logs.
26 metric_name:
27 Name of the metric. Since the metric itself is just a number,
28 `metric_name` can be used to give it a name. So you know later
29 if it was roc-auc or accuracy.
30
31 """
32
33 def __init__(self, dirname: str, metric_name: str) -> None:
34 _imports.check()
35 self._dirname = dirname
36 self._metric_name = metric_name
37 self._hp_params: Dict[str, hp.HParam] = {}
38
39 def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:
40 if len(self._hp_params) == 0:
41 self._initialization(study)
42 if trial.state != optuna.trial.TrialState.COMPLETE:
43 return
44 trial_value = trial.value if trial.value is not None else float("nan")
45 hparams = {}
46 for param_name, param_value in trial.params.items():
47 if param_name not in self._hp_params:
48 self._add_distributions(trial.distributions)
49 hparams[self._hp_params[param_name]] = param_value
50 run_name = "trial-%d" % trial.number
51 run_dir = os.path.join(self._dirname, run_name)
52 with tf.summary.create_file_writer(run_dir).as_default():
53 hp.hparams(hparams, trial_id=run_name) # record the values used in this trial
54 tf.summary.scalar(self._metric_name, trial_value, step=trial.number)
55
56 def _add_distributions(
57 self, distributions: Dict[str, optuna.distributions.BaseDistribution]
58 ) -> None:
59 supported_distributions = (
60 optuna.distributions.CategoricalDistribution,
61 optuna.distributions.FloatDistribution,
62 optuna.distributions.IntDistribution,
63 )
64
65 for param_name, param_distribution in distributions.items():
66 if isinstance(param_distribution, optuna.distributions.FloatDistribution):
67 self._hp_params[param_name] = hp.HParam(
68 param_name,
69 hp.RealInterval(float(param_distribution.low), float(param_distribution.high)),
70 )
71 elif isinstance(param_distribution, optuna.distributions.IntDistribution):
72 self._hp_params[param_name] = hp.HParam(
73 param_name,
74 hp.IntInterval(param_distribution.low, param_distribution.high),
75 )
76 elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):
77 self._hp_params[param_name] = hp.HParam(
78 param_name,
79 hp.Discrete(param_distribution.choices),
80 )
81 else:
82 distribution_list = [
83 distribution.__name__ for distribution in supported_distributions
84 ]
85 raise NotImplementedError(
86 "The distribution {} is not implemented. "
87 "The parameter distribution should be one of the {}".format(
88 param_distribution, distribution_list
89 )
90 )
91
92 def _initialization(self, study: optuna.Study) -> None:
93 completed_trials = [
94 trial
95 for trial in study.get_trials(deepcopy=False)
96 if trial.state == optuna.trial.TrialState.COMPLETE
97 ]
98 for trial in completed_trials:
99 self._add_distributions(trial.distributions)
100
[end of optuna/integration/tensorboard.py]
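One possible pre-processing step, sketched here as an assumption rather than a statement of how the integration handles it, is to coerce the choices to a single TensorBoard-supported dtype before building `hp.Discrete`:

```python
def tensorboard_safe_choices(choices):
    """Fall back to strings when choices are mixed or of an unsupported dtype."""
    dtype = type(choices[0])
    if dtype not in (int, float, bool, str) or any(not isinstance(c, dtype) for c in choices):
        return tuple(str(c) for c in choices)
    return tuple(choices)


print(tensorboard_safe_choices([None, 2, 5, 10, 20]))  # ('None', '2', '5', '10', '20')
print(tensorboard_safe_choices([2, 5, 10, 20]))        # (2, 5, 10, 20)
```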
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/integration/tensorboard.py b/optuna/integration/tensorboard.py
--- a/optuna/integration/tensorboard.py
+++ b/optuna/integration/tensorboard.py
@@ -4,12 +4,15 @@
import optuna
from optuna._experimental import experimental_class
from optuna._imports import try_import
+from optuna.logging import get_logger
with try_import() as _imports:
from tensorboard.plugins.hparams import api as hp
import tensorflow as tf
+_logger = get_logger(__name__)
+
@experimental_class("2.0.0")
class TensorBoardCallback:
@@ -46,7 +49,11 @@
for param_name, param_value in trial.params.items():
if param_name not in self._hp_params:
self._add_distributions(trial.distributions)
- hparams[self._hp_params[param_name]] = param_value
+ param = self._hp_params[param_name]
+ if isinstance(param.domain, hp.Discrete):
+ hparams[param] = param.domain.dtype(param_value)
+ else:
+ hparams[param] = param_value
run_name = "trial-%d" % trial.number
run_dir = os.path.join(self._dirname, run_name)
with tf.summary.create_file_writer(run_dir).as_default():
@@ -74,9 +81,24 @@
hp.IntInterval(param_distribution.low, param_distribution.high),
)
elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):
+ choices = param_distribution.choices
+ dtype = type(choices[0])
+ if any(not isinstance(choice, dtype) for choice in choices):
+ _logger.warning(
+ "Choices contains mixed types, which is not supported by TensorBoard. "
+ "Converting all choices to strings."
+ )
+ choices = tuple(map(str, choices))
+ elif dtype not in (int, float, bool, str):
+ _logger.warning(
+ f"Choices are of type {dtype}, which is not supported by TensorBoard. "
+ "Converting all choices to strings."
+ )
+ choices = tuple(map(str, choices))
+
self._hp_params[param_name] = hp.HParam(
param_name,
- hp.Discrete(param_distribution.choices),
+ hp.Discrete(choices),
)
else:
distribution_list = [
| {"golden_diff": "diff --git a/optuna/integration/tensorboard.py b/optuna/integration/tensorboard.py\n--- a/optuna/integration/tensorboard.py\n+++ b/optuna/integration/tensorboard.py\n@@ -4,12 +4,15 @@\n import optuna\n from optuna._experimental import experimental_class\n from optuna._imports import try_import\n+from optuna.logging import get_logger\n \n \n with try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n \n+_logger = get_logger(__name__)\n+\n \n @experimental_class(\"2.0.0\")\n class TensorBoardCallback:\n@@ -46,7 +49,11 @@\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n- hparams[self._hp_params[param_name]] = param_value\n+ param = self._hp_params[param_name]\n+ if isinstance(param.domain, hp.Discrete):\n+ hparams[param] = param.domain.dtype(param_value)\n+ else:\n+ hparams[param] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n@@ -74,9 +81,24 @@\n hp.IntInterval(param_distribution.low, param_distribution.high),\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n+ choices = param_distribution.choices\n+ dtype = type(choices[0])\n+ if any(not isinstance(choice, dtype) for choice in choices):\n+ _logger.warning(\n+ \"Choices contains mixed types, which is not supported by TensorBoard. \"\n+ \"Converting all choices to strings.\"\n+ )\n+ choices = tuple(map(str, choices))\n+ elif dtype not in (int, float, bool, str):\n+ _logger.warning(\n+ f\"Choices are of type {dtype}, which is not supported by TensorBoard. \"\n+ \"Converting all choices to strings.\"\n+ )\n+ choices = tuple(map(str, choices))\n+\n self._hp_params[param_name] = hp.HParam(\n param_name,\n- hp.Discrete(param_distribution.choices),\n+ hp.Discrete(choices),\n )\n else:\n distribution_list = [\n", "issue": "TensorBoard integration fails if None is suggested\n### Expected behavior\n\n`Optuna` with `TensorBoardCallback` should allow `None` as a suggestion.\n\n### Environment\n\n- Optuna version:3.2.0\r\n- Python version:3.9.13\r\n- OS:Linux-5.4.228-132.418.amzn2.x86_64-x86_64-with-glibc2.31\n\n### Error messages, stack traces, or logs\n\n```shell\nFile /opt/python3.9/site-packages/tensorboard/plugins/hparams/summary_v2.py:507, in Discrete.__init__(self, values, dtype)\r\n 505 raise ValueError(\"Empty domain with no dtype specified\")\r\n 506 if dtype not in (int, float, bool, str):\r\n--> 507 raise ValueError(\"Unknown dtype: %r\" % (dtype,))\r\n 508 self._dtype = dtype\r\n 509 for value in self._values:\r\n\r\nValueError: Unknown dtype: <class 'NoneType'>\r\n```\n```\n\n\n### Steps to reproduce\n\n1. 
If I run this\r\n```python\r\nimport optuna\r\nfrom optuna.integration.tensorboard import TensorBoardCallback\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import f1_score\r\n\r\nX, y = make_classification(n_samples=1000, n_features=4,\r\n n_informative=2, n_redundant=0,\r\n random_state=0, shuffle=False)\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=0)\r\n\r\ndef objective(trial):\r\n params = {\r\n 'max_leaf_nodes': trial.suggest_categorical('max_leaf_nodes', [None, 2, 5, 10, 20]),\r\n }\r\n clf = RandomForestClassifier()\r\n clf.fit(X_train, y_train)\r\n\r\n y_val_pred = clf.predict(X_val)\r\n f1 = f1_score(y_val, y_val_pred)\r\n return f1\r\n\r\ntensorboard_callback = TensorBoardCallback(\"logs/\", metric_name=\"f1\")\r\nstudy = optuna.create_study(direction=\"minimize\")\r\n\r\nstudy.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])\r\n```\r\n\r\nIt would return an error above.\r\n\r\n2. If I don't have `None` in the suggestion or not use `TensorBoardCallback` then it would work fine.\n\n### Additional context (optional)\n\n_No response_\nTensorBoard integration fails if None is suggested\n### Expected behavior\n\n`Optuna` with `TensorBoardCallback` should allow `None` as a suggestion.\n\n### Environment\n\n- Optuna version:3.2.0\r\n- Python version:3.9.13\r\n- OS:Linux-5.4.228-132.418.amzn2.x86_64-x86_64-with-glibc2.31\n\n### Error messages, stack traces, or logs\n\n```shell\nFile /opt/python3.9/site-packages/tensorboard/plugins/hparams/summary_v2.py:507, in Discrete.__init__(self, values, dtype)\r\n 505 raise ValueError(\"Empty domain with no dtype specified\")\r\n 506 if dtype not in (int, float, bool, str):\r\n--> 507 raise ValueError(\"Unknown dtype: %r\" % (dtype,))\r\n 508 self._dtype = dtype\r\n 509 for value in self._values:\r\n\r\nValueError: Unknown dtype: <class 'NoneType'>\r\n```\n```\n\n\n### Steps to reproduce\n\n1. If I run this\r\n```python\r\nimport optuna\r\nfrom optuna.integration.tensorboard import TensorBoardCallback\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.datasets import make_classification\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import f1_score\r\n\r\nX, y = make_classification(n_samples=1000, n_features=4,\r\n n_informative=2, n_redundant=0,\r\n random_state=0, shuffle=False)\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33, random_state=0)\r\n\r\ndef objective(trial):\r\n params = {\r\n 'max_leaf_nodes': trial.suggest_categorical('max_leaf_nodes', [None, 2, 5, 10, 20]),\r\n }\r\n clf = RandomForestClassifier()\r\n clf.fit(X_train, y_train)\r\n\r\n y_val_pred = clf.predict(X_val)\r\n f1 = f1_score(y_val, y_val_pred)\r\n return f1\r\n\r\ntensorboard_callback = TensorBoardCallback(\"logs/\", metric_name=\"f1\")\r\nstudy = optuna.create_study(direction=\"minimize\")\r\n\r\nstudy.optimize(objective, n_trials=10, timeout=600, callbacks=[tensorboard_callback])\r\n```\r\n\r\nIt would return an error above.\r\n\r\n2. 
If I don't have `None` in the suggestion or not use `TensorBoardCallback` then it would work fine.\n\n### Additional context (optional)\n\n_No response_\n", "before_files": [{"content": "import os\nfrom typing import Dict\n\nimport optuna\nfrom optuna._experimental import experimental_class\nfrom optuna._imports import try_import\n\n\nwith try_import() as _imports:\n from tensorboard.plugins.hparams import api as hp\n import tensorflow as tf\n\n\n@experimental_class(\"2.0.0\")\nclass TensorBoardCallback:\n \"\"\"Callback to track Optuna trials with TensorBoard.\n\n This callback adds relevant information that is tracked by Optuna to TensorBoard.\n\n See `the example <https://github.com/optuna/optuna-examples/blob/main/\n tensorboard/tensorboard_simple.py>`_.\n\n Args:\n dirname:\n Directory to store TensorBoard logs.\n metric_name:\n Name of the metric. Since the metric itself is just a number,\n `metric_name` can be used to give it a name. So you know later\n if it was roc-auc or accuracy.\n\n \"\"\"\n\n def __init__(self, dirname: str, metric_name: str) -> None:\n _imports.check()\n self._dirname = dirname\n self._metric_name = metric_name\n self._hp_params: Dict[str, hp.HParam] = {}\n\n def __call__(self, study: optuna.study.Study, trial: optuna.trial.FrozenTrial) -> None:\n if len(self._hp_params) == 0:\n self._initialization(study)\n if trial.state != optuna.trial.TrialState.COMPLETE:\n return\n trial_value = trial.value if trial.value is not None else float(\"nan\")\n hparams = {}\n for param_name, param_value in trial.params.items():\n if param_name not in self._hp_params:\n self._add_distributions(trial.distributions)\n hparams[self._hp_params[param_name]] = param_value\n run_name = \"trial-%d\" % trial.number\n run_dir = os.path.join(self._dirname, run_name)\n with tf.summary.create_file_writer(run_dir).as_default():\n hp.hparams(hparams, trial_id=run_name) # record the values used in this trial\n tf.summary.scalar(self._metric_name, trial_value, step=trial.number)\n\n def _add_distributions(\n self, distributions: Dict[str, optuna.distributions.BaseDistribution]\n ) -> None:\n supported_distributions = (\n optuna.distributions.CategoricalDistribution,\n optuna.distributions.FloatDistribution,\n optuna.distributions.IntDistribution,\n )\n\n for param_name, param_distribution in distributions.items():\n if isinstance(param_distribution, optuna.distributions.FloatDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.RealInterval(float(param_distribution.low), float(param_distribution.high)),\n )\n elif isinstance(param_distribution, optuna.distributions.IntDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.IntInterval(param_distribution.low, param_distribution.high),\n )\n elif isinstance(param_distribution, optuna.distributions.CategoricalDistribution):\n self._hp_params[param_name] = hp.HParam(\n param_name,\n hp.Discrete(param_distribution.choices),\n )\n else:\n distribution_list = [\n distribution.__name__ for distribution in supported_distributions\n ]\n raise NotImplementedError(\n \"The distribution {} is not implemented. 
\"\n \"The parameter distribution should be one of the {}\".format(\n param_distribution, distribution_list\n )\n )\n\n def _initialization(self, study: optuna.Study) -> None:\n completed_trials = [\n trial\n for trial in study.get_trials(deepcopy=False)\n if trial.state == optuna.trial.TrialState.COMPLETE\n ]\n for trial in completed_trials:\n self._add_distributions(trial.distributions)\n", "path": "optuna/integration/tensorboard.py"}]} | 2,673 | 517 |
gh_patches_debug_11727 | rasdani/github-patches | git_diff | pypa__setuptools-1607 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Preserving read-only flags on package data causing access denied when building
I'm working with a read-only source directory on Windows and running into access denied errors when calling `python setup.py bdist_wheel`. I noticed that it was only files included in package_data that were causing this, which helped me track it down to:
[setuptools/setuptools/command/build_py.py](https://github.com/pypa/setuptools/blob/89155abb4222cf5a9dc81120e5c71e26b5af68f9/setuptools/command/build_py.py#L123)
`outf, copied = self.copy_file(srcfile, target)`
which is slightly different from its distutils version:
[cpython/Lib/distutils/command/build_py.py](https://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L141)
`self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False)`
So it looks like it comes down to this preserve_mode flag.
I don't know if there is a good reason to preserve things like read-only flags. There's a [comment](https://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L74) about preserving mode further up in the distutils code, which seems to suggest that not preserving it is the way to go.
</issue>
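A hedged standalone demonstration of the flag in question, calling `distutils.file_util.copy_file` directly (the temporary files are illustrative, and on very recent Python versions `distutils` itself may no longer be available):

```python
import os
import stat
import tempfile

from distutils.file_util import copy_file

src_dir, build_dir = tempfile.mkdtemp(), tempfile.mkdtemp()
src = os.path.join(src_dir, 'resource.dat')
with open(src, 'w') as fh:
    fh.write('package data')
os.chmod(src, stat.S_IREAD)                   # simulate a read-only source checkout

target = os.path.join(build_dir, 'resource.dat')
copy_file(src, target, preserve_mode=False)   # the build copy stays writable
copy_file(src, target, preserve_mode=False)   # so re-copying on a rebuild still succeeds
# With the default preserve_mode=True the first copy would be read-only as well,
# and overwriting it on the next build is what surfaces as "access denied" on Windows.
```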
<code>
[start of setuptools/command/build_py.py]
1 from glob import glob
2 from distutils.util import convert_path
3 import distutils.command.build_py as orig
4 import os
5 import fnmatch
6 import textwrap
7 import io
8 import distutils.errors
9 import itertools
10
11 from setuptools.extern import six
12 from setuptools.extern.six.moves import map, filter, filterfalse
13
14 try:
15 from setuptools.lib2to3_ex import Mixin2to3
16 except ImportError:
17
18 class Mixin2to3:
19 def run_2to3(self, files, doctests=True):
20 "do nothing"
21
22
23 class build_py(orig.build_py, Mixin2to3):
24 """Enhanced 'build_py' command that includes data files with packages
25
26 The data files are specified via a 'package_data' argument to 'setup()'.
27 See 'setuptools.dist.Distribution' for more details.
28
29 Also, this version of the 'build_py' command allows you to specify both
30 'py_modules' and 'packages' in the same setup operation.
31 """
32
33 def finalize_options(self):
34 orig.build_py.finalize_options(self)
35 self.package_data = self.distribution.package_data
36 self.exclude_package_data = (self.distribution.exclude_package_data or
37 {})
38 if 'data_files' in self.__dict__:
39 del self.__dict__['data_files']
40 self.__updated_files = []
41 self.__doctests_2to3 = []
42
43 def run(self):
44 """Build modules, packages, and copy data files to build directory"""
45 if not self.py_modules and not self.packages:
46 return
47
48 if self.py_modules:
49 self.build_modules()
50
51 if self.packages:
52 self.build_packages()
53 self.build_package_data()
54
55 self.run_2to3(self.__updated_files, False)
56 self.run_2to3(self.__updated_files, True)
57 self.run_2to3(self.__doctests_2to3, True)
58
59 # Only compile actual .py files, using our base class' idea of what our
60 # output files are.
61 self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
62
63 def __getattr__(self, attr):
64 "lazily compute data files"
65 if attr == 'data_files':
66 self.data_files = self._get_data_files()
67 return self.data_files
68 return orig.build_py.__getattr__(self, attr)
69
70 def build_module(self, module, module_file, package):
71 if six.PY2 and isinstance(package, six.string_types):
72 # avoid errors on Python 2 when unicode is passed (#190)
73 package = package.split('.')
74 outfile, copied = orig.build_py.build_module(self, module, module_file,
75 package)
76 if copied:
77 self.__updated_files.append(outfile)
78 return outfile, copied
79
80 def _get_data_files(self):
81 """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
82 self.analyze_manifest()
83 return list(map(self._get_pkg_data_files, self.packages or ()))
84
85 def _get_pkg_data_files(self, package):
86 # Locate package source directory
87 src_dir = self.get_package_dir(package)
88
89 # Compute package build directory
90 build_dir = os.path.join(*([self.build_lib] + package.split('.')))
91
92 # Strip directory from globbed filenames
93 filenames = [
94 os.path.relpath(file, src_dir)
95 for file in self.find_data_files(package, src_dir)
96 ]
97 return package, src_dir, build_dir, filenames
98
99 def find_data_files(self, package, src_dir):
100 """Return filenames for package's data files in 'src_dir'"""
101 patterns = self._get_platform_patterns(
102 self.package_data,
103 package,
104 src_dir,
105 )
106 globs_expanded = map(glob, patterns)
107 # flatten the expanded globs into an iterable of matches
108 globs_matches = itertools.chain.from_iterable(globs_expanded)
109 glob_files = filter(os.path.isfile, globs_matches)
110 files = itertools.chain(
111 self.manifest_files.get(package, []),
112 glob_files,
113 )
114 return self.exclude_data_files(package, src_dir, files)
115
116 def build_package_data(self):
117 """Copy data files into build directory"""
118 for package, src_dir, build_dir, filenames in self.data_files:
119 for filename in filenames:
120 target = os.path.join(build_dir, filename)
121 self.mkpath(os.path.dirname(target))
122 srcfile = os.path.join(src_dir, filename)
123 outf, copied = self.copy_file(srcfile, target)
124 srcfile = os.path.abspath(srcfile)
125 if (copied and
126 srcfile in self.distribution.convert_2to3_doctests):
127 self.__doctests_2to3.append(outf)
128
129 def analyze_manifest(self):
130 self.manifest_files = mf = {}
131 if not self.distribution.include_package_data:
132 return
133 src_dirs = {}
134 for package in self.packages or ():
135 # Locate package source directory
136 src_dirs[assert_relative(self.get_package_dir(package))] = package
137
138 self.run_command('egg_info')
139 ei_cmd = self.get_finalized_command('egg_info')
140 for path in ei_cmd.filelist.files:
141 d, f = os.path.split(assert_relative(path))
142 prev = None
143 oldf = f
144 while d and d != prev and d not in src_dirs:
145 prev = d
146 d, df = os.path.split(d)
147 f = os.path.join(df, f)
148 if d in src_dirs:
149 if path.endswith('.py') and f == oldf:
150 continue # it's a module, not data
151 mf.setdefault(src_dirs[d], []).append(path)
152
153 def get_data_files(self):
154 pass # Lazily compute data files in _get_data_files() function.
155
156 def check_package(self, package, package_dir):
157 """Check namespace packages' __init__ for declare_namespace"""
158 try:
159 return self.packages_checked[package]
160 except KeyError:
161 pass
162
163 init_py = orig.build_py.check_package(self, package, package_dir)
164 self.packages_checked[package] = init_py
165
166 if not init_py or not self.distribution.namespace_packages:
167 return init_py
168
169 for pkg in self.distribution.namespace_packages:
170 if pkg == package or pkg.startswith(package + '.'):
171 break
172 else:
173 return init_py
174
175 with io.open(init_py, 'rb') as f:
176 contents = f.read()
177 if b'declare_namespace' not in contents:
178 raise distutils.errors.DistutilsError(
179 "Namespace package problem: %s is a namespace package, but "
180 "its\n__init__.py does not call declare_namespace()! Please "
181 'fix it.\n(See the setuptools manual under '
182 '"Namespace Packages" for details.)\n"' % (package,)
183 )
184 return init_py
185
186 def initialize_options(self):
187 self.packages_checked = {}
188 orig.build_py.initialize_options(self)
189
190 def get_package_dir(self, package):
191 res = orig.build_py.get_package_dir(self, package)
192 if self.distribution.src_root is not None:
193 return os.path.join(self.distribution.src_root, res)
194 return res
195
196 def exclude_data_files(self, package, src_dir, files):
197 """Filter filenames for package's data files in 'src_dir'"""
198 files = list(files)
199 patterns = self._get_platform_patterns(
200 self.exclude_package_data,
201 package,
202 src_dir,
203 )
204 match_groups = (
205 fnmatch.filter(files, pattern)
206 for pattern in patterns
207 )
208 # flatten the groups of matches into an iterable of matches
209 matches = itertools.chain.from_iterable(match_groups)
210 bad = set(matches)
211 keepers = (
212 fn
213 for fn in files
214 if fn not in bad
215 )
216 # ditch dupes
217 return list(_unique_everseen(keepers))
218
219 @staticmethod
220 def _get_platform_patterns(spec, package, src_dir):
221 """
222 yield platform-specific path patterns (suitable for glob
223 or fn_match) from a glob-based spec (such as
224 self.package_data or self.exclude_package_data)
225 matching package in src_dir.
226 """
227 raw_patterns = itertools.chain(
228 spec.get('', []),
229 spec.get(package, []),
230 )
231 return (
232 # Each pattern has to be converted to a platform-specific path
233 os.path.join(src_dir, convert_path(pattern))
234 for pattern in raw_patterns
235 )
236
237
238 # from Python docs
239 def _unique_everseen(iterable, key=None):
240 "List unique elements, preserving order. Remember all elements ever seen."
241 # unique_everseen('AAAABBBCCDAABBB') --> A B C D
242 # unique_everseen('ABBCcAD', str.lower) --> A B C D
243 seen = set()
244 seen_add = seen.add
245 if key is None:
246 for element in filterfalse(seen.__contains__, iterable):
247 seen_add(element)
248 yield element
249 else:
250 for element in iterable:
251 k = key(element)
252 if k not in seen:
253 seen_add(k)
254 yield element
255
256
257 def assert_relative(path):
258 if not os.path.isabs(path):
259 return path
260 from distutils.errors import DistutilsSetupError
261
262 msg = textwrap.dedent("""
263 Error: setup script specifies an absolute path:
264
265 %s
266
267 setup() arguments must *always* be /-separated paths relative to the
268 setup.py directory, *never* absolute paths.
269 """).lstrip() % path
270 raise DistutilsSetupError(msg)
271
[end of setuptools/command/build_py.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py
--- a/setuptools/command/build_py.py
+++ b/setuptools/command/build_py.py
@@ -120,7 +120,7 @@
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
- outf, copied = self.copy_file(srcfile, target)
+ outf, copied = self.copy_file(srcfile, target, preserve_mode=False)
srcfile = os.path.abspath(srcfile)
if (copied and
srcfile in self.distribution.convert_2to3_doctests):
| {"golden_diff": "diff --git a/setuptools/command/build_py.py b/setuptools/command/build_py.py\n--- a/setuptools/command/build_py.py\n+++ b/setuptools/command/build_py.py\n@@ -120,7 +120,7 @@\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n- outf, copied = self.copy_file(srcfile, target)\n+ outf, copied = self.copy_file(srcfile, target, preserve_mode=False)\n srcfile = os.path.abspath(srcfile)\n if (copied and\n srcfile in self.distribution.convert_2to3_doctests):\n", "issue": "Preserving read-only flags on package data causing access denied when building\nI'm working with a read-only source directory on Windows and running into access denied errors when calling `python setup.py bdist_wheel`. I noticed that it was only files included in package_data that were causing this, which helped me track it down to:\r\n\r\n[setuptools/setuptools/command/build_py.py](\r\nhttps://github.com/pypa/setuptools/blob/89155abb4222cf5a9dc81120e5c71e26b5af68f9/setuptools/command/build_py.py#L123)\r\n`outf, copied = self.copy_file(srcfile, target)`\r\n\r\nwhich is slightly different from its distutils version:\r\n\r\n[cpython/Lib/distutils/command/build_py.py](\r\nhttps://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L141)\r\n`self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False)`\r\n\r\nSo it looks like it comes down to this preserve_mode flag.\r\n\r\nI don't know if there is a good reason to preserve things like read-only flags? There's a [comment](https://github.com/python/cpython/blob/d6345def68d3a0227253da26213dadb247f786db/Lib/distutils/command/build_py.py#L74) about preserving mode further up in the distutils code, which seems to suggest not preserving is the way to go.\n", "before_files": [{"content": "from glob import glob\nfrom distutils.util import convert_path\nimport distutils.command.build_py as orig\nimport os\nimport fnmatch\nimport textwrap\nimport io\nimport distutils.errors\nimport itertools\n\nfrom setuptools.extern import six\nfrom setuptools.extern.six.moves import map, filter, filterfalse\n\ntry:\n from setuptools.lib2to3_ex import Mixin2to3\nexcept ImportError:\n\n class Mixin2to3:\n def run_2to3(self, files, doctests=True):\n \"do nothing\"\n\n\nclass build_py(orig.build_py, Mixin2to3):\n \"\"\"Enhanced 'build_py' command that includes data files with packages\n\n The data files are specified via a 'package_data' argument to 'setup()'.\n See 'setuptools.dist.Distribution' for more details.\n\n Also, this version of the 'build_py' command allows you to specify both\n 'py_modules' and 'packages' in the same setup operation.\n \"\"\"\n\n def finalize_options(self):\n orig.build_py.finalize_options(self)\n self.package_data = self.distribution.package_data\n self.exclude_package_data = (self.distribution.exclude_package_data or\n {})\n if 'data_files' in self.__dict__:\n del self.__dict__['data_files']\n self.__updated_files = []\n self.__doctests_2to3 = []\n\n def run(self):\n \"\"\"Build modules, packages, and copy data files to build directory\"\"\"\n if not self.py_modules and not self.packages:\n return\n\n if self.py_modules:\n self.build_modules()\n\n if self.packages:\n self.build_packages()\n self.build_package_data()\n\n self.run_2to3(self.__updated_files, False)\n self.run_2to3(self.__updated_files, True)\n self.run_2to3(self.__doctests_2to3, True)\n\n # Only compile actual .py files, using our base class' idea of 
what our\n # output files are.\n self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))\n\n def __getattr__(self, attr):\n \"lazily compute data files\"\n if attr == 'data_files':\n self.data_files = self._get_data_files()\n return self.data_files\n return orig.build_py.__getattr__(self, attr)\n\n def build_module(self, module, module_file, package):\n if six.PY2 and isinstance(package, six.string_types):\n # avoid errors on Python 2 when unicode is passed (#190)\n package = package.split('.')\n outfile, copied = orig.build_py.build_module(self, module, module_file,\n package)\n if copied:\n self.__updated_files.append(outfile)\n return outfile, copied\n\n def _get_data_files(self):\n \"\"\"Generate list of '(package,src_dir,build_dir,filenames)' tuples\"\"\"\n self.analyze_manifest()\n return list(map(self._get_pkg_data_files, self.packages or ()))\n\n def _get_pkg_data_files(self, package):\n # Locate package source directory\n src_dir = self.get_package_dir(package)\n\n # Compute package build directory\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\n\n # Strip directory from globbed filenames\n filenames = [\n os.path.relpath(file, src_dir)\n for file in self.find_data_files(package, src_dir)\n ]\n return package, src_dir, build_dir, filenames\n\n def find_data_files(self, package, src_dir):\n \"\"\"Return filenames for package's data files in 'src_dir'\"\"\"\n patterns = self._get_platform_patterns(\n self.package_data,\n package,\n src_dir,\n )\n globs_expanded = map(glob, patterns)\n # flatten the expanded globs into an iterable of matches\n globs_matches = itertools.chain.from_iterable(globs_expanded)\n glob_files = filter(os.path.isfile, globs_matches)\n files = itertools.chain(\n self.manifest_files.get(package, []),\n glob_files,\n )\n return self.exclude_data_files(package, src_dir, files)\n\n def build_package_data(self):\n \"\"\"Copy data files into build directory\"\"\"\n for package, src_dir, build_dir, filenames in self.data_files:\n for filename in filenames:\n target = os.path.join(build_dir, filename)\n self.mkpath(os.path.dirname(target))\n srcfile = os.path.join(src_dir, filename)\n outf, copied = self.copy_file(srcfile, target)\n srcfile = os.path.abspath(srcfile)\n if (copied and\n srcfile in self.distribution.convert_2to3_doctests):\n self.__doctests_2to3.append(outf)\n\n def analyze_manifest(self):\n self.manifest_files = mf = {}\n if not self.distribution.include_package_data:\n return\n src_dirs = {}\n for package in self.packages or ():\n # Locate package source directory\n src_dirs[assert_relative(self.get_package_dir(package))] = package\n\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n for path in ei_cmd.filelist.files:\n d, f = os.path.split(assert_relative(path))\n prev = None\n oldf = f\n while d and d != prev and d not in src_dirs:\n prev = d\n d, df = os.path.split(d)\n f = os.path.join(df, f)\n if d in src_dirs:\n if path.endswith('.py') and f == oldf:\n continue # it's a module, not data\n mf.setdefault(src_dirs[d], []).append(path)\n\n def get_data_files(self):\n pass # Lazily compute data files in _get_data_files() function.\n\n def check_package(self, package, package_dir):\n \"\"\"Check namespace packages' __init__ for declare_namespace\"\"\"\n try:\n return self.packages_checked[package]\n except KeyError:\n pass\n\n init_py = orig.build_py.check_package(self, package, package_dir)\n self.packages_checked[package] = init_py\n\n if not init_py or not 
self.distribution.namespace_packages:\n return init_py\n\n for pkg in self.distribution.namespace_packages:\n if pkg == package or pkg.startswith(package + '.'):\n break\n else:\n return init_py\n\n with io.open(init_py, 'rb') as f:\n contents = f.read()\n if b'declare_namespace' not in contents:\n raise distutils.errors.DistutilsError(\n \"Namespace package problem: %s is a namespace package, but \"\n \"its\\n__init__.py does not call declare_namespace()! Please \"\n 'fix it.\\n(See the setuptools manual under '\n '\"Namespace Packages\" for details.)\\n\"' % (package,)\n )\n return init_py\n\n def initialize_options(self):\n self.packages_checked = {}\n orig.build_py.initialize_options(self)\n\n def get_package_dir(self, package):\n res = orig.build_py.get_package_dir(self, package)\n if self.distribution.src_root is not None:\n return os.path.join(self.distribution.src_root, res)\n return res\n\n def exclude_data_files(self, package, src_dir, files):\n \"\"\"Filter filenames for package's data files in 'src_dir'\"\"\"\n files = list(files)\n patterns = self._get_platform_patterns(\n self.exclude_package_data,\n package,\n src_dir,\n )\n match_groups = (\n fnmatch.filter(files, pattern)\n for pattern in patterns\n )\n # flatten the groups of matches into an iterable of matches\n matches = itertools.chain.from_iterable(match_groups)\n bad = set(matches)\n keepers = (\n fn\n for fn in files\n if fn not in bad\n )\n # ditch dupes\n return list(_unique_everseen(keepers))\n\n @staticmethod\n def _get_platform_patterns(spec, package, src_dir):\n \"\"\"\n yield platform-specific path patterns (suitable for glob\n or fn_match) from a glob-based spec (such as\n self.package_data or self.exclude_package_data)\n matching package in src_dir.\n \"\"\"\n raw_patterns = itertools.chain(\n spec.get('', []),\n spec.get(package, []),\n )\n return (\n # Each pattern has to be converted to a platform-specific path\n os.path.join(src_dir, convert_path(pattern))\n for pattern in raw_patterns\n )\n\n\n# from Python docs\ndef _unique_everseen(iterable, key=None):\n \"List unique elements, preserving order. Remember all elements ever seen.\"\n # unique_everseen('AAAABBBCCDAABBB') --> A B C D\n # unique_everseen('ABBCcAD', str.lower) --> A B C D\n seen = set()\n seen_add = seen.add\n if key is None:\n for element in filterfalse(seen.__contains__, iterable):\n seen_add(element)\n yield element\n else:\n for element in iterable:\n k = key(element)\n if k not in seen:\n seen_add(k)\n yield element\n\n\ndef assert_relative(path):\n if not os.path.isabs(path):\n return path\n from distutils.errors import DistutilsSetupError\n\n msg = textwrap.dedent(\"\"\"\n Error: setup script specifies an absolute path:\n\n %s\n\n setup() arguments must *always* be /-separated paths relative to the\n setup.py directory, *never* absolute paths.\n \"\"\").lstrip() % path\n raise DistutilsSetupError(msg)\n", "path": "setuptools/command/build_py.py"}]} | 3,689 | 147 |
gh_patches_debug_62498 | rasdani/github-patches | git_diff | DDMAL__CantusDB-1464 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django debug toolbar is not working
I just noticed that while working in my development environment (devcontainer setup), the Django debug toolbar doesn't show. Perhaps this was a result of #1454?
</issue>
<code>
[start of django/cantusdb_project/cantusdb/settings.py]
1 """
2 Django settings for cantusdb project.
3
4 Generated by 'django-admin startproject' using Django 3.0.6.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.0/ref/settings/
11 """
12
13 import os
14 from distutils.util import strtobool
15 from django.contrib.messages import constants as messages
16
17 # https://ordinarycoders.com/blog/article/django-messages-framework
18 MESSAGE_TAGS = {
19 messages.DEBUG: "alert-secondary",
20 messages.INFO: "alert-info",
21 messages.SUCCESS: "alert-success",
22 messages.WARNING: "alert-warning",
23 messages.ERROR: "alert-danger",
24 }
25
26 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
27 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
28
29 STATIC_ROOT = os.getenv("CANTUSDB_STATIC_ROOT")
30 MEDIA_ROOT = os.getenv("CANTUSDB_MEDIA_ROOT")
31
32 # Quick-start development settings - unsuitable for production
33 # See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
34
35 # SECURITY WARNING: keep the secret key used in production secret!
36 SECRET_KEY = os.getenv("CANTUSDB_SECRET_KEY")
37
38 PROJECT_ENVIRONMENT = os.getenv("PROJECT_ENVIRONMENT")
39
40 # SECURITY WARNING: don't run with debug turned on in production!
41 DEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT=="DEVELOPMENT"
42
43 ALLOWED_HOSTS = [os.getenv("CANTUSDB_HOST")]
44 CSRF_TRUSTED_ORIGINS = [f'https://{os.getenv("CANTUSDB_HOST")}']
45 if PROJECT_ENVIRONMENT == "DEVELOPMENT":
46 DEBUG = True
47
48 # Application definition
49
50 INSTALLED_APPS = [
51 "dal",
52 "dal_select2",
53 "django.contrib.admin",
54 "django.contrib.auth",
55 "django.contrib.contenttypes",
56 "django.contrib.sessions",
57 "django.contrib.messages",
58 "django.contrib.staticfiles",
59 "django.contrib.sites",
60 "django.contrib.flatpages",
61 "django.contrib.humanize",
62 "django.contrib.postgres",
63 "extra_views",
64 "main_app",
65 "articles",
66 "django_quill", # to provide rich-text field for articles
67 "reversion", # django-reversion, for version history of objects in database
68 "users",
69 ]
70
71 MIDDLEWARE = [
72 "django.middleware.security.SecurityMiddleware",
73 "django.contrib.sessions.middleware.SessionMiddleware",
74 "django.middleware.common.CommonMiddleware",
75 "django.middleware.csrf.CsrfViewMiddleware",
76 "django.contrib.auth.middleware.AuthenticationMiddleware",
77 "django.contrib.messages.middleware.MessageMiddleware",
78 "django.middleware.clickjacking.XFrameOptionsMiddleware",
79 "django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
80 "reversion.middleware.RevisionMiddleware",
81 ]
82
83 ROOT_URLCONF = "cantusdb.urls"
84
85 TEMPLATES = [
86 {
87 "BACKEND": "django.template.backends.django.DjangoTemplates",
88 "DIRS": [os.path.join(BASE_DIR, "templates")],
89 "APP_DIRS": True,
90 "OPTIONS": {
91 "context_processors": [
92 "django.template.context_processors.debug",
93 "django.template.context_processors.request",
94 "django.contrib.auth.context_processors.auth",
95 "django.contrib.messages.context_processors.messages",
96 "main_app.context_processors.determine_project_environment",
97 ],
98 },
99 },
100 ]
101
102 TEMPLATE_LOADERS = "django.template.loaders.app_directories.load_template_source"
103
104 WSGI_APPLICATION = "cantusdb.wsgi.application"
105
106
107 # Database
108 # https://docs.djangoproject.com/en/3.0/ref/settings/#databases
109
110 DATABASES = {
111 "default": {
112 "ENGINE": "django.db.backends.postgresql",
113 "NAME": os.getenv("POSTGRES_DB"),
114 "USER": os.getenv("POSTGRES_USER"),
115 "HOST": os.getenv("POSTGRES_HOST"),
116 "PORT": os.getenv("POSTGRES_PORT"),
117 "PASSWORD": os.getenv("POSTGRES_PASSWORD"),
118 }
119 }
120
121
122 # Password validation
123 # https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
124
125 AUTH_PASSWORD_VALIDATORS = [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139
140
141 # Internationalization
142 # https://docs.djangoproject.com/en/3.0/topics/i18n/
143
144 LANGUAGE_CODE = "en-us"
145
146 TIME_ZONE = "UTC"
147
148 USE_I18N = True
149
150 USE_L10N = True
151
152 USE_TZ = True
153
154
155 # Static files (CSS, JavaScript, Images)
156 # https://docs.djangoproject.com/en/3.0/howto/static-files/
157
158 STATIC_URL = "/static/"
159
160 STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
161
162 AUTH_USER_MODEL = "users.User"
163 LOGIN_REDIRECT_URL = "/"
164 LOGIN_URL = "/login/"
165 LOGOUT_REDIRECT_URL = "/login/"
166
167 SITE_ID = 4
168
169 # New in django 3.2: specify the default type of auto-created primary keys
170 # https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
171 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
172
173 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
174 EMAIL_HOST = "email-smtp.us-west-2.amazonaws.com"
175 EMAIL_PORT = 587
176 EMAIL_HOST_USER = os.getenv("AWS_EMAIL_HOST_USER")
177 EMAIL_HOST_PASSWORD = os.getenv("AWS_EMAIL_HOST_PASSWORD")
178 EMAIL_USE_TLS = True
179
180 DEFAULT_FROM_EMAIL = "[email protected]"
181
182 # automatically disable all panels which user can then manually enable
183 DEBUG_TOOLBAR_CONFIG = {
184 "DISABLE_PANELS": {
185 "debug_toolbar.panels.history.HistoryPanel",
186 "debug_toolbar.panels.versions.VersionsPanel",
187 "debug_toolbar.panels.timer.TimerPanel",
188 "debug_toolbar.panels.settings.SettingsPanel",
189 "debug_toolbar.panels.headers.HeadersPanel",
190 "debug_toolbar.panels.request.RequestPanel",
191 "debug_toolbar.panels.sql.SQLPanel",
192 "debug_toolbar.panels.staticfiles.StaticFilesPanel",
193 "debug_toolbar.panels.templates.TemplatesPanel",
194 "debug_toolbar.panels.cache.CachePanel",
195 "debug_toolbar.panels.signals.SignalsPanel",
196 "debug_toolbar.panels.logging.LoggingPanel",
197 "debug_toolbar.panels.redirects.RedirectsPanel",
198 "debug_toolbar.panels.profiling.ProfilingPanel",
199 },
200 }
201
202 INTERNAL_IPS = [
203 "127.0.0.1",
204 ]
205
206 if DEBUG:
207 INSTALLED_APPS.append("debug_toolbar")
208 # debug toolbar must be inserted as early in the middleware as possible
209 MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
210
[end of django/cantusdb_project/cantusdb/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py
--- a/django/cantusdb_project/cantusdb/settings.py
+++ b/django/cantusdb_project/cantusdb/settings.py
@@ -197,6 +197,9 @@
"debug_toolbar.panels.redirects.RedirectsPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
},
+ "SHOW_TOOLBAR_CALLBACK": lambda request: (
+ False if request.headers.get("x-requested-with") == "XMLHttpRequest" else True
+ ),
}
INTERNAL_IPS = [
| {"golden_diff": "diff --git a/django/cantusdb_project/cantusdb/settings.py b/django/cantusdb_project/cantusdb/settings.py\n--- a/django/cantusdb_project/cantusdb/settings.py\n+++ b/django/cantusdb_project/cantusdb/settings.py\n@@ -197,6 +197,9 @@\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n+ \"SHOW_TOOLBAR_CALLBACK\": lambda request: (\n+ False if request.headers.get(\"x-requested-with\") == \"XMLHttpRequest\" else True\n+ ),\n }\n \n INTERNAL_IPS = [\n", "issue": "Django debug toolbar is not working\nI just noticed that while working in my development environment (devcontainer setup), the Django debug toolbar doesn't show. Perhaps this was as a result of #1454? \n", "before_files": [{"content": "\"\"\"\nDjango settings for cantusdb project.\n\nGenerated by 'django-admin startproject' using Django 3.0.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.0/ref/settings/\n\"\"\"\n\nimport os\nfrom distutils.util import strtobool\nfrom django.contrib.messages import constants as messages\n\n# https://ordinarycoders.com/blog/article/django-messages-framework\nMESSAGE_TAGS = {\n messages.DEBUG: \"alert-secondary\",\n messages.INFO: \"alert-info\",\n messages.SUCCESS: \"alert-success\",\n messages.WARNING: \"alert-warning\",\n messages.ERROR: \"alert-danger\",\n}\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSTATIC_ROOT = os.getenv(\"CANTUSDB_STATIC_ROOT\")\nMEDIA_ROOT = os.getenv(\"CANTUSDB_MEDIA_ROOT\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"CANTUSDB_SECRET_KEY\")\n\nPROJECT_ENVIRONMENT = os.getenv(\"PROJECT_ENVIRONMENT\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False # this is switched to True below when PROJECT_ENVIRONMENT==\"DEVELOPMENT\"\n\nALLOWED_HOSTS = [os.getenv(\"CANTUSDB_HOST\")]\nCSRF_TRUSTED_ORIGINS = [f'https://{os.getenv(\"CANTUSDB_HOST\")}']\nif PROJECT_ENVIRONMENT == \"DEVELOPMENT\":\n DEBUG = True\n\n# Application definition\n\nINSTALLED_APPS = [\n \"dal\",\n \"dal_select2\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sites\",\n \"django.contrib.flatpages\",\n \"django.contrib.humanize\",\n \"django.contrib.postgres\",\n \"extra_views\",\n \"main_app\",\n \"articles\",\n \"django_quill\", # to provide rich-text field for articles\n \"reversion\", # django-reversion, for version history of objects in database\n \"users\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware\",\n \"reversion.middleware.RevisionMiddleware\",\n]\n\nROOT_URLCONF = 
\"cantusdb.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"main_app.context_processors.determine_project_environment\",\n ],\n },\n },\n]\n\nTEMPLATE_LOADERS = \"django.template.loaders.app_directories.load_template_source\"\n\nWSGI_APPLICATION = \"cantusdb.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_DB\"),\n \"USER\": os.getenv(\"POSTGRES_USER\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\"),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.0/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.0/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"static\")]\n\nAUTH_USER_MODEL = \"users.User\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGIN_URL = \"/login/\"\nLOGOUT_REDIRECT_URL = \"/login/\"\n\nSITE_ID = 4\n\n# New in django 3.2: specify the default type of auto-created primary keys\n# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_HOST = \"email-smtp.us-west-2.amazonaws.com\"\nEMAIL_PORT = 587\nEMAIL_HOST_USER = os.getenv(\"AWS_EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = os.getenv(\"AWS_EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = \"[email protected]\"\n\n# automatically disable all panels which user can then manually enable\nDEBUG_TOOLBAR_CONFIG = {\n \"DISABLE_PANELS\": {\n \"debug_toolbar.panels.history.HistoryPanel\",\n \"debug_toolbar.panels.versions.VersionsPanel\",\n \"debug_toolbar.panels.timer.TimerPanel\",\n \"debug_toolbar.panels.settings.SettingsPanel\",\n \"debug_toolbar.panels.headers.HeadersPanel\",\n \"debug_toolbar.panels.request.RequestPanel\",\n \"debug_toolbar.panels.sql.SQLPanel\",\n \"debug_toolbar.panels.staticfiles.StaticFilesPanel\",\n \"debug_toolbar.panels.templates.TemplatesPanel\",\n \"debug_toolbar.panels.cache.CachePanel\",\n \"debug_toolbar.panels.signals.SignalsPanel\",\n \"debug_toolbar.panels.logging.LoggingPanel\",\n \"debug_toolbar.panels.redirects.RedirectsPanel\",\n \"debug_toolbar.panels.profiling.ProfilingPanel\",\n },\n}\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nif DEBUG:\n 
INSTALLED_APPS.append(\"debug_toolbar\")\n # debug toolbar must be inserted as early in the middleware as possible\n MIDDLEWARE.insert(0, \"debug_toolbar.middleware.DebugToolbarMiddleware\")\n", "path": "django/cantusdb_project/cantusdb/settings.py"}]} | 2,618 | 148 |
gh_patches_debug_6639 | rasdani/github-patches | git_diff | ethereum__web3.py-2217 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using typing-extensions 4 with web3.py and python < 3.8
Hey guys,
This is a question on the typing-extensions package. In setup.py https://github.com/ethereum/web3.py/blob/74fbcd1761407ee37808173b089d8e44e537fb99/setup.py#L88-L89
you require <4 and python <3.8. From what I understand, that is because these extensions have made it into the stdlib typing module from 3.8 on.
But from what I see, this requirement creates a problem for projects like ours which are still on Python 3.7, use web3.py, and want to use typing-extensions 4. Is there any reason for the < 4 requirement?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8 extras_require = {
9 'tester': [
10 "eth-tester[py-evm]==v0.6.0-beta.4",
11 "py-geth>=3.6.0,<4",
12 ],
13 'linter': [
14 "flake8==3.8.3",
15 "isort>=4.2.15,<4.3.5",
16 "mypy==0.812",
17 ],
18 'docs': [
19 "mock",
20 "sphinx-better-theme>=0.1.4",
21 "click>=5.1",
22 "configparser==3.5.0",
23 "contextlib2>=0.5.4",
24 "py-geth>=3.6.0,<4",
25 "py-solc>=0.4.0",
26 "pytest>=4.4.0,<5.0.0",
27 "sphinx>=3.0,<4",
28 "sphinx_rtd_theme>=0.1.9",
29 "toposort>=1.4",
30 "towncrier==18.5.0",
31 "urllib3",
32 "wheel"
33 ],
34 'dev': [
35 "bumpversion",
36 "flaky>=3.7.0,<4",
37 "hypothesis>=3.31.2,<6",
38 "pytest>=4.4.0,<5.0.0",
39 "pytest-asyncio>=0.10.0,<0.11",
40 "pytest-mock>=1.10,<2",
41 "pytest-pythonpath>=0.3",
42 "pytest-watch>=4.2,<5",
43 "pytest-xdist>=1.29,<2",
44 "setuptools>=38.6.0",
45 "tox>=1.8.0",
46 "tqdm>4.32,<5",
47 "twine>=1.13,<2",
48 "pluggy==0.13.1",
49 "when-changed>=0.3.0,<0.4"
50 ]
51 }
52
53 extras_require['dev'] = (
54 extras_require['tester']
55 + extras_require['linter']
56 + extras_require['docs']
57 + extras_require['dev']
58 )
59
60 with open('./README.md') as readme:
61 long_description = readme.read()
62
63 setup(
64 name='web3',
65 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
66 version='5.25.0',
67 description="""Web3.py""",
68 long_description_content_type='text/markdown',
69 long_description=long_description,
70 author='Piper Merriam',
71 author_email='[email protected]',
72 url='https://github.com/ethereum/web3.py',
73 include_package_data=True,
74 install_requires=[
75 "aiohttp>=3.7.4.post0,<4",
76 "eth-abi>=2.0.0b6,<3.0.0",
77 "eth-account>=0.5.6,<0.6.0",
78 "eth-hash[pycryptodome]>=0.2.0,<1.0.0",
79 "eth-typing>=2.0.0,<3.0.0",
80 "eth-utils>=1.9.5,<2.0.0",
81 "hexbytes>=0.1.0,<1.0.0",
82 "ipfshttpclient==0.8.0a2",
83 "jsonschema>=3.2.0,<4.0.0",
84 "lru-dict>=1.1.6,<2.0.0",
85 "protobuf>=3.10.0,<4",
86 "pywin32>=223;platform_system=='Windows'",
87 "requests>=2.16.0,<3.0.0",
88 # remove typing_extensions after python_requires>=3.8, see web3._utils.compat
89 "typing-extensions>=3.7.4.1,<4;python_version<'3.8'",
90 "websockets>=9.1,<10",
91 ],
92 python_requires='>=3.6,<4',
93 extras_require=extras_require,
94 py_modules=['web3', 'ens', 'ethpm'],
95 entry_points={"pytest11": ["pytest_ethereum = web3.tools.pytest_ethereum.plugins"]},
96 license="MIT",
97 zip_safe=False,
98 keywords='ethereum',
99 packages=find_packages(exclude=["tests", "tests.*"]),
100 package_data={"web3": ["py.typed"]},
101 classifiers=[
102 'Development Status :: 5 - Production/Stable',
103 'Intended Audience :: Developers',
104 'License :: OSI Approved :: MIT License',
105 'Natural Language :: English',
106 'Programming Language :: Python :: 3',
107 'Programming Language :: Python :: 3.6',
108 'Programming Language :: Python :: 3.7',
109 'Programming Language :: Python :: 3.8',
110 'Programming Language :: Python :: 3.9',
111 ],
112 )
113
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -86,7 +86,7 @@
"pywin32>=223;platform_system=='Windows'",
"requests>=2.16.0,<3.0.0",
# remove typing_extensions after python_requires>=3.8, see web3._utils.compat
- "typing-extensions>=3.7.4.1,<4;python_version<'3.8'",
+ "typing-extensions>=3.7.4.1,<5;python_version<'3.8'",
"websockets>=9.1,<10",
],
python_requires='>=3.6,<4',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -86,7 +86,7 @@\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n- \"typing-extensions>=3.7.4.1,<4;python_version<'3.8'\",\n+ \"typing-extensions>=3.7.4.1,<5;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n", "issue": "Using typing-extensions 4 with web3.py and python < 3.8\nHey guys,\r\n\r\nThis is question on typing-extensions package. In setup.py https://github.com/ethereum/web3.py/blob/74fbcd1761407ee37808173b089d8e44e537fb99/setup.py#L88-L89\r\n\r\nyou require <4 and python <3.8. From what I understand that is since these extensions you use have made it into stdlib's typing module from 3.8 and on.\r\n\r\nBut from what I see this requirement creates a probem for projects like ours which are still in python 3.7, use web3.py and want to use typing-extensions 4. Is there any reason for the < 4 requirement?\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nextras_require = {\n 'tester': [\n \"eth-tester[py-evm]==v0.6.0-beta.4\",\n \"py-geth>=3.6.0,<4\",\n ],\n 'linter': [\n \"flake8==3.8.3\",\n \"isort>=4.2.15,<4.3.5\",\n \"mypy==0.812\",\n ],\n 'docs': [\n \"mock\",\n \"sphinx-better-theme>=0.1.4\",\n \"click>=5.1\",\n \"configparser==3.5.0\",\n \"contextlib2>=0.5.4\",\n \"py-geth>=3.6.0,<4\",\n \"py-solc>=0.4.0\",\n \"pytest>=4.4.0,<5.0.0\",\n \"sphinx>=3.0,<4\",\n \"sphinx_rtd_theme>=0.1.9\",\n \"toposort>=1.4\",\n \"towncrier==18.5.0\",\n \"urllib3\",\n \"wheel\"\n ],\n 'dev': [\n \"bumpversion\",\n \"flaky>=3.7.0,<4\",\n \"hypothesis>=3.31.2,<6\",\n \"pytest>=4.4.0,<5.0.0\",\n \"pytest-asyncio>=0.10.0,<0.11\",\n \"pytest-mock>=1.10,<2\",\n \"pytest-pythonpath>=0.3\",\n \"pytest-watch>=4.2,<5\",\n \"pytest-xdist>=1.29,<2\",\n \"setuptools>=38.6.0\",\n \"tox>=1.8.0\",\n \"tqdm>4.32,<5\",\n \"twine>=1.13,<2\",\n \"pluggy==0.13.1\",\n \"when-changed>=0.3.0,<0.4\"\n ]\n}\n\nextras_require['dev'] = (\n extras_require['tester']\n + extras_require['linter']\n + extras_require['docs']\n + extras_require['dev']\n)\n\nwith open('./README.md') as readme:\n long_description = readme.read()\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='5.25.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_content_type='text/markdown',\n long_description=long_description,\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"aiohttp>=3.7.4.post0,<4\",\n \"eth-abi>=2.0.0b6,<3.0.0\",\n \"eth-account>=0.5.6,<0.6.0\",\n \"eth-hash[pycryptodome]>=0.2.0,<1.0.0\",\n \"eth-typing>=2.0.0,<3.0.0\",\n \"eth-utils>=1.9.5,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"ipfshttpclient==0.8.0a2\",\n \"jsonschema>=3.2.0,<4.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"protobuf>=3.10.0,<4\",\n \"pywin32>=223;platform_system=='Windows'\",\n \"requests>=2.16.0,<3.0.0\",\n # remove typing_extensions after python_requires>=3.8, see web3._utils.compat\n \"typing-extensions>=3.7.4.1,<4;python_version<'3.8'\",\n \"websockets>=9.1,<10\",\n ],\n python_requires='>=3.6,<4',\n extras_require=extras_require,\n py_modules=['web3', 'ens', 'ethpm'],\n entry_points={\"pytest11\": [\"pytest_ethereum = web3.tools.pytest_ethereum.plugins\"]},\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"web3\": [\"py.typed\"]},\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n)\n", "path": "setup.py"}]} | 2,059 | 158 |
gh_patches_debug_670 | rasdani/github-patches | git_diff | pytorch__examples-182 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Division error
Training a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization.
- python2.7
- torch==0.1.12.post2
- torchvision==0.1.8
````
Traceback (most recent call last):
File "neural_style/neural_style.py", line 226, in <module>
main()
File "neural_style/neural_style.py", line 220, in main
train(args)
File "neural_style/neural_style.py", line 65, in train
style_v = utils.normalize_batch(style_v)
File "/home/paperspace/embro/neural_style/utils.py", line 42, in normalize_batch
batch /= Variable(std)
File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 793, in __idiv__
return self.div_(other)
File "/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py", line 323, in div_
raise RuntimeError("div_ only supports scalar multiplication")
````
</issue>
<code>
[start of fast_neural_style/neural_style/utils.py]
1 import torch
2 from PIL import Image
3 from torch.autograd import Variable
4
5
6 def load_image(filename, size=None, scale=None):
7 img = Image.open(filename)
8 if size is not None:
9 img = img.resize((size, size), Image.ANTIALIAS)
10 elif scale is not None:
11 img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
12 return img
13
14
15 def save_image(filename, data):
16 img = data.clone().clamp(0, 255).numpy()
17 img = img.transpose(1, 2, 0).astype("uint8")
18 img = Image.fromarray(img)
19 img.save(filename)
20
21
22 def gram_matrix(y):
23 (b, ch, h, w) = y.size()
24 features = y.view(b, ch, w * h)
25 features_t = features.transpose(1, 2)
26 gram = features.bmm(features_t) / (ch * h * w)
27 return gram
28
29
30 def normalize_batch(batch):
31 # normalize using imagenet mean and std
32 mean = batch.data.new(batch.data.size())
33 std = batch.data.new(batch.data.size())
34 mean[:, 0, :, :] = 0.485
35 mean[:, 1, :, :] = 0.456
36 mean[:, 2, :, :] = 0.406
37 std[:, 0, :, :] = 0.229
38 std[:, 1, :, :] = 0.224
39 std[:, 2, :, :] = 0.225
40 batch = torch.div(batch, 255.0)
41 batch -= Variable(mean)
42 batch /= Variable(std)
43 return batch
44
[end of fast_neural_style/neural_style/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py
--- a/fast_neural_style/neural_style/utils.py
+++ b/fast_neural_style/neural_style/utils.py
@@ -39,5 +39,5 @@
std[:, 2, :, :] = 0.225
batch = torch.div(batch, 255.0)
batch -= Variable(mean)
- batch /= Variable(std)
+ batch = batch / Variable(std)
return batch
| {"golden_diff": "diff --git a/fast_neural_style/neural_style/utils.py b/fast_neural_style/neural_style/utils.py\n--- a/fast_neural_style/neural_style/utils.py\n+++ b/fast_neural_style/neural_style/utils.py\n@@ -39,5 +39,5 @@\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n- batch /= Variable(std)\n+ batch = batch / Variable(std)\n return batch\n", "issue": "Division error\nTraining a model for `fast-neural-style` raises a RuntimeError from variable division during input normalization. \r\n\r\n- python2.7\r\n- torch==0.1.12.post2\r\n- torchvision==0.1.8\r\n\r\n````\r\nTraceback (most recent call last):\r\n File \"neural_style/neural_style.py\", line 226, in <module>\r\n main()\r\n File \"neural_style/neural_style.py\", line 220, in main\r\n train(args)\r\n File \"neural_style/neural_style.py\", line 65, in train\r\n style_v = utils.normalize_batch(style_v)\r\n File \"/home/paperspace/embro/neural_style/utils.py\", line 42, in normalize_batch\r\n batch /= Variable(std)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py\", line 793, in __idiv__\r\n return self.div_(other)\r\n File \"/usr/local/lib/python2.7/dist-packages/torch/autograd/variable.py\", line 323, in div_\r\n raise RuntimeError(\"div_ only supports scalar multiplication\")\r\n````\r\n\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom PIL import Image\nfrom torch.autograd import Variable\n\n\ndef load_image(filename, size=None, scale=None):\n img = Image.open(filename)\n if size is not None:\n img = img.resize((size, size), Image.ANTIALIAS)\n elif scale is not None:\n img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)\n return img\n\n\ndef save_image(filename, data):\n img = data.clone().clamp(0, 255).numpy()\n img = img.transpose(1, 2, 0).astype(\"uint8\")\n img = Image.fromarray(img)\n img.save(filename)\n\n\ndef gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / (ch * h * w)\n return gram\n\n\ndef normalize_batch(batch):\n # normalize using imagenet mean and std\n mean = batch.data.new(batch.data.size())\n std = batch.data.new(batch.data.size())\n mean[:, 0, :, :] = 0.485\n mean[:, 1, :, :] = 0.456\n mean[:, 2, :, :] = 0.406\n std[:, 0, :, :] = 0.229\n std[:, 1, :, :] = 0.224\n std[:, 2, :, :] = 0.225\n batch = torch.div(batch, 255.0)\n batch -= Variable(mean)\n batch /= Variable(std)\n return batch\n", "path": "fast_neural_style/neural_style/utils.py"}]} | 1,263 | 116 |
gh_patches_debug_17674 | rasdani/github-patches | git_diff | feast-dev__feast-1504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.6 Local Mode breaks due to usage of pathlib.Path and sqlite3
## Expected Behavior
Python 3.6 in local mode works when following the quickstart
## Current Behavior
Unable to follow the quickstart due to the error shown below
## Steps to reproduce
1. Create a Python 3.6 environment. E.g. `conda create --name=feast36 python=3.6`
2. Install feast and other deps `pip install feast`
3. Follow the quickstart
When running the quick start it will fail with the following msg.
```
(feast36) ➜ temp_feat$ feast init feat1
Creating a new Feast repository in /home/user/Documents/temp_feat/feat1.
(feast36) ➜ temp_feat$ cd feat1
(feast36) ➜ feat1$ feast apply
Registered entity driver_id
Registered feature view driver_hourly_stats
Deploying infrastructure for driver_hourly_stats
Traceback (most recent call last):
File "/home/user/anaconda3/envs/feast36/bin/feast", line 8, in <module>
sys.exit(cli())
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/cli.py", line 160, in apply_total_command
apply_total(repo_config, Path.cwd())
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/repo_operations.py", line 148, in apply_total
partial=False,
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py", line 55, in update_infra
conn = self._get_conn()
File "/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py", line 45, in _get_conn
self._db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
TypeError: argument 1 must be str, not PosixPath
```
### Specifications
- Version: 3.6
- Platform: Ubuntu 20.04, also tested on Ubuntu 18.05
- Subsystem:
## Possible Solution
The sqlite3 issue is resolved in Python 3.7 as shown here:
https://bugs.python.org/issue33496
A solution could be to add `self._db_path = str(self._db_path)` or similar in the `infra/local.py` file
I couldn't find a similar issue - in case it's resolved in an upstream commit...
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import glob
15 import os
16 import re
17 import subprocess
18
19 from distutils.cmd import Command
20 from setuptools import find_packages
21
22 try:
23 from setuptools import setup
24 from setuptools.command.install import install
25 from setuptools.command.develop import develop
26 from setuptools.command.egg_info import egg_info
27 from setuptools.command.sdist import sdist
28 from setuptools.command.build_py import build_py
29 except ImportError:
30 from distutils.core import setup
31 from distutils.command.install import install
32 from distutils.command.build_py import build_py
33
34 NAME = "feast"
35 DESCRIPTION = "Python SDK for Feast"
36 URL = "https://github.com/feast-dev/feast"
37 AUTHOR = "Feast"
38 REQUIRES_PYTHON = ">=3.6.0"
39
40 REQUIRED = [
41 "Click==7.*",
42 "colorama>=0.3.9",
43 "fastavro>=0.22.11,<0.23",
44 "google-api-core>=1.23.0",
45 "google-cloud-bigquery>=2.0.*",
46 "google-cloud-bigquery-storage >= 2.0.0",
47 "google-cloud-storage>=1.20.*",
48 "google-cloud-core==1.4.*",
49 "googleapis-common-protos==1.52.*",
50 "grpcio==1.31.0",
51 "Jinja2>=2.0.0",
52 "jsonschema",
53 "mmh3",
54 "numpy<1.20.0",
55 "pandas~=1.0.0",
56 "pandavro==1.5.*",
57 "protobuf>=3.10",
58 "pyarrow==2.0.0",
59 "pydantic>=1.0.0",
60 "PyYAML==5.3.*",
61 "tabulate==0.8.*",
62 "toml==0.10.*",
63 "tqdm==4.*",
64 ]
65
66 CI_REQUIRED = [
67 "cryptography==3.3.2",
68 "flake8",
69 "black==19.10b0",
70 "isort>=5",
71 "grpcio-tools==1.31.0",
72 "grpcio-testing==1.31.0",
73 "mock==2.0.0",
74 "moto",
75 "mypy==0.790",
76 "mypy-protobuf==1.24",
77 "avro==1.10.0",
78 "gcsfs",
79 "urllib3>=1.25.4",
80 "pytest==6.0.0",
81 "pytest-lazy-fixture==0.6.3",
82 "pytest-timeout==1.4.2",
83 "pytest-ordering==0.6.*",
84 "pytest-mock==1.10.4",
85 "Sphinx",
86 "sphinx-rtd-theme",
87 "tenacity",
88 "adlfs==0.5.9",
89 "firebase-admin==4.5.2",
90 "google-cloud-datastore==2.1.0",
91 "pre-commit"
92 ]
93
94 # README file from Feast repo root directory
95 repo_root = (
96 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
97 .communicate()[0]
98 .rstrip()
99 .decode("utf-8")
100 )
101 README_FILE = os.path.join(repo_root, "README.md")
102 with open(os.path.join(README_FILE), "r") as f:
103 LONG_DESCRIPTION = f.read()
104
105 # Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
106 # Regex modified from default tag regex in:
107 # https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
108 TAG_REGEX = re.compile(
109 r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
110 )
111
112
113 class BuildProtoCommand(Command):
114 description = "Builds the proto files into python files."
115
116 def initialize_options(self):
117 self.protoc = ["python", "-m", "grpc_tools.protoc"] # find_executable("protoc")
118 self.proto_folder = os.path.join(repo_root, "protos")
119 self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')
120 self.sub_folders = ["core", "serving", "types", "storage"]
121
122 def finalize_options(self):
123 pass
124
125 def _generate_protos(self, path):
126 proto_files = glob.glob(os.path.join(self.proto_folder, path))
127
128 subprocess.check_call(self.protoc + [
129 '-I', self.proto_folder,
130 '--python_out', self.this_package,
131 '--grpc_python_out', self.this_package,
132 '--mypy_out', self.this_package] + proto_files)
133
134 def run(self):
135 for sub_folder in self.sub_folders:
136 self._generate_protos(f'feast/{sub_folder}/*.proto')
137
138 from pathlib import Path
139
140 for path in Path('feast/protos').rglob('*.py'):
141 for folder in self.sub_folders:
142 # Read in the file
143 with open(path, 'r') as file:
144 filedata = file.read()
145
146 # Replace the target string
147 filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')
148
149 # Write the file out again
150 with open(path, 'w') as file:
151 file.write(filedata)
152
153
154 class BuildCommand(build_py):
155 """Custom build command."""
156
157 def run(self):
158 self.run_command('build_proto')
159 build_py.run(self)
160
161
162 class DevelopCommand(develop):
163 """Custom develop command."""
164
165 def run(self):
166 self.run_command('build_proto')
167 develop.run(self)
168
169
170 setup(
171 name=NAME,
172 author=AUTHOR,
173 description=DESCRIPTION,
174 long_description=LONG_DESCRIPTION,
175 long_description_content_type="text/markdown",
176 python_requires=REQUIRES_PYTHON,
177 url=URL,
178 packages=find_packages(exclude=("tests",)),
179 install_requires=REQUIRED,
180 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
181 # Install dev requirements with: pip install -e .[dev]
182 extras_require={
183 "dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"],
184 "ci": CI_REQUIRED
185 },
186 include_package_data=True,
187 license="Apache",
188 classifiers=[
189 # Trove classifiers
190 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
191 "License :: OSI Approved :: Apache Software License",
192 "Programming Language :: Python",
193 "Programming Language :: Python :: 3",
194 "Programming Language :: Python :: 3.6",
195 ],
196 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
197 use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
198 setup_requires=["setuptools_scm", "grpcio", "grpcio-tools==1.31.0", "mypy-protobuf", "sphinx"],
199 package_data={
200 "": [
201 "protos/feast/**/*.proto",
202 "protos/feast/third_party/grpc/health/v1/*.proto",
203 "protos/tensorflow_metadata/proto/v0/*.proto",
204 "feast/protos/feast/**/*.py",
205 "tensorflow_metadata/proto/v0/*.py"
206 ],
207 },
208 cmdclass={
209 "build_proto": BuildProtoCommand,
210 "build_py": BuildCommand,
211 "develop": DevelopCommand,
212 },
213 )
214
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -35,7 +35,7 @@
DESCRIPTION = "Python SDK for Feast"
URL = "https://github.com/feast-dev/feast"
AUTHOR = "Feast"
-REQUIRES_PYTHON = ">=3.6.0"
+REQUIRES_PYTHON = ">=3.7.0"
REQUIRED = [
"Click==7.*",
@@ -191,7 +191,7 @@
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -35,7 +35,7 @@\n DESCRIPTION = \"Python SDK for Feast\"\n URL = \"https://github.com/feast-dev/feast\"\n AUTHOR = \"Feast\"\n-REQUIRES_PYTHON = \">=3.6.0\"\n+REQUIRES_PYTHON = \">=3.7.0\"\n \n REQUIRED = [\n \"Click==7.*\",\n@@ -191,7 +191,7 @@\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n", "issue": "Python 3.6 Local Mode breaks due to usage of pathlib.Path and sqlite3\n## Expected Behavior \r\n\r\nPython 3.6 in local mode works when following the quickstart \r\n\r\n## Current Behavior\r\n\r\nUnable to follow quickstart due to the error as shown below\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a Python 3.6 environment. E.g. `conda create --name=feast36 python=3.6`\r\n2. Install feast and other deps `pip install feast`\r\n3. Follow the quickstart\r\n\r\nWhen running the quick start it will fail with the following msg.\r\n\r\n```\r\n(feast36) \u279c temp_feat$ feast init feat1\r\n\r\nCreating a new Feast repository in /home/user/Documents/temp_feat/feat1.\r\n\r\n(feast36) \u279c temp_feat$ cd feat1\r\n(feast36) \u279c feat1$ feast apply\r\nRegistered entity driver_id\r\nRegistered feature view driver_hourly_stats\r\nDeploying infrastructure for driver_hourly_stats\r\nTraceback (most recent call last):\r\n File \"/home/user/anaconda3/envs/feast36/bin/feast\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 782, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 1259, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 1066, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/click/core.py\", line 610, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/cli.py\", line 160, in apply_total_command\r\n apply_total(repo_config, Path.cwd())\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/repo_operations.py\", line 148, in apply_total\r\n partial=False,\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py\", line 55, in update_infra\r\n conn = self._get_conn()\r\n File \"/home/user/anaconda3/envs/feast36/lib/python3.6/site-packages/feast/infra/local.py\", line 45, in _get_conn\r\n self._db_path, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES\r\nTypeError: argument 1 must be str, not PosixPath\r\n```\r\n\r\n\r\n### Specifications\r\n\r\n- Version: 3.6\r\n- Platform: Ubuntu 20.04, also tested on Ubuntu 18.05\r\n- Subsystem:\r\n\r\n## Possible Solution\r\n\r\nThe sqlite3 issue is resolved in Python 3.7 as shown here:\r\nhttps://bugs.python.org/issue33496\r\n\r\nA 
solution could be to add `self._db_path = str(self._db_path)` or similar in the `infra/local.py` file\r\n\r\nI couldn't find a similar issue - in the case its resolved in an upstream commit...\r\n\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport os\nimport re\nimport subprocess\n\nfrom distutils.cmd import Command\nfrom setuptools import find_packages\n\ntry:\n from setuptools import setup\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n from setuptools.command.egg_info import egg_info\n from setuptools.command.sdist import sdist\n from setuptools.command.build_py import build_py\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.install import install\n from distutils.command.build_py import build_py\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"colorama>=0.3.9\",\n \"fastavro>=0.22.11,<0.23\",\n \"google-api-core>=1.23.0\",\n \"google-cloud-bigquery>=2.0.*\",\n \"google-cloud-bigquery-storage >= 2.0.0\",\n \"google-cloud-storage>=1.20.*\",\n \"google-cloud-core==1.4.*\",\n \"googleapis-common-protos==1.52.*\",\n \"grpcio==1.31.0\",\n \"Jinja2>=2.0.0\",\n \"jsonschema\",\n \"mmh3\",\n \"numpy<1.20.0\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"pyarrow==2.0.0\",\n \"pydantic>=1.0.0\",\n \"PyYAML==5.3.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n]\n\nCI_REQUIRED = [\n \"cryptography==3.3.2\",\n \"flake8\",\n \"black==19.10b0\",\n \"isort>=5\",\n \"grpcio-tools==1.31.0\",\n \"grpcio-testing==1.31.0\",\n \"mock==2.0.0\",\n \"moto\",\n \"mypy==0.790\",\n \"mypy-protobuf==1.24\",\n \"avro==1.10.0\",\n \"gcsfs\",\n \"urllib3>=1.25.4\",\n \"pytest==6.0.0\",\n \"pytest-lazy-fixture==0.6.3\",\n \"pytest-timeout==1.4.2\",\n \"pytest-ordering==0.6.*\",\n \"pytest-mock==1.10.4\",\n \"Sphinx\",\n \"sphinx-rtd-theme\",\n \"tenacity\",\n \"adlfs==0.5.9\",\n \"firebase-admin==4.5.2\",\n \"google-cloud-datastore==2.1.0\",\n \"pre-commit\"\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n# Regex modified from default tag regex in:\n# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\nTAG_REGEX = re.compile(\n r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n)\n\n\nclass BuildProtoCommand(Command):\n description = \"Builds the proto files into python 
files.\"\n\n def initialize_options(self):\n self.protoc = [\"python\", \"-m\", \"grpc_tools.protoc\"] # find_executable(\"protoc\")\n self.proto_folder = os.path.join(repo_root, \"protos\")\n self.this_package = os.path.join(os.path.dirname(__file__) or os.getcwd(), 'feast/protos')\n self.sub_folders = [\"core\", \"serving\", \"types\", \"storage\"]\n\n def finalize_options(self):\n pass\n\n def _generate_protos(self, path):\n proto_files = glob.glob(os.path.join(self.proto_folder, path))\n\n subprocess.check_call(self.protoc + [\n '-I', self.proto_folder,\n '--python_out', self.this_package,\n '--grpc_python_out', self.this_package,\n '--mypy_out', self.this_package] + proto_files)\n\n def run(self):\n for sub_folder in self.sub_folders:\n self._generate_protos(f'feast/{sub_folder}/*.proto')\n\n from pathlib import Path\n\n for path in Path('feast/protos').rglob('*.py'):\n for folder in self.sub_folders:\n # Read in the file\n with open(path, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(f'from feast.{folder}', f'from feast.protos.feast.{folder}')\n\n # Write the file out again\n with open(path, 'w') as file:\n file.write(filedata)\n\n\nclass BuildCommand(build_py):\n \"\"\"Custom build command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n build_py.run(self)\n\n\nclass DevelopCommand(develop):\n \"\"\"Custom develop command.\"\"\"\n\n def run(self):\n self.run_command('build_proto')\n develop.run(self)\n\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\n \"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"],\n \"ci\": CI_REQUIRED\n },\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\", \"grpcio\", \"grpcio-tools==1.31.0\", \"mypy-protobuf\", \"sphinx\"],\n package_data={\n \"\": [\n \"protos/feast/**/*.proto\",\n \"protos/feast/third_party/grpc/health/v1/*.proto\",\n \"protos/tensorflow_metadata/proto/v0/*.proto\",\n \"feast/protos/feast/**/*.py\",\n \"tensorflow_metadata/proto/v0/*.py\"\n ],\n },\n cmdclass={\n \"build_proto\": BuildProtoCommand,\n \"build_py\": BuildCommand,\n \"develop\": DevelopCommand,\n },\n)\n", "path": "sdk/python/setup.py"}]} | 3,759 | 221 |
gh_patches_debug_21824 | rasdani/github-patches | git_diff | kartoza__prj.app-476 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Certificate URL needs to include domain
See the url below - it is not useful since it does not specify a domain.
<img width="1285" alt="screen shot 2017-07-24 at 10 52 15 pm" src="https://user-images.githubusercontent.com/178003/28544268-e2f26940-70c2-11e7-8d08-d4c39b466600.png">
</issue>
<code>
[start of django_project/certification/views/certificate.py]
1 # coding=utf-8
2 from django.http import Http404, HttpResponse
3 from django.views.generic import CreateView, DetailView
4 from django.core.urlresolvers import reverse
5 from braces.views import LoginRequiredMixin
6 from reportlab.pdfgen import canvas
7 from reportlab.lib.pagesizes import A4, landscape
8 from reportlab.lib.utils import ImageReader
9 from ..models import Certificate, Course, Attendee
10 from ..forms import CertificateForm
11 from base.models.project import Project
12
13
14 class CertificateMixin(object):
15 """Mixin class to provide standard settings for Certificate."""
16
17 model = Certificate
18 form_class = CertificateForm
19
20
21 class CertificateCreateView(
22 LoginRequiredMixin, CertificateMixin, CreateView):
23 """Create view for Certificate."""
24
25 context_object_name = 'certificate'
26 template_name = 'certificate/create.html'
27
28 def get_success_url(self):
29 """Define the redirect URL.
30
31 After successful creation of the object, the User will be redirected
32 to the Course detail page.
33
34 :returns: URL
35 :rtype: HttpResponse
36 """
37
38 return reverse('course-detail', kwargs={
39 'project_slug': self.project_slug,
40 'organisation_slug': self.organisation_slug,
41 'slug': self.course_slug
42 })
43
44 def get_context_data(self, **kwargs):
45 """Get the context data which is passed to a template.
46
47 :param kwargs: Any arguments to pass to the superclass.
48 :type kwargs: dict
49
50 :returns: Context data which will be passed to the template.
51 :rtype: dict
52 """
53
54 context = super(
55 CertificateCreateView, self).get_context_data(**kwargs)
56 context['course'] = Course.objects.get(slug=self.course_slug)
57 context['attendee'] = Attendee.objects.get(pk=self.pk)
58 return context
59
60 def get_form_kwargs(self):
61 """Get keyword arguments from form.
62
63 :returns keyword argument from the form
64 :rtype: dict
65 """
66
67 kwargs = super(CertificateCreateView, self).get_form_kwargs()
68 self.project_slug = self.kwargs.get('project_slug', None)
69 self.organisation_slug = self.kwargs.get('organisation_slug', None)
70 self.course_slug = self.kwargs.get('course_slug', None)
71 self.pk = self.kwargs.get('pk', None)
72 self.course = Course.objects.get(slug=self.course_slug)
73 self.attendee = Attendee.objects.get(pk=self.pk)
74 kwargs.update({
75 'user': self.request.user,
76 'course': self.course,
77 'attendee': self.attendee,
78 })
79 return kwargs
80
81
82 class CertificateDetailView(DetailView):
83 """Detail view for Certificate."""
84
85 model = Certificate
86 context_object_name = 'certificate'
87 template_name = 'certificate/detail.html'
88
89 def get_context_data(self, **kwargs):
90 """Get the context data which is passed to a template.
91
92 :param kwargs: Any arguments to pass to the superclass.
93 :type kwargs: dict
94
95 :returns: Context data which will be passed to the template.
96 :rtype: dict
97 """
98
99 self.certificateID = self.kwargs.get('id', None)
100 self.project_slug = self.kwargs.get('project_slug', None)
101 context = super(
102 CertificateDetailView, self).get_context_data(**kwargs)
103 issued_id = \
104 Certificate.objects.all().values_list('certificateID', flat=True)
105 if self.certificateID in issued_id:
106 context['certificate'] = \
107 Certificate.objects.get(certificateID=self.certificateID)
108 context['project_slug'] = self.project_slug
109 return context
110
111 def get_queryset(self):
112 """Get the queryset for this view.
113
114 :returns: Queryset which is all certificate in the
115 corresponding organisation.
116 :rtype: QuerySet
117 """
118
119 qs = Certificate.objects.all()
120 return qs
121
122 def get_object(self, queryset=None):
123 """Get the object for this view.
124
125 :param queryset: A query set
126 :type queryset: QuerySet
127
128 :returns: Queryset which is filtered to only show a certificate
129 depends on the input certificate ID.
130 :rtype: QuerySet
131 :raises: Http404
132 """
133
134 if queryset is None:
135 queryset = self.get_queryset()
136 certificateID = self.kwargs.get('id', None)
137 if certificateID:
138 try:
139 obj = queryset.get(certificateID=certificateID)
140 return obj
141 except Certificate.DoesNotExist:
142 return None
143 else:
144 raise Http404('Sorry! Certificate by this ID is not exist.')
145
146
147 def certificate_pdf_view(request, **kwargs):
148
149 project_slug = kwargs.pop('project_slug')
150 course_slug = kwargs.pop('course_slug')
151 pk = kwargs.pop('pk')
152 project = Project.objects.get(slug=project_slug)
153 course = Course.objects.get(slug=course_slug)
154 attendee = Attendee.objects.get(pk=pk)
155 certificate = Certificate.objects.get(course=course, attendee=attendee)
156
157 # Create the HttpResponse object with the appropriate PDF headers.
158 response = HttpResponse(content_type='application/pdf')
159 response['Content-Disposition'] = 'filename="certificate.pdf"'
160
161 # Create the PDF object, using the response object as its "file."
162 page = canvas.Canvas(response, pagesize=landscape(A4))
163 width, height = A4
164 center = height * 0.5
165
166 if project.image_file:
167 project_logo = ImageReader(project.image_file)
168 else:
169 project_logo = None
170
171 if course.certifying_organisation.logo:
172 organisation_logo = ImageReader(course.certifying_organisation.logo)
173 else:
174 organisation_logo = None
175
176 if project.signature:
177 project_owner_signature = ImageReader(project.signature)
178 else:
179 project_owner_signature = None
180
181 if course.course_convener.signature:
182 convener_signature = ImageReader(course.course_convener.signature)
183 else:
184 convener_signature = None
185
186 if course.template_certificate:
187 background = ImageReader(course.template_certificate)
188 else:
189 background = None
190
191 # Certificate margin.
192 margin_right = height - 50
193 margin_left = 50
194 margin_bottom = 50
195 max_left = margin_right - 50
196
197 # Draw things on the PDF. Here's where the PDF generation happens.
198 # See the ReportLab documentation for the full list of functionality.
199 if background is not None:
200 page.drawImage(
201 background, 0, 0, height=width, width=height,
202 preserveAspectRatio=True, mask='auto')
203 page.setFillColorRGB(0.1, 0.1, 0.1)
204 page.setFont('Times-Roman', 18)
205 # page.drawString(margin_left, 480, project.name)
206 # page.drawRightString(
207 # (margin_right), 480, course.certifying_organisation.name)
208
209 if project_logo is not None:
210 page.drawImage(
211 project_logo, 50, 500, width=50, height=50,
212 preserveAspectRatio=True, mask='auto')
213
214 if organisation_logo is not None:
215 page.drawImage(
216 organisation_logo, max_left, 500, height=50, width=50,
217 preserveAspectRatio=True, anchor='c', mask='auto')
218
219 page.setFont('Times-Bold', 26)
220 page.drawCentredString(center, 480, 'Certificate of Completion')
221 page.drawCentredString(
222 center, 400, '%s %s' % (attendee.firstname, attendee.surname))
223 page.setFont('Times-Roman', 16)
224 page.drawCentredString(
225 center, 360, 'Has attended and completed the course:')
226 page.setFont('Times-Bold', 20)
227 page.drawCentredString(center, 300, course.course_type.name)
228 page.setFont('Times-Roman', 16)
229 page.drawCentredString(
230 center, 270,
231 'From %s %s %s to %s %s %s'
232 % (course.start_date.day, course.start_date.strftime('%B'),
233 course.start_date.year, course.end_date.day,
234 course.end_date.strftime('%B'), course.end_date.year))
235 page.setFillColorRGB(0.1, 0.1, 0.1)
236 page.drawCentredString(
237 center, 220, 'Convened by %s %s at %s' % (
238 course.course_convener.user.first_name,
239 course.course_convener.user.last_name, course.training_center))
240
241 if project_owner_signature is not None:
242 page.drawImage(
243 project_owner_signature,
244 (margin_left + 100), (margin_bottom + 70), width=100, height=70,
245 preserveAspectRatio=True, anchor='s', mask='auto')
246
247 if convener_signature is not None:
248 page.drawImage(
249 convener_signature, (margin_right - 200), (margin_bottom + 70),
250 width=100, height=70, preserveAspectRatio=True, anchor='s',
251 mask='auto')
252
253 page.setFont('Times-Italic', 12)
254 page.drawCentredString(
255 (margin_left + 150), (margin_bottom + 60),
256 '%s %s' % (project.owner.first_name, project.owner.last_name))
257 page.drawCentredString(
258 (margin_right - 150), (margin_bottom + 60),
259 '%s %s' % (
260 course.course_convener.user.first_name,
261 course.course_convener.user.last_name))
262 page.line(
263 (margin_left + 70), (margin_bottom + 55),
264 (margin_left + 230), (margin_bottom + 55))
265 page.line(
266 (margin_right - 70), (margin_bottom + 55),
267 (margin_right - 230), (margin_bottom + 55))
268 page.setFont('Times-Roman', 13)
269 page.drawCentredString(
270 (margin_left + 150), (margin_bottom + 40), 'Project Owner')
271 page.drawCentredString(
272 (margin_right - 150), (margin_bottom + 40), 'Convener')
273
274 # Footnotes.
275 page.setFont('Times-Roman', 14)
276 page.drawString(
277 margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID)
278 page.setFont('Times-Roman', 8)
279 page.drawString(
280 margin_left, (margin_bottom - 20),
281 'You can verify this certificate by visiting /%s/certificate/%s/.'
282 % (project.slug, certificate.certificateID))
283
284 # Close the PDF object cleanly.
285 page.showPage()
286 page.save()
287 return response
288
[end of django_project/certification/views/certificate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py
--- a/django_project/certification/views/certificate.py
+++ b/django_project/certification/views/certificate.py
@@ -153,6 +153,7 @@
course = Course.objects.get(slug=course_slug)
attendee = Attendee.objects.get(pk=pk)
certificate = Certificate.objects.get(course=course, attendee=attendee)
+ current_site = request.META['HTTP_HOST']
# Create the HttpResponse object with the appropriate PDF headers.
response = HttpResponse(content_type='application/pdf')
@@ -278,8 +279,9 @@
page.setFont('Times-Roman', 8)
page.drawString(
margin_left, (margin_bottom - 20),
- 'You can verify this certificate by visiting /%s/certificate/%s/.'
- % (project.slug, certificate.certificateID))
+ 'You can verify this certificate by visiting '
+ 'http://%s/en/%s/certificate/%s/.'
+ % (current_site, project.slug, certificate.certificateID))
# Close the PDF object cleanly.
page.showPage()
| {"golden_diff": "diff --git a/django_project/certification/views/certificate.py b/django_project/certification/views/certificate.py\n--- a/django_project/certification/views/certificate.py\n+++ b/django_project/certification/views/certificate.py\n@@ -153,6 +153,7 @@\n course = Course.objects.get(slug=course_slug)\n attendee = Attendee.objects.get(pk=pk)\n certificate = Certificate.objects.get(course=course, attendee=attendee)\n+ current_site = request.META['HTTP_HOST']\n \n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n@@ -278,8 +279,9 @@\n page.setFont('Times-Roman', 8)\n page.drawString(\n margin_left, (margin_bottom - 20),\n- 'You can verify this certificate by visiting /%s/certificate/%s/.'\n- % (project.slug, certificate.certificateID))\n+ 'You can verify this certificate by visiting '\n+ 'http://%s/en/%s/certificate/%s/.'\n+ % (current_site, project.slug, certificate.certificateID))\n \n # Close the PDF object cleanly.\n page.showPage()\n", "issue": "Certificate URL needs to include domain\nSee the url below - it is not useful since it does not specify a domain.\r\n\r\n<img width=\"1285\" alt=\"screen shot 2017-07-24 at 10 52 15 pm\" src=\"https://user-images.githubusercontent.com/178003/28544268-e2f26940-70c2-11e7-8d08-d4c39b466600.png\">\r\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import CreateView, DetailView\nfrom django.core.urlresolvers import reverse\nfrom braces.views import LoginRequiredMixin\nfrom reportlab.pdfgen import canvas\nfrom reportlab.lib.pagesizes import A4, landscape\nfrom reportlab.lib.utils import ImageReader\nfrom ..models import Certificate, Course, Attendee\nfrom ..forms import CertificateForm\nfrom base.models.project import Project\n\n\nclass CertificateMixin(object):\n \"\"\"Mixin class to provide standard settings for Certificate.\"\"\"\n\n model = Certificate\n form_class = CertificateForm\n\n\nclass CertificateCreateView(\n LoginRequiredMixin, CertificateMixin, CreateView):\n \"\"\"Create view for Certificate.\"\"\"\n\n context_object_name = 'certificate'\n template_name = 'certificate/create.html'\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful creation of the object, the User will be redirected\n to the Course detail page.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n\n return reverse('course-detail', kwargs={\n 'project_slug': self.project_slug,\n 'organisation_slug': self.organisation_slug,\n 'slug': self.course_slug\n })\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n context = super(\n CertificateCreateView, self).get_context_data(**kwargs)\n context['course'] = Course.objects.get(slug=self.course_slug)\n context['attendee'] = Attendee.objects.get(pk=self.pk)\n return context\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype: dict\n \"\"\"\n\n kwargs = super(CertificateCreateView, self).get_form_kwargs()\n self.project_slug = self.kwargs.get('project_slug', None)\n self.organisation_slug = self.kwargs.get('organisation_slug', None)\n self.course_slug = self.kwargs.get('course_slug', None)\n self.pk = self.kwargs.get('pk', None)\n self.course = 
Course.objects.get(slug=self.course_slug)\n self.attendee = Attendee.objects.get(pk=self.pk)\n kwargs.update({\n 'user': self.request.user,\n 'course': self.course,\n 'attendee': self.attendee,\n })\n return kwargs\n\n\nclass CertificateDetailView(DetailView):\n \"\"\"Detail view for Certificate.\"\"\"\n\n model = Certificate\n context_object_name = 'certificate'\n template_name = 'certificate/detail.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Get the context data which is passed to a template.\n\n :param kwargs: Any arguments to pass to the superclass.\n :type kwargs: dict\n\n :returns: Context data which will be passed to the template.\n :rtype: dict\n \"\"\"\n\n self.certificateID = self.kwargs.get('id', None)\n self.project_slug = self.kwargs.get('project_slug', None)\n context = super(\n CertificateDetailView, self).get_context_data(**kwargs)\n issued_id = \\\n Certificate.objects.all().values_list('certificateID', flat=True)\n if self.certificateID in issued_id:\n context['certificate'] = \\\n Certificate.objects.get(certificateID=self.certificateID)\n context['project_slug'] = self.project_slug\n return context\n\n def get_queryset(self):\n \"\"\"Get the queryset for this view.\n\n :returns: Queryset which is all certificate in the\n corresponding organisation.\n :rtype: QuerySet\n \"\"\"\n\n qs = Certificate.objects.all()\n return qs\n\n def get_object(self, queryset=None):\n \"\"\"Get the object for this view.\n\n :param queryset: A query set\n :type queryset: QuerySet\n\n :returns: Queryset which is filtered to only show a certificate\n depends on the input certificate ID.\n :rtype: QuerySet\n :raises: Http404\n \"\"\"\n\n if queryset is None:\n queryset = self.get_queryset()\n certificateID = self.kwargs.get('id', None)\n if certificateID:\n try:\n obj = queryset.get(certificateID=certificateID)\n return obj\n except Certificate.DoesNotExist:\n return None\n else:\n raise Http404('Sorry! Certificate by this ID is not exist.')\n\n\ndef certificate_pdf_view(request, **kwargs):\n\n project_slug = kwargs.pop('project_slug')\n course_slug = kwargs.pop('course_slug')\n pk = kwargs.pop('pk')\n project = Project.objects.get(slug=project_slug)\n course = Course.objects.get(slug=course_slug)\n attendee = Attendee.objects.get(pk=pk)\n certificate = Certificate.objects.get(course=course, attendee=attendee)\n\n # Create the HttpResponse object with the appropriate PDF headers.\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'filename=\"certificate.pdf\"'\n\n # Create the PDF object, using the response object as its \"file.\"\n page = canvas.Canvas(response, pagesize=landscape(A4))\n width, height = A4\n center = height * 0.5\n\n if project.image_file:\n project_logo = ImageReader(project.image_file)\n else:\n project_logo = None\n\n if course.certifying_organisation.logo:\n organisation_logo = ImageReader(course.certifying_organisation.logo)\n else:\n organisation_logo = None\n\n if project.signature:\n project_owner_signature = ImageReader(project.signature)\n else:\n project_owner_signature = None\n\n if course.course_convener.signature:\n convener_signature = ImageReader(course.course_convener.signature)\n else:\n convener_signature = None\n\n if course.template_certificate:\n background = ImageReader(course.template_certificate)\n else:\n background = None\n\n # Certificate margin.\n margin_right = height - 50\n margin_left = 50\n margin_bottom = 50\n max_left = margin_right - 50\n\n # Draw things on the PDF. 
Here's where the PDF generation happens.\n # See the ReportLab documentation for the full list of functionality.\n if background is not None:\n page.drawImage(\n background, 0, 0, height=width, width=height,\n preserveAspectRatio=True, mask='auto')\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.setFont('Times-Roman', 18)\n # page.drawString(margin_left, 480, project.name)\n # page.drawRightString(\n # (margin_right), 480, course.certifying_organisation.name)\n\n if project_logo is not None:\n page.drawImage(\n project_logo, 50, 500, width=50, height=50,\n preserveAspectRatio=True, mask='auto')\n\n if organisation_logo is not None:\n page.drawImage(\n organisation_logo, max_left, 500, height=50, width=50,\n preserveAspectRatio=True, anchor='c', mask='auto')\n\n page.setFont('Times-Bold', 26)\n page.drawCentredString(center, 480, 'Certificate of Completion')\n page.drawCentredString(\n center, 400, '%s %s' % (attendee.firstname, attendee.surname))\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 360, 'Has attended and completed the course:')\n page.setFont('Times-Bold', 20)\n page.drawCentredString(center, 300, course.course_type.name)\n page.setFont('Times-Roman', 16)\n page.drawCentredString(\n center, 270,\n 'From %s %s %s to %s %s %s'\n % (course.start_date.day, course.start_date.strftime('%B'),\n course.start_date.year, course.end_date.day,\n course.end_date.strftime('%B'), course.end_date.year))\n page.setFillColorRGB(0.1, 0.1, 0.1)\n page.drawCentredString(\n center, 220, 'Convened by %s %s at %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name, course.training_center))\n\n if project_owner_signature is not None:\n page.drawImage(\n project_owner_signature,\n (margin_left + 100), (margin_bottom + 70), width=100, height=70,\n preserveAspectRatio=True, anchor='s', mask='auto')\n\n if convener_signature is not None:\n page.drawImage(\n convener_signature, (margin_right - 200), (margin_bottom + 70),\n width=100, height=70, preserveAspectRatio=True, anchor='s',\n mask='auto')\n\n page.setFont('Times-Italic', 12)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 60),\n '%s %s' % (project.owner.first_name, project.owner.last_name))\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 60),\n '%s %s' % (\n course.course_convener.user.first_name,\n course.course_convener.user.last_name))\n page.line(\n (margin_left + 70), (margin_bottom + 55),\n (margin_left + 230), (margin_bottom + 55))\n page.line(\n (margin_right - 70), (margin_bottom + 55),\n (margin_right - 230), (margin_bottom + 55))\n page.setFont('Times-Roman', 13)\n page.drawCentredString(\n (margin_left + 150), (margin_bottom + 40), 'Project Owner')\n page.drawCentredString(\n (margin_right - 150), (margin_bottom + 40), 'Convener')\n\n # Footnotes.\n page.setFont('Times-Roman', 14)\n page.drawString(\n margin_left, margin_bottom - 10, 'ID: %s' % certificate.certificateID)\n page.setFont('Times-Roman', 8)\n page.drawString(\n margin_left, (margin_bottom - 20),\n 'You can verify this certificate by visiting /%s/certificate/%s/.'\n % (project.slug, certificate.certificateID))\n\n # Close the PDF object cleanly.\n page.showPage()\n page.save()\n return response\n", "path": "django_project/certification/views/certificate.py"}]} | 3,766 | 264 |
gh_patches_debug_34378 | rasdani/github-patches | git_diff | nipy__nipype-3637 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSVReader does not handle quoted fields or tsv files
### Summary
I want to add functionality to the CSVReader so it can handle tsv files. While checking that out, I noticed that we're just splitting on `,`, which is not ideal.
### Actual behavior
Parsing CSV files with fields that contain quoted text with ',' characters does not work correctly. TSV files are not supported.
### Expected behavior
Quoted fields should be correctly parsed if they contain the delimiter.
### How to replicate the behavior
Run the script below.
### Script/Workflow details
```
from nipype.interfaces import utility
reader = utility.CSVReader()
lines = ["foo,\"hello,world\",300.1\n"]
with open("testcsv.csv", "w") as fid:
fid.writelines(lines)
fid.flush()
reader.inputs.in_file = "testcsv.csv"
out = reader.run()
assert out.outputs.column_1 == ["hello, world"]
```
### Platform details:
<!-- Please run the following code from your shell and place the output between the triple ticks, below.
python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"
-->
```
{'commit_hash': 'c46a957bd',
'commit_source': 'repository',
'networkx_version': '3.2.1',
'nibabel_version': '5.2.1',
'nipype_version': '1.8.7.dev0',
'numpy_version': '1.26.4',
'pkg_path': '/home/USER/Desktop/nipype/nipype',
'scipy_version': '1.12.0',
'sys_executable': '/home/USER/Desktop/nipype/.venv/bin/python',
'sys_platform': 'linux',
'sys_version': '3.9.18 (main, Feb 1 2024, 11:48:51) \n[GCC 11.4.0]',
'traits_version': '6.3.2'}
```
### Execution environment
- My python environment outside container
</issue>
<code>
[start of nipype/interfaces/utility/csv.py]
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 et:
3 """CSV Handling utilities
4 """
5 from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface
6 from ..io import add_traits
7
8
9 class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):
10 in_file = File(
11 exists=True, mandatory=True, desc="Input comma-seperated value (CSV) file"
12 )
13 header = traits.Bool(
14 False, usedefault=True, desc="True if the first line is a column header"
15 )
16
17
18 class CSVReader(BaseInterface):
19 """
20 Examples
21 --------
22
23 >>> reader = CSVReader() # doctest: +SKIP
24 >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP
25 >>> out = reader.run() # doctest: +SKIP
26 >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP
27 True
28 >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP
29 True
30 >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP
31 True
32
33 >>> reader = CSVReader() # doctest: +SKIP
34 >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP
35 >>> reader.inputs.header = True # doctest: +SKIP
36 >>> out = reader.run() # doctest: +SKIP
37 >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP
38 True
39 >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP
40 True
41 >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP
42 True
43
44 """
45
46 input_spec = CSVReaderInputSpec
47 output_spec = DynamicTraitedSpec
48 _always_run = True
49
50 def _append_entry(self, outputs, entry):
51 for key, value in zip(self._outfields, entry):
52 outputs[key].append(value)
53 return outputs
54
55 def _parse_line(self, line):
56 line = line.replace("\n", "")
57 entry = [x.strip() for x in line.split(",")]
58 return entry
59
60 def _get_outfields(self):
61 with open(self.inputs.in_file) as fid:
62 entry = self._parse_line(fid.readline())
63 if self.inputs.header:
64 self._outfields = tuple(entry)
65 else:
66 self._outfields = tuple(["column_" + str(x) for x in range(len(entry))])
67 return self._outfields
68
69 def _run_interface(self, runtime):
70 self._get_outfields()
71 return runtime
72
73 def _outputs(self):
74 return self._add_output_traits(super()._outputs())
75
76 def _add_output_traits(self, base):
77 return add_traits(base, self._get_outfields())
78
79 def _list_outputs(self):
80 outputs = self.output_spec().get()
81 isHeader = True
82 for key in self._outfields:
83 outputs[key] = [] # initialize outfields
84 with open(self.inputs.in_file) as fid:
85 for line in fid.readlines():
86 if self.inputs.header and isHeader: # skip header line
87 isHeader = False
88 continue
89 entry = self._parse_line(line)
90 outputs = self._append_entry(outputs, entry)
91 return outputs
92
[end of nipype/interfaces/utility/csv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py
--- a/nipype/interfaces/utility/csv.py
+++ b/nipype/interfaces/utility/csv.py
@@ -2,6 +2,7 @@
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""CSV Handling utilities
"""
+import csv
from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface
from ..io import add_traits
@@ -13,6 +14,7 @@
header = traits.Bool(
False, usedefault=True, desc="True if the first line is a column header"
)
+ delimiter = traits.String(",", usedefault=True, desc="Delimiter to use.")
class CSVReader(BaseInterface):
@@ -52,14 +54,11 @@
outputs[key].append(value)
return outputs
- def _parse_line(self, line):
- line = line.replace("\n", "")
- entry = [x.strip() for x in line.split(",")]
- return entry
-
def _get_outfields(self):
with open(self.inputs.in_file) as fid:
- entry = self._parse_line(fid.readline())
+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)
+
+ entry = next(reader)
if self.inputs.header:
self._outfields = tuple(entry)
else:
@@ -82,10 +81,10 @@
for key in self._outfields:
outputs[key] = [] # initialize outfields
with open(self.inputs.in_file) as fid:
- for line in fid.readlines():
+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)
+ for entry in reader:
if self.inputs.header and isHeader: # skip header line
isHeader = False
continue
- entry = self._parse_line(line)
outputs = self._append_entry(outputs, entry)
return outputs
| {"golden_diff": "diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py\n--- a/nipype/interfaces/utility/csv.py\n+++ b/nipype/interfaces/utility/csv.py\n@@ -2,6 +2,7 @@\n # vi: set ft=python sts=4 ts=4 sw=4 et:\n \"\"\"CSV Handling utilities\n \"\"\"\n+import csv\n from ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\n from ..io import add_traits\n \n@@ -13,6 +14,7 @@\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n+ delimiter = traits.String(\",\", usedefault=True, desc=\"Delimiter to use.\")\n \n \n class CSVReader(BaseInterface):\n@@ -52,14 +54,11 @@\n outputs[key].append(value)\n return outputs\n \n- def _parse_line(self, line):\n- line = line.replace(\"\\n\", \"\")\n- entry = [x.strip() for x in line.split(\",\")]\n- return entry\n-\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n- entry = self._parse_line(fid.readline())\n+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n+\n+ entry = next(reader)\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n@@ -82,10 +81,10 @@\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n- for line in fid.readlines():\n+ reader = csv.reader(fid, delimiter=self.inputs.delimiter)\n+ for entry in reader:\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n- entry = self._parse_line(line)\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "issue": "CSVReader does not handle quoted fields or tsv files\n### Summary\r\nI want to add functionality to the CSVReader so it can handle tsv files. While checking that out, I noticed that we're just splitting on `,`, which is not ideal.\r\n\r\n### Actual behavior\r\nParsing CSV files with fields that contain quoted text with ',' characters does not work correctly. 
TSV files are not supported.\r\n\r\n\r\n### Expected behavior\r\nQuoted fields should be correctly parsed if they contain the delimiter.\r\n\r\n### How to replicate the behavior\r\nRun the script below.\r\n\r\n### Script/Workflow details\r\n\r\n```\r\nfrom nipype.interfaces import utility\r\n\r\nreader = utility.CSVReader()\r\n\r\nlines = [\"foo,\\\"hello,world\\\",300.1\\n\"]\r\n\r\nwith open(\"testcsv.csv\", \"w\") as fid:\r\n fid.writelines(lines)\r\n fid.flush()\r\n\r\nreader.inputs.in_file = \"testcsv.csv\"\r\nout = reader.run()\r\n\r\nassert out.outputs.column_1 == [\"hello, world\"]\r\n```\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n{'commit_hash': 'c46a957bd',\r\n 'commit_source': 'repository',\r\n 'networkx_version': '3.2.1',\r\n 'nibabel_version': '5.2.1',\r\n 'nipype_version': '1.8.7.dev0',\r\n 'numpy_version': '1.26.4',\r\n 'pkg_path': '/home/USER/Desktop/nipype/nipype',\r\n 'scipy_version': '1.12.0',\r\n 'sys_executable': '/home/USER/Desktop/nipype/.venv/bin/python',\r\n 'sys_platform': 'linux',\r\n 'sys_version': '3.9.18 (main, Feb 1 2024, 11:48:51) \\n[GCC 11.4.0]',\r\n 'traits_version': '6.3.2'}\r\n\r\n```\r\n\r\n### Execution environment\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"CSV Handling utilities\n\"\"\"\nfrom ..base import traits, TraitedSpec, DynamicTraitedSpec, File, BaseInterface\nfrom ..io import add_traits\n\n\nclass CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec):\n in_file = File(\n exists=True, mandatory=True, desc=\"Input comma-seperated value (CSV) file\"\n )\n header = traits.Bool(\n False, usedefault=True, desc=\"True if the first line is a column header\"\n )\n\n\nclass CSVReader(BaseInterface):\n \"\"\"\n Examples\n --------\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n >>> reader = CSVReader() # doctest: +SKIP\n >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP\n >>> reader.inputs.header = True # doctest: +SKIP\n >>> out = reader.run() # doctest: +SKIP\n >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP\n True\n >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP\n True\n >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP\n True\n\n \"\"\"\n\n input_spec = CSVReaderInputSpec\n output_spec = DynamicTraitedSpec\n _always_run = True\n\n def _append_entry(self, outputs, entry):\n for key, value in zip(self._outfields, entry):\n outputs[key].append(value)\n return outputs\n\n def _parse_line(self, line):\n line = line.replace(\"\\n\", \"\")\n entry = [x.strip() for x in line.split(\",\")]\n return entry\n\n def _get_outfields(self):\n with open(self.inputs.in_file) as fid:\n entry = self._parse_line(fid.readline())\n if self.inputs.header:\n self._outfields = tuple(entry)\n else:\n self._outfields = tuple([\"column_\" + str(x) for x in range(len(entry))])\n return self._outfields\n\n def 
_run_interface(self, runtime):\n self._get_outfields()\n return runtime\n\n def _outputs(self):\n return self._add_output_traits(super()._outputs())\n\n def _add_output_traits(self, base):\n return add_traits(base, self._get_outfields())\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n isHeader = True\n for key in self._outfields:\n outputs[key] = [] # initialize outfields\n with open(self.inputs.in_file) as fid:\n for line in fid.readlines():\n if self.inputs.header and isHeader: # skip header line\n isHeader = False\n continue\n entry = self._parse_line(line)\n outputs = self._append_entry(outputs, entry)\n return outputs\n", "path": "nipype/interfaces/utility/csv.py"}]} | 1,967 | 428 |
gh_patches_debug_22105 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4902 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Protect against web/api 5xx responses
When a build is attempted, but a web/api instance throws a 5xx response, a number of strange behaviors can result:
- Builds will get stuck in triggered state
- Builds will fail randomly when updating the api fails
- 5xx responses will be returned from the web servers to users
Part of the resolution to this may be defensive protection around intermittent 5xx responses. This may take some operation changes as well though, as our load balancer should really assume a 5xx response is enough to dislodge the server from the lb group.
Raised from #2255
</issue>
<code>
[start of readthedocs/restapi/client.py]
1 # -*- coding: utf-8 -*-
2
3 """Simple client to access our API with Slumber credentials."""
4
5 from __future__ import (
6 absolute_import,
7 division,
8 print_function,
9 unicode_literals,
10 )
11
12 import logging
13
14 import requests
15 from django.conf import settings
16 from requests_toolbelt.adapters import host_header_ssl
17 from rest_framework.renderers import JSONRenderer
18 from slumber import API, serialize
19
20 log = logging.getLogger(__name__)
21
22 PRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')
23 API_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org')
24 USER = getattr(settings, 'SLUMBER_USERNAME', None)
25 PASS = getattr(settings, 'SLUMBER_PASSWORD', None)
26
27
28 class DrfJsonSerializer(serialize.JsonSerializer):
29
30 """Additional serialization help from the DRF renderer"""
31
32 key = 'json-drf'
33
34 def dumps(self, data):
35 """Used to be able to render datetime objects."""
36 return JSONRenderer().render(data)
37
38
39 def setup_api():
40 session = requests.Session()
41 if API_HOST.startswith('https'):
42 # Only use the HostHeaderSSLAdapter for HTTPS connections
43 adapter_class = host_header_ssl.HostHeaderSSLAdapter
44 else:
45 adapter_class = requests.adapters.HTTPAdapter
46
47 session.mount(
48 API_HOST,
49 adapter_class(max_retries=3),
50 )
51 session.headers.update({'Host': PRODUCTION_DOMAIN})
52 api_config = {
53 'base_url': '%s/api/v2/' % API_HOST,
54 'serializer': serialize.Serializer(
55 default='json-drf',
56 serializers=[
57 serialize.JsonSerializer(),
58 DrfJsonSerializer(),
59 ],
60 ),
61 'session': session,
62 }
63 if USER and PASS:
64 log.debug(
65 'Using slumber v2 with user %s, pointed at %s',
66 USER,
67 API_HOST,
68 )
69 session.auth = (USER, PASS)
70 else:
71 log.warning('SLUMBER_USERNAME/PASSWORD settings are not set')
72 return API(**api_config)
73
74
75 api = setup_api()
76
[end of readthedocs/restapi/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/restapi/client.py b/readthedocs/restapi/client.py
--- a/readthedocs/restapi/client.py
+++ b/readthedocs/restapi/client.py
@@ -13,6 +13,7 @@
import requests
from django.conf import settings
+from requests.packages.urllib3.util.retry import Retry # noqa
from requests_toolbelt.adapters import host_header_ssl
from rest_framework.renderers import JSONRenderer
from slumber import API, serialize
@@ -44,9 +45,21 @@
else:
adapter_class = requests.adapters.HTTPAdapter
+ # Define a retry mechanism trying to attempt to not fail in the first
+ # error. Builders hit this issue frequently because the webs are high loaded
+ retry = Retry(
+ total=3,
+ read=3,
+ connect=3,
+ status=3,
+ backoff_factor=0.5, # 0.5, 1, 2 seconds
+ method_whitelist=('GET', 'PUT', 'PATCH', 'POST'),
+ status_forcelist=(408, 413, 429, 500, 502, 503, 504),
+ )
+
session.mount(
API_HOST,
- adapter_class(max_retries=3),
+ adapter_class(max_retries=retry),
)
session.headers.update({'Host': PRODUCTION_DOMAIN})
api_config = {
| {"golden_diff": "diff --git a/readthedocs/restapi/client.py b/readthedocs/restapi/client.py\n--- a/readthedocs/restapi/client.py\n+++ b/readthedocs/restapi/client.py\n@@ -13,6 +13,7 @@\n \n import requests\n from django.conf import settings\n+from requests.packages.urllib3.util.retry import Retry # noqa\n from requests_toolbelt.adapters import host_header_ssl\n from rest_framework.renderers import JSONRenderer\n from slumber import API, serialize\n@@ -44,9 +45,21 @@\n else:\n adapter_class = requests.adapters.HTTPAdapter\n \n+ # Define a retry mechanism trying to attempt to not fail in the first\n+ # error. Builders hit this issue frequently because the webs are high loaded\n+ retry = Retry(\n+ total=3,\n+ read=3,\n+ connect=3,\n+ status=3,\n+ backoff_factor=0.5, # 0.5, 1, 2 seconds\n+ method_whitelist=('GET', 'PUT', 'PATCH', 'POST'),\n+ status_forcelist=(408, 413, 429, 500, 502, 503, 504),\n+ )\n+\n session.mount(\n API_HOST,\n- adapter_class(max_retries=3),\n+ adapter_class(max_retries=retry),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n", "issue": "Protect against web/api 5xx responses\nWhen a build is attempted, but a web/api instance throws a 5xx response, a number of strange behaviors can result:\n- Builds will get stuck in triggered state\n- Builds will fail randomly when updating the api fails\n- 5xx responses will be returned from the web servers to users\n\nPart of the resolution to this may be defensive protection around intermittent 5xx responses. This may take some operation changes as well though, as our load balancer should really assume a 5xx response is enough to dislodge the server from the lb group.\n\nRaised from #2255 \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Simple client to access our API with Slumber credentials.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport logging\n\nimport requests\nfrom django.conf import settings\nfrom requests_toolbelt.adapters import host_header_ssl\nfrom rest_framework.renderers import JSONRenderer\nfrom slumber import API, serialize\n\nlog = logging.getLogger(__name__)\n\nPRODUCTION_DOMAIN = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')\nAPI_HOST = getattr(settings, 'SLUMBER_API_HOST', 'https://readthedocs.org')\nUSER = getattr(settings, 'SLUMBER_USERNAME', None)\nPASS = getattr(settings, 'SLUMBER_PASSWORD', None)\n\n\nclass DrfJsonSerializer(serialize.JsonSerializer):\n\n \"\"\"Additional serialization help from the DRF renderer\"\"\"\n\n key = 'json-drf'\n\n def dumps(self, data):\n \"\"\"Used to be able to render datetime objects.\"\"\"\n return JSONRenderer().render(data)\n\n\ndef setup_api():\n session = requests.Session()\n if API_HOST.startswith('https'):\n # Only use the HostHeaderSSLAdapter for HTTPS connections\n adapter_class = host_header_ssl.HostHeaderSSLAdapter\n else:\n adapter_class = requests.adapters.HTTPAdapter\n\n session.mount(\n API_HOST,\n adapter_class(max_retries=3),\n )\n session.headers.update({'Host': PRODUCTION_DOMAIN})\n api_config = {\n 'base_url': '%s/api/v2/' % API_HOST,\n 'serializer': serialize.Serializer(\n default='json-drf',\n serializers=[\n serialize.JsonSerializer(),\n DrfJsonSerializer(),\n ],\n ),\n 'session': session,\n }\n if USER and PASS:\n log.debug(\n 'Using slumber v2 with user %s, pointed at %s',\n USER,\n API_HOST,\n )\n session.auth = (USER, PASS)\n else:\n log.warning('SLUMBER_USERNAME/PASSWORD settings are not set')\n return 
API(**api_config)\n\n\napi = setup_api()\n", "path": "readthedocs/restapi/client.py"}]} | 1,257 | 326 |
gh_patches_debug_5993 | rasdani/github-patches | git_diff | common-workflow-language__cwltool-1504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when building subgraph with --target
Hello,
I am trying to use the `--target` option with this very simple workflow:
```
cwltool --target step2 --debug --print-subgraph wf.cwl > subgraph.cwl
```
However, I get an error: `TypeError: unhashable type: 'CommentedMap'`
Thanks!
---
## Expected Behavior
**subgraph.cwl**
```
{
"class": "Workflow",
"cwlVersion": "v1.2",
"id": "file:///home/wf.cwl",
"inputs": [
{
"id": "file:///home/wf.cwl#step2_in",
"type": "string"
}
],
"outputs": [
{
"id": "file:///home/wf.cwl#step2_out",
"outputSource": "file:///home/wf.cwl#step2/output",
"type": "File"
}
],
"steps": [
{
"id": "file:///home/wf.cwl#step2",
"in": [
{
"id": "file:///home/wf.cwl#step2/in",
"source": "file:///home/wf.cwl#step2_in"
}
],
"out": [
{
"id": "file:///home/wf.cwl#step2/output"
}
],
"run": "file:///home/echo.cwl"
}
]
}
```
## Actual Behavior
Fails with the error as seen in [Full Traceback](#traceback)
## Workflow Code
**wf.cwl**
```
class: Workflow
cwlVersion: v1.2
inputs:
- id: step1_in
type: string
- id: step2_in
type: string
outputs:
- id: step1_out
type: File
outputSource: step1/output
- id: step2_out
type: File
outputSource: step2/output
steps:
- id: step1
in:
- id: in
source: step1_in
out:
- id: output
run: echo.cwl
- id: step2
in:
- id: in
source: step2_in
out:
- id: output
run: echo.cwl
```
**echo.cwl**
```
cwlVersion: v1.2
class: CommandLineTool
baseCommand: echo
inputs:
- id: in
type: string
inputBinding: {position: 1}
outputs:
- id: output
type: stdout
stdout: output_echo.txt
```
**wf.yml**
```
step1_in: step1
step2_in: step2
```
## Full Traceback
<a name="traceback"/>
```
Traceback (most recent call last):
File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py", line 1020, in main
ctool = choose_target(args, tool, loadingContext)
File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py", line 768, in choose_target
extracted = get_subgraph(
File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py", line 83, in get_subgraph
declare_node(nodes, out, None)
File "/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py", line 41, in declare_node
if nodeid in nodes:
TypeError: unhashable type: 'CommentedMap'
```
When I print out the `nodeid` that is passed to the `declare_node` function, I see a `CommentedMap` instead of a `string`:
```
ordereddict([('id', 'file:///home/wf.cwl#step1/output')])
```
After I changed lines 96-97 in [subgraph.py](https://github.com/common-workflow-language/cwltool/blob/main/cwltool/subgraph.py)
```
#declare_node(nodes, out, None)
declare_node(nodes, out["id"], None)
#nodes[out].up.append(st["id"])
nodes[out["id"]].up.append(st["id"])
```
I got the `subgraph.cwl` as shown above
## Your Environment
* cwltool version: 3.0.20201203173111
</issue>
<code>
[start of cwltool/subgraph.py]
1 import urllib
2 from collections import namedtuple
3 from typing import (
4 Any,
5 Dict,
6 List,
7 Mapping,
8 MutableMapping,
9 MutableSequence,
10 Optional,
11 Set,
12 Tuple,
13 cast,
14 )
15
16 from ruamel.yaml.comments import CommentedMap
17
18 from .utils import CWLObjectType, aslist
19 from .workflow import Workflow, WorkflowStep
20
21 Node = namedtuple("Node", ("up", "down", "type"))
22 UP = "up"
23 DOWN = "down"
24 INPUT = "input"
25 OUTPUT = "output"
26 STEP = "step"
27
28
29 def subgraph_visit(
30 current: str,
31 nodes: MutableMapping[str, Node],
32 visited: Set[str],
33 direction: str,
34 ) -> None:
35
36 if current in visited:
37 return
38 visited.add(current)
39
40 if direction == DOWN:
41 d = nodes[current].down
42 if direction == UP:
43 d = nodes[current].up
44 for c in d:
45 subgraph_visit(c, nodes, visited, direction)
46
47
48 def declare_node(nodes: Dict[str, Node], nodeid: str, tp: Optional[str]) -> Node:
49 if nodeid in nodes:
50 n = nodes[nodeid]
51 if n.type is None:
52 nodes[nodeid] = Node(n.up, n.down, tp)
53 else:
54 nodes[nodeid] = Node([], [], tp)
55 return nodes[nodeid]
56
57
58 def find_step(steps: List[WorkflowStep], stepid: str) -> Optional[CWLObjectType]:
59 for st in steps:
60 if st.tool["id"] == stepid:
61 return st.tool
62 return None
63
64
65 def get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:
66 if tool.tool["class"] != "Workflow":
67 raise Exception("Can only extract subgraph from workflow")
68
69 nodes: Dict[str, Node] = {}
70
71 for inp in tool.tool["inputs"]:
72 declare_node(nodes, inp["id"], INPUT)
73
74 for out in tool.tool["outputs"]:
75 declare_node(nodes, out["id"], OUTPUT)
76 for i in aslist(out.get("outputSource", [])):
77 # source is upstream from output (dependency)
78 nodes[out["id"]].up.append(i)
79 # output is downstream from source
80 declare_node(nodes, i, None)
81 nodes[i].down.append(out["id"])
82
83 for st in tool.tool["steps"]:
84 step = declare_node(nodes, st["id"], STEP)
85 for i in st["in"]:
86 if "source" not in i:
87 continue
88 for src in aslist(i["source"]):
89 # source is upstream from step (dependency)
90 step.up.append(src)
91 # step is downstream from source
92 declare_node(nodes, src, None)
93 nodes[src].down.append(st["id"])
94 for out in st["out"]:
95 # output is downstream from step
96 step.down.append(out)
97 # step is upstream from output
98 declare_node(nodes, out, None)
99 nodes[out].up.append(st["id"])
100
101 # Find all the downstream nodes from the starting points
102 visited_down: Set[str] = set()
103 for r in roots:
104 if nodes[r].type == OUTPUT:
105 subgraph_visit(r, nodes, visited_down, UP)
106 else:
107 subgraph_visit(r, nodes, visited_down, DOWN)
108
109 # Now make sure all the nodes are connected to upstream inputs
110 visited: Set[str] = set()
111 rewire: Dict[str, Tuple[str, CWLObjectType]] = {}
112 for v in visited_down:
113 visited.add(v)
114 if nodes[v].type in (STEP, OUTPUT):
115 for u in nodes[v].up:
116 if u in visited_down:
117 continue
118 if nodes[u].type == INPUT:
119 visited.add(u)
120 else:
121 # rewire
122 df = urllib.parse.urldefrag(u)
123 rn = str(df[0] + "#" + df[1].replace("/", "_"))
124 if nodes[v].type == STEP:
125 wfstep = find_step(tool.steps, v)
126 if wfstep is not None:
127 for inp in cast(
128 MutableSequence[CWLObjectType], wfstep["inputs"]
129 ):
130 if "source" in inp and u in cast(
131 CWLObjectType, inp["source"]
132 ):
133 rewire[u] = (rn, cast(CWLObjectType, inp["type"]))
134 break
135 else:
136 raise Exception("Could not find step %s" % v)
137
138 extracted = CommentedMap()
139 for f in tool.tool:
140 if f in ("steps", "inputs", "outputs"):
141 extracted[f] = []
142 for i in tool.tool[f]:
143 if i["id"] in visited:
144 if f == "steps":
145 for inport in i["in"]:
146 if "source" not in inport:
147 continue
148 if isinstance(inport["source"], MutableSequence):
149 inport["source"] = [
150 rewire[s][0]
151 for s in inport["source"]
152 if s in rewire
153 ]
154 elif inport["source"] in rewire:
155 inport["source"] = rewire[inport["source"]][0]
156 extracted[f].append(i)
157 else:
158 extracted[f] = tool.tool[f]
159
160 for rv in rewire.values():
161 extracted["inputs"].append({"id": rv[0], "type": rv[1]})
162
163 return extracted
164
165
166 def get_step(tool: Workflow, step_id: str) -> CommentedMap:
167
168 extracted = CommentedMap()
169
170 step = find_step(tool.steps, step_id)
171 if step is None:
172 raise Exception(f"Step {step_id} was not found")
173
174 extracted["steps"] = [step]
175 extracted["inputs"] = []
176 extracted["outputs"] = []
177
178 for inport in cast(List[CWLObjectType], step["in"]):
179 name = cast(str, inport["id"]).split("#")[-1].split("/")[-1]
180 extracted["inputs"].append({"id": name, "type": "Any"})
181 inport["source"] = name
182 if "linkMerge" in inport:
183 del inport["linkMerge"]
184
185 for outport in cast(List[str], step["out"]):
186 name = outport.split("#")[-1].split("/")[-1]
187 extracted["outputs"].append(
188 {"id": name, "type": "Any", "outputSource": f"{step_id}/{name}"}
189 )
190
191 for f in tool.tool:
192 if f not in ("steps", "inputs", "outputs"):
193 extracted[f] = tool.tool[f]
194
195 return extracted
196
197
198 def get_process(tool: Workflow, step_id: str, index: Mapping[str, Any]) -> Any:
199 """Return just a single Process from a Workflow step."""
200 step = find_step(tool.steps, step_id)
201 if step is None:
202 raise Exception(f"Step {step_id} was not found")
203
204 run = step["run"]
205
206 if isinstance(run, str):
207 return index[run]
208 else:
209 return run
210
[end of cwltool/subgraph.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py
--- a/cwltool/subgraph.py
+++ b/cwltool/subgraph.py
@@ -92,6 +92,8 @@
declare_node(nodes, src, None)
nodes[src].down.append(st["id"])
for out in st["out"]:
+ if isinstance(out, Mapping) and "id" in out:
+ out = out["id"]
# output is downstream from step
step.down.append(out)
# step is upstream from output
| {"golden_diff": "diff --git a/cwltool/subgraph.py b/cwltool/subgraph.py\n--- a/cwltool/subgraph.py\n+++ b/cwltool/subgraph.py\n@@ -92,6 +92,8 @@\n declare_node(nodes, src, None)\n nodes[src].down.append(st[\"id\"])\n for out in st[\"out\"]:\n+ if isinstance(out, Mapping) and \"id\" in out:\n+ out = out[\"id\"]\n # output is downstream from step\n step.down.append(out)\n # step is upstream from output\n", "issue": "Error when building subgraph with --target\nHello,\r\n\r\nI am trying to use `--target` option with this very simple workflow\r\n\r\n```\r\ncwltool --target step2 --debug --print-subgraph wf.cwl > subgraph.cwl\r\n```\r\n\r\nHowever I get an error `TypeError: unhashable type: 'CommentedMap'`\r\n\r\nThanks!\r\n---\r\n\r\n## Expected Behavior\r\n**subgraph.cwl**\r\n```\r\n{\r\n \"class\": \"Workflow\",\r\n \"cwlVersion\": \"v1.2\",\r\n \"id\": \"file:///home/wf.cwl\",\r\n \"inputs\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2_in\",\r\n \"type\": \"string\"\r\n }\r\n ],\r\n \"outputs\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2_out\",\r\n \"outputSource\": \"file:///home/wf.cwl#step2/output\",\r\n \"type\": \"File\"\r\n }\r\n ],\r\n \"steps\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2\",\r\n \"in\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2/in\",\r\n \"source\": \"file:///home/wf.cwl#step2_in\"\r\n }\r\n ],\r\n \"out\": [\r\n {\r\n \"id\": \"file:///home/wf.cwl#step2/output\"\r\n }\r\n ],\r\n \"run\": \"file:///home/echo.cwl\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\n## Actual Behavior\r\nFails with the error as seen in [Full Traceback](#traceback)\r\n\r\n## Workflow Code\r\n**wf.cwl**\r\n```\r\nclass: Workflow\r\ncwlVersion: v1.2\r\ninputs:\r\n - id: step1_in\r\n type: string\r\n - id: step2_in\r\n type: string\r\noutputs:\r\n - id: step1_out\r\n type: File\r\n outputSource: step1/output\r\n - id: step2_out\r\n type: File\r\n outputSource: step2/output\r\nsteps:\r\n - id: step1\r\n in:\r\n - id: in\r\n source: step1_in\r\n out:\r\n - id: output\r\n run: echo.cwl\r\n - id: step2\r\n in:\r\n - id: in\r\n source: step2_in\r\n out:\r\n - id: output\r\n run: echo.cwl\r\n```\r\n**echo.cwl**\r\n```\r\ncwlVersion: v1.2\r\nclass: CommandLineTool\r\nbaseCommand: echo\r\ninputs:\r\n - id: in\r\n type: string\r\n inputBinding: {position: 1}\r\noutputs:\r\n - id: output\r\n type: stdout\r\nstdout: output_echo.txt\r\n```\r\n**wf.yml**\r\n```\r\nstep1_in: step1\r\nstep2_in: step2\r\n```\r\n\r\n## Full Traceback\r\n\r\n<a name=\"traceback\"/>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py\", line 1020, in main\r\n ctool = choose_target(args, tool, loadingContext)\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/main.py\", line 768, in choose_target\r\n extracted = get_subgraph(\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py\", line 83, in get_subgraph\r\n declare_node(nodes, out, None)\r\n File \"/home/miniconda/envs/cwl_env/lib/python3.8/site-packages/cwltool/subgraph.py\", line 41, in declare_node\r\n if nodeid in nodes:\r\nTypeError: unhashable type: 'CommentedMap' \r\n```\r\nWhen I am printing out the `nodeid` that is passed to `declare_node` function I see `CommentedMap` instead of `string`:\r\n```\r\nordereddict([('id', 'file:///home/wf.cwl#step1/output')])\r\n```\r\nAfter I changed this lines 96-97 in 
[subgraph.py](https://github.com/common-workflow-language/cwltool/blob/main/cwltool/subgraph.py)\r\n```\r\n#declare_node(nodes, out, None)\r\ndeclare_node(nodes, out[\"id\"], None)\r\n#nodes[out].up.append(st[\"id\"])\r\nnodes[out[\"id\"]].up.append(st[\"id\"])\r\n```\r\nI got the `subgraph.cwl` as shown above\r\n\r\n## Your Environment\r\n* cwltool version: 3.0.20201203173111\r\n\r\n\n", "before_files": [{"content": "import urllib\nfrom collections import namedtuple\nfrom typing import (\n Any,\n Dict,\n List,\n Mapping,\n MutableMapping,\n MutableSequence,\n Optional,\n Set,\n Tuple,\n cast,\n)\n\nfrom ruamel.yaml.comments import CommentedMap\n\nfrom .utils import CWLObjectType, aslist\nfrom .workflow import Workflow, WorkflowStep\n\nNode = namedtuple(\"Node\", (\"up\", \"down\", \"type\"))\nUP = \"up\"\nDOWN = \"down\"\nINPUT = \"input\"\nOUTPUT = \"output\"\nSTEP = \"step\"\n\n\ndef subgraph_visit(\n current: str,\n nodes: MutableMapping[str, Node],\n visited: Set[str],\n direction: str,\n) -> None:\n\n if current in visited:\n return\n visited.add(current)\n\n if direction == DOWN:\n d = nodes[current].down\n if direction == UP:\n d = nodes[current].up\n for c in d:\n subgraph_visit(c, nodes, visited, direction)\n\n\ndef declare_node(nodes: Dict[str, Node], nodeid: str, tp: Optional[str]) -> Node:\n if nodeid in nodes:\n n = nodes[nodeid]\n if n.type is None:\n nodes[nodeid] = Node(n.up, n.down, tp)\n else:\n nodes[nodeid] = Node([], [], tp)\n return nodes[nodeid]\n\n\ndef find_step(steps: List[WorkflowStep], stepid: str) -> Optional[CWLObjectType]:\n for st in steps:\n if st.tool[\"id\"] == stepid:\n return st.tool\n return None\n\n\ndef get_subgraph(roots: MutableSequence[str], tool: Workflow) -> CommentedMap:\n if tool.tool[\"class\"] != \"Workflow\":\n raise Exception(\"Can only extract subgraph from workflow\")\n\n nodes: Dict[str, Node] = {}\n\n for inp in tool.tool[\"inputs\"]:\n declare_node(nodes, inp[\"id\"], INPUT)\n\n for out in tool.tool[\"outputs\"]:\n declare_node(nodes, out[\"id\"], OUTPUT)\n for i in aslist(out.get(\"outputSource\", [])):\n # source is upstream from output (dependency)\n nodes[out[\"id\"]].up.append(i)\n # output is downstream from source\n declare_node(nodes, i, None)\n nodes[i].down.append(out[\"id\"])\n\n for st in tool.tool[\"steps\"]:\n step = declare_node(nodes, st[\"id\"], STEP)\n for i in st[\"in\"]:\n if \"source\" not in i:\n continue\n for src in aslist(i[\"source\"]):\n # source is upstream from step (dependency)\n step.up.append(src)\n # step is downstream from source\n declare_node(nodes, src, None)\n nodes[src].down.append(st[\"id\"])\n for out in st[\"out\"]:\n # output is downstream from step\n step.down.append(out)\n # step is upstream from output\n declare_node(nodes, out, None)\n nodes[out].up.append(st[\"id\"])\n\n # Find all the downstream nodes from the starting points\n visited_down: Set[str] = set()\n for r in roots:\n if nodes[r].type == OUTPUT:\n subgraph_visit(r, nodes, visited_down, UP)\n else:\n subgraph_visit(r, nodes, visited_down, DOWN)\n\n # Now make sure all the nodes are connected to upstream inputs\n visited: Set[str] = set()\n rewire: Dict[str, Tuple[str, CWLObjectType]] = {}\n for v in visited_down:\n visited.add(v)\n if nodes[v].type in (STEP, OUTPUT):\n for u in nodes[v].up:\n if u in visited_down:\n continue\n if nodes[u].type == INPUT:\n visited.add(u)\n else:\n # rewire\n df = urllib.parse.urldefrag(u)\n rn = str(df[0] + \"#\" + df[1].replace(\"/\", \"_\"))\n if nodes[v].type == STEP:\n wfstep = 
find_step(tool.steps, v)\n if wfstep is not None:\n for inp in cast(\n MutableSequence[CWLObjectType], wfstep[\"inputs\"]\n ):\n if \"source\" in inp and u in cast(\n CWLObjectType, inp[\"source\"]\n ):\n rewire[u] = (rn, cast(CWLObjectType, inp[\"type\"]))\n break\n else:\n raise Exception(\"Could not find step %s\" % v)\n\n extracted = CommentedMap()\n for f in tool.tool:\n if f in (\"steps\", \"inputs\", \"outputs\"):\n extracted[f] = []\n for i in tool.tool[f]:\n if i[\"id\"] in visited:\n if f == \"steps\":\n for inport in i[\"in\"]:\n if \"source\" not in inport:\n continue\n if isinstance(inport[\"source\"], MutableSequence):\n inport[\"source\"] = [\n rewire[s][0]\n for s in inport[\"source\"]\n if s in rewire\n ]\n elif inport[\"source\"] in rewire:\n inport[\"source\"] = rewire[inport[\"source\"]][0]\n extracted[f].append(i)\n else:\n extracted[f] = tool.tool[f]\n\n for rv in rewire.values():\n extracted[\"inputs\"].append({\"id\": rv[0], \"type\": rv[1]})\n\n return extracted\n\n\ndef get_step(tool: Workflow, step_id: str) -> CommentedMap:\n\n extracted = CommentedMap()\n\n step = find_step(tool.steps, step_id)\n if step is None:\n raise Exception(f\"Step {step_id} was not found\")\n\n extracted[\"steps\"] = [step]\n extracted[\"inputs\"] = []\n extracted[\"outputs\"] = []\n\n for inport in cast(List[CWLObjectType], step[\"in\"]):\n name = cast(str, inport[\"id\"]).split(\"#\")[-1].split(\"/\")[-1]\n extracted[\"inputs\"].append({\"id\": name, \"type\": \"Any\"})\n inport[\"source\"] = name\n if \"linkMerge\" in inport:\n del inport[\"linkMerge\"]\n\n for outport in cast(List[str], step[\"out\"]):\n name = outport.split(\"#\")[-1].split(\"/\")[-1]\n extracted[\"outputs\"].append(\n {\"id\": name, \"type\": \"Any\", \"outputSource\": f\"{step_id}/{name}\"}\n )\n\n for f in tool.tool:\n if f not in (\"steps\", \"inputs\", \"outputs\"):\n extracted[f] = tool.tool[f]\n\n return extracted\n\n\ndef get_process(tool: Workflow, step_id: str, index: Mapping[str, Any]) -> Any:\n \"\"\"Return just a single Process from a Workflow step.\"\"\"\n step = find_step(tool.steps, step_id)\n if step is None:\n raise Exception(f\"Step {step_id} was not found\")\n\n run = step[\"run\"]\n\n if isinstance(run, str):\n return index[run]\n else:\n return run\n", "path": "cwltool/subgraph.py"}]} | 3,603 | 120 |
gh_patches_debug_6546 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2645 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider texas_roadhouse is broken
During the global build at 2021-08-11-14-42-19, spider **texas_roadhouse** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/texas_roadhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson))
</issue>
<code>
[start of locations/spiders/texas_roadhouse.py]
1 import json
2 import scrapy
3 from locations.items import GeojsonPointItem
4 from locations.hours import OpeningHours
5
6
7 class TexasRoadhouseSpider(scrapy.Spider):
8 name = "texas_roadhouse"
9 item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" }
10 allowed_domains = ["www.texasroadhouse.com"]
11 start_urls = (
12 'https://www.texasroadhouse.com/sitemap.xml',
13 )
14
15 def parse_hours(self, store_hours):
16 opening_hours = OpeningHours()
17
18 for weekday in store_hours:
19 # convert day from full Monday to Mo, etc
20 day = weekday.get('day')[:2]
21 open_time = weekday.get('hours').get('openTime')
22 close_time = weekday.get('hours').get('closeTime')
23 opening_hours.add_range(day=day,
24 open_time=open_time,
25 close_time=close_time,
26 time_format='%I:%M%p')
27
28 return opening_hours.as_opening_hours()
29
30 def parse(self, response):
31 response.selector.remove_namespaces()
32 city_urls = response.xpath('//url/loc/text()').extract()
33 for path in city_urls:
34 if path.startswith('https://www.texasroadhouse.com/locations/'):
35 yield scrapy.Request(
36 path.strip(),
37 callback=self.parse_store,
38 )
39
40 def parse_store(self, response):
41 data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])
42
43 properties = {
44 'lat': data['latitude'],
45 'lon': data['longitude'],
46 'ref': data['url'],
47 'name': data['name'],
48 'addr_full': data['address1'],
49 'city': data['city'],
50 'state': data['state'],
51 'postcode': data['postalCode'],
52 'country': data['countryCode'],
53 'phone': data['telephone'],
54 'website': response.urljoin(data['url']),
55 'opening_hours': self.parse_hours(data['schedule']),
56 }
57
58 yield GeojsonPointItem(**properties)
59
[end of locations/spiders/texas_roadhouse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py
--- a/locations/spiders/texas_roadhouse.py
+++ b/locations/spiders/texas_roadhouse.py
@@ -6,7 +6,10 @@
class TexasRoadhouseSpider(scrapy.Spider):
name = "texas_roadhouse"
- item_attributes = { 'brand': "Texas Roadhouse", 'brand_wikidata': "Q7707945" }
+ item_attributes = {
+ 'brand': "Texas Roadhouse",
+ 'brand_wikidata': "Q7707945",
+ }
allowed_domains = ["www.texasroadhouse.com"]
start_urls = (
'https://www.texasroadhouse.com/sitemap.xml',
| {"golden_diff": "diff --git a/locations/spiders/texas_roadhouse.py b/locations/spiders/texas_roadhouse.py\n--- a/locations/spiders/texas_roadhouse.py\n+++ b/locations/spiders/texas_roadhouse.py\n@@ -6,7 +6,10 @@\n \n class TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n- item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n+ item_attributes = {\n+ 'brand': \"Texas Roadhouse\",\n+ 'brand_wikidata': \"Q7707945\",\n+ }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n", "issue": "Spider texas_roadhouse is broken\nDuring the global build at 2021-08-11-14-42-19, spider **texas_roadhouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/logs/texas_roadhouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-11-14-42-19/output/texas_roadhouse.geojson))\n", "before_files": [{"content": "import json\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass TexasRoadhouseSpider(scrapy.Spider):\n name = \"texas_roadhouse\"\n item_attributes = { 'brand': \"Texas Roadhouse\", 'brand_wikidata': \"Q7707945\" }\n allowed_domains = [\"www.texasroadhouse.com\"]\n start_urls = (\n 'https://www.texasroadhouse.com/sitemap.xml',\n )\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n # convert day from full Monday to Mo, etc\n day = weekday.get('day')[:2]\n open_time = weekday.get('hours').get('openTime')\n close_time = weekday.get('hours').get('closeTime')\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%I:%M%p')\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path.startswith('https://www.texasroadhouse.com/locations/'):\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script/text()').extract_first()[22:-1])\n\n properties = {\n 'lat': data['latitude'],\n 'lon': data['longitude'],\n 'ref': data['url'],\n 'name': data['name'],\n 'addr_full': data['address1'],\n 'city': data['city'],\n 'state': data['state'],\n 'postcode': data['postalCode'],\n 'country': data['countryCode'],\n 'phone': data['telephone'],\n 'website': response.urljoin(data['url']),\n 'opening_hours': self.parse_hours(data['schedule']),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/texas_roadhouse.py"}]} | 1,294 | 183 |
gh_patches_debug_4587 | rasdani/github-patches | git_diff | ipython__ipython-11722 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing ship numpy testing decorator
```
Traceback (most recent call last):
File "/Users/mbussonnier/dev/cpython/test/bin/iptest", line 6, in <module>
from IPython.testing.iptestcontroller import main
File "/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptestcontroller.py", line 23, in <module>
from .iptest import (
File "/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptest.py", line 40, in <module>
from IPython.external.decorators import KnownFailure, dec
ImportError: cannot import name 'dec' from 'IPython.external.decorators' (/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/external/decorators/__init__.py)
```
It seems like `dec` is not defined in our `_decorators.py`.

Apologies for the shortness; I am boarding a plane.
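As a quick diagnostic (purely a sketch using the standard library, not part of the fix), one can list what the fallback shim actually exports to confirm that `dec` is the missing name:
```
import importlib

mod = importlib.import_module("IPython.external.decorators")
# Public names the fallback import path ends up exposing; on newer numpy
# installs 'dec' is expected to be absent, matching the traceback above.
print(sorted(name for name in dir(mod) if not name.startswith("_")))
```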
</issue>
<code>
[start of IPython/external/decorators/__init__.py]
1 try:
2 from numpy.testing import *
3 from numpy.testing import dec
4 from numpy.testing.noseclasses import KnownFailure
5 except ImportError:
6 from ._decorators import *
7 try:
8 from ._numpy_testing_noseclasses import KnownFailure
9 except ImportError:
10 pass
11
[end of IPython/external/decorators/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/external/decorators/__init__.py b/IPython/external/decorators/__init__.py
--- a/IPython/external/decorators/__init__.py
+++ b/IPython/external/decorators/__init__.py
@@ -1,9 +1,7 @@
try:
- from numpy.testing import *
- from numpy.testing import dec
- from numpy.testing.noseclasses import KnownFailure
+ from numpy.testing.noseclasses import KnownFailure, knownfailureif
except ImportError:
- from ._decorators import *
+ from ._decorators import knownfailureif
try:
from ._numpy_testing_noseclasses import KnownFailure
except ImportError:
| {"golden_diff": "diff --git a/IPython/external/decorators/__init__.py b/IPython/external/decorators/__init__.py\n--- a/IPython/external/decorators/__init__.py\n+++ b/IPython/external/decorators/__init__.py\n@@ -1,9 +1,7 @@\n try:\n- from numpy.testing import *\n- from numpy.testing import dec\n- from numpy.testing.noseclasses import KnownFailure\n+ from numpy.testing.noseclasses import KnownFailure, knownfailureif\n except ImportError:\n- from ._decorators import *\n+ from ._decorators import knownfailureif\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n", "issue": "Missing ship numpy testing decorator\n```\r\nTraceback (most recent call last):\r\n File \"/Users/mbussonnier/dev/cpython/test/bin/iptest\", line 6, in <module>\r\n from IPython.testing.iptestcontroller import main\r\n File \"/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptestcontroller.py\", line 23, in <module>\r\n from .iptest import (\r\n File \"/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/testing/iptest.py\", line 40, in <module>\r\n from IPython.external.decorators import KnownFailure, dec\r\nImportError: cannot import name 'dec' from 'IPython.external.decorators' (/Users/mbussonnier/dev/cpython/test/lib/python3.8/site-packages/IPython/external/decorators/__init__.py)\r\n```\r\n\r\nSeem like `dec` is not define ini out `_decorator.py` \r\n\r\n\r\nAppologies for shortness boarding a plane\n", "before_files": [{"content": "try:\n from numpy.testing import *\n from numpy.testing import dec\n from numpy.testing.noseclasses import KnownFailure\nexcept ImportError:\n from ._decorators import *\n try:\n from ._numpy_testing_noseclasses import KnownFailure\n except ImportError:\n pass\n", "path": "IPython/external/decorators/__init__.py"}]} | 839 | 150 |
gh_patches_debug_17485 | rasdani/github-patches | git_diff | kymatio__kymatio-183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[MAINT] AttributeError: module 'kymatio' has no attribute '__version__'
This came up in #172
PEP 396 has introduced the convention of exposing the constant `__version__` to the top-level module
At the moment this is in `kymatio.version.version` (so that `version.py` can be loaded in `setup.py` without importing the whole `kymatio` module)
https://www.python.org/dev/peps/pep-0396/
Will submit a PR later
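For illustration, the usual PEP 396 wiring looks roughly like this (the version string below is a placeholder, not the project's real version; only the attribute names matter):
```
# kymatio/version.py -- kept import-free so setup.py can read/exec it
# without importing the whole package
version = "0.1.0"  # placeholder value

# kymatio/__init__.py would then re-export it at the top level:
# from .version import version as __version__

# after which the attribute resolves as expected:
# >>> import kymatio
# >>> kymatio.__version__
# '0.1.0'
```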
</issue>
<code>
[start of kymatio/__init__.py]
1 __all__ = [
2 'Scattering3D',
3 'Scattering2D',
4 'Scattering1D'
5 ]
6
7 from .scattering2d.scattering2d import Scattering2D
8 from .scattering1d.scattering1d import Scattering1D
9 from .scattering3d.scattering3d import Scattering3D
10
11 # Make sure that DeprecationWarning within this package always gets printed
12 ### Snippet copied from sklearn.__init__
13 import warnings
14 import re
15 warnings.filterwarnings('always', category=DeprecationWarning,
16 module=r'^{0}.*'.format(re.escape(__name__)))
17 ### End Snippet
18
19
20
[end of kymatio/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kymatio/__init__.py b/kymatio/__init__.py
--- a/kymatio/__init__.py
+++ b/kymatio/__init__.py
@@ -1,13 +1,3 @@
-__all__ = [
- 'Scattering3D',
- 'Scattering2D',
- 'Scattering1D'
- ]
-
-from .scattering2d.scattering2d import Scattering2D
-from .scattering1d.scattering1d import Scattering1D
-from .scattering3d.scattering3d import Scattering3D
-
# Make sure that DeprecationWarning within this package always gets printed
### Snippet copied from sklearn.__init__
import warnings
@@ -16,4 +6,14 @@
module=r'^{0}.*'.format(re.escape(__name__)))
### End Snippet
+__all__ = [
+ 'Scattering1D',
+ 'Scattering2D',
+ 'Scattering3D'
+ ]
+
+from .scattering1d.scattering1d import Scattering1D
+from .scattering2d.scattering2d import Scattering2D
+from .scattering3d.scattering3d import Scattering3D
+from .version import version as __version__
| {"golden_diff": "diff --git a/kymatio/__init__.py b/kymatio/__init__.py\n--- a/kymatio/__init__.py\n+++ b/kymatio/__init__.py\n@@ -1,13 +1,3 @@\n-__all__ = [\n- 'Scattering3D',\n- 'Scattering2D',\n- 'Scattering1D'\n- ]\n-\n-from .scattering2d.scattering2d import Scattering2D\n-from .scattering1d.scattering1d import Scattering1D\n-from .scattering3d.scattering3d import Scattering3D\n-\n # Make sure that DeprecationWarning within this package always gets printed\n ### Snippet copied from sklearn.__init__\n import warnings\n@@ -16,4 +6,14 @@\n module=r'^{0}.*'.format(re.escape(__name__)))\n ### End Snippet\n \n+__all__ = [\n+ 'Scattering1D',\n+ 'Scattering2D',\n+ 'Scattering3D'\n+ ]\n+\n+from .scattering1d.scattering1d import Scattering1D\n+from .scattering2d.scattering2d import Scattering2D\n+from .scattering3d.scattering3d import Scattering3D\n \n+from .version import version as __version__\n", "issue": "[MAINT] AttributeError: module 'kymatio' has no attribute '__version__'\nThis came up in #172\r\n\r\nPEP 396 has introduced the convention of exposing the constant `__version__` to the top-level module\r\nAt the moment this is in `kymatio.version.version` (so that `version.py` can be loaded in `setup.py` without importing the whole `kymatio` module)\r\nhttps://www.python.org/dev/peps/pep-0396/\r\n\r\nWill submit a PR later\r\n\n", "before_files": [{"content": "__all__ = [\n 'Scattering3D',\n 'Scattering2D',\n 'Scattering1D'\n ]\n\nfrom .scattering2d.scattering2d import Scattering2D\nfrom .scattering1d.scattering1d import Scattering1D\nfrom .scattering3d.scattering3d import Scattering3D\n\n# Make sure that DeprecationWarning within this package always gets printed\n### Snippet copied from sklearn.__init__\nimport warnings\nimport re\nwarnings.filterwarnings('always', category=DeprecationWarning,\n module=r'^{0}.*'.format(re.escape(__name__)))\n### End Snippet\n\n\n", "path": "kymatio/__init__.py"}]} | 831 | 289 |
gh_patches_debug_5270 | rasdani/github-patches | git_diff | streamlit__streamlit-3949 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix wrong Streamlit static app for st.caption() doc
**Link to doc page in question (if any):**
[[click here]](https://docs.streamlit.io/library/api-reference/text/st.caption)
**Name of the Streamlit feature whose docs need improvement:**
`st.caption()`
**What you think the docs should say:**
The Streamlit static app below should show how `st.caption()` works - it currently shows how `st.title()` works
<img src="https://user-images.githubusercontent.com/7164864/137554438-77380317-1267-4840-9521-36f86e32b456.png" width="300px">
➡️ Code is [here](https://github.com/streamlit/streamlit/blob/267bf98d9a8f8d94cf6d3e9f0ea8866233d4836e/lib/streamlit/elements/markdown.py#L221) but I have no idea how to replace that URL with a correct one
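For context, the piece that needs to change is the `.. output::` directive inside the `caption` docstring (excerpted from the listing below); swapping in a correct URL requires generating a fresh static app that actually renders `st.caption`, which cannot be guessed from the source alone:
```
Example
-------
>>> st.caption('This is a string that explains something above.')

.. output::
    https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
    height: 100px
```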
</issue>
<code>
[start of lib/streamlit/elements/markdown.py]
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import cast
16
17 import streamlit
18 from streamlit import type_util
19 from streamlit.proto.Markdown_pb2 import Markdown as MarkdownProto
20 from .utils import clean_text
21
22
23 class MarkdownMixin:
24 def markdown(self, body, unsafe_allow_html=False):
25 """Display string formatted as Markdown.
26
27 Parameters
28 ----------
29 body : str
30 The string to display as Github-flavored Markdown. Syntax
31 information can be found at: https://github.github.com/gfm.
32
33 This also supports:
34
35 * Emoji shortcodes, such as `:+1:` and `:sunglasses:`.
36 For a list of all supported codes,
37 see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json.
38
39 * LaTeX expressions, by wrapping them in "$" or "$$" (the "$$"
40 must be on their own lines). Supported LaTeX functions are listed
41 at https://katex.org/docs/supported.html.
42
43 unsafe_allow_html : bool
44 By default, any HTML tags found in the body will be escaped and
45 therefore treated as pure text. This behavior may be turned off by
46 setting this argument to True.
47
48 That said, we *strongly advise against it*. It is hard to write
49 secure HTML, so by using this argument you may be compromising your
50 users' security. For more information, see:
51
52 https://github.com/streamlit/streamlit/issues/152
53
54 *Also note that `unsafe_allow_html` is a temporary measure and may
55 be removed from Streamlit at any time.*
56
57 If you decide to turn on HTML anyway, we ask you to please tell us
58 your exact use case here:
59
60 https://discuss.streamlit.io/t/96
61
62 This will help us come up with safe APIs that allow you to do what
63 you want.
64
65 Example
66 -------
67 >>> st.markdown('Streamlit is **_really_ cool**.')
68
69 .. output::
70 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS
71 height: 50px
72
73 """
74 markdown_proto = MarkdownProto()
75
76 markdown_proto.body = clean_text(body)
77 markdown_proto.allow_html = unsafe_allow_html
78
79 return self.dg._enqueue("markdown", markdown_proto)
80
81 def header(self, body, anchor=None):
82 """Display text in header formatting.
83
84 Parameters
85 ----------
86 body : str
87 The text to display.
88
89 anchor : str
90 The anchor name of the header that can be accessed with #anchor
91 in the URL. If omitted, it generates an anchor using the body.
92
93 Example
94 -------
95 >>> st.header('This is a header')
96
97 .. output::
98 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj
99 height: 100px
100
101 """
102 header_proto = MarkdownProto()
103 if anchor is None:
104 header_proto.body = f"## {clean_text(body)}"
105 else:
106 header_proto.body = f'<h2 data-anchor="{anchor}">{clean_text(body)}</h2>'
107 header_proto.allow_html = True
108 return self.dg._enqueue("markdown", header_proto)
109
110 def subheader(self, body, anchor=None):
111 """Display text in subheader formatting.
112
113 Parameters
114 ----------
115 body : str
116 The text to display.
117
118 anchor : str
119 The anchor name of the header that can be accessed with #anchor
120 in the URL. If omitted, it generates an anchor using the body.
121
122 Example
123 -------
124 >>> st.subheader('This is a subheader')
125
126 .. output::
127 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ
128 height: 100px
129
130 """
131 subheader_proto = MarkdownProto()
132 if anchor is None:
133 subheader_proto.body = f"### {clean_text(body)}"
134 else:
135 subheader_proto.body = f'<h3 data-anchor="{anchor}">{clean_text(body)}</h3>'
136 subheader_proto.allow_html = True
137
138 return self.dg._enqueue("markdown", subheader_proto)
139
140 def code(self, body, language="python"):
141 """Display a code block with optional syntax highlighting.
142
143 (This is a convenience wrapper around `st.markdown()`)
144
145 Parameters
146 ----------
147 body : str
148 The string to display as code.
149
150 language : str
151 The language that the code is written in, for syntax highlighting.
152 If omitted, the code will be unstyled.
153
154 Example
155 -------
156 >>> code = '''def hello():
157 ... print("Hello, Streamlit!")'''
158 >>> st.code(code, language='python')
159
160 .. output::
161 https://static.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2
162 height: 100px
163
164 """
165 code_proto = MarkdownProto()
166 markdown = "```%(language)s\n%(body)s\n```" % {
167 "language": language or "",
168 "body": body,
169 }
170 code_proto.body = clean_text(markdown)
171 return self.dg._enqueue("markdown", code_proto)
172
173 def title(self, body, anchor=None):
174 """Display text in title formatting.
175
176 Each document should have a single `st.title()`, although this is not
177 enforced.
178
179 Parameters
180 ----------
181 body : str
182 The text to display.
183
184 anchor : str
185 The anchor name of the header that can be accessed with #anchor
186 in the URL. If omitted, it generates an anchor using the body.
187
188 Example
189 -------
190 >>> st.title('This is a title')
191
192 .. output::
193 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
194 height: 100px
195
196 """
197 title_proto = MarkdownProto()
198 if anchor is None:
199 title_proto.body = f"# {clean_text(body)}"
200 else:
201 title_proto.body = f'<h1 data-anchor="{anchor}">{clean_text(body)}</h1>'
202 title_proto.allow_html = True
203 return self.dg._enqueue("markdown", title_proto)
204
205 def caption(self, body):
206 """Display text in small font.
207
208 This should be used for captions, asides, footnotes, sidenotes, and
209 other explanatory text.
210
211 Parameters
212 ----------
213 body : str
214 The text to display.
215
216 Example
217 -------
218 >>> st.caption('This is a string that explains something above.')
219
220 .. output::
221 https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
222 height: 100px
223
224 """
225 caption_proto = MarkdownProto()
226 caption_proto.body = body
227 caption_proto.allow_html = False
228 caption_proto.is_caption = True
229 return self.dg._enqueue("markdown", caption_proto)
230
231 def latex(self, body):
232 # This docstring needs to be "raw" because of the backslashes in the
233 # example below.
234 r"""Display mathematical expressions formatted as LaTeX.
235
236 Supported LaTeX functions are listed at
237 https://katex.org/docs/supported.html.
238
239 Parameters
240 ----------
241 body : str or SymPy expression
242 The string or SymPy expression to display as LaTeX. If str, it's
243 a good idea to use raw Python strings since LaTeX uses backslashes
244 a lot.
245
246
247 Example
248 -------
249 >>> st.latex(r'''
250 ... a + ar + a r^2 + a r^3 + \cdots + a r^{n-1} =
251 ... \sum_{k=0}^{n-1} ar^k =
252 ... a \left(\frac{1-r^{n}}{1-r}\right)
253 ... ''')
254
255 .. output::
256 https://static.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4
257 height: 75px
258
259 """
260 if type_util.is_sympy_expession(body):
261 import sympy
262
263 body = sympy.latex(body)
264
265 latex_proto = MarkdownProto()
266 latex_proto.body = "$$\n%s\n$$" % clean_text(body)
267 return self.dg._enqueue("markdown", latex_proto)
268
269 @property
270 def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
271 """Get our DeltaGenerator."""
272 return cast("streamlit.delta_generator.DeltaGenerator", self)
273
[end of lib/streamlit/elements/markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/streamlit/elements/markdown.py b/lib/streamlit/elements/markdown.py
--- a/lib/streamlit/elements/markdown.py
+++ b/lib/streamlit/elements/markdown.py
@@ -218,8 +218,8 @@
>>> st.caption('This is a string that explains something above.')
.. output::
- https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj
- height: 100px
+ https://static.streamlit.io/1.1.0-eQCi/index.html?id=SVQb16b2UDZX4W8VLkEJLJ
+ height: 175px
"""
caption_proto = MarkdownProto()
| {"golden_diff": "diff --git a/lib/streamlit/elements/markdown.py b/lib/streamlit/elements/markdown.py\n--- a/lib/streamlit/elements/markdown.py\n+++ b/lib/streamlit/elements/markdown.py\n@@ -218,8 +218,8 @@\n >>> st.caption('This is a string that explains something above.')\n \n .. output::\n- https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n- height: 100px\n+ https://static.streamlit.io/1.1.0-eQCi/index.html?id=SVQb16b2UDZX4W8VLkEJLJ\n+ height: 175px\n \n \"\"\"\n caption_proto = MarkdownProto()\n", "issue": "Fix wrong Streamlit static app for st.caption() doc\n**Link to doc page in question (if any):**\r\n\r\n[[click here]](https://docs.streamlit.io/library/api-reference/text/st.caption)\r\n\r\n**Name of the Streamlit feature whose docs need improvement:**\r\n`st.caption()`\r\n\r\n**What you think the docs should say:**\r\nThe Streamlit static app below should show how `st.caption()` works - it currently shows how `st.title()` work\r\n\r\n<img src=\"https://user-images.githubusercontent.com/7164864/137554438-77380317-1267-4840-9521-36f86e32b456.png\" width=\"300px\">\r\n\r\n\u27a1\ufe0f Code is [here](https://github.com/streamlit/streamlit/blob/267bf98d9a8f8d94cf6d3e9f0ea8866233d4836e/lib/streamlit/elements/markdown.py#L221) but I have no idea how to replace that URL with a correct one\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import cast\n\nimport streamlit\nfrom streamlit import type_util\nfrom streamlit.proto.Markdown_pb2 import Markdown as MarkdownProto\nfrom .utils import clean_text\n\n\nclass MarkdownMixin:\n def markdown(self, body, unsafe_allow_html=False):\n \"\"\"Display string formatted as Markdown.\n\n Parameters\n ----------\n body : str\n The string to display as Github-flavored Markdown. Syntax\n information can be found at: https://github.github.com/gfm.\n\n This also supports:\n\n * Emoji shortcodes, such as `:+1:` and `:sunglasses:`.\n For a list of all supported codes,\n see https://raw.githubusercontent.com/omnidan/node-emoji/master/lib/emoji.json.\n\n * LaTeX expressions, by wrapping them in \"$\" or \"$$\" (the \"$$\"\n must be on their own lines). Supported LaTeX functions are listed\n at https://katex.org/docs/supported.html.\n\n unsafe_allow_html : bool\n By default, any HTML tags found in the body will be escaped and\n therefore treated as pure text. This behavior may be turned off by\n setting this argument to True.\n\n That said, we *strongly advise against it*. It is hard to write\n secure HTML, so by using this argument you may be compromising your\n users' security. 
For more information, see:\n\n https://github.com/streamlit/streamlit/issues/152\n\n *Also note that `unsafe_allow_html` is a temporary measure and may\n be removed from Streamlit at any time.*\n\n If you decide to turn on HTML anyway, we ask you to please tell us\n your exact use case here:\n\n https://discuss.streamlit.io/t/96\n\n This will help us come up with safe APIs that allow you to do what\n you want.\n\n Example\n -------\n >>> st.markdown('Streamlit is **_really_ cool**.')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=PXz9xgY8aB88eziDVEZLyS\n height: 50px\n\n \"\"\"\n markdown_proto = MarkdownProto()\n\n markdown_proto.body = clean_text(body)\n markdown_proto.allow_html = unsafe_allow_html\n\n return self.dg._enqueue(\"markdown\", markdown_proto)\n\n def header(self, body, anchor=None):\n \"\"\"Display text in header formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.header('This is a header')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=AnfQVFgSCQtGv6yMUMUYjj\n height: 100px\n\n \"\"\"\n header_proto = MarkdownProto()\n if anchor is None:\n header_proto.body = f\"## {clean_text(body)}\"\n else:\n header_proto.body = f'<h2 data-anchor=\"{anchor}\">{clean_text(body)}</h2>'\n header_proto.allow_html = True\n return self.dg._enqueue(\"markdown\", header_proto)\n\n def subheader(self, body, anchor=None):\n \"\"\"Display text in subheader formatting.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.subheader('This is a subheader')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=LBKJTfFUwudrbWENSHV6cJ\n height: 100px\n\n \"\"\"\n subheader_proto = MarkdownProto()\n if anchor is None:\n subheader_proto.body = f\"### {clean_text(body)}\"\n else:\n subheader_proto.body = f'<h3 data-anchor=\"{anchor}\">{clean_text(body)}</h3>'\n subheader_proto.allow_html = True\n\n return self.dg._enqueue(\"markdown\", subheader_proto)\n\n def code(self, body, language=\"python\"):\n \"\"\"Display a code block with optional syntax highlighting.\n\n (This is a convenience wrapper around `st.markdown()`)\n\n Parameters\n ----------\n body : str\n The string to display as code.\n\n language : str\n The language that the code is written in, for syntax highlighting.\n If omitted, the code will be unstyled.\n\n Example\n -------\n >>> code = '''def hello():\n ... print(\"Hello, Streamlit!\")'''\n >>> st.code(code, language='python')\n\n .. output::\n https://static.streamlit.io/0.27.0-kBtt/index.html?id=VDRnaCEZWSBCNUd5gNQZv2\n height: 100px\n\n \"\"\"\n code_proto = MarkdownProto()\n markdown = \"```%(language)s\\n%(body)s\\n```\" % {\n \"language\": language or \"\",\n \"body\": body,\n }\n code_proto.body = clean_text(markdown)\n return self.dg._enqueue(\"markdown\", code_proto)\n\n def title(self, body, anchor=None):\n \"\"\"Display text in title formatting.\n\n Each document should have a single `st.title()`, although this is not\n enforced.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n anchor : str\n The anchor name of the header that can be accessed with #anchor\n in the URL. 
If omitted, it generates an anchor using the body.\n\n Example\n -------\n >>> st.title('This is a title')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n height: 100px\n\n \"\"\"\n title_proto = MarkdownProto()\n if anchor is None:\n title_proto.body = f\"# {clean_text(body)}\"\n else:\n title_proto.body = f'<h1 data-anchor=\"{anchor}\">{clean_text(body)}</h1>'\n title_proto.allow_html = True\n return self.dg._enqueue(\"markdown\", title_proto)\n\n def caption(self, body):\n \"\"\"Display text in small font.\n\n This should be used for captions, asides, footnotes, sidenotes, and\n other explanatory text.\n\n Parameters\n ----------\n body : str\n The text to display.\n\n Example\n -------\n >>> st.caption('This is a string that explains something above.')\n\n .. output::\n https://static.streamlit.io/0.25.0-2JkNY/index.html?id=SFcBGANWd8kWXF28XnaEZj\n height: 100px\n\n \"\"\"\n caption_proto = MarkdownProto()\n caption_proto.body = body\n caption_proto.allow_html = False\n caption_proto.is_caption = True\n return self.dg._enqueue(\"markdown\", caption_proto)\n\n def latex(self, body):\n # This docstring needs to be \"raw\" because of the backslashes in the\n # example below.\n r\"\"\"Display mathematical expressions formatted as LaTeX.\n\n Supported LaTeX functions are listed at\n https://katex.org/docs/supported.html.\n\n Parameters\n ----------\n body : str or SymPy expression\n The string or SymPy expression to display as LaTeX. If str, it's\n a good idea to use raw Python strings since LaTeX uses backslashes\n a lot.\n\n\n Example\n -------\n >>> st.latex(r'''\n ... a + ar + a r^2 + a r^3 + \\cdots + a r^{n-1} =\n ... \\sum_{k=0}^{n-1} ar^k =\n ... a \\left(\\frac{1-r^{n}}{1-r}\\right)\n ... ''')\n\n .. output::\n https://static.streamlit.io/0.50.0-td2L/index.html?id=NJFsy6NbGTsH2RF9W6ioQ4\n height: 75px\n\n \"\"\"\n if type_util.is_sympy_expession(body):\n import sympy\n\n body = sympy.latex(body)\n\n latex_proto = MarkdownProto()\n latex_proto.body = \"$$\\n%s\\n$$\" % clean_text(body)\n return self.dg._enqueue(\"markdown\", latex_proto)\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/markdown.py"}]} | 3,628 | 187 |
gh_patches_debug_32148 | rasdani/github-patches | git_diff | learningequality__kolibri-11937 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enhance Quiz- Side panel doesn't show on mobile
## Overview
When I was explaining a certain piece of behavior in the select resource component to @akolson, I discovered that when you want to add a resource while on mobile, the side panel does not appear at all. Or, when it is already loaded and the page changes to mobile view, everything gets distorted.
(Screen recording attachment "Screen Recording 2024-02-01 at 16.37.15.mov"; the upload did not complete, so only the filename is preserved here.)
improve exam preview layout when there are many questions in a quiz
### Observed behavior
see comments in https://docs.google.com/document/d/1QeRghWCFdJgWV627DaIWRM80pJ3IkJr_TRQjI4uJXEw/edit#heading=h.kj33sjr8m9b5
### Expected behavior
don't scroll exercise preview offscreen
### User-facing consequences
extra scrolling when quizzes have many questions
### Steps to reproduce
create a quiz with 50 questions and then try to rearrange them
### Context
0.12
</issue>
<code>
[start of kolibri/plugins/learn/viewsets.py]
1 from django.db.models import Count
2 from django.db.models import OuterRef
3 from django.db.models import Q
4 from django.db.models import Subquery
5 from django.db.models import Sum
6 from django.db.models.fields import IntegerField
7 from rest_framework.permissions import IsAuthenticated
8 from rest_framework.response import Response
9 from rest_framework.views import APIView
10
11 from kolibri.core.api import ReadOnlyValuesViewset
12 from kolibri.core.auth.models import Classroom
13 from kolibri.core.auth.models import Facility
14 from kolibri.core.content.api import ContentNodeProgressViewset
15 from kolibri.core.content.api import ContentNodeViewset
16 from kolibri.core.content.api import UserContentNodeViewset
17 from kolibri.core.content.models import ContentNode
18 from kolibri.core.exams.models import Exam
19 from kolibri.core.lessons.models import Lesson
20 from kolibri.core.logger.models import AttemptLog
21 from kolibri.core.logger.models import MasteryLog
22
23
24 contentnode_progress_viewset = ContentNodeProgressViewset()
25 contentnode_viewset = ContentNodeViewset()
26 user_contentnode_viewset = UserContentNodeViewset()
27
28
29 class LearnStateView(APIView):
30 def get(self, request, format=None):
31 """
32 Returns some configuration variables applicable to users navigating learn.
33 - in_classes: Whether the user is in any classes
34 - can_download_externally: Whether the user can download content externally from Kolibri
35 """
36 if request.user.is_anonymous:
37 default_facility = Facility.get_default_facility()
38 can_download_externally = (
39 default_facility.dataset.show_download_button_in_learn
40 if default_facility
41 else True
42 )
43 return Response(
44 {
45 "in_classes": False,
46 "can_download_externally": can_download_externally,
47 }
48 )
49 return Response(
50 {
51 "in_classes": request.user.memberships.exists(),
52 "can_download_externally": request.user.dataset.show_download_button_in_learn,
53 }
54 )
55
56
57 def _consolidate_lessons_data(request, lessons):
58 lesson_contentnode_ids = set()
59 for lesson in lessons:
60 lesson_contentnode_ids |= {
61 resource["contentnode_id"] for resource in lesson["resources"]
62 }
63
64 contentnode_progress = (
65 contentnode_progress_viewset.serialize_list(
66 request, {"ids": lesson_contentnode_ids}
67 )
68 if lesson_contentnode_ids
69 else []
70 )
71
72 contentnodes = (
73 contentnode_viewset.serialize_list(request, {"ids": lesson_contentnode_ids})
74 if lesson_contentnode_ids
75 else []
76 )
77
78 progress_map = {l["content_id"]: l["progress"] for l in contentnode_progress}
79
80 contentnode_map = {c["id"]: c for c in contentnodes}
81
82 for lesson in lessons:
83 lesson["progress"] = {
84 "resource_progress": sum(
85 (
86 progress_map[resource["content_id"]]
87 for resource in lesson["resources"]
88 if resource["content_id"] in progress_map
89 )
90 ),
91 "total_resources": len(lesson["resources"]),
92 }
93 missing_resource = False
94 for resource in lesson["resources"]:
95 resource["progress"] = progress_map.get(resource["content_id"], 0)
96 resource["contentnode"] = contentnode_map.get(
97 resource["contentnode_id"], None
98 )
99 missing_resource = missing_resource or not resource["contentnode"]
100 lesson["missing_resource"] = missing_resource
101
102
103 class LearnerClassroomViewset(ReadOnlyValuesViewset):
104 """
105 Returns all Classrooms for which the requesting User is a member,
106 along with all associated assignments.
107 """
108
109 permission_classes = (IsAuthenticated,)
110
111 values = ("id", "name")
112
113 def get_queryset(self):
114 if self.request.user.is_anonymous:
115 return Classroom.objects.none()
116 return Classroom.objects.filter(membership__user=self.request.user)
117
118 def consolidate(self, items, queryset):
119 if not items:
120 return items
121 lessons = (
122 Lesson.objects.filter(
123 lesson_assignments__collection__membership__user=self.request.user,
124 is_active=True,
125 collection__in=(c["id"] for c in items),
126 )
127 .distinct()
128 .values(
129 "description", "id", "is_active", "title", "resources", "collection"
130 )
131 )
132 _consolidate_lessons_data(self.request, lessons)
133
134 user_masterylog_content_ids = MasteryLog.objects.filter(
135 user=self.request.user
136 ).values("summarylog__content_id")
137
138 exams = (
139 Exam.objects.filter(
140 assignments__collection__membership__user=self.request.user,
141 collection__in=(c["id"] for c in items),
142 )
143 .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))
144 .annotate(
145 closed=Subquery(
146 MasteryLog.objects.filter(
147 summarylog__content_id=OuterRef("id"), user=self.request.user
148 ).values("complete")[:1]
149 ),
150 score=Subquery(
151 AttemptLog.objects.filter(
152 sessionlog__content_id=OuterRef("id"), user=self.request.user
153 )
154 .order_by()
155 .values_list("item")
156 .distinct()
157 .values("masterylog")
158 .annotate(total_correct=Sum("correct"))
159 .values("total_correct"),
160 output_field=IntegerField(),
161 ),
162 answer_count=Subquery(
163 AttemptLog.objects.filter(
164 sessionlog__content_id=OuterRef("id"), user=self.request.user
165 )
166 .order_by()
167 .values_list("item")
168 .distinct()
169 .values("masterylog")
170 .annotate(total_complete=Count("id"))
171 .values("total_complete"),
172 output_field=IntegerField(),
173 ),
174 )
175 .distinct()
176 .values(
177 "collection",
178 "active",
179 "archive",
180 "id",
181 "question_count",
182 "title",
183 "closed",
184 "answer_count",
185 "score",
186 "question_sources",
187 )
188 )
189 exam_node_ids = set()
190 for exam in exams:
191 exam_node_ids |= {
192 question["exercise_id"] for question in exam.get("question_sources")
193 }
194
195 available_exam_ids = set(
196 ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(
197 "id", flat=True
198 )
199 )
200
201 for exam in exams:
202 closed = exam.pop("closed")
203 score = exam.pop("score")
204 answer_count = exam.pop("answer_count")
205 if closed is not None:
206 exam["progress"] = {
207 "closed": closed,
208 "score": score,
209 "answer_count": answer_count,
210 "started": True,
211 }
212 else:
213 exam["progress"] = {
214 "score": None,
215 "answer_count": None,
216 "closed": None,
217 "started": False,
218 }
219 exam["missing_resource"] = any(
220 question["exercise_id"] not in available_exam_ids
221 for question in exam.get("question_sources")
222 )
223 out_items = []
224 for item in items:
225 item["assignments"] = {
226 "exams": [exam for exam in exams if exam["collection"] == item["id"]],
227 "lessons": [
228 lesson for lesson in lessons if lesson["collection"] == item["id"]
229 ],
230 }
231 out_items.append(item)
232 return out_items
233
234
235 learner_classroom_viewset = LearnerClassroomViewset()
236
237
238 def _resumable_resources(classrooms):
239 for classroom in classrooms:
240 for lesson in classroom["assignments"]["lessons"]:
241 for resource in lesson["resources"]:
242 yield 0 < resource["progress"] < 1
243
244
245 class LearnHomePageHydrationView(APIView):
246 def get(self, request, format=None):
247 classrooms = []
248 resumable_resources = []
249 resumable_resources_progress = []
250 if not request.user.is_anonymous:
251 classrooms = learner_classroom_viewset.serialize_list(request)
252 if not classrooms or not any(_resumable_resources(classrooms)):
253 resumable_resources = user_contentnode_viewset.serialize_list(
254 request,
255 {"resume": True, "max_results": 12, "ordering": "-last_interacted"},
256 )
257 resumable_resources_progress = (
258 contentnode_progress_viewset.serialize_list(
259 request,
260 {
261 "resume": True,
262 "max_results": 12,
263 "ordering": "-last_interacted",
264 },
265 )
266 )
267
268 return Response(
269 {
270 "classrooms": classrooms,
271 "resumable_resources": resumable_resources,
272 "resumable_resources_progress": resumable_resources_progress,
273 }
274 )
275
276
277 def _map_lesson_classroom(item):
278 return {
279 "id": item.pop("collection__id"),
280 "name": item.pop("collection__name"),
281 "parent": item.pop("collection__parent_id"),
282 }
283
284
285 class LearnerLessonViewset(ReadOnlyValuesViewset):
286 """
287 Special Viewset for Learners to view Lessons to which they are assigned.
288 The core Lesson Viewset is locked down to Admin users only.
289 """
290
291 permission_classes = (IsAuthenticated,)
292
293 values = (
294 "id",
295 "title",
296 "description",
297 "resources",
298 "is_active",
299 "collection",
300 "collection__id",
301 "collection__name",
302 "collection__parent_id",
303 )
304
305 field_map = {"classroom": _map_lesson_classroom}
306
307 def get_queryset(self):
308 if self.request.user.is_anonymous:
309 return Lesson.objects.none()
310 return Lesson.objects.filter(
311 lesson_assignments__collection__membership__user=self.request.user,
312 is_active=True,
313 )
314
315 def consolidate(self, items, queryset):
316 if not items:
317 return items
318
319 _consolidate_lessons_data(self.request, items)
320
321 return items
322
[end of kolibri/plugins/learn/viewsets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py
--- a/kolibri/plugins/learn/viewsets.py
+++ b/kolibri/plugins/learn/viewsets.py
@@ -16,6 +16,7 @@
from kolibri.core.content.api import UserContentNodeViewset
from kolibri.core.content.models import ContentNode
from kolibri.core.exams.models import Exam
+from kolibri.core.exams.models import exam_assignment_lookup
from kolibri.core.lessons.models import Lesson
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import MasteryLog
@@ -182,14 +183,19 @@
"title",
"closed",
"answer_count",
+ "data_model_version",
"score",
"question_sources",
)
)
exam_node_ids = set()
+
for exam in exams:
exam_node_ids |= {
- question["exercise_id"] for question in exam.get("question_sources")
+ exercise_id
+ for exercise_id, _ in exam_assignment_lookup(
+ exam.get("question_sources", [])
+ )
}
available_exam_ids = set(
@@ -217,8 +223,10 @@
"started": False,
}
exam["missing_resource"] = any(
- question["exercise_id"] not in available_exam_ids
- for question in exam.get("question_sources")
+ exercise_id not in available_exam_ids
+ for exercise_id, _ in exam_assignment_lookup(
+ exam.get("question_sources", [])
+ )
)
out_items = []
for item in items:
| {"golden_diff": "diff --git a/kolibri/plugins/learn/viewsets.py b/kolibri/plugins/learn/viewsets.py\n--- a/kolibri/plugins/learn/viewsets.py\n+++ b/kolibri/plugins/learn/viewsets.py\n@@ -16,6 +16,7 @@\n from kolibri.core.content.api import UserContentNodeViewset\n from kolibri.core.content.models import ContentNode\n from kolibri.core.exams.models import Exam\n+from kolibri.core.exams.models import exam_assignment_lookup\n from kolibri.core.lessons.models import Lesson\n from kolibri.core.logger.models import AttemptLog\n from kolibri.core.logger.models import MasteryLog\n@@ -182,14 +183,19 @@\n \"title\",\n \"closed\",\n \"answer_count\",\n+ \"data_model_version\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n+\n for exam in exams:\n exam_node_ids |= {\n- question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n+ exercise_id\n+ for exercise_id, _ in exam_assignment_lookup(\n+ exam.get(\"question_sources\", [])\n+ )\n }\n \n available_exam_ids = set(\n@@ -217,8 +223,10 @@\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n- question[\"exercise_id\"] not in available_exam_ids\n- for question in exam.get(\"question_sources\")\n+ exercise_id not in available_exam_ids\n+ for exercise_id, _ in exam_assignment_lookup(\n+ exam.get(\"question_sources\", [])\n+ )\n )\n out_items = []\n for item in items:\n", "issue": "Enhance Quiz- Side panel doesn't show on mobile\n\r\n## Overview\r\n\r\nWhen I was explaining a certain piece of behavior in the select resource component to @akolson . I discovered that when you want to add resource while on mobile the side panel does not appear at all. or when it's loaded and the page change to mobile view everything get's distorted.\r\n\r\n\r\n\r\nUploading Screen Recording 2024-02-01 at 16.37.15.mov\u2026\r\n\r\n\r\n\nimprove exam preview layout when there are many questions in a quiz\n\r\n### Observed behavior\r\n\r\nsee comments in https://docs.google.com/document/d/1QeRghWCFdJgWV627DaIWRM80pJ3IkJr_TRQjI4uJXEw/edit#heading=h.kj33sjr8m9b5\r\n\r\n### Expected behavior\r\n\r\ndon't scroll exercise preview offscreen\r\n\r\n### User-facing consequences\r\n\r\nextra scrolling when quizzes have many questions\r\n\r\n\r\n### Steps to reproduce\r\n\r\ncreate a quiz with 50 questions and then try to rearrange them \r\n\r\n### Context\r\n\r\n0.12\n", "before_files": [{"content": "from django.db.models import Count\nfrom django.db.models import OuterRef\nfrom django.db.models import Q\nfrom django.db.models import Subquery\nfrom django.db.models import Sum\nfrom django.db.models.fields import IntegerField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom kolibri.core.api import ReadOnlyValuesViewset\nfrom kolibri.core.auth.models import Classroom\nfrom kolibri.core.auth.models import Facility\nfrom kolibri.core.content.api import ContentNodeProgressViewset\nfrom kolibri.core.content.api import ContentNodeViewset\nfrom kolibri.core.content.api import UserContentNodeViewset\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.exams.models import Exam\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.logger.models import AttemptLog\nfrom kolibri.core.logger.models import MasteryLog\n\n\ncontentnode_progress_viewset = ContentNodeProgressViewset()\ncontentnode_viewset = ContentNodeViewset()\nuser_contentnode_viewset = UserContentNodeViewset()\n\n\nclass LearnStateView(APIView):\n def 
get(self, request, format=None):\n \"\"\"\n Returns some configuration variables applicable to users navigating learn.\n - in_classes: Whether the user is in any classes\n - can_download_externally: Whether the user can download content externally from Kolibri\n \"\"\"\n if request.user.is_anonymous:\n default_facility = Facility.get_default_facility()\n can_download_externally = (\n default_facility.dataset.show_download_button_in_learn\n if default_facility\n else True\n )\n return Response(\n {\n \"in_classes\": False,\n \"can_download_externally\": can_download_externally,\n }\n )\n return Response(\n {\n \"in_classes\": request.user.memberships.exists(),\n \"can_download_externally\": request.user.dataset.show_download_button_in_learn,\n }\n )\n\n\ndef _consolidate_lessons_data(request, lessons):\n lesson_contentnode_ids = set()\n for lesson in lessons:\n lesson_contentnode_ids |= {\n resource[\"contentnode_id\"] for resource in lesson[\"resources\"]\n }\n\n contentnode_progress = (\n contentnode_progress_viewset.serialize_list(\n request, {\"ids\": lesson_contentnode_ids}\n )\n if lesson_contentnode_ids\n else []\n )\n\n contentnodes = (\n contentnode_viewset.serialize_list(request, {\"ids\": lesson_contentnode_ids})\n if lesson_contentnode_ids\n else []\n )\n\n progress_map = {l[\"content_id\"]: l[\"progress\"] for l in contentnode_progress}\n\n contentnode_map = {c[\"id\"]: c for c in contentnodes}\n\n for lesson in lessons:\n lesson[\"progress\"] = {\n \"resource_progress\": sum(\n (\n progress_map[resource[\"content_id\"]]\n for resource in lesson[\"resources\"]\n if resource[\"content_id\"] in progress_map\n )\n ),\n \"total_resources\": len(lesson[\"resources\"]),\n }\n missing_resource = False\n for resource in lesson[\"resources\"]:\n resource[\"progress\"] = progress_map.get(resource[\"content_id\"], 0)\n resource[\"contentnode\"] = contentnode_map.get(\n resource[\"contentnode_id\"], None\n )\n missing_resource = missing_resource or not resource[\"contentnode\"]\n lesson[\"missing_resource\"] = missing_resource\n\n\nclass LearnerClassroomViewset(ReadOnlyValuesViewset):\n \"\"\"\n Returns all Classrooms for which the requesting User is a member,\n along with all associated assignments.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\"id\", \"name\")\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Classroom.objects.none()\n return Classroom.objects.filter(membership__user=self.request.user)\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n lessons = (\n Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n collection__in=(c[\"id\"] for c in items),\n )\n .distinct()\n .values(\n \"description\", \"id\", \"is_active\", \"title\", \"resources\", \"collection\"\n )\n )\n _consolidate_lessons_data(self.request, lessons)\n\n user_masterylog_content_ids = MasteryLog.objects.filter(\n user=self.request.user\n ).values(\"summarylog__content_id\")\n\n exams = (\n Exam.objects.filter(\n assignments__collection__membership__user=self.request.user,\n collection__in=(c[\"id\"] for c in items),\n )\n .filter(Q(active=True) | Q(id__in=user_masterylog_content_ids))\n .annotate(\n closed=Subquery(\n MasteryLog.objects.filter(\n summarylog__content_id=OuterRef(\"id\"), user=self.request.user\n ).values(\"complete\")[:1]\n ),\n score=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n 
.values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_correct=Sum(\"correct\"))\n .values(\"total_correct\"),\n output_field=IntegerField(),\n ),\n answer_count=Subquery(\n AttemptLog.objects.filter(\n sessionlog__content_id=OuterRef(\"id\"), user=self.request.user\n )\n .order_by()\n .values_list(\"item\")\n .distinct()\n .values(\"masterylog\")\n .annotate(total_complete=Count(\"id\"))\n .values(\"total_complete\"),\n output_field=IntegerField(),\n ),\n )\n .distinct()\n .values(\n \"collection\",\n \"active\",\n \"archive\",\n \"id\",\n \"question_count\",\n \"title\",\n \"closed\",\n \"answer_count\",\n \"score\",\n \"question_sources\",\n )\n )\n exam_node_ids = set()\n for exam in exams:\n exam_node_ids |= {\n question[\"exercise_id\"] for question in exam.get(\"question_sources\")\n }\n\n available_exam_ids = set(\n ContentNode.objects.filter_by_uuids(exam_node_ids).values_list(\n \"id\", flat=True\n )\n )\n\n for exam in exams:\n closed = exam.pop(\"closed\")\n score = exam.pop(\"score\")\n answer_count = exam.pop(\"answer_count\")\n if closed is not None:\n exam[\"progress\"] = {\n \"closed\": closed,\n \"score\": score,\n \"answer_count\": answer_count,\n \"started\": True,\n }\n else:\n exam[\"progress\"] = {\n \"score\": None,\n \"answer_count\": None,\n \"closed\": None,\n \"started\": False,\n }\n exam[\"missing_resource\"] = any(\n question[\"exercise_id\"] not in available_exam_ids\n for question in exam.get(\"question_sources\")\n )\n out_items = []\n for item in items:\n item[\"assignments\"] = {\n \"exams\": [exam for exam in exams if exam[\"collection\"] == item[\"id\"]],\n \"lessons\": [\n lesson for lesson in lessons if lesson[\"collection\"] == item[\"id\"]\n ],\n }\n out_items.append(item)\n return out_items\n\n\nlearner_classroom_viewset = LearnerClassroomViewset()\n\n\ndef _resumable_resources(classrooms):\n for classroom in classrooms:\n for lesson in classroom[\"assignments\"][\"lessons\"]:\n for resource in lesson[\"resources\"]:\n yield 0 < resource[\"progress\"] < 1\n\n\nclass LearnHomePageHydrationView(APIView):\n def get(self, request, format=None):\n classrooms = []\n resumable_resources = []\n resumable_resources_progress = []\n if not request.user.is_anonymous:\n classrooms = learner_classroom_viewset.serialize_list(request)\n if not classrooms or not any(_resumable_resources(classrooms)):\n resumable_resources = user_contentnode_viewset.serialize_list(\n request,\n {\"resume\": True, \"max_results\": 12, \"ordering\": \"-last_interacted\"},\n )\n resumable_resources_progress = (\n contentnode_progress_viewset.serialize_list(\n request,\n {\n \"resume\": True,\n \"max_results\": 12,\n \"ordering\": \"-last_interacted\",\n },\n )\n )\n\n return Response(\n {\n \"classrooms\": classrooms,\n \"resumable_resources\": resumable_resources,\n \"resumable_resources_progress\": resumable_resources_progress,\n }\n )\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LearnerLessonViewset(ReadOnlyValuesViewset):\n \"\"\"\n Special Viewset for Learners to view Lessons to which they are assigned.\n The core Lesson Viewset is locked down to Admin users only.\n \"\"\"\n\n permission_classes = (IsAuthenticated,)\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\",\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n 
)\n\n field_map = {\"classroom\": _map_lesson_classroom}\n\n def get_queryset(self):\n if self.request.user.is_anonymous:\n return Lesson.objects.none()\n return Lesson.objects.filter(\n lesson_assignments__collection__membership__user=self.request.user,\n is_active=True,\n )\n\n def consolidate(self, items, queryset):\n if not items:\n return items\n\n _consolidate_lessons_data(self.request, items)\n\n return items\n", "path": "kolibri/plugins/learn/viewsets.py"}]} | 3,747 | 363 |
gh_patches_debug_17251 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-183 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ASGI app broken with asyncio
Our ASGI app is currently broken when resolvers are async. The implementation I made needs a bit of love :) We might have a look at how Ariadne is doing this :)
Here's a test that fails:
```python
import typing
import pytest
import strawberry
from starlette.testclient import TestClient
from strawberry.asgi import GraphQL
@pytest.fixture
def async_schema():
@strawberry.type
class Query:
@strawberry.field
async def hello(self, info, name: typing.Optional[str] = None) -> str:
return f"Hello {name or 'world'}"
return strawberry.Schema(Query)
@pytest.fixture
def test_client(async_schema):
app = GraphQL(async_schema)
return TestClient(app)
def test_simple_query(schema, test_client):
response = test_client.post("/", json={"query": "{ hello }"})
assert response.json() == {"data": {"hello": "Hello world"}}
```
</issue>
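The failing test uses an `async def` resolver, which means graphql-core's `execute` hands back an awaitable instead of a plain `ExecutionResult`, and whatever runs the query has to await it. A minimal sketch of that idea (`unwrap_result` is an illustrative helper, not something in the codebase):
```python
import typing
from inspect import isawaitable

from graphql import ExecutionResult


async def unwrap_result(result) -> ExecutionResult:
    # With async resolvers, graphql-core returns an awaitable rather than a
    # plain ExecutionResult, so it has to be awaited before use.
    if isawaitable(result):
        return await typing.cast(typing.Awaitable[ExecutionResult], result)
    return result
```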
<code>
[start of strawberry/graphql.py]
1 import typing
2
3 from graphql import (
4 ExecutionResult,
5 GraphQLError,
6 GraphQLSchema,
7 execute as graphql_excute,
8 parse,
9 )
10 from graphql.subscription import subscribe as graphql_subscribe
11 from graphql.type import validate_schema
12 from graphql.validation import validate
13
14 from .middleware import DirectivesMiddleware
15
16
17 async def execute(
18 schema: GraphQLSchema,
19 query: str,
20 context_value: typing.Any = None,
21 variable_values: typing.Dict[str, typing.Any] = None,
22 operation_name: str = None,
23 ):
24 schema_validation_errors = validate_schema(schema)
25 if schema_validation_errors:
26 return ExecutionResult(data=None, errors=schema_validation_errors)
27
28 try:
29 document = parse(query)
30 except GraphQLError as error:
31 return ExecutionResult(data=None, errors=[error])
32 except Exception as error:
33 error = GraphQLError(str(error), original_error=error)
34 return ExecutionResult(data=None, errors=[error])
35
36 validation_errors = validate(schema, document)
37
38 if validation_errors:
39 return ExecutionResult(data=None, errors=validation_errors)
40
41 return graphql_excute(
42 schema,
43 parse(query),
44 middleware=[DirectivesMiddleware()],
45 variable_values=variable_values,
46 operation_name=operation_name,
47 context_value=context_value,
48 )
49
50
51 async def subscribe(
52 schema: GraphQLSchema,
53 query: str,
54 root_value: typing.Any = None,
55 context_value: typing.Any = None,
56 variable_values: typing.Dict[str, typing.Any] = None,
57 operation_name: str = None,
58 ) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]:
59 document = parse(query)
60
61 return await graphql_subscribe(
62 schema=schema,
63 document=document,
64 root_value=root_value,
65 context_value=context_value,
66 variable_values=variable_values,
67 operation_name=operation_name,
68 )
69
[end of strawberry/graphql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/graphql.py b/strawberry/graphql.py
--- a/strawberry/graphql.py
+++ b/strawberry/graphql.py
@@ -1,4 +1,5 @@
import typing
+from inspect import isawaitable
from graphql import (
ExecutionResult,
@@ -38,7 +39,7 @@
if validation_errors:
return ExecutionResult(data=None, errors=validation_errors)
- return graphql_excute(
+ result = graphql_excute(
schema,
parse(query),
middleware=[DirectivesMiddleware()],
@@ -46,6 +47,9 @@
operation_name=operation_name,
context_value=context_value,
)
+ if isawaitable(result):
+ result = await typing.cast(typing.Awaitable[ExecutionResult], result)
+ return result
async def subscribe(
| {"golden_diff": "diff --git a/strawberry/graphql.py b/strawberry/graphql.py\n--- a/strawberry/graphql.py\n+++ b/strawberry/graphql.py\n@@ -1,4 +1,5 @@\n import typing\n+from inspect import isawaitable\n \n from graphql import (\n ExecutionResult,\n@@ -38,7 +39,7 @@\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n \n- return graphql_excute(\n+ result = graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n@@ -46,6 +47,9 @@\n operation_name=operation_name,\n context_value=context_value,\n )\n+ if isawaitable(result):\n+ result = await typing.cast(typing.Awaitable[ExecutionResult], result)\n+ return result\n \n \n async def subscribe(\n", "issue": "ASGI app broken with asyncio\nOur ASGI app is currently broken with async io stuff. The implementation I made needs a bit of love :) We might have a look at how Ariadne is doing this :)\r\n\r\nHere's a test that fails:\r\n\r\n```python\r\nimport typing\r\n\r\nimport pytest\r\n\r\nimport strawberry\r\nfrom starlette.testclient import TestClient\r\nfrom strawberry.asgi import GraphQL\r\n\r\n\r\[email protected]\r\ndef async_schema():\r\n @strawberry.type\r\n class Query:\r\n @strawberry.field\r\n async def hello(self, info, name: typing.Optional[str] = None) -> str:\r\n return f\"Hello {name or 'world'}\"\r\n\r\n return strawberry.Schema(Query)\r\n\r\n\r\[email protected]\r\ndef test_client(async_schema):\r\n app = GraphQL(async_schema)\r\n\r\n return TestClient(app)\r\n\r\n\r\ndef test_simple_query(schema, test_client):\r\n response = test_client.post(\"/\", json={\"query\": \"{ hello }\"})\r\n\r\n assert response.json() == {\"data\": {\"hello\": \"Hello world\"}}\r\n```\n", "before_files": [{"content": "import typing\n\nfrom graphql import (\n ExecutionResult,\n GraphQLError,\n GraphQLSchema,\n execute as graphql_excute,\n parse,\n)\nfrom graphql.subscription import subscribe as graphql_subscribe\nfrom graphql.type import validate_schema\nfrom graphql.validation import validate\n\nfrom .middleware import DirectivesMiddleware\n\n\nasync def execute(\n schema: GraphQLSchema,\n query: str,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n):\n schema_validation_errors = validate_schema(schema)\n if schema_validation_errors:\n return ExecutionResult(data=None, errors=schema_validation_errors)\n\n try:\n document = parse(query)\n except GraphQLError as error:\n return ExecutionResult(data=None, errors=[error])\n except Exception as error:\n error = GraphQLError(str(error), original_error=error)\n return ExecutionResult(data=None, errors=[error])\n\n validation_errors = validate(schema, document)\n\n if validation_errors:\n return ExecutionResult(data=None, errors=validation_errors)\n\n return graphql_excute(\n schema,\n parse(query),\n middleware=[DirectivesMiddleware()],\n variable_values=variable_values,\n operation_name=operation_name,\n context_value=context_value,\n )\n\n\nasync def subscribe(\n schema: GraphQLSchema,\n query: str,\n root_value: typing.Any = None,\n context_value: typing.Any = None,\n variable_values: typing.Dict[str, typing.Any] = None,\n operation_name: str = None,\n) -> typing.Union[typing.AsyncIterator[ExecutionResult], ExecutionResult]:\n document = parse(query)\n\n return await graphql_subscribe(\n schema=schema,\n document=document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n", "path": 
"strawberry/graphql.py"}]} | 1,262 | 187 |
gh_patches_debug_31569 | rasdani/github-patches | git_diff | joke2k__faker-74 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Honor Environment LANG
I am currently using a wrapper for fake-factory to be able to choose the output locale, but it would be great if it became part of fake-factory core.
This is the script I have in my path: https://gist.github.com/makefu/9101269
usage:
$ LANG=de_DE.utf-8 faker address
Davide-Kaul-Weg 175
94892 Königs Wusterhausen
</issue>
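The wrapper boils down to reading the locale from the `LANG` environment variable and falling back to the default when it is not one of the available locales — roughly like this (an illustrative sketch, not the exact contents of the gist above):
```python
import os
import sys

from faker import Faker, AVAILABLE_LOCALES, DEFAULT_LOCALE

locale = os.environ.get('LANG', '').split('.')[0]
if locale not in AVAILABLE_LOCALES:
    locale = DEFAULT_LOCALE

fake = Faker(locale=locale)
# e.g. `faker address` ends up calling fake.format('address')
print(fake.format(sys.argv[1], *sys.argv[2:]))
```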
<code>
[start of faker/__main__.py]
1 from __future__ import unicode_literals
2 from __future__ import print_function
3 import sys
4
5 if sys.version < '3':
6 text_type = unicode
7 binary_type = str
8 else:
9 text_type = str
10 binary_type = bytes
11
12
13 def print_provider(doc, provider, formatters, excludes=None):
14 if excludes is None:
15 excludes = []
16
17 print()
18 print("### faker.providers.{0}".format(doc.get_provider_name(provider)))
19 print()
20
21 for signature, example in formatters.items():
22 if signature in excludes:
23 continue
24 try:
25 lines = text_type(example).expandtabs().splitlines()
26 except UnicodeEncodeError:
27 raise Exception('error on "{0}" with value "{1}"'.format(signature, example))
28 margin = max(30, doc.max_name_len+1)
29 remains = 150 - margin
30 separator = '#'
31 for line in lines:
32 for i in range(0, (len(line) // remains) + 1):
33 print("\t{fake:<{margin}}{separator} {example}".format(
34 fake=signature,
35 separator=separator,
36 example=line[i*remains:(i+1)*remains],
37 margin=margin
38 ))
39 signature = separator = ' '
40
41
42 def main(provider_or_field=None, *args):
43 from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES
44 fake = Faker(locale=DEFAULT_LOCALE)
45
46 from faker.providers import BaseProvider
47 base_provider_formatters = [f for f in dir(BaseProvider)]
48
49 if provider_or_field:
50 if '.' in provider_or_field:
51 parts = provider_or_field.split('.')
52 locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE
53 fake = Factory.create(locale, providers=[parts[-1]])
54 doc = documentor.Documentor(fake)
55 doc.already_generated = base_provider_formatters
56 print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0]))
57 else:
58 try:
59 print(fake.format(provider_or_field, *args))
60 except AttributeError:
61 print('No faker found for "{0}({1})"'.format(provider_or_field, args))
62
63 else:
64 doc = documentor.Documentor(fake)
65
66 formatters = doc.get_formatters(with_args=True, with_defaults=True)
67
68 for provider, fakers in formatters:
69
70 print_provider(doc, provider, fakers)
71
72 for lang in AVAILABLE_LOCALES:
73 if lang == DEFAULT_LOCALE:
74 continue
75 print()
76 print('## LANGUAGE {0}'.format(lang))
77 fake = Faker(locale=lang)
78 d = documentor.Documentor(fake)
79
80 for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang,
81 excludes=base_provider_formatters):
82 print_provider(d, p, fs)
83
84
85 if __name__ == "__main__":
86 if sys.stdout.encoding is None:
87 print("please set python env PYTHONIOENCODING=UTF-8, example: "
88 "export PYTHONIOENCODING=UTF-8, when write to stdout", file=sys.stderr)
89 exit(1)
90 main(*sys.argv[1:])
[end of faker/__main__.py]
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 from setuptools import setup, find_packages
5
6 here = os.path.abspath(os.path.dirname(__file__))
7 README = open(os.path.join(here, 'README.md')).read()
8 NEWS = open(os.path.join(here, 'NEWS.txt')).read()
9
10
11 version = '0.3.2'
12
13 setup(name='fake-factory',
14 version=version,
15 description="Faker is a Python package that generates fake data for you.",
16 long_description=README + '\n\n' + NEWS,
17 classifiers=[
18 # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
19 'Development Status :: 3 - Alpha',
20 'Environment :: Console',
21 'Intended Audience :: Developers',
22 'Programming Language :: Python',
23 'Programming Language :: Python :: 2',
24 'Programming Language :: Python :: 3',
25 'Topic :: Software Development :: Libraries :: Python Modules',
26 'Topic :: Software Development :: Testing',
27 'Topic :: Utilities',
28 'License :: OSI Approved :: MIT License'
29 ],
30 keywords='faker fixtures data test mock generator',
31 author='joke2k',
32 author_email='[email protected]',
33 url='http://github.com/joke2k/faker',
34 license='MIT License',
35 packages=find_packages(exclude=['*.tests']),
36 platforms=["any"],
37 test_suite='faker.tests',
38 )
39
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/__main__.py b/faker/__main__.py
--- a/faker/__main__.py
+++ b/faker/__main__.py
@@ -1,5 +1,9 @@
from __future__ import unicode_literals
from __future__ import print_function
+
+from faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE
+
+import os
import sys
if sys.version < '3':
@@ -40,7 +44,6 @@
def main(provider_or_field=None, *args):
- from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES
fake = Faker(locale=DEFAULT_LOCALE)
from faker.providers import BaseProvider
@@ -82,6 +85,28 @@
print_provider(d, p, fs)
+def command(*args):
+ try:
+ f = args[0]
+ except IndexError:
+ f = '--help'
+
+ if f in ["--help", "-h"]:
+ main()
+ sys.exit()
+
+ locale = os.environ['LANG'].split('.')[0]
+
+ if locale not in AVAILABLE_LOCALES:
+ locale = DEFAULT_LOCALE
+ fake = Faker(locale=locale)
+
+ try:
+ print(fake.format(f, *args[1:]))
+ except AttributeError:
+ print('No faker found for "{0}"'.format(f))
+
+
if __name__ == "__main__":
if sys.stdout.encoding is None:
print("please set python env PYTHONIOENCODING=UTF-8, example: "
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,7 @@
version=version,
description="Faker is a Python package that generates fake data for you.",
long_description=README + '\n\n' + NEWS,
+ scripts=['bin/faker'],
classifiers=[
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 3 - Alpha',
| {"golden_diff": "diff --git a/faker/__main__.py b/faker/__main__.py\n--- a/faker/__main__.py\n+++ b/faker/__main__.py\n@@ -1,5 +1,9 @@\n from __future__ import unicode_literals\n from __future__ import print_function\n+\n+from faker import Faker, Factory, documentor, AVAILABLE_LOCALES, DEFAULT_LOCALE\n+\n+import os\n import sys\n \n if sys.version < '3':\n@@ -40,7 +44,6 @@\n \n \n def main(provider_or_field=None, *args):\n- from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES\n fake = Faker(locale=DEFAULT_LOCALE)\n \n from faker.providers import BaseProvider\n@@ -82,6 +85,28 @@\n print_provider(d, p, fs)\n \n \n+def command(*args):\n+ try:\n+ f = args[0]\n+ except IndexError:\n+ f = '--help'\n+\n+ if f in [\"--help\", \"-h\"]:\n+ main()\n+ sys.exit()\n+\n+ locale = os.environ['LANG'].split('.')[0]\n+ \n+ if locale not in AVAILABLE_LOCALES:\n+ locale = DEFAULT_LOCALE\n+ fake = Faker(locale=locale)\n+\n+ try:\n+ print(fake.format(f, *args[1:]))\n+ except AttributeError:\n+ print('No faker found for \"{0}\"'.format(f))\n+\n+\n if __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n+ scripts=['bin/faker'],\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n", "issue": "Honor Environment LANG\nI am currently using a wrapper for fake-factory to be able to choose the output but it would be great it would become part of fake-factory core.\n\nThis is the script i have in my path: https://gist.github.com/makefu/9101269\n\nusage: \n$ LANG=de_DE.utf-8 faker address\n Davide-Kaul-Weg 175\n 94892 K\u00f6nigs Wusterhausen\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom __future__ import print_function\nimport sys\n\nif sys.version < '3':\n text_type = unicode\n binary_type = str\nelse:\n text_type = str\n binary_type = bytes\n\n\ndef print_provider(doc, provider, formatters, excludes=None):\n if excludes is None:\n excludes = []\n\n print()\n print(\"### faker.providers.{0}\".format(doc.get_provider_name(provider)))\n print()\n\n for signature, example in formatters.items():\n if signature in excludes:\n continue\n try:\n lines = text_type(example).expandtabs().splitlines()\n except UnicodeEncodeError:\n raise Exception('error on \"{0}\" with value \"{1}\"'.format(signature, example))\n margin = max(30, doc.max_name_len+1)\n remains = 150 - margin\n separator = '#'\n for line in lines:\n for i in range(0, (len(line) // remains) + 1):\n print(\"\\t{fake:<{margin}}{separator} {example}\".format(\n fake=signature,\n separator=separator,\n example=line[i*remains:(i+1)*remains],\n margin=margin\n ))\n signature = separator = ' '\n\n\ndef main(provider_or_field=None, *args):\n from faker import Faker, Factory, documentor, DEFAULT_LOCALE, AVAILABLE_LOCALES\n fake = Faker(locale=DEFAULT_LOCALE)\n\n from faker.providers import BaseProvider\n base_provider_formatters = [f for f in dir(BaseProvider)]\n\n if provider_or_field:\n if '.' 
in provider_or_field:\n parts = provider_or_field.split('.')\n locale = parts[-2] if parts[-2] in AVAILABLE_LOCALES else DEFAULT_LOCALE\n fake = Factory.create(locale, providers=[parts[-1]])\n doc = documentor.Documentor(fake)\n doc.already_generated = base_provider_formatters\n print_provider(doc, fake.get_providers()[0], doc.get_provider_formatters(fake.get_providers()[0]))\n else:\n try:\n print(fake.format(provider_or_field, *args))\n except AttributeError:\n print('No faker found for \"{0}({1})\"'.format(provider_or_field, args))\n\n else:\n doc = documentor.Documentor(fake)\n\n formatters = doc.get_formatters(with_args=True, with_defaults=True)\n\n for provider, fakers in formatters:\n\n print_provider(doc, provider, fakers)\n\n for lang in AVAILABLE_LOCALES:\n if lang == DEFAULT_LOCALE:\n continue\n print()\n print('## LANGUAGE {0}'.format(lang))\n fake = Faker(locale=lang)\n d = documentor.Documentor(fake)\n\n for p, fs in d.get_formatters(with_args=True, with_defaults=True, locale=lang,\n excludes=base_provider_formatters):\n print_provider(d, p, fs)\n\n\nif __name__ == \"__main__\":\n if sys.stdout.encoding is None:\n print(\"please set python env PYTHONIOENCODING=UTF-8, example: \"\n \"export PYTHONIOENCODING=UTF-8, when write to stdout\", file=sys.stderr)\n exit(1)\n main(*sys.argv[1:])", "path": "faker/__main__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.md')).read()\nNEWS = open(os.path.join(here, 'NEWS.txt')).read()\n\n\nversion = '0.3.2'\n\nsetup(name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n classifiers=[\n # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n )\n", "path": "setup.py"}]} | 1,891 | 464 |
gh_patches_debug_35374 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3156 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Language: support mention type in Entity.mentions.
[Currently](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/language/google/cloud/language/entity.py#L79) the mentions property of an entity is only a list of strings whereas it should be a list of objects containing the mention text and mention type.
Furthermore, this change should add mention_type information to the mention documentation.
</issue>
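Concretely, each element of `mentions` would become a small object pairing the mention's text with its mention type instead of a bare string — something along these lines (names here are illustrative, not the final API):
```python
class Mention(object):
    """One mention of an entity: the text plus the mention type."""

    def __init__(self, text, mention_type):
        self.text = text                   # e.g. 'Rome'
        self.mention_type = mention_type   # e.g. 'PROPER' or 'COMMON'

    @classmethod
    def from_api_repr(cls, payload):
        # payload looks like:
        # {'text': {'content': 'Rome', 'beginOffset': 5}, 'type': 'PROPER'}
        return cls(payload['text']['content'], payload['type'])
```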
<code>
[start of language/google/cloud/language/entity.py]
1 # Copyright 2016-2017 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Definition for Google Cloud Natural Language API entities.
16
17 An entity is used to describe a proper name extracted from text.
18 """
19
20
21 class EntityType(object):
22 """List of possible entity types."""
23
24 UNKNOWN = 'UNKNOWN'
25 """Unknown entity type."""
26
27 PERSON = 'PERSON'
28 """Person entity type."""
29
30 LOCATION = 'LOCATION'
31 """Location entity type."""
32
33 ORGANIZATION = 'ORGANIZATION'
34 """Organization entity type."""
35
36 EVENT = 'EVENT'
37 """Event entity type."""
38
39 WORK_OF_ART = 'WORK_OF_ART'
40 """Work of art entity type."""
41
42 CONSUMER_GOOD = 'CONSUMER_GOOD'
43 """Consumer good entity type."""
44
45 OTHER = 'OTHER'
46 """Other entity type (i.e. known but not classified)."""
47
48
49 class Entity(object):
50 """A Google Cloud Natural Language API entity.
51
52 Represents a phrase in text that is a known entity, such as a person,
53 an organization, or location. The API associates information, such as
54 salience and mentions, with entities.
55
56 .. _Entity message: https://cloud.google.com/natural-language/\
57 reference/rest/v1/Entity
58 .. _EntityType enum: https://cloud.google.com/natural-language/\
59 reference/rest/v1/Entity#Type
60
61 See `Entity message`_.
62
63 :type name: str
64 :param name: The name / phrase identified as the entity.
65
66 :type entity_type: str
67 :param entity_type: The type of the entity. See `EntityType enum`_.
68
69 :type metadata: dict
70 :param metadata: The metadata associated with the entity.
71 Wikipedia URLs and Knowledge Graph MIDs are
72 provided, if available. The associated keys are
73 "wikipedia_url" and "mid", respectively.
74
75 :type salience: float
76 :param salience: The prominence of the entity / phrase within the text
77 containing it.
78
79 :type mentions: list
80 :param mentions: List of strings that mention the entity.
81 """
82
83 def __init__(self, name, entity_type, metadata, salience, mentions):
84 self.name = name
85 self.entity_type = entity_type
86 self.metadata = metadata
87 self.salience = salience
88 self.mentions = mentions
89
90 @classmethod
91 def from_api_repr(cls, payload):
92 """Convert an Entity from the JSON API into an :class:`Entity`.
93
94 :param payload: dict
95 :type payload: The value from the backend.
96
97 :rtype: :class:`Entity`
98 :returns: The entity parsed from the API representation.
99 """
100 name = payload['name']
101 entity_type = payload['type']
102 metadata = payload['metadata']
103 salience = payload['salience']
104 mentions = [value['text']['content']
105 for value in payload['mentions']]
106 return cls(name, entity_type, metadata, salience, mentions)
107
[end of language/google/cloud/language/entity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/language/google/cloud/language/entity.py b/language/google/cloud/language/entity.py
--- a/language/google/cloud/language/entity.py
+++ b/language/google/cloud/language/entity.py
@@ -46,6 +46,80 @@
"""Other entity type (i.e. known but not classified)."""
+class MentionType(object):
+ """List of possible mention types."""
+
+ TYPE_UNKNOWN = 'TYPE_UNKNOWN'
+ """Unknown mention type"""
+
+ PROPER = 'PROPER'
+ """Proper name"""
+
+ COMMON = 'COMMON'
+ """Common noun (or noun compound)"""
+
+
+class Mention(object):
+ """A Google Cloud Natural Language API mention.
+
+ Represents a mention for an entity in the text. Currently, proper noun
+ mentions are supported.
+ """
+ def __init__(self, text, mention_type):
+ self.text = text
+ self.mention_type = mention_type
+
+ def __str__(self):
+ return str(self.text)
+
+ @classmethod
+ def from_api_repr(cls, payload):
+ """Convert a Mention from the JSON API into an :class:`Mention`.
+
+ :param payload: dict
+ :type payload: The value from the backend.
+
+ :rtype: :class:`Mention`
+ :returns: The mention parsed from the API representation.
+ """
+ text = TextSpan.from_api_repr(payload['text'])
+ mention_type = payload['type']
+ return cls(text, mention_type)
+
+
+class TextSpan(object):
+ """A span of text from Google Cloud Natural Language API.
+
+ Represents a word or phrase of text, as well as its offset
+ from the original document.
+ """
+ def __init__(self, content, begin_offset):
+ self.content = content
+ self.begin_offset = begin_offset
+
+ def __str__(self):
+ """Return the string representation of this TextSpan.
+
+ :rtype: str
+ :returns: The text content
+ """
+ return self.content
+
+ @classmethod
+ def from_api_repr(cls, payload):
+ """Convert a TextSpan from the JSON API into an :class:`TextSpan`.
+
+ :param payload: dict
+ :type payload: The value from the backend.
+
+ :rtype: :class:`TextSpan`
+ :returns: The text span parsed from the API representation.
+ """
+ content = payload['content']
+ begin_offset = payload['beginOffset']
+ return cls(content=content, begin_offset=begin_offset)
+
+
class Entity(object):
"""A Google Cloud Natural Language API entity.
@@ -101,6 +175,5 @@
entity_type = payload['type']
metadata = payload['metadata']
salience = payload['salience']
- mentions = [value['text']['content']
- for value in payload['mentions']]
+ mentions = [Mention.from_api_repr(val) for val in payload['mentions']]
return cls(name, entity_type, metadata, salience, mentions)
| {"golden_diff": "diff --git a/language/google/cloud/language/entity.py b/language/google/cloud/language/entity.py\n--- a/language/google/cloud/language/entity.py\n+++ b/language/google/cloud/language/entity.py\n@@ -46,6 +46,80 @@\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n \n \n+class MentionType(object):\n+ \"\"\"List of possible mention types.\"\"\"\n+\n+ TYPE_UNKNOWN = 'TYPE_UNKNOWN'\n+ \"\"\"Unknown mention type\"\"\"\n+\n+ PROPER = 'PROPER'\n+ \"\"\"Proper name\"\"\"\n+\n+ COMMON = 'COMMON'\n+ \"\"\"Common noun (or noun compound)\"\"\"\n+\n+\n+class Mention(object):\n+ \"\"\"A Google Cloud Natural Language API mention.\n+\n+ Represents a mention for an entity in the text. Currently, proper noun\n+ mentions are supported.\n+ \"\"\"\n+ def __init__(self, text, mention_type):\n+ self.text = text\n+ self.mention_type = mention_type\n+\n+ def __str__(self):\n+ return str(self.text)\n+\n+ @classmethod\n+ def from_api_repr(cls, payload):\n+ \"\"\"Convert a Mention from the JSON API into an :class:`Mention`.\n+\n+ :param payload: dict\n+ :type payload: The value from the backend.\n+\n+ :rtype: :class:`Mention`\n+ :returns: The mention parsed from the API representation.\n+ \"\"\"\n+ text = TextSpan.from_api_repr(payload['text'])\n+ mention_type = payload['type']\n+ return cls(text, mention_type)\n+\n+\n+class TextSpan(object):\n+ \"\"\"A span of text from Google Cloud Natural Language API.\n+\n+ Represents a word or phrase of text, as well as its offset\n+ from the original document.\n+ \"\"\"\n+ def __init__(self, content, begin_offset):\n+ self.content = content\n+ self.begin_offset = begin_offset\n+\n+ def __str__(self):\n+ \"\"\"Return the string representation of this TextSpan.\n+\n+ :rtype: str\n+ :returns: The text content\n+ \"\"\"\n+ return self.content\n+\n+ @classmethod\n+ def from_api_repr(cls, payload):\n+ \"\"\"Convert a TextSpan from the JSON API into an :class:`TextSpan`.\n+\n+ :param payload: dict\n+ :type payload: The value from the backend.\n+\n+ :rtype: :class:`TextSpan`\n+ :returns: The text span parsed from the API representation.\n+ \"\"\"\n+ content = payload['content']\n+ begin_offset = payload['beginOffset']\n+ return cls(content=content, begin_offset=begin_offset)\n+\n+\n class Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n \n@@ -101,6 +175,5 @@\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n- mentions = [value['text']['content']\n- for value in payload['mentions']]\n+ mentions = [Mention.from_api_repr(val) for val in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "issue": "Language: support mention type in Entity.mentions.\n[Currently](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/language/google/cloud/language/entity.py#L79) the mentions property of an entity is only a list of strings whereas it should be a list of objects containing the mention text and mention type.\r\n\r\nFurthermore, this change should add mention_type information to the mention documentation.\n", "before_files": [{"content": "# Copyright 2016-2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" 
BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Definition for Google Cloud Natural Language API entities.\n\nAn entity is used to describe a proper name extracted from text.\n\"\"\"\n\n\nclass EntityType(object):\n \"\"\"List of possible entity types.\"\"\"\n\n UNKNOWN = 'UNKNOWN'\n \"\"\"Unknown entity type.\"\"\"\n\n PERSON = 'PERSON'\n \"\"\"Person entity type.\"\"\"\n\n LOCATION = 'LOCATION'\n \"\"\"Location entity type.\"\"\"\n\n ORGANIZATION = 'ORGANIZATION'\n \"\"\"Organization entity type.\"\"\"\n\n EVENT = 'EVENT'\n \"\"\"Event entity type.\"\"\"\n\n WORK_OF_ART = 'WORK_OF_ART'\n \"\"\"Work of art entity type.\"\"\"\n\n CONSUMER_GOOD = 'CONSUMER_GOOD'\n \"\"\"Consumer good entity type.\"\"\"\n\n OTHER = 'OTHER'\n \"\"\"Other entity type (i.e. known but not classified).\"\"\"\n\n\nclass Entity(object):\n \"\"\"A Google Cloud Natural Language API entity.\n\n Represents a phrase in text that is a known entity, such as a person,\n an organization, or location. The API associates information, such as\n salience and mentions, with entities.\n\n .. _Entity message: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity\n .. _EntityType enum: https://cloud.google.com/natural-language/\\\n reference/rest/v1/Entity#Type\n\n See `Entity message`_.\n\n :type name: str\n :param name: The name / phrase identified as the entity.\n\n :type entity_type: str\n :param entity_type: The type of the entity. See `EntityType enum`_.\n\n :type metadata: dict\n :param metadata: The metadata associated with the entity.\n Wikipedia URLs and Knowledge Graph MIDs are\n provided, if available. The associated keys are\n \"wikipedia_url\" and \"mid\", respectively.\n\n :type salience: float\n :param salience: The prominence of the entity / phrase within the text\n containing it.\n\n :type mentions: list\n :param mentions: List of strings that mention the entity.\n \"\"\"\n\n def __init__(self, name, entity_type, metadata, salience, mentions):\n self.name = name\n self.entity_type = entity_type\n self.metadata = metadata\n self.salience = salience\n self.mentions = mentions\n\n @classmethod\n def from_api_repr(cls, payload):\n \"\"\"Convert an Entity from the JSON API into an :class:`Entity`.\n\n :param payload: dict\n :type payload: The value from the backend.\n\n :rtype: :class:`Entity`\n :returns: The entity parsed from the API representation.\n \"\"\"\n name = payload['name']\n entity_type = payload['type']\n metadata = payload['metadata']\n salience = payload['salience']\n mentions = [value['text']['content']\n for value in payload['mentions']]\n return cls(name, entity_type, metadata, salience, mentions)\n", "path": "language/google/cloud/language/entity.py"}]} | 1,598 | 689 |
gh_patches_debug_18622 | rasdani/github-patches | git_diff | translate__pootle-6643 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Traceback with migration pootle_fs.0002_convert_localfs
I hit this traceback when running the new migrations in my local DB, specifically `pootle_fs.0002_convert_localfs`:
```pytb
Running migrations:
Applying pootle_fs.0002_convert_localfs...Traceback (most recent call last):
File "manage.py", line 24, in <module>
execute_from_command_line()
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 367, in execute_from_command_line
utility.execute()
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 359, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py", line 294, in run_from_argv
self.execute(*args, **cmd_options)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py", line 345, in execute
output = self.handle(*args, **options)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/commands/migrate.py", line 204, in handle
fake_initial=fake_initial,
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py", line 115, in migrate
state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py", line 145, in _migrate_all_forwards
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py", line 244, in apply_migration
state = migration.apply(state, schema_editor)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/migration.py", line 126, in apply
operation.database_forwards(self.app_label, schema_editor, old_state, project_state)
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/operations/special.py", line 189, in database_forwards
self.code(from_state.apps, schema_editor)
File "/home/leo/Escritorio/core_pootle/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py", line 118, in convert_to_localfs
filepath = store.file.path[len(proj_trans_path):]
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py", line 63, in _get_path
self._require_file()
File "/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py", line 46, in _require_file
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
ValueError: The 'file' attribute has no file associated with it.
```
</issue>
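The error comes from Django's `FieldFile`: the migration reads `store.file.path` for stores whose `file` field is empty, and Django raises `ValueError` for a file field with no file associated. A guard along these lines would sidestep it (purely illustrative, not necessarily the project's actual fix):
```python
def relative_store_path(store, proj_trans_path):
    # Stores with an empty ``file`` field make ``.path`` raise
    # ValueError ("has no file associated with it"), so skip those.
    if not store.file:
        return None
    return store.file.path[len(proj_trans_path):]
```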
<code>
[start of pootle/apps/pootle_store/migrations/0033_remove_store_file.py]
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.10.7 on 2017-08-26 14:01
3 from __future__ import unicode_literals
4
5 from django.db import migrations
6
7
8 class Migration(migrations.Migration):
9
10 dependencies = [
11 ('pootle_store', '0032_fix_empty_wordcounts'),
12 ]
13
14 operations = [
15 migrations.RemoveField(
16 model_name='store',
17 name='file',
18 ),
19 ]
20
[end of pootle/apps/pootle_store/migrations/0033_remove_store_file.py]
[start of pootle/apps/pootle_fs/migrations/0002_convert_localfs.py]
1 # -*- coding: utf-8 -*-
2 # Generated by Django 1.10.7 on 2017-08-17 09:16
3 from __future__ import unicode_literals
4
5 import logging
6 import os
7 from functools import partial
8
9 import dirsync
10
11 from django.conf import settings
12 from django.db import migrations
13
14 from translate.lang.data import langcode_re
15
16
17 def _file_belongs_to_project(project, filename):
18 ext = os.path.splitext(filename)[1][1:]
19 filetype_extensions = list(
20 project.filetypes.values_list(
21 "extension__name", flat=True))
22 template_extensions = list(
23 project.filetypes.values_list(
24 "template_extension__name", flat=True))
25 return (
26 ext in filetype_extensions
27 or (ext in template_extensions))
28
29
30 def _detect_treestyle_and_path(project, proj_trans_path):
31 dirlisting = os.walk(proj_trans_path)
32 dirpath_, dirnames, filenames = dirlisting.next()
33
34 if not dirnames:
35 # No subdirectories
36 if filter(partial(_file_belongs_to_project, project), filenames):
37 # Translation files found, assume gnu
38 return "gnu", ""
39
40 # There are subdirectories
41 has_subdirs = filter(
42 (lambda dirname: dirname == 'templates'
43 or langcode_re.match(dirname)),
44 dirnames)
45 if has_subdirs:
46 return "nongnu", None
47
48 # No language subdirs found, look for any translation file
49 # in subdirs
50 for dirpath_, dirnames, filenames in os.walk(proj_trans_path):
51 if filter(partial(_file_belongs_to_project, project), filenames):
52 return "gnu", dirpath_.replace(proj_trans_path, "")
53 # Unsure
54 return "nongnu", None
55
56
57 def _get_translation_mapping(project):
58 old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY
59 proj_trans_path = os.path.join(old_translation_path, project.code)
60 old_treestyle, old_path = (
61 _detect_treestyle_and_path(project, proj_trans_path)
62 if project.treestyle == "auto"
63 else (project.treestyle, None))
64 project.treestyle = "pootle_fs"
65 if old_treestyle == "nongnu":
66 return "/<language_code>/<dir_path>/<filename>.<ext>"
67 else:
68 return "%s/<language_code>.<ext>" % (old_path)
69
70
71 def _set_project_config(Config, project_ct, project):
72 old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY
73 proj_trans_path = os.path.join(old_translation_path, project.code)
74 configs = Config.objects.filter(
75 content_type=project_ct,
76 object_pk=project.pk)
77 configs.delete()
78 Config.objects.update_or_create(
79 content_type=project_ct,
80 object_pk=project.pk,
81 key="pootle_fs.fs_url",
82 defaults=dict(
83 value=proj_trans_path))
84 Config.objects.update_or_create(
85 content_type=project_ct,
86 object_pk=project.pk,
87 key="pootle_fs.fs_type",
88 defaults=dict(
89 value="localfs"))
90 Config.objects.update_or_create(
91 content_type=project_ct,
92 object_pk=project.pk,
93 key="pootle_fs.translation_mappings",
94 defaults=dict(
95 value=dict(default=_get_translation_mapping(project))))
96
97
98 def convert_to_localfs(apps, schema_editor):
99 Project = apps.get_model("pootle_project.Project")
100 Store = apps.get_model("pootle_store.Store")
101 StoreFS = apps.get_model("pootle_fs.StoreFS")
102 Config = apps.get_model("pootle_config.Config")
103 ContentType = apps.get_model("contenttypes.ContentType")
104 project_ct = ContentType.objects.get_for_model(Project)
105 old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY
106
107 for project in Project.objects.exclude(treestyle="pootle_fs"):
108 proj_trans_path = os.path.join(old_translation_path, project.code)
109 proj_stores = Store.objects.filter(
110 translation_project__project=project)
111 _set_project_config(Config, project_ct, project)
112 project.treestyle = "pootle_fs"
113 project.save()
114 store_fs = StoreFS.objects.filter(
115 store__translation_project__project=project)
116 store_fs.delete()
117 for store in proj_stores:
118 filepath = store.file.path[len(proj_trans_path):]
119 StoreFS.objects.update_or_create(
120 project=project,
121 store=store,
122 defaults=dict(
123 path=filepath,
124 pootle_path=store.pootle_path,
125 last_sync_revision=store.last_sync_revision,
126 last_sync_mtime=store.file_mtime))
127 fs_temp = os.path.join(
128 settings.POOTLE_FS_WORKING_PATH, project.code)
129 dirsync.sync(
130 proj_trans_path,
131 fs_temp,
132 "sync",
133 create=True,
134 purge=True,
135 logger=logging.getLogger(dirsync.__name__))
136
137
138 class Migration(migrations.Migration):
139
140 dependencies = [
141 ('contenttypes', '0002_remove_content_type_name'),
142 ('pootle_fs', '0001_initial'),
143 ('pootle_format', '0003_remove_extra_indeces'),
144 ('pootle_config', '0001_initial'),
145 ('pootle_store', '0013_set_store_filetype_again'),
146 ('pootle_project', '0016_change_treestyle_choices_label'),
147 ]
148
149 operations = [
150 migrations.RunPython(convert_to_localfs),
151 ]
152
[end of pootle/apps/pootle_fs/migrations/0002_convert_localfs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py
--- a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py
+++ b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py
@@ -107,7 +107,7 @@
for project in Project.objects.exclude(treestyle="pootle_fs"):
proj_trans_path = os.path.join(old_translation_path, project.code)
proj_stores = Store.objects.filter(
- translation_project__project=project)
+ translation_project__project=project).exclude(file="")
_set_project_config(Config, project_ct, project)
project.treestyle = "pootle_fs"
project.save()
diff --git a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py
--- a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py
+++ b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py
@@ -8,6 +8,7 @@
class Migration(migrations.Migration):
dependencies = [
+ ('pootle_fs', '0002_convert_localfs'),
('pootle_store', '0032_fix_empty_wordcounts'),
]
| {"golden_diff": "diff --git a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\n--- a/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\n+++ b/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\n@@ -107,7 +107,7 @@\n for project in Project.objects.exclude(treestyle=\"pootle_fs\"):\n proj_trans_path = os.path.join(old_translation_path, project.code)\n proj_stores = Store.objects.filter(\n- translation_project__project=project)\n+ translation_project__project=project).exclude(file=\"\")\n _set_project_config(Config, project_ct, project)\n project.treestyle = \"pootle_fs\"\n project.save()\ndiff --git a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py\n--- a/pootle/apps/pootle_store/migrations/0033_remove_store_file.py\n+++ b/pootle/apps/pootle_store/migrations/0033_remove_store_file.py\n@@ -8,6 +8,7 @@\n class Migration(migrations.Migration):\n \n dependencies = [\n+ ('pootle_fs', '0002_convert_localfs'),\n ('pootle_store', '0032_fix_empty_wordcounts'),\n ]\n", "issue": "Traceback with migration pootle_fs.0002_convert_localfs\nI hit this traceback when running the new migrations in my local DB, specifically `pootle_fs.0002_convert_localfs`:\r\n\r\n\r\n```pytb\r\nRunning migrations:\r\n Applying pootle_fs.0002_convert_localfs...Traceback (most recent call last):\r\n File \"manage.py\", line 24, in <module>\r\n execute_from_command_line()\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 367, in execute_from_command_line\r\n utility.execute()\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 359, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py\", line 294, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/base.py\", line 345, in execute\r\n output = self.handle(*args, **options)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/core/management/commands/migrate.py\", line 204, in handle\r\n fake_initial=fake_initial,\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py\", line 115, in migrate\r\n state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py\", line 145, in _migrate_all_forwards\r\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/executor.py\", line 244, in apply_migration\r\n state = migration.apply(state, schema_editor)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/migration.py\", line 126, in apply\r\n operation.database_forwards(self.app_label, schema_editor, old_state, project_state)\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/migrations/operations/special.py\", line 189, in database_forwards\r\n self.code(from_state.apps, schema_editor)\r\n File \"/home/leo/Escritorio/core_pootle/pootle/apps/pootle_fs/migrations/0002_convert_localfs.py\", 
line 118, in convert_to_localfs\r\n filepath = store.file.path[len(proj_trans_path):]\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py\", line 63, in _get_path\r\n self._require_file()\r\n File \"/home/leo/.virtualenvs/ptl/local/lib/python2.7/site-packages/django/db/models/fields/files.py\", line 46, in _require_file\r\n raise ValueError(\"The '%s' attribute has no file associated with it.\" % self.field.name)\r\nValueError: The 'file' attribute has no file associated with it.\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.7 on 2017-08-26 14:01\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pootle_store', '0032_fix_empty_wordcounts'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='store',\n name='file',\n ),\n ]\n", "path": "pootle/apps/pootle_store/migrations/0033_remove_store_file.py"}, {"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.7 on 2017-08-17 09:16\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nfrom functools import partial\n\nimport dirsync\n\nfrom django.conf import settings\nfrom django.db import migrations\n\nfrom translate.lang.data import langcode_re\n\n\ndef _file_belongs_to_project(project, filename):\n ext = os.path.splitext(filename)[1][1:]\n filetype_extensions = list(\n project.filetypes.values_list(\n \"extension__name\", flat=True))\n template_extensions = list(\n project.filetypes.values_list(\n \"template_extension__name\", flat=True))\n return (\n ext in filetype_extensions\n or (ext in template_extensions))\n\n\ndef _detect_treestyle_and_path(project, proj_trans_path):\n dirlisting = os.walk(proj_trans_path)\n dirpath_, dirnames, filenames = dirlisting.next()\n\n if not dirnames:\n # No subdirectories\n if filter(partial(_file_belongs_to_project, project), filenames):\n # Translation files found, assume gnu\n return \"gnu\", \"\"\n\n # There are subdirectories\n has_subdirs = filter(\n (lambda dirname: dirname == 'templates'\n or langcode_re.match(dirname)),\n dirnames)\n if has_subdirs:\n return \"nongnu\", None\n\n # No language subdirs found, look for any translation file\n # in subdirs\n for dirpath_, dirnames, filenames in os.walk(proj_trans_path):\n if filter(partial(_file_belongs_to_project, project), filenames):\n return \"gnu\", dirpath_.replace(proj_trans_path, \"\")\n # Unsure\n return \"nongnu\", None\n\n\ndef _get_translation_mapping(project):\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n proj_trans_path = os.path.join(old_translation_path, project.code)\n old_treestyle, old_path = (\n _detect_treestyle_and_path(project, proj_trans_path)\n if project.treestyle == \"auto\"\n else (project.treestyle, None))\n project.treestyle = \"pootle_fs\"\n if old_treestyle == \"nongnu\":\n return \"/<language_code>/<dir_path>/<filename>.<ext>\"\n else:\n return \"%s/<language_code>.<ext>\" % (old_path)\n\n\ndef _set_project_config(Config, project_ct, project):\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n proj_trans_path = os.path.join(old_translation_path, project.code)\n configs = Config.objects.filter(\n content_type=project_ct,\n object_pk=project.pk)\n configs.delete()\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.fs_url\",\n defaults=dict(\n value=proj_trans_path))\n Config.objects.update_or_create(\n 
content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.fs_type\",\n defaults=dict(\n value=\"localfs\"))\n Config.objects.update_or_create(\n content_type=project_ct,\n object_pk=project.pk,\n key=\"pootle_fs.translation_mappings\",\n defaults=dict(\n value=dict(default=_get_translation_mapping(project))))\n\n\ndef convert_to_localfs(apps, schema_editor):\n Project = apps.get_model(\"pootle_project.Project\")\n Store = apps.get_model(\"pootle_store.Store\")\n StoreFS = apps.get_model(\"pootle_fs.StoreFS\")\n Config = apps.get_model(\"pootle_config.Config\")\n ContentType = apps.get_model(\"contenttypes.ContentType\")\n project_ct = ContentType.objects.get_for_model(Project)\n old_translation_path = settings.POOTLE_TRANSLATION_DIRECTORY\n\n for project in Project.objects.exclude(treestyle=\"pootle_fs\"):\n proj_trans_path = os.path.join(old_translation_path, project.code)\n proj_stores = Store.objects.filter(\n translation_project__project=project)\n _set_project_config(Config, project_ct, project)\n project.treestyle = \"pootle_fs\"\n project.save()\n store_fs = StoreFS.objects.filter(\n store__translation_project__project=project)\n store_fs.delete()\n for store in proj_stores:\n filepath = store.file.path[len(proj_trans_path):]\n StoreFS.objects.update_or_create(\n project=project,\n store=store,\n defaults=dict(\n path=filepath,\n pootle_path=store.pootle_path,\n last_sync_revision=store.last_sync_revision,\n last_sync_mtime=store.file_mtime))\n fs_temp = os.path.join(\n settings.POOTLE_FS_WORKING_PATH, project.code)\n dirsync.sync(\n proj_trans_path,\n fs_temp,\n \"sync\",\n create=True,\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n ('pootle_fs', '0001_initial'),\n ('pootle_format', '0003_remove_extra_indeces'),\n ('pootle_config', '0001_initial'),\n ('pootle_store', '0013_set_store_filetype_again'),\n ('pootle_project', '0016_change_treestyle_choices_label'),\n ]\n\n operations = [\n migrations.RunPython(convert_to_localfs),\n ]\n", "path": "pootle/apps/pootle_fs/migrations/0002_convert_localfs.py"}]} | 3,105 | 337 |
gh_patches_debug_1416 | rasdani/github-patches | git_diff | frappe__frappe-4871 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`Insufficient Permission for Chat Profile` on load
Use Case:
1. Create New User without System Manager Role
2. Log-in to System
3. An error pops up when the system tries to create the Chat Profile.
I tried replicating it with a user that once had the System Manager role, and the error did not pop up. That was because the user already had a Chat Profile.
Here is the log. Hope this helps!

Any help is greatly appreciated. I will also try to create an issue for this here in discuss.
Thanks,
Dori
</issue>
<code>
[start of frappe/chat/doctype/chat_profile/chat_profile.py]
1 # imports - module imports
2 from frappe.model.document import Document
3 from frappe import _
4 import frappe
5
6 # imports - frappe module imports
7 from frappe.core.doctype.version.version import get_diff
8 from frappe.chat.doctype.chat_room import chat_room
9 from frappe.chat.util import (
10 safe_json_loads,
11 filter_dict,
12 dictify
13 )
14
15 session = frappe.session
16
17 class ChatProfile(Document):
18 def before_save(self):
19 if not self.is_new():
20 self.get_doc_before_save()
21
22 def on_update(self):
23 if not self.is_new():
24 b, a = self.get_doc_before_save(), self
25 diff = dictify(get_diff(a, b))
26 if diff:
27 user = session.user
28
29 fields = [changed[0] for changed in diff.changed]
30
31 if 'status' in fields:
32 rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])
33 update = dict(user = user, data = dict(status = self.status))
34
35 for room in rooms:
36 frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)
37
38 if 'enable_chat' in fields:
39 update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))
40 frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)
41
42 def authenticate(user):
43 if user != session.user:
44 frappe.throw(_("Sorry, you're not authorized."))
45
46 @frappe.whitelist()
47 def get(user, fields = None):
48 duser = frappe.get_doc('User', user)
49 dprof = frappe.get_doc('Chat Profile', user)
50
51 # If you're adding something here, make sure the client recieves it.
52 profile = dict(
53 # User
54 name = duser.name,
55 email = duser.email,
56 first_name = duser.first_name,
57 last_name = duser.last_name,
58 username = duser.username,
59 avatar = duser.user_image,
60 bio = duser.bio,
61 # Chat Profile
62 status = dprof.status,
63 chat_background = dprof.chat_background,
64 message_preview = bool(dprof.message_preview),
65 notification_tones = bool(dprof.notification_tones),
66 conversation_tones = bool(dprof.conversation_tones),
67 enable_chat = bool(dprof.enable_chat)
68 )
69 profile = filter_dict(profile, fields)
70
71 return dictify(profile)
72
73 @frappe.whitelist()
74 def create(user, exists_ok = False, fields = None):
75 authenticate(user)
76
77 exists_ok, fields = safe_json_loads(exists_ok, fields)
78
79 if frappe.db.exists('Chat Profile', user):
80 if not exists_ok:
81 frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))
82 else:
83 dprof = frappe.new_doc('Chat Profile')
84 dprof.user = user
85 dprof.save()
86
87 profile = get(user, fields = fields)
88
89 return profile
90
91 @frappe.whitelist()
92 def update(user, data):
93 authenticate(user)
94
95 data = safe_json_loads(data)
96
97 dprof = frappe.get_doc('Chat Profile', user)
98 dprof.update(data)
99 dprof.save(ignore_permissions = True)
[end of frappe/chat/doctype/chat_profile/chat_profile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py
--- a/frappe/chat/doctype/chat_profile/chat_profile.py
+++ b/frappe/chat/doctype/chat_profile/chat_profile.py
@@ -82,7 +82,7 @@
else:
dprof = frappe.new_doc('Chat Profile')
dprof.user = user
- dprof.save()
+ dprof.save(ignore_permissions = True)
profile = get(user, fields = fields)
| {"golden_diff": "diff --git a/frappe/chat/doctype/chat_profile/chat_profile.py b/frappe/chat/doctype/chat_profile/chat_profile.py\n--- a/frappe/chat/doctype/chat_profile/chat_profile.py\n+++ b/frappe/chat/doctype/chat_profile/chat_profile.py\n@@ -82,7 +82,7 @@\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n- dprof.save()\n+ dprof.save(ignore_permissions = True)\n \n profile = get(user, fields = fields)\n", "issue": "`Insufficient Permission for Chat Profile` on load\nUse Case:\r\n\r\n1. Create New User without System Manager Role\r\n2. Log-in to System\r\n3. Error Pops-up when system is trying to create Chat Profile.\r\n\r\nI tried replicating it to a user that was once has a System Manager role and it was not popping up. It was because the user has already Chat Profile.\r\n\r\nHere is the log. Hope this helps!\r\n\r\n\r\n\r\n\r\nAny help is greatly appreciated. I will also try to create an issue for this here in discuss.\r\n\r\nThanks,\r\n\r\nDori\n", "before_files": [{"content": "# imports - module imports\nfrom frappe.model.document import Document\nfrom frappe import _\nimport frappe\n\n# imports - frappe module imports\nfrom frappe.core.doctype.version.version import get_diff\nfrom frappe.chat.doctype.chat_room import chat_room\nfrom frappe.chat.util import (\n safe_json_loads,\n filter_dict,\n dictify\n)\n\nsession = frappe.session\n\nclass ChatProfile(Document):\n def before_save(self):\n if not self.is_new():\n self.get_doc_before_save()\n\n def on_update(self):\n if not self.is_new():\n b, a = self.get_doc_before_save(), self\n diff = dictify(get_diff(a, b))\n if diff:\n user = session.user\n\n fields = [changed[0] for changed in diff.changed]\n\n if 'status' in fields:\n rooms = chat_room.get(user, filters = ['Chat Room', 'type', '=', 'Direct'])\n update = dict(user = user, data = dict(status = self.status))\n\n for room in rooms:\n frappe.publish_realtime('frappe.chat.profile:update', update, room = room.name, after_commit = True)\n\n if 'enable_chat' in fields:\n update = dict(user = user, data = dict(enable_chat = bool(self.enable_chat)))\n frappe.publish_realtime('frappe.chat.profile:update', update, user = user, after_commit = True)\n\ndef authenticate(user):\n if user != session.user:\n frappe.throw(_(\"Sorry, you're not authorized.\"))\n\[email protected]()\ndef get(user, fields = None):\n duser = frappe.get_doc('User', user)\n dprof = frappe.get_doc('Chat Profile', user)\n\n # If you're adding something here, make sure the client recieves it.\n profile = dict(\n # User\n name = duser.name,\n email = duser.email,\n first_name = duser.first_name,\n last_name = duser.last_name,\n username = duser.username,\n avatar = duser.user_image,\n bio = duser.bio,\n # Chat Profile\n status = dprof.status,\n chat_background = dprof.chat_background,\n message_preview = bool(dprof.message_preview),\n notification_tones = bool(dprof.notification_tones),\n conversation_tones = bool(dprof.conversation_tones),\n enable_chat = bool(dprof.enable_chat)\n )\n profile = filter_dict(profile, fields)\n\n return dictify(profile)\n\[email protected]()\ndef create(user, exists_ok = False, fields = None):\n authenticate(user)\n\n exists_ok, fields = safe_json_loads(exists_ok, fields)\n\n if frappe.db.exists('Chat Profile', user):\n if not exists_ok:\n frappe.throw(_('Chat Profile for User {user} exists.'.format(user = user)))\n else:\n dprof = frappe.new_doc('Chat Profile')\n dprof.user = user\n dprof.save()\n\n profile = get(user, fields = fields)\n\n return profile\n\[email 
protected]()\ndef update(user, data):\n authenticate(user)\n\n data = safe_json_loads(data)\n\n dprof = frappe.get_doc('Chat Profile', user)\n dprof.update(data)\n dprof.save(ignore_permissions = True)", "path": "frappe/chat/doctype/chat_profile/chat_profile.py"}]} | 1,648 | 114 |
gh_patches_debug_23168 | rasdani/github-patches | git_diff | feast-dev__feast-983 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Go SDK: go get by Version Tag Does Not Work
## Problem & Expected Behaviour
`go get` to install the Feast Go SDK should work by version tag:
```
go get github.com/feast-dev/feast/sdk/[email protected]
```
Returns:
```
go get github.com/feast-dev/feast/sdk/[email protected]: module github.com/feast-dev/[email protected] found, but does not contain package github.com/feast-dev/feast/sdk/go
```
Instead of installing the Go SDK at release version tag v0.5.1.
## Proposed Solution
- Update [setup.py](https://github.com/feast-dev/feast/blob/89883d418b4935d595585689d63d246ad133cabe/sdk/python/setup.py#L86) to support non-semver tags (i.e. `sdk/go/v0.5.1`); a sketch of this follows below.
- Introduce an additional tag `sdk/go/v<VERSION>` on each release. This tag will allow us to pull the Go module by a versioned tag. [Reference](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories)
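A rough sketch of the first bullet, assuming `setuptools_scm`'s `tag_regex` option is used so that tags with a path-style prefix such as `sdk/go/` still parse as versions; the exact pattern shown is illustrative:

```python
# Sketch: allow setuptools_scm to parse tags like "sdk/go/v0.5.1" as version v0.5.1.
import re

from setuptools import setup

TAG_REGEX = re.compile(
    r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
)

setup(
    # ... existing arguments unchanged ...
    use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
    setup_requires=["setuptools_scm"],
)
```

With a pattern like this in place, tagging each release as `sdk/go/v<VERSION>` should let the Go module be fetched by tag without breaking the Python package's version detection.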
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2019 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import subprocess
17
18 from setuptools import find_packages, setup
19
20 NAME = "feast"
21 DESCRIPTION = "Python SDK for Feast"
22 URL = "https://github.com/feast-dev/feast"
23 AUTHOR = "Feast"
24 REQUIRES_PYTHON = ">=3.6.0"
25
26 REQUIRED = [
27 "Click==7.*",
28 "google-api-core==1.20.*",
29 "google-auth<2.0dev,>=1.14.0",
30 "google-cloud-bigquery==1.18.*",
31 "google-cloud-storage==1.20.*",
32 "google-cloud-core==1.0.*",
33 "googleapis-common-protos==1.*",
34 "google-cloud-bigquery-storage==0.7.*",
35 "grpcio==1.*",
36 "pandas~=1.0.0",
37 "pandavro==1.5.*",
38 "protobuf>=3.10",
39 "PyYAML==5.1.*",
40 "fastavro>=0.22.11,<0.23",
41 "kafka-python==1.*",
42 "tabulate==0.8.*",
43 "toml==0.10.*",
44 "tqdm==4.*",
45 "pyarrow<0.16.0,>=0.15.1",
46 "numpy",
47 "google",
48 "confluent_kafka",
49 ]
50
51 # README file from Feast repo root directory
52 repo_root = (
53 subprocess.Popen(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
54 .communicate()[0]
55 .rstrip()
56 .decode("utf-8")
57 )
58 README_FILE = os.path.join(repo_root, "README.md")
59 with open(os.path.join(README_FILE), "r") as f:
60 LONG_DESCRIPTION = f.read()
61
62 setup(
63 name=NAME,
64 author=AUTHOR,
65 description=DESCRIPTION,
66 long_description=LONG_DESCRIPTION,
67 long_description_content_type="text/markdown",
68 python_requires=REQUIRES_PYTHON,
69 url=URL,
70 packages=find_packages(exclude=("tests",)),
71 install_requires=REQUIRED,
72 # https://stackoverflow.com/questions/28509965/setuptools-development-requirements
73 # Install dev requirements with: pip install -e .[dev]
74 extras_require={"dev": ["mypy-protobuf==1.*", "grpcio-testing==1.*"]},
75 include_package_data=True,
76 license="Apache",
77 classifiers=[
78 # Trove classifiers
79 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
80 "License :: OSI Approved :: Apache Software License",
81 "Programming Language :: Python",
82 "Programming Language :: Python :: 3",
83 "Programming Language :: Python :: 3.6",
84 ],
85 entry_points={"console_scripts": ["feast=feast.cli:cli"]},
86 use_scm_version={"root": "../..", "relative_to": __file__},
87 setup_requires=["setuptools_scm"],
88 )
89
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -13,6 +13,7 @@
# limitations under the License.
import os
+import re
import subprocess
from setuptools import find_packages, setup
@@ -59,6 +60,13 @@
with open(os.path.join(README_FILE), "r") as f:
LONG_DESCRIPTION = f.read()
+# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.
+# Regex modified from default tag regex in:
+# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9
+TAG_REGEX = re.compile(
+ r"^(?:[\/\w-]+)?(?P<version>[vV]?\d+(?:\.\d+){0,2}[^\+]*)(?:\+.*)?$"
+)
+
setup(
name=NAME,
author=AUTHOR,
@@ -83,6 +91,6 @@
"Programming Language :: Python :: 3.6",
],
entry_points={"console_scripts": ["feast=feast.cli:cli"]},
- use_scm_version={"root": "../..", "relative_to": __file__},
+ use_scm_version={"root": "../..", "relative_to": __file__, "tag_regex": TAG_REGEX},
setup_requires=["setuptools_scm"],
)
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n import os\n+import re\n import subprocess\n \n from setuptools import find_packages, setup\n@@ -59,6 +60,13 @@\n with open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n \n+# Add Support for parsing tags that have a prefix containing '/' (ie 'sdk/go') to setuptools_scm.\n+# Regex modified from default tag regex in:\n+# https://github.com/pypa/setuptools_scm/blob/2a1b46d38fb2b8aeac09853e660bcd0d7c1bc7be/src/setuptools_scm/config.py#L9\n+TAG_REGEX = re.compile(\n+ r\"^(?:[\\/\\w-]+)?(?P<version>[vV]?\\d+(?:\\.\\d+){0,2}[^\\+]*)(?:\\+.*)?$\"\n+)\n+\n setup(\n name=NAME,\n author=AUTHOR,\n@@ -83,6 +91,6 @@\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n- use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n+ use_scm_version={\"root\": \"../..\", \"relative_to\": __file__, \"tag_regex\": TAG_REGEX},\n setup_requires=[\"setuptools_scm\"],\n )\n", "issue": "Go SDK: go get by Version Tag Does Not Work\n## Problem & Expected Behavour\r\n`go get` to install the Feast Go SDK should work by version tag:\r\n```\r\ngo get github.com/feast-dev/feast/sdk/[email protected]\r\n```\r\nReturns: \r\n```\r\ngo get github.com/feast-dev/feast/sdk/[email protected]: module github.com/feast-dev/[email protected] found, but does not contain package github.com/feast-dev/feast/sdk/go\r\n```\r\nInstead of installing the Go SDK at release version tag v0.5.1.\r\n\r\n## Proposed Solution\r\n- Update [setup.py](https://github.com/feast-dev/feast/blob/89883d418b4935d595585689d63d246ad133cabe/sdk/python/setup.py#L86) to support non semver tags. (ie `sdk/go/v0.5.1`)\r\n- Introduce additional tag `sdk/go/v<VERSION>` each release. This tag will allow us to pull the go module by a versioned tag. 
[Reference](https://github.com/golang/go/wiki/Modules#faqs--multi-module-repositories)\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\nNAME = \"feast\"\nDESCRIPTION = \"Python SDK for Feast\"\nURL = \"https://github.com/feast-dev/feast\"\nAUTHOR = \"Feast\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"Click==7.*\",\n \"google-api-core==1.20.*\",\n \"google-auth<2.0dev,>=1.14.0\",\n \"google-cloud-bigquery==1.18.*\",\n \"google-cloud-storage==1.20.*\",\n \"google-cloud-core==1.0.*\",\n \"googleapis-common-protos==1.*\",\n \"google-cloud-bigquery-storage==0.7.*\",\n \"grpcio==1.*\",\n \"pandas~=1.0.0\",\n \"pandavro==1.5.*\",\n \"protobuf>=3.10\",\n \"PyYAML==5.1.*\",\n \"fastavro>=0.22.11,<0.23\",\n \"kafka-python==1.*\",\n \"tabulate==0.8.*\",\n \"toml==0.10.*\",\n \"tqdm==4.*\",\n \"pyarrow<0.16.0,>=0.15.1\",\n \"numpy\",\n \"google\",\n \"confluent_kafka\",\n]\n\n# README file from Feast repo root directory\nrepo_root = (\n subprocess.Popen([\"git\", \"rev-parse\", \"--show-toplevel\"], stdout=subprocess.PIPE)\n .communicate()[0]\n .rstrip()\n .decode(\"utf-8\")\n)\nREADME_FILE = os.path.join(repo_root, \"README.md\")\nwith open(os.path.join(README_FILE), \"r\") as f:\n LONG_DESCRIPTION = f.read()\n\nsetup(\n name=NAME,\n author=AUTHOR,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=REQUIRED,\n # https://stackoverflow.com/questions/28509965/setuptools-development-requirements\n # Install dev requirements with: pip install -e .[dev]\n extras_require={\"dev\": [\"mypy-protobuf==1.*\", \"grpcio-testing==1.*\"]},\n include_package_data=True,\n license=\"Apache\",\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n entry_points={\"console_scripts\": [\"feast=feast.cli:cli\"]},\n use_scm_version={\"root\": \"../..\", \"relative_to\": __file__},\n setup_requires=[\"setuptools_scm\"],\n)\n", "path": "sdk/python/setup.py"}]} | 1,775 | 358 |
gh_patches_debug_34656 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-3954 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[8tracks] throws 403 error after first 4 songs. Nothing downloads.
8tracks recently updated their API to version 3. They probably updated their security on this (because, according to their license, you can only skip 3 songs within a certain time frame).
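One possible workaround, sketched under the assumption that the 403 is a temporary rate limit on track skips: wait roughly one average song duration and retry the per-track request. `download_song_info` is a placeholder for the extractor's real request call:

```python
# Sketch only: back off and retry when 8tracks rejects a track-info request.
import time

def fetch_with_backoff(download_song_info, avg_song_duration, max_tries=4):
    for attempt in range(max_tries):
        try:
            return download_song_info()
        except Exception:
            if attempt == max_tries - 1:
                raise
            time.sleep(avg_song_duration)  # let the skip limit recover
```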
</issue>
<code>
[start of youtube_dl/extractor/eighttracks.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import json
5 import random
6 import re
7
8 from .common import InfoExtractor
9 from ..utils import (
10 compat_str,
11 )
12
13
14 class EightTracksIE(InfoExtractor):
15 IE_NAME = '8tracks'
16 _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
17 _TEST = {
18 "name": "EightTracks",
19 "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
20 "info_dict": {
21 'id': '1336550',
22 'display_id': 'youtube-dl-test-tracks-a',
23 "description": "test chars: \"'/\\ä↭",
24 "title": "youtube-dl test tracks \"'/\\ä↭<>",
25 },
26 "playlist": [
27 {
28 "md5": "96ce57f24389fc8734ce47f4c1abcc55",
29 "info_dict": {
30 "id": "11885610",
31 "ext": "m4a",
32 "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
33 "uploader_id": "ytdl"
34 }
35 },
36 {
37 "md5": "4ab26f05c1f7291ea460a3920be8021f",
38 "info_dict": {
39 "id": "11885608",
40 "ext": "m4a",
41 "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
42 "uploader_id": "ytdl"
43 }
44 },
45 {
46 "md5": "d30b5b5f74217410f4689605c35d1fd7",
47 "info_dict": {
48 "id": "11885679",
49 "ext": "m4a",
50 "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
51 "uploader_id": "ytdl"
52 }
53 },
54 {
55 "md5": "4eb0a669317cd725f6bbd336a29f923a",
56 "info_dict": {
57 "id": "11885680",
58 "ext": "m4a",
59 "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
60 "uploader_id": "ytdl"
61 }
62 },
63 {
64 "md5": "1893e872e263a2705558d1d319ad19e8",
65 "info_dict": {
66 "id": "11885682",
67 "ext": "m4a",
68 "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
69 "uploader_id": "ytdl"
70 }
71 },
72 {
73 "md5": "b673c46f47a216ab1741ae8836af5899",
74 "info_dict": {
75 "id": "11885683",
76 "ext": "m4a",
77 "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
78 "uploader_id": "ytdl"
79 }
80 },
81 {
82 "md5": "1d74534e95df54986da7f5abf7d842b7",
83 "info_dict": {
84 "id": "11885684",
85 "ext": "m4a",
86 "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
87 "uploader_id": "ytdl"
88 }
89 },
90 {
91 "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
92 "info_dict": {
93 "id": "11885685",
94 "ext": "m4a",
95 "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
96 "uploader_id": "ytdl"
97 }
98 }
99 ]
100 }
101
102 def _real_extract(self, url):
103 mobj = re.match(self._VALID_URL, url)
104 playlist_id = mobj.group('id')
105
106 webpage = self._download_webpage(url, playlist_id)
107
108 json_like = self._search_regex(
109 r"(?s)PAGE.mix = (.*?);\n", webpage, 'trax information')
110 data = json.loads(json_like)
111
112 session = str(random.randint(0, 1000000000))
113 mix_id = data['id']
114 track_count = data['tracks_count']
115 first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
116 next_url = first_url
117 entries = []
118 for i in range(track_count):
119 api_json = self._download_webpage(
120 next_url, playlist_id,
121 note='Downloading song information %d/%d' % (i + 1, track_count),
122 errnote='Failed to download song information')
123 api_data = json.loads(api_json)
124 track_data = api_data['set']['track']
125 info = {
126 'id': compat_str(track_data['id']),
127 'url': track_data['track_file_stream_url'],
128 'title': track_data['performer'] + u' - ' + track_data['name'],
129 'raw_title': track_data['name'],
130 'uploader_id': data['user']['login'],
131 'ext': 'm4a',
132 }
133 entries.append(info)
134 next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
135 session, mix_id, track_data['id'])
136 return {
137 '_type': 'playlist',
138 'entries': entries,
139 'id': compat_str(mix_id),
140 'display_id': playlist_id,
141 'title': data.get('name'),
142 'description': data.get('description'),
143 }
144
[end of youtube_dl/extractor/eighttracks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py
--- a/youtube_dl/extractor/eighttracks.py
+++ b/youtube_dl/extractor/eighttracks.py
@@ -4,10 +4,12 @@
import json
import random
import re
+import time
from .common import InfoExtractor
from ..utils import (
compat_str,
+ ExtractorError,
)
@@ -112,14 +114,30 @@
session = str(random.randint(0, 1000000000))
mix_id = data['id']
track_count = data['tracks_count']
+ duration = data['duration']
+ avg_song_duration = duration / track_count
first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
next_url = first_url
entries = []
+
for i in range(track_count):
- api_json = self._download_webpage(
- next_url, playlist_id,
- note='Downloading song information %d/%d' % (i + 1, track_count),
- errnote='Failed to download song information')
+
+ api_json = None
+ download_tries = 0
+
+ while api_json is None:
+ try:
+ api_json = self._download_webpage(
+ next_url, playlist_id,
+ note='Downloading song information %d/%d' % (i + 1, track_count),
+ errnote='Failed to download song information')
+ except ExtractorError:
+ if download_tries > 3:
+ raise
+ else:
+ download_tries += 1
+ time.sleep(avg_song_duration)
+
api_data = json.loads(api_json)
track_data = api_data['set']['track']
info = {
@@ -131,6 +149,7 @@
'ext': 'm4a',
}
entries.append(info)
+
next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (
session, mix_id, track_data['id'])
return {
| {"golden_diff": "diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py\n--- a/youtube_dl/extractor/eighttracks.py\n+++ b/youtube_dl/extractor/eighttracks.py\n@@ -4,10 +4,12 @@\n import json\n import random\n import re\n+import time\n \n from .common import InfoExtractor\n from ..utils import (\n compat_str,\n+ ExtractorError,\n )\n \n \n@@ -112,14 +114,30 @@\n session = str(random.randint(0, 1000000000))\n mix_id = data['id']\n track_count = data['tracks_count']\n+ duration = data['duration']\n+ avg_song_duration = duration / track_count\n first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)\n next_url = first_url\n entries = []\n+\n for i in range(track_count):\n- api_json = self._download_webpage(\n- next_url, playlist_id,\n- note='Downloading song information %d/%d' % (i + 1, track_count),\n- errnote='Failed to download song information')\n+ \n+ api_json = None\n+ download_tries = 0\n+\n+ while api_json is None:\n+ try:\n+ api_json = self._download_webpage(\n+ next_url, playlist_id,\n+ note='Downloading song information %d/%d' % (i + 1, track_count),\n+ errnote='Failed to download song information')\n+ except ExtractorError:\n+ if download_tries > 3:\n+ raise\n+ else:\n+ download_tries += 1\n+ time.sleep(avg_song_duration)\n+\n api_data = json.loads(api_json)\n track_data = api_data['set']['track']\n info = {\n@@ -131,6 +149,7 @@\n 'ext': 'm4a',\n }\n entries.append(info)\n+\n next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (\n session, mix_id, track_data['id'])\n return {\n", "issue": "[8tracks] throws 403 error after first 4 songs. Nothing downloads.\n8tracks recently updated their API to version 3. They probably updated their security on this (because, according to their license, you can only skip 3 songs within a certain time frame).\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport random\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n compat_str,\n)\n\n\nclass EightTracksIE(InfoExtractor):\n IE_NAME = '8tracks'\n _VALID_URL = r'https?://8tracks\\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'\n _TEST = {\n \"name\": \"EightTracks\",\n \"url\": \"http://8tracks.com/ytdl/youtube-dl-test-tracks-a\",\n \"info_dict\": {\n 'id': '1336550',\n 'display_id': 'youtube-dl-test-tracks-a',\n \"description\": \"test chars: \\\"'/\\\\\u00e4\u21ad\",\n \"title\": \"youtube-dl test tracks \\\"'/\\\\\u00e4\u21ad<>\",\n },\n \"playlist\": [\n {\n \"md5\": \"96ce57f24389fc8734ce47f4c1abcc55\",\n \"info_dict\": {\n \"id\": \"11885610\",\n \"ext\": \"m4a\",\n \"title\": \"youtue-dl project<>\\\"' - youtube-dl test track 1 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"4ab26f05c1f7291ea460a3920be8021f\",\n \"info_dict\": {\n \"id\": \"11885608\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project - youtube-dl test track 2 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"d30b5b5f74217410f4689605c35d1fd7\",\n \"info_dict\": {\n \"id\": \"11885679\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project as well - youtube-dl test track 3 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"4eb0a669317cd725f6bbd336a29f923a\",\n \"info_dict\": {\n \"id\": \"11885680\",\n \"ext\": \"m4a\",\n \"title\": \"youtube-dl project as well - youtube-dl test track 4 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": 
\"ytdl\"\n }\n },\n {\n \"md5\": \"1893e872e263a2705558d1d319ad19e8\",\n \"info_dict\": {\n \"id\": \"11885682\",\n \"ext\": \"m4a\",\n \"title\": \"PH - youtube-dl test track 5 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"b673c46f47a216ab1741ae8836af5899\",\n \"info_dict\": {\n \"id\": \"11885683\",\n \"ext\": \"m4a\",\n \"title\": \"PH - youtube-dl test track 6 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"1d74534e95df54986da7f5abf7d842b7\",\n \"info_dict\": {\n \"id\": \"11885684\",\n \"ext\": \"m4a\",\n \"title\": \"phihag - youtube-dl test track 7 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n },\n {\n \"md5\": \"f081f47af8f6ae782ed131d38b9cd1c0\",\n \"info_dict\": {\n \"id\": \"11885685\",\n \"ext\": \"m4a\",\n \"title\": \"phihag - youtube-dl test track 8 \\\"'/\\\\\\u00e4\\u21ad\",\n \"uploader_id\": \"ytdl\"\n }\n }\n ]\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n playlist_id = mobj.group('id')\n\n webpage = self._download_webpage(url, playlist_id)\n\n json_like = self._search_regex(\n r\"(?s)PAGE.mix = (.*?);\\n\", webpage, 'trax information')\n data = json.loads(json_like)\n\n session = str(random.randint(0, 1000000000))\n mix_id = data['id']\n track_count = data['tracks_count']\n first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)\n next_url = first_url\n entries = []\n for i in range(track_count):\n api_json = self._download_webpage(\n next_url, playlist_id,\n note='Downloading song information %d/%d' % (i + 1, track_count),\n errnote='Failed to download song information')\n api_data = json.loads(api_json)\n track_data = api_data['set']['track']\n info = {\n 'id': compat_str(track_data['id']),\n 'url': track_data['track_file_stream_url'],\n 'title': track_data['performer'] + u' - ' + track_data['name'],\n 'raw_title': track_data['name'],\n 'uploader_id': data['user']['login'],\n 'ext': 'm4a',\n }\n entries.append(info)\n next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (\n session, mix_id, track_data['id'])\n return {\n '_type': 'playlist',\n 'entries': entries,\n 'id': compat_str(mix_id),\n 'display_id': playlist_id,\n 'title': data.get('name'),\n 'description': data.get('description'),\n }\n", "path": "youtube_dl/extractor/eighttracks.py"}]} | 2,459 | 514 |
gh_patches_debug_38459 | rasdani/github-patches | git_diff | kornia__kornia-2235 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use more pythonic expressions in rgb to hls
Make improvements in the `kornia/color/hls.py` module to use more pythonic expressions. Ex:
```diff
- r = torch.select(im, -3, 0)
+ r = im[..., 0, :, :]
```
_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2030#discussion_r1032065189_
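A small self-contained check of the equivalence being suggested; the random tensor is only an example shape:

```python
# Sketch: ellipsis indexing selects the same channel view as torch.select.
import torch

im = torch.rand(2, 3, 4, 5)          # (*, 3, H, W)
r_select = torch.select(im, -3, 0)   # current style
r_index = im[..., 0, :, :]           # proposed, more pythonic style
assert torch.equal(r_select, r_index)
assert r_index.shape == (2, 4, 5)
```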
</issue>
<code>
[start of kornia/color/hls.py]
1 import math
2 from typing import Tuple
3
4 import torch
5
6 from kornia.core import Module, Tensor, stack, tensor, where
7
8
9 def rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:
10 r"""Convert a RGB image to HLS.
11
12 .. image:: _static/img/rgb_to_hls.png
13
14 The image data is assumed to be in the range of (0, 1).
15
16 NOTE: this method cannot be compiled with JIT in pytohrch < 1.7.0
17
18 Args:
19 image: RGB image to be converted to HLS with shape :math:`(*, 3, H, W)`.
20 eps: epsilon value to avoid div by zero.
21
22 Returns:
23 HLS version of the image with shape :math:`(*, 3, H, W)`.
24
25 Example:
26 >>> input = torch.rand(2, 3, 4, 5)
27 >>> output = rgb_to_hls(input) # 2x3x4x5
28 """
29 if not isinstance(image, Tensor):
30 raise TypeError(f"Input type is not a Tensor. Got {type(image)}")
31
32 if len(image.shape) < 3 or image.shape[-3] != 3:
33 raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
34
35 _RGB2HSL_IDX = tensor([[[0.0]], [[1.0]], [[2.0]]], device=image.device, dtype=image.dtype) # 3x1x1
36
37 _img_max: Tuple[Tensor, Tensor] = image.max(-3)
38 maxc = _img_max[0]
39 imax = _img_max[1]
40 minc: Tensor = image.min(-3)[0]
41
42 if image.requires_grad:
43 l_ = maxc + minc
44 s = maxc - minc
45 # weird behaviour with undefined vars in JIT...
46 # scripting requires image_hls be defined even if it is not used :S
47 h = l_ # assign to any tensor...
48 image_hls = l_ # assign to any tensor...
49 else:
50 # define the resulting image to avoid the torch.stack([h, l, s])
51 # so, h, l and s require inplace operations
52 # NOTE: stack() increases in a 10% the cost in colab
53 image_hls = torch.empty_like(image)
54 h = torch.select(image_hls, -3, 0)
55 l_ = torch.select(image_hls, -3, 1)
56 s = torch.select(image_hls, -3, 2)
57 torch.add(maxc, minc, out=l_) # l = max + min
58 torch.sub(maxc, minc, out=s) # s = max - min
59
60 # precompute image / (max - min)
61 im = image / (s + eps).unsqueeze(-3)
62
63 # epsilon cannot be inside the torch.where to avoid precision issues
64 s /= where(l_ < 1.0, l_, 2.0 - l_) + eps # saturation
65 l_ /= 2 # luminance
66
67 # note that r,g and b were previously div by (max - min)
68 r = torch.select(im, -3, 0)
69 g = torch.select(im, -3, 1)
70 b = torch.select(im, -3, 2)
71 # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]
72 # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]
73 # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]
74 cond = imax.unsqueeze(-3) == _RGB2HSL_IDX
75 if image.requires_grad:
76 h = torch.mul((g - b) % 6, torch.select(cond, -3, 0))
77 else:
78 torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h)
79 h += torch.add(b - r, 2) * torch.select(cond, -3, 1)
80 h += torch.add(r - g, 4) * torch.select(cond, -3, 2)
81 # h = 2.0 * math.pi * (60.0 * h) / 360.0
82 h *= math.pi / 3.0 # hue [0, 2*pi]
83
84 if image.requires_grad:
85 return stack([h, l_, s], -3)
86 return image_hls
87
88
89 def hls_to_rgb(image: Tensor) -> Tensor:
90 r"""Convert a HLS image to RGB.
91
92 The image data is assumed to be in the range of (0, 1).
93
94 Args:
95 image: HLS image to be converted to RGB with shape :math:`(*, 3, H, W)`.
96
97 Returns:
98 RGB version of the image with shape :math:`(*, 3, H, W)`.
99
100 Example:
101 >>> input = torch.rand(2, 3, 4, 5)
102 >>> output = hls_to_rgb(input) # 2x3x4x5
103 """
104 if not isinstance(image, Tensor):
105 raise TypeError(f"Input type is not a Tensor. Got {type(image)}")
106
107 if len(image.shape) < 3 or image.shape[-3] != 3:
108 raise ValueError(f"Input size must have a shape of (*, 3, H, W). Got {image.shape}")
109
110 _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1
111
112 im: Tensor = image.unsqueeze(-4)
113 h: Tensor = torch.select(im, -3, 0)
114 l: Tensor = torch.select(im, -3, 1)
115 s: Tensor = torch.select(im, -3, 2)
116 h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30
117 a = s * torch.min(l, 1.0 - l)
118
119 # kr = (0 + h) % 12
120 # kg = (8 + h) % 12
121 # kb = (4 + h) % 12
122 k: Tensor = (h + _HLS2RGB) % 12
123
124 # l - a * max(min(min(k - 3.0, 9.0 - k), 1), -1)
125 mink = torch.min(k - 3.0, 9.0 - k)
126 return torch.addcmul(l, a, mink.clamp_(min=-1.0, max=1.0), value=-1)
127
128
129 class RgbToHls(Module):
130 r"""Convert an image from RGB to HLS.
131
132 The image data is assumed to be in the range of (0, 1).
133
134 Returns:
135 HLS version of the image.
136
137 Shape:
138 - image: :math:`(*, 3, H, W)`
139 - output: :math:`(*, 3, H, W)`
140
141 Examples:
142 >>> input = torch.rand(2, 3, 4, 5)
143 >>> hls = RgbToHls()
144 >>> output = hls(input) # 2x3x4x5
145 """
146
147 def forward(self, image: Tensor) -> Tensor:
148 return rgb_to_hls(image)
149
150
151 class HlsToRgb(Module):
152 r"""Convert an image from HLS to RGB.
153
154 The image data is assumed to be in the range of (0, 1).
155
156 Returns:
157 RGB version of the image.
158
159 Shape:
160 - input: :math:`(*, 3, H, W)`
161 - output: :math:`(*, 3, H, W)`
162
163 Reference:
164 https://en.wikipedia.org/wiki/HSL_and_HSV
165
166 Examples:
167 >>> input = torch.rand(2, 3, 4, 5)
168 >>> rgb = HlsToRgb()
169 >>> output = rgb(input) # 2x3x4x5
170 """
171
172 def forward(self, image: Tensor) -> Tensor:
173 return hls_to_rgb(image)
174
[end of kornia/color/hls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/color/hls.py b/kornia/color/hls.py
--- a/kornia/color/hls.py
+++ b/kornia/color/hls.py
@@ -7,7 +7,7 @@
def rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:
- r"""Convert a RGB image to HLS.
+ r"""Convert an RGB image to HLS.
.. image:: _static/img/rgb_to_hls.png
@@ -51,9 +51,7 @@
# so, h, l and s require inplace operations
# NOTE: stack() increases in a 10% the cost in colab
image_hls = torch.empty_like(image)
- h = torch.select(image_hls, -3, 0)
- l_ = torch.select(image_hls, -3, 1)
- s = torch.select(image_hls, -3, 2)
+ h, l_, s = image_hls[..., 0, :, :], image_hls[..., 1, :, :], image_hls[..., 2, :, :]
torch.add(maxc, minc, out=l_) # l = max + min
torch.sub(maxc, minc, out=s) # s = max - min
@@ -65,19 +63,18 @@
l_ /= 2 # luminance
# note that r,g and b were previously div by (max - min)
- r = torch.select(im, -3, 0)
- g = torch.select(im, -3, 1)
- b = torch.select(im, -3, 2)
+ r, g, b = im[..., 0, :, :], im[..., 1, :, :], im[..., 2, :, :]
# h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]
# h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]
# h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]
- cond = imax.unsqueeze(-3) == _RGB2HSL_IDX
+ cond = imax[..., None, :, :] == _RGB2HSL_IDX
if image.requires_grad:
- h = torch.mul((g - b) % 6, torch.select(cond, -3, 0))
+ h = ((g - b) % 6) * cond[..., 0, :, :]
else:
- torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h)
- h += torch.add(b - r, 2) * torch.select(cond, -3, 1)
- h += torch.add(r - g, 4) * torch.select(cond, -3, 2)
+ # replacing `torch.mul` with `out=h` with python * operator gives wrong results
+ torch.mul((g - b) % 6, cond[..., 0, :, :], out=h)
+ h += (b - r + 2) * cond[..., 1, :, :]
+ h += (r - g + 4) * cond[..., 2, :, :]
# h = 2.0 * math.pi * (60.0 * h) / 360.0
h *= math.pi / 3.0 # hue [0, 2*pi]
@@ -110,9 +107,9 @@
_HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1
im: Tensor = image.unsqueeze(-4)
- h: Tensor = torch.select(im, -3, 0)
- l: Tensor = torch.select(im, -3, 1)
- s: Tensor = torch.select(im, -3, 2)
+ h: Tensor = im[..., 0, :, :]
+ l: Tensor = im[..., 1, :, :]
+ s: Tensor = im[..., 2, :, :]
h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30
a = s * torch.min(l, 1.0 - l)
| {"golden_diff": "diff --git a/kornia/color/hls.py b/kornia/color/hls.py\n--- a/kornia/color/hls.py\n+++ b/kornia/color/hls.py\n@@ -7,7 +7,7 @@\n \n \n def rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:\n- r\"\"\"Convert a RGB image to HLS.\n+ r\"\"\"Convert an RGB image to HLS.\n \n .. image:: _static/img/rgb_to_hls.png\n \n@@ -51,9 +51,7 @@\n # so, h, l and s require inplace operations\n # NOTE: stack() increases in a 10% the cost in colab\n image_hls = torch.empty_like(image)\n- h = torch.select(image_hls, -3, 0)\n- l_ = torch.select(image_hls, -3, 1)\n- s = torch.select(image_hls, -3, 2)\n+ h, l_, s = image_hls[..., 0, :, :], image_hls[..., 1, :, :], image_hls[..., 2, :, :]\n torch.add(maxc, minc, out=l_) # l = max + min\n torch.sub(maxc, minc, out=s) # s = max - min\n \n@@ -65,19 +63,18 @@\n l_ /= 2 # luminance\n \n # note that r,g and b were previously div by (max - min)\n- r = torch.select(im, -3, 0)\n- g = torch.select(im, -3, 1)\n- b = torch.select(im, -3, 2)\n+ r, g, b = im[..., 0, :, :], im[..., 1, :, :], im[..., 2, :, :]\n # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]\n # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]\n # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]\n- cond = imax.unsqueeze(-3) == _RGB2HSL_IDX\n+ cond = imax[..., None, :, :] == _RGB2HSL_IDX\n if image.requires_grad:\n- h = torch.mul((g - b) % 6, torch.select(cond, -3, 0))\n+ h = ((g - b) % 6) * cond[..., 0, :, :]\n else:\n- torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h)\n- h += torch.add(b - r, 2) * torch.select(cond, -3, 1)\n- h += torch.add(r - g, 4) * torch.select(cond, -3, 2)\n+ # replacing `torch.mul` with `out=h` with python * operator gives wrong results\n+ torch.mul((g - b) % 6, cond[..., 0, :, :], out=h)\n+ h += (b - r + 2) * cond[..., 1, :, :]\n+ h += (r - g + 4) * cond[..., 2, :, :]\n # h = 2.0 * math.pi * (60.0 * h) / 360.0\n h *= math.pi / 3.0 # hue [0, 2*pi]\n \n@@ -110,9 +107,9 @@\n _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n \n im: Tensor = image.unsqueeze(-4)\n- h: Tensor = torch.select(im, -3, 0)\n- l: Tensor = torch.select(im, -3, 1)\n- s: Tensor = torch.select(im, -3, 2)\n+ h: Tensor = im[..., 0, :, :]\n+ l: Tensor = im[..., 1, :, :]\n+ s: Tensor = im[..., 2, :, :]\n h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30\n a = s * torch.min(l, 1.0 - l)\n", "issue": "Use more pythonic expressions in rgb to hls\nMake improvements in the `kornia/color/hls.py` module to use more pythonic expressions. Ex:\r\n```diff\r\n- r = torch.select(im, -3, 0)\r\n+ r = im[\u2026, 0, :,:]\r\n```\r\n\r\n\r\n_Originally posted by @edgarriba in https://github.com/kornia/kornia/pull/2030#discussion_r1032065189_\r\n \n", "before_files": [{"content": "import math\nfrom typing import Tuple\n\nimport torch\n\nfrom kornia.core import Module, Tensor, stack, tensor, where\n\n\ndef rgb_to_hls(image: Tensor, eps: float = 1e-8) -> Tensor:\n r\"\"\"Convert a RGB image to HLS.\n\n .. 
image:: _static/img/rgb_to_hls.png\n\n The image data is assumed to be in the range of (0, 1).\n\n NOTE: this method cannot be compiled with JIT in pytohrch < 1.7.0\n\n Args:\n image: RGB image to be converted to HLS with shape :math:`(*, 3, H, W)`.\n eps: epsilon value to avoid div by zero.\n\n Returns:\n HLS version of the image with shape :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = rgb_to_hls(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, Tensor):\n raise TypeError(f\"Input type is not a Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). Got {image.shape}\")\n\n _RGB2HSL_IDX = tensor([[[0.0]], [[1.0]], [[2.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n\n _img_max: Tuple[Tensor, Tensor] = image.max(-3)\n maxc = _img_max[0]\n imax = _img_max[1]\n minc: Tensor = image.min(-3)[0]\n\n if image.requires_grad:\n l_ = maxc + minc\n s = maxc - minc\n # weird behaviour with undefined vars in JIT...\n # scripting requires image_hls be defined even if it is not used :S\n h = l_ # assign to any tensor...\n image_hls = l_ # assign to any tensor...\n else:\n # define the resulting image to avoid the torch.stack([h, l, s])\n # so, h, l and s require inplace operations\n # NOTE: stack() increases in a 10% the cost in colab\n image_hls = torch.empty_like(image)\n h = torch.select(image_hls, -3, 0)\n l_ = torch.select(image_hls, -3, 1)\n s = torch.select(image_hls, -3, 2)\n torch.add(maxc, minc, out=l_) # l = max + min\n torch.sub(maxc, minc, out=s) # s = max - min\n\n # precompute image / (max - min)\n im = image / (s + eps).unsqueeze(-3)\n\n # epsilon cannot be inside the torch.where to avoid precision issues\n s /= where(l_ < 1.0, l_, 2.0 - l_) + eps # saturation\n l_ /= 2 # luminance\n\n # note that r,g and b were previously div by (max - min)\n r = torch.select(im, -3, 0)\n g = torch.select(im, -3, 1)\n b = torch.select(im, -3, 2)\n # h[imax == 0] = (((g - b) / (max - min)) % 6)[imax == 0]\n # h[imax == 1] = (((b - r) / (max - min)) + 2)[imax == 1]\n # h[imax == 2] = (((r - g) / (max - min)) + 4)[imax == 2]\n cond = imax.unsqueeze(-3) == _RGB2HSL_IDX\n if image.requires_grad:\n h = torch.mul((g - b) % 6, torch.select(cond, -3, 0))\n else:\n torch.mul((g - b).remainder(6), torch.select(cond, -3, 0), out=h)\n h += torch.add(b - r, 2) * torch.select(cond, -3, 1)\n h += torch.add(r - g, 4) * torch.select(cond, -3, 2)\n # h = 2.0 * math.pi * (60.0 * h) / 360.0\n h *= math.pi / 3.0 # hue [0, 2*pi]\n\n if image.requires_grad:\n return stack([h, l_, s], -3)\n return image_hls\n\n\ndef hls_to_rgb(image: Tensor) -> Tensor:\n r\"\"\"Convert a HLS image to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Args:\n image: HLS image to be converted to RGB with shape :math:`(*, 3, H, W)`.\n\n Returns:\n RGB version of the image with shape :math:`(*, 3, H, W)`.\n\n Example:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> output = hls_to_rgb(input) # 2x3x4x5\n \"\"\"\n if not isinstance(image, Tensor):\n raise TypeError(f\"Input type is not a Tensor. Got {type(image)}\")\n\n if len(image.shape) < 3 or image.shape[-3] != 3:\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). 
Got {image.shape}\")\n\n _HLS2RGB = tensor([[[0.0]], [[8.0]], [[4.0]]], device=image.device, dtype=image.dtype) # 3x1x1\n\n im: Tensor = image.unsqueeze(-4)\n h: Tensor = torch.select(im, -3, 0)\n l: Tensor = torch.select(im, -3, 1)\n s: Tensor = torch.select(im, -3, 2)\n h = h * (6 / math.pi) # h * 360 / (2 * math.pi) / 30\n a = s * torch.min(l, 1.0 - l)\n\n # kr = (0 + h) % 12\n # kg = (8 + h) % 12\n # kb = (4 + h) % 12\n k: Tensor = (h + _HLS2RGB) % 12\n\n # l - a * max(min(min(k - 3.0, 9.0 - k), 1), -1)\n mink = torch.min(k - 3.0, 9.0 - k)\n return torch.addcmul(l, a, mink.clamp_(min=-1.0, max=1.0), value=-1)\n\n\nclass RgbToHls(Module):\n r\"\"\"Convert an image from RGB to HLS.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n HLS version of the image.\n\n Shape:\n - image: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Examples:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> hls = RgbToHls()\n >>> output = hls(input) # 2x3x4x5\n \"\"\"\n\n def forward(self, image: Tensor) -> Tensor:\n return rgb_to_hls(image)\n\n\nclass HlsToRgb(Module):\n r\"\"\"Convert an image from HLS to RGB.\n\n The image data is assumed to be in the range of (0, 1).\n\n Returns:\n RGB version of the image.\n\n Shape:\n - input: :math:`(*, 3, H, W)`\n - output: :math:`(*, 3, H, W)`\n\n Reference:\n https://en.wikipedia.org/wiki/HSL_and_HSV\n\n Examples:\n >>> input = torch.rand(2, 3, 4, 5)\n >>> rgb = HlsToRgb()\n >>> output = rgb(input) # 2x3x4x5\n \"\"\"\n\n def forward(self, image: Tensor) -> Tensor:\n return hls_to_rgb(image)\n", "path": "kornia/color/hls.py"}]} | 2,948 | 990 |
gh_patches_debug_36666 | rasdani/github-patches | git_diff | arviz-devs__arviz-1263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HPD plot shows values over each other
Hey All,
I am trying to figure out how to change the plot_posterior function to show the actual values. In one of my plots, where the interval is small, the numbers get written over each other. Any help/advice?

</issue>
<code>
[start of arviz/plots/backends/matplotlib/posteriorplot.py]
1 """Matplotlib Plot posterior densities."""
2 from typing import Optional
3 from numbers import Number
4 import matplotlib.pyplot as plt
5 import numpy as np
6
7 from . import backend_show
8 from ....stats import hdi
9 from ...kdeplot import plot_kde
10 from ...plot_utils import (
11 make_label,
12 _create_axes_grid,
13 format_sig_figs,
14 round_num,
15 calculate_point_estimate,
16 )
17 from ....numeric_utils import get_bins
18
19
20 def plot_posterior(
21 ax,
22 length_plotters,
23 rows,
24 cols,
25 figsize,
26 plotters,
27 bw,
28 bins,
29 kind,
30 point_estimate,
31 round_to,
32 hdi_prob,
33 multimodal,
34 ref_val,
35 rope,
36 ax_labelsize,
37 xt_labelsize,
38 kwargs,
39 titlesize,
40 backend_kwargs,
41 show,
42 ):
43 """Matplotlib posterior plot."""
44 if ax is None:
45 _, ax = _create_axes_grid(
46 length_plotters,
47 rows,
48 cols,
49 figsize=figsize,
50 squeeze=False,
51 backend_kwargs=backend_kwargs,
52 )
53 idx = 0
54 for (var_name, selection, x), ax_ in zip(plotters, np.ravel(ax)):
55 _plot_posterior_op(
56 idx,
57 x.flatten(),
58 var_name,
59 selection,
60 ax=ax_,
61 bw=bw,
62 bins=bins,
63 kind=kind,
64 point_estimate=point_estimate,
65 round_to=round_to,
66 hdi_prob=hdi_prob,
67 multimodal=multimodal,
68 ref_val=ref_val,
69 rope=rope,
70 ax_labelsize=ax_labelsize,
71 xt_labelsize=xt_labelsize,
72 **kwargs
73 )
74 idx += 1
75 ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)
76
77 if backend_show(show):
78 plt.show()
79
80 return ax
81
82
83 def _plot_posterior_op(
84 idx,
85 values,
86 var_name,
87 selection,
88 ax,
89 bw,
90 linewidth,
91 bins,
92 kind,
93 point_estimate,
94 hdi_prob,
95 multimodal,
96 ref_val,
97 rope,
98 ax_labelsize,
99 xt_labelsize,
100 round_to: Optional[int] = None,
101 **kwargs
102 ): # noqa: D202
103 """Artist to draw posterior."""
104
105 def format_as_percent(x, round_to=0):
106 return "{0:.{1:d}f}%".format(100 * x, round_to)
107
108 def display_ref_val():
109 if ref_val is None:
110 return
111 elif isinstance(ref_val, dict):
112 val = None
113 for sel in ref_val.get(var_name, []):
114 if all(
115 k in selection and selection[k] == v for k, v in sel.items() if k != "ref_val"
116 ):
117 val = sel["ref_val"]
118 break
119 if val is None:
120 return
121 elif isinstance(ref_val, list):
122 val = ref_val[idx]
123 elif isinstance(ref_val, Number):
124 val = ref_val
125 else:
126 raise ValueError(
127 "Argument `ref_val` must be None, a constant, a list or a "
128 'dictionary like {"var_name": [{"ref_val": ref_val}]}'
129 )
130 less_than_ref_probability = (values < val).mean()
131 greater_than_ref_probability = (values >= val).mean()
132 ref_in_posterior = "{} <{:g}< {}".format(
133 format_as_percent(less_than_ref_probability, 1),
134 val,
135 format_as_percent(greater_than_ref_probability, 1),
136 )
137 ax.axvline(val, ymin=0.05, ymax=0.75, color="C1", lw=linewidth, alpha=0.65)
138 ax.text(
139 values.mean(),
140 plot_height * 0.6,
141 ref_in_posterior,
142 size=ax_labelsize,
143 color="C1",
144 weight="semibold",
145 horizontalalignment="center",
146 )
147
148 def display_rope():
149 if rope is None:
150 return
151 elif isinstance(rope, dict):
152 vals = None
153 for sel in rope.get(var_name, []):
154 # pylint: disable=line-too-long
155 if all(k in selection and selection[k] == v for k, v in sel.items() if k != "rope"):
156 vals = sel["rope"]
157 break
158 if vals is None:
159 return
160 elif len(rope) == 2:
161 vals = rope
162 else:
163 raise ValueError(
164 "Argument `rope` must be None, a dictionary like"
165 '{"var_name": {"rope": (lo, hi)}}, or an'
166 "iterable of length 2"
167 )
168
169 ax.plot(
170 vals,
171 (plot_height * 0.02, plot_height * 0.02),
172 lw=linewidth * 5,
173 color="C2",
174 solid_capstyle="butt",
175 zorder=0,
176 alpha=0.7,
177 )
178 text_props = {"size": ax_labelsize, "horizontalalignment": "center", "color": "C2"}
179 ax.text(vals[0], plot_height * 0.2, vals[0], weight="semibold", **text_props)
180 ax.text(vals[1], plot_height * 0.2, vals[1], weight="semibold", **text_props)
181
182 def display_point_estimate():
183 if not point_estimate:
184 return
185 point_value = calculate_point_estimate(point_estimate, values, bw)
186 sig_figs = format_sig_figs(point_value, round_to)
187 point_text = "{point_estimate}={point_value:.{sig_figs}g}".format(
188 point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs
189 )
190 ax.text(
191 point_value,
192 plot_height * 0.8,
193 point_text,
194 size=ax_labelsize,
195 horizontalalignment="center",
196 )
197
198 def display_hdi():
199 # np.ndarray with 2 entries, min and max
200 # pylint: disable=line-too-long
201 hdi_probs = hdi(values, hdi_prob=hdi_prob, multimodal=multimodal) # type: np.ndarray
202
203 for hdi_i in np.atleast_2d(hdi_probs):
204 ax.plot(
205 hdi_i,
206 (plot_height * 0.02, plot_height * 0.02),
207 lw=linewidth * 2,
208 color="k",
209 solid_capstyle="butt",
210 )
211 ax.text(
212 hdi_i[0],
213 plot_height * 0.07,
214 round_num(hdi_i[0], round_to),
215 size=ax_labelsize,
216 horizontalalignment="center",
217 )
218 ax.text(
219 hdi_i[1],
220 plot_height * 0.07,
221 round_num(hdi_i[1], round_to),
222 size=ax_labelsize,
223 horizontalalignment="center",
224 )
225 ax.text(
226 (hdi_i[0] + hdi_i[1]) / 2,
227 plot_height * 0.3,
228 format_as_percent(hdi_prob) + " HDI",
229 size=ax_labelsize,
230 horizontalalignment="center",
231 )
232
233 def format_axes():
234 ax.yaxis.set_ticks([])
235 ax.spines["top"].set_visible(False)
236 ax.spines["right"].set_visible(False)
237 ax.spines["left"].set_visible(False)
238 ax.spines["bottom"].set_visible(True)
239 ax.xaxis.set_ticks_position("bottom")
240 ax.tick_params(
241 axis="x", direction="out", width=1, length=3, color="0.5", labelsize=xt_labelsize
242 )
243 ax.spines["bottom"].set_color("0.5")
244
245 if kind == "kde" and values.dtype.kind == "f":
246 kwargs.setdefault("linewidth", linewidth)
247 plot_kde(
248 values,
249 bw=bw,
250 fill_kwargs={"alpha": kwargs.pop("fill_alpha", 0)},
251 plot_kwargs=kwargs,
252 ax=ax,
253 rug=False,
254 show=False,
255 )
256 else:
257 if bins is None:
258 if values.dtype.kind == "i":
259 xmin = values.min()
260 xmax = values.max()
261 bins = get_bins(values)
262 ax.set_xlim(xmin - 0.5, xmax + 0.5)
263 else:
264 bins = "auto"
265 kwargs.setdefault("align", "left")
266 kwargs.setdefault("color", "C0")
267 ax.hist(values, bins=bins, alpha=0.35, **kwargs)
268
269 plot_height = ax.get_ylim()[1]
270
271 format_axes()
272 if hdi_prob != "hide":
273 display_hdi()
274 display_point_estimate()
275 display_ref_val()
276 display_rope()
277
[end of arviz/plots/backends/matplotlib/posteriorplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/backends/matplotlib/posteriorplot.py b/arviz/plots/backends/matplotlib/posteriorplot.py
--- a/arviz/plots/backends/matplotlib/posteriorplot.py
+++ b/arviz/plots/backends/matplotlib/posteriorplot.py
@@ -69,7 +69,7 @@
rope=rope,
ax_labelsize=ax_labelsize,
xt_labelsize=xt_labelsize,
- **kwargs
+ **kwargs,
)
idx += 1
ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)
@@ -98,7 +98,7 @@
ax_labelsize,
xt_labelsize,
round_to: Optional[int] = None,
- **kwargs
+ **kwargs,
): # noqa: D202
"""Artist to draw posterior."""
@@ -175,9 +175,23 @@
zorder=0,
alpha=0.7,
)
- text_props = {"size": ax_labelsize, "horizontalalignment": "center", "color": "C2"}
- ax.text(vals[0], plot_height * 0.2, vals[0], weight="semibold", **text_props)
- ax.text(vals[1], plot_height * 0.2, vals[1], weight="semibold", **text_props)
+ text_props = {"size": ax_labelsize, "color": "C2"}
+ ax.text(
+ vals[0],
+ plot_height * 0.2,
+ f"{vals[0]} ",
+ weight="semibold",
+ horizontalalignment="right",
+ **text_props,
+ )
+ ax.text(
+ vals[1],
+ plot_height * 0.2,
+ f" {vals[1]}",
+ weight="semibold",
+ horizontalalignment="left",
+ **text_props,
+ )
def display_point_estimate():
if not point_estimate:
@@ -211,16 +225,16 @@
ax.text(
hdi_i[0],
plot_height * 0.07,
- round_num(hdi_i[0], round_to),
+ round_num(hdi_i[0], round_to) + " ",
size=ax_labelsize,
- horizontalalignment="center",
+ horizontalalignment="right",
)
ax.text(
hdi_i[1],
plot_height * 0.07,
- round_num(hdi_i[1], round_to),
+ " " + round_num(hdi_i[1], round_to),
size=ax_labelsize,
- horizontalalignment="center",
+ horizontalalignment="left",
)
ax.text(
(hdi_i[0] + hdi_i[1]) / 2,
| {"golden_diff": "diff --git a/arviz/plots/backends/matplotlib/posteriorplot.py b/arviz/plots/backends/matplotlib/posteriorplot.py\n--- a/arviz/plots/backends/matplotlib/posteriorplot.py\n+++ b/arviz/plots/backends/matplotlib/posteriorplot.py\n@@ -69,7 +69,7 @@\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n- **kwargs\n+ **kwargs,\n )\n idx += 1\n ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)\n@@ -98,7 +98,7 @@\n ax_labelsize,\n xt_labelsize,\n round_to: Optional[int] = None,\n- **kwargs\n+ **kwargs,\n ): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n \n@@ -175,9 +175,23 @@\n zorder=0,\n alpha=0.7,\n )\n- text_props = {\"size\": ax_labelsize, \"horizontalalignment\": \"center\", \"color\": \"C2\"}\n- ax.text(vals[0], plot_height * 0.2, vals[0], weight=\"semibold\", **text_props)\n- ax.text(vals[1], plot_height * 0.2, vals[1], weight=\"semibold\", **text_props)\n+ text_props = {\"size\": ax_labelsize, \"color\": \"C2\"}\n+ ax.text(\n+ vals[0],\n+ plot_height * 0.2,\n+ f\"{vals[0]} \",\n+ weight=\"semibold\",\n+ horizontalalignment=\"right\",\n+ **text_props,\n+ )\n+ ax.text(\n+ vals[1],\n+ plot_height * 0.2,\n+ f\" {vals[1]}\",\n+ weight=\"semibold\",\n+ horizontalalignment=\"left\",\n+ **text_props,\n+ )\n \n def display_point_estimate():\n if not point_estimate:\n@@ -211,16 +225,16 @@\n ax.text(\n hdi_i[0],\n plot_height * 0.07,\n- round_num(hdi_i[0], round_to),\n+ round_num(hdi_i[0], round_to) + \" \",\n size=ax_labelsize,\n- horizontalalignment=\"center\",\n+ horizontalalignment=\"right\",\n )\n ax.text(\n hdi_i[1],\n plot_height * 0.07,\n- round_num(hdi_i[1], round_to),\n+ \" \" + round_num(hdi_i[1], round_to),\n size=ax_labelsize,\n- horizontalalignment=\"center\",\n+ horizontalalignment=\"left\",\n )\n ax.text(\n (hdi_i[0] + hdi_i[1]) / 2,\n", "issue": "HPD plot shows values over each other\nHey All,\r\n\r\nI am trying to figure out how to change the plot_posterior function to show the actual values. In one of my plots, where it is small, I get numbers writing over each other. Any help/advice?\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Matplotlib Plot posterior densities.\"\"\"\nfrom typing import Optional\nfrom numbers import Number\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . 
import backend_show\nfrom ....stats import hdi\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import (\n make_label,\n _create_axes_grid,\n format_sig_figs,\n round_num,\n calculate_point_estimate,\n)\nfrom ....numeric_utils import get_bins\n\n\ndef plot_posterior(\n ax,\n length_plotters,\n rows,\n cols,\n figsize,\n plotters,\n bw,\n bins,\n kind,\n point_estimate,\n round_to,\n hdi_prob,\n multimodal,\n ref_val,\n rope,\n ax_labelsize,\n xt_labelsize,\n kwargs,\n titlesize,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib posterior plot.\"\"\"\n if ax is None:\n _, ax = _create_axes_grid(\n length_plotters,\n rows,\n cols,\n figsize=figsize,\n squeeze=False,\n backend_kwargs=backend_kwargs,\n )\n idx = 0\n for (var_name, selection, x), ax_ in zip(plotters, np.ravel(ax)):\n _plot_posterior_op(\n idx,\n x.flatten(),\n var_name,\n selection,\n ax=ax_,\n bw=bw,\n bins=bins,\n kind=kind,\n point_estimate=point_estimate,\n round_to=round_to,\n hdi_prob=hdi_prob,\n multimodal=multimodal,\n ref_val=ref_val,\n rope=rope,\n ax_labelsize=ax_labelsize,\n xt_labelsize=xt_labelsize,\n **kwargs\n )\n idx += 1\n ax_.set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)\n\n if backend_show(show):\n plt.show()\n\n return ax\n\n\ndef _plot_posterior_op(\n idx,\n values,\n var_name,\n selection,\n ax,\n bw,\n linewidth,\n bins,\n kind,\n point_estimate,\n hdi_prob,\n multimodal,\n ref_val,\n rope,\n ax_labelsize,\n xt_labelsize,\n round_to: Optional[int] = None,\n **kwargs\n): # noqa: D202\n \"\"\"Artist to draw posterior.\"\"\"\n\n def format_as_percent(x, round_to=0):\n return \"{0:.{1:d}f}%\".format(100 * x, round_to)\n\n def display_ref_val():\n if ref_val is None:\n return\n elif isinstance(ref_val, dict):\n val = None\n for sel in ref_val.get(var_name, []):\n if all(\n k in selection and selection[k] == v for k, v in sel.items() if k != \"ref_val\"\n ):\n val = sel[\"ref_val\"]\n break\n if val is None:\n return\n elif isinstance(ref_val, list):\n val = ref_val[idx]\n elif isinstance(ref_val, Number):\n val = ref_val\n else:\n raise ValueError(\n \"Argument `ref_val` must be None, a constant, a list or a \"\n 'dictionary like {\"var_name\": [{\"ref_val\": ref_val}]}'\n )\n less_than_ref_probability = (values < val).mean()\n greater_than_ref_probability = (values >= val).mean()\n ref_in_posterior = \"{} <{:g}< {}\".format(\n format_as_percent(less_than_ref_probability, 1),\n val,\n format_as_percent(greater_than_ref_probability, 1),\n )\n ax.axvline(val, ymin=0.05, ymax=0.75, color=\"C1\", lw=linewidth, alpha=0.65)\n ax.text(\n values.mean(),\n plot_height * 0.6,\n ref_in_posterior,\n size=ax_labelsize,\n color=\"C1\",\n weight=\"semibold\",\n horizontalalignment=\"center\",\n )\n\n def display_rope():\n if rope is None:\n return\n elif isinstance(rope, dict):\n vals = None\n for sel in rope.get(var_name, []):\n # pylint: disable=line-too-long\n if all(k in selection and selection[k] == v for k, v in sel.items() if k != \"rope\"):\n vals = sel[\"rope\"]\n break\n if vals is None:\n return\n elif len(rope) == 2:\n vals = rope\n else:\n raise ValueError(\n \"Argument `rope` must be None, a dictionary like\"\n '{\"var_name\": {\"rope\": (lo, hi)}}, or an'\n \"iterable of length 2\"\n )\n\n ax.plot(\n vals,\n (plot_height * 0.02, plot_height * 0.02),\n lw=linewidth * 5,\n color=\"C2\",\n solid_capstyle=\"butt\",\n zorder=0,\n alpha=0.7,\n )\n text_props = {\"size\": ax_labelsize, \"horizontalalignment\": \"center\", \"color\": \"C2\"}\n ax.text(vals[0], plot_height * 0.2, 
vals[0], weight=\"semibold\", **text_props)\n ax.text(vals[1], plot_height * 0.2, vals[1], weight=\"semibold\", **text_props)\n\n def display_point_estimate():\n if not point_estimate:\n return\n point_value = calculate_point_estimate(point_estimate, values, bw)\n sig_figs = format_sig_figs(point_value, round_to)\n point_text = \"{point_estimate}={point_value:.{sig_figs}g}\".format(\n point_estimate=point_estimate, point_value=point_value, sig_figs=sig_figs\n )\n ax.text(\n point_value,\n plot_height * 0.8,\n point_text,\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n\n def display_hdi():\n # np.ndarray with 2 entries, min and max\n # pylint: disable=line-too-long\n hdi_probs = hdi(values, hdi_prob=hdi_prob, multimodal=multimodal) # type: np.ndarray\n\n for hdi_i in np.atleast_2d(hdi_probs):\n ax.plot(\n hdi_i,\n (plot_height * 0.02, plot_height * 0.02),\n lw=linewidth * 2,\n color=\"k\",\n solid_capstyle=\"butt\",\n )\n ax.text(\n hdi_i[0],\n plot_height * 0.07,\n round_num(hdi_i[0], round_to),\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n ax.text(\n hdi_i[1],\n plot_height * 0.07,\n round_num(hdi_i[1], round_to),\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n ax.text(\n (hdi_i[0] + hdi_i[1]) / 2,\n plot_height * 0.3,\n format_as_percent(hdi_prob) + \" HDI\",\n size=ax_labelsize,\n horizontalalignment=\"center\",\n )\n\n def format_axes():\n ax.yaxis.set_ticks([])\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(True)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.tick_params(\n axis=\"x\", direction=\"out\", width=1, length=3, color=\"0.5\", labelsize=xt_labelsize\n )\n ax.spines[\"bottom\"].set_color(\"0.5\")\n\n if kind == \"kde\" and values.dtype.kind == \"f\":\n kwargs.setdefault(\"linewidth\", linewidth)\n plot_kde(\n values,\n bw=bw,\n fill_kwargs={\"alpha\": kwargs.pop(\"fill_alpha\", 0)},\n plot_kwargs=kwargs,\n ax=ax,\n rug=False,\n show=False,\n )\n else:\n if bins is None:\n if values.dtype.kind == \"i\":\n xmin = values.min()\n xmax = values.max()\n bins = get_bins(values)\n ax.set_xlim(xmin - 0.5, xmax + 0.5)\n else:\n bins = \"auto\"\n kwargs.setdefault(\"align\", \"left\")\n kwargs.setdefault(\"color\", \"C0\")\n ax.hist(values, bins=bins, alpha=0.35, **kwargs)\n\n plot_height = ax.get_ylim()[1]\n\n format_axes()\n if hdi_prob != \"hide\":\n display_hdi()\n display_point_estimate()\n display_ref_val()\n display_rope()\n", "path": "arviz/plots/backends/matplotlib/posteriorplot.py"}]} | 3,308 | 629 |
gh_patches_debug_13793 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
login_required decorator is used at the end of the functions.
In the file readthedocs.org/readthedocs/profiles/views.py, the `login_required` decorator is applied at the end of some functions, while for other functions it is used in the form of `@login_required`, which reduces the readability of the code.
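To make the contrast concrete, the two styles being compared look roughly like this (a minimal sketch with placeholder bodies, not the actual view code):

```python
from django.contrib.auth.decorators import login_required


def edit_profile(request):
    ...  # view body


# style 1: the decorator is applied after the function, far from its signature
edit_profile = login_required(edit_profile)


# style 2: the equivalent, more readable decorator syntax
@login_required
def delete_account(request):
    ...  # view body
```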
</issue>
<code>
[start of readthedocs/profiles/views.py]
1 # -*- coding: utf-8 -*-
2 """Views for creating, editing and viewing site-specific user profiles."""
3
4 from __future__ import (
5 absolute_import,
6 division,
7 print_function,
8 unicode_literals,
9 )
10
11 from django.contrib import messages
12 from django.contrib.auth import logout
13 from django.contrib.auth.decorators import login_required
14 from django.contrib.auth.models import User
15 from django.core.urlresolvers import reverse
16 from django.http import HttpResponseRedirect
17 from django.shortcuts import get_object_or_404, redirect, render
18 from django.utils.translation import ugettext_lazy as _
19
20 from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm
21
22
23 def edit_profile(
24 request, form_class, success_url=None,
25 template_name='profiles/private/edit_profile.html', extra_context=None):
26 """
27 Edit the current user's profile.
28
29 **Optional arguments:**
30
31 ``extra_context``
32 A dictionary of variables to add to the template context. Any
33 callable object in this dictionary will be called to produce
34 the end result which appears in the context.
35
36 ``form_class``
37 The form class to use for validating and editing the user
38 profile. This form class must operate similarly to a standard
39 Django ``ModelForm`` in that it must accept an instance of the
40 object to be edited as the keyword argument ``instance`` to
41 its constructor, and it must implement a method named
42 ``save()`` which will save the updates to the object.
43
44 ``success_url``
45 The URL to redirect to following a successful edit. If not
46 specified, this will default to the URL of
47 :view:`profiles.views.profile_detail` for the profile object
48 being edited.
49
50 ``template_name``
51 The template to use when displaying the profile-editing
52 form. If not specified, this will default to
53 :template:`profiles/edit_profile.html`.
54
55 **Context:**
56
57 ``form``
58 The form for editing the profile.
59
60 ``profile``
61 The user's current profile.
62
63 **Template:**
64
65 ``template_name`` keyword argument or
66 :template:`profiles/edit_profile.html`.
67 """
68 profile_obj = request.user.profile
69 if success_url is None:
70 success_url = reverse(
71 'profiles_profile_detail',
72 kwargs={'username': request.user.username})
73 if request.method == 'POST':
74 form = form_class(
75 data=request.POST, files=request.FILES, instance=profile_obj)
76 if form.is_valid():
77 form.save()
78 return HttpResponseRedirect(success_url)
79 else:
80 form = form_class(instance=profile_obj)
81
82 if extra_context is None:
83 extra_context = {}
84 context = {
85 key: value() if callable(value) else value
86 for key, value in extra_context.items()
87 }
88 context.update({
89 'form': form,
90 'profile': profile_obj,
91 'user': profile_obj.user,
92 })
93 return render(request, template_name, context=context)
94
95
96 edit_profile = login_required(edit_profile)
97
98
99 @login_required()
100 def delete_account(request):
101 form = UserDeleteForm()
102 template_name = 'profiles/private/delete_account.html'
103
104 if request.method == 'POST':
105 form = UserDeleteForm(instance=request.user, data=request.POST)
106 if form.is_valid():
107 # Delete the user permanently
108 # It will also delete some projects where the user is the only owner
109 request.user.delete()
110 logout(request)
111 messages.info(request, 'You have successfully deleted your account')
112
113 return redirect('homepage')
114
115 return render(request, template_name, {'form': form})
116
117
118 def profile_detail(
119 request, username, public_profile_field=None,
120 template_name='profiles/public/profile_detail.html',
121 extra_context=None):
122 """
123 Detail view of a user's profile.
124
125     If the user does not exist, ``Http404`` will be raised.
126
127 **Required arguments:**
128
129 ``username``
130 The username of the user whose profile is being displayed.
131
132 **Optional arguments:**
133
134 ``extra_context``
135 A dictionary of variables to add to the template context. Any
136 callable object in this dictionary will be called to produce
137 the end result which appears in the context.
138
139 ``public_profile_field``
140 The name of a ``BooleanField`` on the profile model; if the
141 value of that field on the user's profile is ``False``, the
142 ``profile`` variable in the template will be ``None``. Use
143 this feature to allow users to mark their profiles as not
144 being publicly viewable.
145
146 If this argument is not specified, it will be assumed that all
147 users' profiles are publicly viewable.
148
149 ``template_name``
150 The name of the template to use for displaying the profile. If
151 not specified, this will default to
152 :template:`profiles/profile_detail.html`.
153
154 **Context:**
155
156 ``profile``
157 The user's profile, or ``None`` if the user's profile is not
158 publicly viewable (see the description of
159 ``public_profile_field`` above).
160
161 **Template:**
162
163 ``template_name`` keyword argument or
164 :template:`profiles/profile_detail.html`.
165 """
166 user = get_object_or_404(User, username=username)
167 profile_obj = user.profile
168 if (public_profile_field is not None and
169 not getattr(profile_obj, public_profile_field)):
170 profile_obj = None
171
172 if extra_context is None:
173 extra_context = {}
174 context = {
175 key: value() if callable(value) else value
176 for key, value in extra_context.items()
177 }
178 context.update({'profile': profile_obj})
179 return render(request, template_name, context=context)
180
181
182 @login_required
183 def account_advertising(request):
184 success_url = reverse(account_advertising)
185 profile_obj = request.user.profile
186 if request.method == 'POST':
187 form = UserAdvertisingForm(
188 data=request.POST,
189 instance=profile_obj,
190 )
191 if form.is_valid():
192 form.save()
193 messages.info(request, _('Updated your advertising preferences'))
194 return HttpResponseRedirect(success_url)
195 else:
196 form = UserAdvertisingForm(instance=profile_obj)
197
198 return render(
199 request,
200 'profiles/private/advertising_profile.html',
201 context={
202 'form': form,
203 'profile': profile_obj,
204 'user': profile_obj.user,
205 },
206 )
207
[end of readthedocs/profiles/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py
--- a/readthedocs/profiles/views.py
+++ b/readthedocs/profiles/views.py
@@ -20,6 +20,7 @@
from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm
+@login_required
def edit_profile(
request, form_class, success_url=None,
template_name='profiles/private/edit_profile.html', extra_context=None):
@@ -93,9 +94,6 @@
return render(request, template_name, context=context)
-edit_profile = login_required(edit_profile)
-
-
@login_required()
def delete_account(request):
form = UserDeleteForm()
| {"golden_diff": "diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py\n--- a/readthedocs/profiles/views.py\n+++ b/readthedocs/profiles/views.py\n@@ -20,6 +20,7 @@\n from readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm\n \n \n+@login_required\n def edit_profile(\n request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html', extra_context=None):\n@@ -93,9 +94,6 @@\n return render(request, template_name, context=context)\n \n \n-edit_profile = login_required(edit_profile)\n-\n-\n @login_required()\n def delete_account(request):\n form = UserDeleteForm()\n", "issue": "login_required decorator is used at the end of the functions.\nIn the file readthedocs.org/readthedocs/profiles/views.py, `login_required` decorator is used at the end of some functions and for some other functions, it used in the form of `@login_required`, which reduces the readability of the code.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom readthedocs.core.forms import UserAdvertisingForm, UserDeleteForm\n\n\ndef edit_profile(\n request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html', extra_context=None):\n \"\"\"\n Edit the current user's profile.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and editing the user\n profile. This form class must operate similarly to a standard\n Django ``ModelForm`` in that it must accept an instance of the\n object to be edited as the keyword argument ``instance`` to\n its constructor, and it must implement a method named\n ``save()`` which will save the updates to the object.\n\n ``success_url``\n The URL to redirect to following a successful edit. If not\n specified, this will default to the URL of\n :view:`profiles.views.profile_detail` for the profile object\n being edited.\n\n ``template_name``\n The template to use when displaying the profile-editing\n form. 
If not specified, this will default to\n :template:`profiles/edit_profile.html`.\n\n **Context:**\n\n ``form``\n The form for editing the profile.\n\n ``profile``\n The user's current profile.\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/edit_profile.html`.\n \"\"\"\n profile_obj = request.user.profile\n if success_url is None:\n success_url = reverse(\n 'profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(\n data=request.POST, files=request.FILES, instance=profile_obj)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(instance=profile_obj)\n\n if extra_context is None:\n extra_context = {}\n context = {\n key: value() if callable(value) else value\n for key, value in extra_context.items()\n }\n context.update({\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n })\n return render(request, template_name, context=context)\n\n\nedit_profile = login_required(edit_profile)\n\n\n@login_required()\ndef delete_account(request):\n form = UserDeleteForm()\n template_name = 'profiles/private/delete_account.html'\n\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n # Delete the user permanently\n # It will also delete some projects where the user is the only owner\n request.user.delete()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n\n return redirect('homepage')\n\n return render(request, template_name, {'form': form})\n\n\ndef profile_detail(\n request, username, public_profile_field=None,\n template_name='profiles/public/profile_detail.html',\n extra_context=None):\n \"\"\"\n Detail view of a user's profile.\n\n If the user does not exists, ``Http404`` will be raised.\n\n **Required arguments:**\n\n ``username``\n The username of the user whose profile is being displayed.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``public_profile_field``\n The name of a ``BooleanField`` on the profile model; if the\n value of that field on the user's profile is ``False``, the\n ``profile`` variable in the template will be ``None``. Use\n this feature to allow users to mark their profiles as not\n being publicly viewable.\n\n If this argument is not specified, it will be assumed that all\n users' profiles are publicly viewable.\n\n ``template_name``\n The name of the template to use for displaying the profile. 
If\n not specified, this will default to\n :template:`profiles/profile_detail.html`.\n\n **Context:**\n\n ``profile``\n The user's profile, or ``None`` if the user's profile is not\n publicly viewable (see the description of\n ``public_profile_field`` above).\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/profile_detail.html`.\n \"\"\"\n user = get_object_or_404(User, username=username)\n profile_obj = user.profile\n if (public_profile_field is not None and\n not getattr(profile_obj, public_profile_field)):\n profile_obj = None\n\n if extra_context is None:\n extra_context = {}\n context = {\n key: value() if callable(value) else value\n for key, value in extra_context.items()\n }\n context.update({'profile': profile_obj})\n return render(request, template_name, context=context)\n\n\n@login_required\ndef account_advertising(request):\n success_url = reverse(account_advertising)\n profile_obj = request.user.profile\n if request.method == 'POST':\n form = UserAdvertisingForm(\n data=request.POST,\n instance=profile_obj,\n )\n if form.is_valid():\n form.save()\n messages.info(request, _('Updated your advertising preferences'))\n return HttpResponseRedirect(success_url)\n else:\n form = UserAdvertisingForm(instance=profile_obj)\n\n return render(\n request,\n 'profiles/private/advertising_profile.html',\n context={\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n },\n )\n", "path": "readthedocs/profiles/views.py"}]} | 2,478 | 151 |
gh_patches_debug_20222 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
proxyauth option breaks flow state access in addons
##### Steps to reproduce the problem:
1. Create `test.py` addon
```py
from mitmproxy import ctx
class AuthTest:
def response(self, flow):
ctx.log.info('Getting flow state')
flow.get_state()
ctx.log.info('Got flow state')
addons = [
AuthTest()
]
```
2. Start `mitmdump`, navigate to example.com and observe the log
```bash
mitmdump -s server/test.py
Loading script server/test.py
Proxy server listening at http://*:8080
...
[::1]:56410: GET http://example.com/
<< 200 OK 606b
Getting flow state
Got flow state
```
3. Re-start `mitmdump` with proxyauth applied and navigate to example.com again
```bash
mitmdump -s server/test.py --set proxyauth=foo:bar
Loading script server/test.py
Proxy server listening at http://*:8080
...
[::1]:56290: GET http://example.com/
<< 200 OK 606b
Getting flow state
Addon error: Traceback (most recent call last):
File "server/test.py", line 5, in response
flow.get_state()
File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/flow.py", line 94, in get_state
d = super().get_state()
File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 31, in get_state
state[attr] = get_state(cls, val)
File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 94, in get_state
return _process(typeinfo, val, False)
File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 77, in _process
for k, v in val.items()
File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 77, in <dictcomp>
for k, v in val.items()
File "/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py", line 81, in _process
assert isinstance(val, (int, str, bool, bytes))
AssertionError
```
##### Any other comments? What have you tried so far?
There's a FIXME [right near the code that's breaking](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/stateobject.py#L80), but I think it's a red herring. My best guess is that proxy auth adds something to the flow state that isn't in the list of allowed types `int, str, bool, bytes`—possibly a `dict`.
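To illustrate that guess: the failing `assert isinstance(val, (int, str, bool, bytes))` only tolerates primitive values, so any dict or tuple an auth hook might have stored on the flow would trip it. A minimal, mitmproxy-independent sketch (the stored value below is hypothetical):

```py
# hypothetical non-primitive value an auth addon might attach to flow metadata
stored = {"proxyauth": ("foo", "bar")}

try:
    assert isinstance(stored, (int, str, bool, bytes))
except AssertionError:
    print("non-primitive metadata value fails the stateobject check")
```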
##### System information
```bash
mitmdump --version
Mitmproxy: 4.0.1
Python: 3.6.5
OpenSSL: OpenSSL 1.0.2o 27 Mar 2018
Platform: Darwin-17.5.0-x86_64-i386-64bit
```
</issue>
<code>
[start of mitmproxy/stateobject.py]
1 import typing
2 from typing import Any # noqa
3 from typing import MutableMapping # noqa
4
5 from mitmproxy.coretypes import serializable
6 from mitmproxy.utils import typecheck
7
8
9 class StateObject(serializable.Serializable):
10 """
11 An object with serializable state.
12
13 State attributes can either be serializable types(str, tuple, bool, ...)
14 or StateObject instances themselves.
15 """
16
17 _stateobject_attributes: MutableMapping[str, Any] = None
18 """
19 An attribute-name -> class-or-type dict containing all attributes that
20 should be serialized. If the attribute is a class, it must implement the
21 Serializable protocol.
22 """
23
24 def get_state(self):
25 """
26 Retrieve object state.
27 """
28 state = {}
29 for attr, cls in self._stateobject_attributes.items():
30 val = getattr(self, attr)
31 state[attr] = get_state(cls, val)
32 return state
33
34 def set_state(self, state):
35 """
36 Load object state from data returned by a get_state call.
37 """
38 state = state.copy()
39 for attr, cls in self._stateobject_attributes.items():
40 val = state.pop(attr)
41 if val is None:
42 setattr(self, attr, val)
43 else:
44 curr = getattr(self, attr)
45 if hasattr(curr, "set_state"):
46 curr.set_state(val)
47 else:
48 setattr(self, attr, make_object(cls, val))
49 if state:
50 raise RuntimeWarning("Unexpected State in __setstate__: {}".format(state))
51
52
53 def _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any:
54 if val is None:
55 return None
56 elif make and hasattr(typeinfo, "from_state"):
57 return typeinfo.from_state(val)
58 elif not make and hasattr(val, "get_state"):
59 return val.get_state()
60
61 typename = str(typeinfo)
62
63 if typename.startswith("typing.List"):
64 T = typecheck.sequence_type(typeinfo)
65 return [_process(T, x, make) for x in val]
66 elif typename.startswith("typing.Tuple"):
67 Ts = typecheck.tuple_types(typeinfo)
68 if len(Ts) != len(val):
69 raise ValueError("Invalid data. Expected {}, got {}.".format(Ts, val))
70 return tuple(
71 _process(T, x, make) for T, x in zip(Ts, val)
72 )
73 elif typename.startswith("typing.Dict"):
74 k_cls, v_cls = typecheck.mapping_types(typeinfo)
75 return {
76 _process(k_cls, k, make): _process(v_cls, v, make)
77 for k, v in val.items()
78 }
79 elif typename.startswith("typing.Any"):
80 # FIXME: Remove this when we remove flow.metadata
81 assert isinstance(val, (int, str, bool, bytes))
82 return val
83 else:
84 return typeinfo(val)
85
86
87 def make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:
88 """Create an object based on the state given in val."""
89 return _process(typeinfo, val, True)
90
91
92 def get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:
93 """Get the state of the object given as val."""
94 return _process(typeinfo, val, False)
95
[end of mitmproxy/stateobject.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/stateobject.py b/mitmproxy/stateobject.py
--- a/mitmproxy/stateobject.py
+++ b/mitmproxy/stateobject.py
@@ -1,6 +1,7 @@
import typing
from typing import Any # noqa
from typing import MutableMapping # noqa
+import json
from mitmproxy.coretypes import serializable
from mitmproxy.utils import typecheck
@@ -77,8 +78,14 @@
for k, v in val.items()
}
elif typename.startswith("typing.Any"):
- # FIXME: Remove this when we remove flow.metadata
- assert isinstance(val, (int, str, bool, bytes))
+ # This requires a bit of explanation. We can't import our IO layer here,
+ # because it causes a circular import. Rather than restructuring the
+ # code for this, we use JSON serialization, which has similar primitive
+ # type restrictions as tnetstring, to check for conformance.
+ try:
+ json.dumps(val)
+ except TypeError:
+ raise ValueError(f"Data not serializable: {val}")
return val
else:
return typeinfo(val)
| {"golden_diff": "diff --git a/mitmproxy/stateobject.py b/mitmproxy/stateobject.py\n--- a/mitmproxy/stateobject.py\n+++ b/mitmproxy/stateobject.py\n@@ -1,6 +1,7 @@\n import typing\n from typing import Any # noqa\n from typing import MutableMapping # noqa\n+import json\n \n from mitmproxy.coretypes import serializable\n from mitmproxy.utils import typecheck\n@@ -77,8 +78,14 @@\n for k, v in val.items()\n }\n elif typename.startswith(\"typing.Any\"):\n- # FIXME: Remove this when we remove flow.metadata\n- assert isinstance(val, (int, str, bool, bytes))\n+ # This requires a bit of explanation. We can't import our IO layer here,\n+ # because it causes a circular import. Rather than restructuring the\n+ # code for this, we use JSON serialization, which has similar primitive\n+ # type restrictions as tnetstring, to check for conformance.\n+ try:\n+ json.dumps(val)\n+ except TypeError:\n+ raise ValueError(f\"Data not serializable: {val}\")\n return val\n else:\n return typeinfo(val)\n", "issue": "proxyauth option breaks flow state access in addons\n##### Steps to reproduce the problem:\r\n\r\n1. Create `test.py` addon\r\n```py\r\nfrom mitmproxy import ctx\r\nclass AuthTest:\r\n def response(self, flow):\r\n ctx.log.info('Getting flow state')\r\n flow.get_state()\r\n ctx.log.info('Got flow state')\r\n\r\naddons = [\r\n AuthTest()\r\n]\r\n```\r\n2. Start `mitmdump`, navigate to example.com and observe the log\r\n```bash\r\nmitmdump -s server/test.py\r\nLoading script server/test.py\r\nProxy server listening at http://*:8080\r\n...\r\n[::1]:56410: GET http://example.com/\r\n << 200 OK 606b\r\nGetting flow state\r\nGot flow state\r\n```\r\n3. Re-start `mitmdump` with proxyauth applied and navigate to example.com again\r\n```bash\r\nmitmdump -s server/test.py --set proxyauth=foo:bar\r\nLoading script server/test.py\r\nProxy server listening at http://*:8080\r\n...\r\n[::1]:56290: GET http://example.com/\r\n << 200 OK 606b\r\nGetting flow state\r\nAddon error: Traceback (most recent call last):\r\n File \"server/test.py\", line 5, in response\r\n flow.get_state()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/flow.py\", line 94, in get_state\r\n d = super().get_state()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 31, in get_state\r\n state[attr] = get_state(cls, val)\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 94, in get_state\r\n return _process(typeinfo, val, False)\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 77, in _process\r\n for k, v in val.items()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 77, in <dictcomp>\r\n for k, v in val.items()\r\n File \"/usr/local/Cellar/mitmproxy/4.0.1/libexec/lib/python3.6/site-packages/mitmproxy/stateobject.py\", line 81, in _process\r\n assert isinstance(val, (int, str, bool, bytes))\r\nAssertionError\r\n```\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nThere's a FIXME [right near the code that's breaking](https://github.com/mitmproxy/mitmproxy/blob/master/mitmproxy/stateobject.py#L80), but I think it's a red herring. 
My best guess is that proxy auth adds something to the flow state that isn't in the list of allowed types `int, str, bool, bytes`\u2014possibly a `dict`.\r\n\r\n##### System information\r\n\r\n```bash\r\nmitmdump --version\r\nMitmproxy: 4.0.1\r\nPython: 3.6.5\r\nOpenSSL: OpenSSL 1.0.2o 27 Mar 2018\r\nPlatform: Darwin-17.5.0-x86_64-i386-64bit\r\n```\r\n\r\n<!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) -->\r\n\n", "before_files": [{"content": "import typing\nfrom typing import Any # noqa\nfrom typing import MutableMapping # noqa\n\nfrom mitmproxy.coretypes import serializable\nfrom mitmproxy.utils import typecheck\n\n\nclass StateObject(serializable.Serializable):\n \"\"\"\n An object with serializable state.\n\n State attributes can either be serializable types(str, tuple, bool, ...)\n or StateObject instances themselves.\n \"\"\"\n\n _stateobject_attributes: MutableMapping[str, Any] = None\n \"\"\"\n An attribute-name -> class-or-type dict containing all attributes that\n should be serialized. If the attribute is a class, it must implement the\n Serializable protocol.\n \"\"\"\n\n def get_state(self):\n \"\"\"\n Retrieve object state.\n \"\"\"\n state = {}\n for attr, cls in self._stateobject_attributes.items():\n val = getattr(self, attr)\n state[attr] = get_state(cls, val)\n return state\n\n def set_state(self, state):\n \"\"\"\n Load object state from data returned by a get_state call.\n \"\"\"\n state = state.copy()\n for attr, cls in self._stateobject_attributes.items():\n val = state.pop(attr)\n if val is None:\n setattr(self, attr, val)\n else:\n curr = getattr(self, attr)\n if hasattr(curr, \"set_state\"):\n curr.set_state(val)\n else:\n setattr(self, attr, make_object(cls, val))\n if state:\n raise RuntimeWarning(\"Unexpected State in __setstate__: {}\".format(state))\n\n\ndef _process(typeinfo: typecheck.Type, val: typing.Any, make: bool) -> typing.Any:\n if val is None:\n return None\n elif make and hasattr(typeinfo, \"from_state\"):\n return typeinfo.from_state(val)\n elif not make and hasattr(val, \"get_state\"):\n return val.get_state()\n\n typename = str(typeinfo)\n\n if typename.startswith(\"typing.List\"):\n T = typecheck.sequence_type(typeinfo)\n return [_process(T, x, make) for x in val]\n elif typename.startswith(\"typing.Tuple\"):\n Ts = typecheck.tuple_types(typeinfo)\n if len(Ts) != len(val):\n raise ValueError(\"Invalid data. Expected {}, got {}.\".format(Ts, val))\n return tuple(\n _process(T, x, make) for T, x in zip(Ts, val)\n )\n elif typename.startswith(\"typing.Dict\"):\n k_cls, v_cls = typecheck.mapping_types(typeinfo)\n return {\n _process(k_cls, k, make): _process(v_cls, v, make)\n for k, v in val.items()\n }\n elif typename.startswith(\"typing.Any\"):\n # FIXME: Remove this when we remove flow.metadata\n assert isinstance(val, (int, str, bool, bytes))\n return val\n else:\n return typeinfo(val)\n\n\ndef make_object(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:\n \"\"\"Create an object based on the state given in val.\"\"\"\n return _process(typeinfo, val, True)\n\n\ndef get_state(typeinfo: typecheck.Type, val: typing.Any) -> typing.Any:\n \"\"\"Get the state of the object given as val.\"\"\"\n return _process(typeinfo, val, False)\n", "path": "mitmproxy/stateobject.py"}]} | 2,242 | 258 |
gh_patches_debug_50400 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2251 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Telegram Daily Is not Working on Freqtrade for coinbasepro
## Step 1: Have you searched for this issue before posting it?
Yes
If you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue).
If it hasn't been reported, please create a new issue.
## Step 2: Describe your environment
* Operating system: _Ubuntu___
* Python Version: ___3.6__ (`python -V`)
* CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`)
* Branch: Master | Develop
* Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format="%H" -n 1`)
## Step 3: Describe the problem:
Running in dry run mode, I am trying to check /daily on Telegram, and I get the following error:
```
Traceback (most recent call last):
File "/home/mtg/freqtrade/freqtrade/rpc/telegram.py", line 49, in wrapper
return command_handler(self, *args, **kwargs)
File "/home/mtg/freqtrade/freqtrade/rpc/telegram.py", line 247, in _daily
fiat_disp_cur
File "/home/mtg/freqtrade/freqtrade/rpc/rpc.py", line 188, in _rpc_daily_profit
for key, value in profit_days.items()
File "/home/mtg/freqtrade/freqtrade/rpc/rpc.py", line 188, in <listcomp>
for key, value in profit_days.items()
ValueError: Unknown format code 'f' for object of type 'str'
```
*Explain the problem you have encountered*
### Steps to reproduce:
1. _connect to coinbase pro____
2. _run strategy____
3. ___on telegram, hit /daily__
### Observed Results:
* What happened? output error
* What did you expect to happen? Result of daily profit/loss
### Relevant code exceptions or logs:
```
// paste your log here
```
</issue>
<code>
[start of freqtrade/rpc/fiat_convert.py]
1 """
2 Module that define classes to convert Crypto-currency to FIAT
3 e.g BTC to USD
4 """
5
6 import logging
7 import time
8 from typing import Dict, List
9
10 from coinmarketcap import Market
11
12 from freqtrade.constants import SUPPORTED_FIAT
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class CryptoFiat(object):
19 """
20 Object to describe what is the price of Crypto-currency in a FIAT
21 """
22 # Constants
23 CACHE_DURATION = 6 * 60 * 60 # 6 hours
24
25 def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None:
26 """
27 Create an object that will contains the price for a crypto-currency in fiat
28 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
29 :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
30 :param price: Price in FIAT
31 """
32
33 # Public attributes
34 self.crypto_symbol = None
35 self.fiat_symbol = None
36 self.price = 0.0
37
38 # Private attributes
39 self._expiration = 0.0
40
41 self.crypto_symbol = crypto_symbol.upper()
42 self.fiat_symbol = fiat_symbol.upper()
43 self.set_price(price=price)
44
45 def set_price(self, price: float) -> None:
46 """
47 Set the price of the Crypto-currency in FIAT and set the expiration time
48 :param price: Price of the current Crypto currency in the fiat
49 :return: None
50 """
51 self.price = price
52 self._expiration = time.time() + self.CACHE_DURATION
53
54 def is_expired(self) -> bool:
55 """
56 Return if the current price is still valid or needs to be refreshed
57 :return: bool, true the price is expired and needs to be refreshed, false the price is
58 still valid
59 """
60 return self._expiration - time.time() <= 0
61
62
63 class CryptoToFiatConverter(object):
64 """
65 Main class to initiate Crypto to FIAT.
66 This object contains a list of pair Crypto, FIAT
67 This object is also a Singleton
68 """
69 __instance = None
70 _coinmarketcap: Market = None
71
72 _cryptomap: Dict = {}
73
74 def __new__(cls):
75 if CryptoToFiatConverter.__instance is None:
76 CryptoToFiatConverter.__instance = object.__new__(cls)
77 try:
78 CryptoToFiatConverter._coinmarketcap = Market()
79 except BaseException:
80 CryptoToFiatConverter._coinmarketcap = None
81 return CryptoToFiatConverter.__instance
82
83 def __init__(self) -> None:
84 self._pairs: List[CryptoFiat] = []
85 self._load_cryptomap()
86
87 def _load_cryptomap(self) -> None:
88 try:
89 coinlistings = self._coinmarketcap.listings()
90 self._cryptomap = dict(map(lambda coin: (coin["symbol"], str(coin["id"])),
91 coinlistings["data"]))
92 except (BaseException) as exception:
93 logger.error(
94 "Could not load FIAT Cryptocurrency map for the following problem: %s",
95 type(exception).__name__
96 )
97
98 def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float:
99 """
100 Convert an amount of crypto-currency to fiat
101 :param crypto_amount: amount of crypto-currency to convert
102 :param crypto_symbol: crypto-currency used
103 :param fiat_symbol: fiat to convert to
104 :return: float, value in fiat of the crypto-currency amount
105 """
106 if crypto_symbol == fiat_symbol:
107 return crypto_amount
108 price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)
109 return float(crypto_amount) * float(price)
110
111 def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float:
112 """
113 Return the price of the Crypto-currency in Fiat
114 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
115 :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
116 :return: Price in FIAT
117 """
118 crypto_symbol = crypto_symbol.upper()
119 fiat_symbol = fiat_symbol.upper()
120
121 # Check if the fiat convertion you want is supported
122 if not self._is_supported_fiat(fiat=fiat_symbol):
123 raise ValueError(f'The fiat {fiat_symbol} is not supported.')
124
125 # Get the pair that interest us and return the price in fiat
126 for pair in self._pairs:
127 if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol:
128 # If the price is expired we refresh it, avoid to call the API all the time
129 if pair.is_expired():
130 pair.set_price(
131 price=self._find_price(
132 crypto_symbol=pair.crypto_symbol,
133 fiat_symbol=pair.fiat_symbol
134 )
135 )
136
137 # return the last price we have for this pair
138 return pair.price
139
140 # The pair does not exist, so we create it and return the price
141 return self._add_pair(
142 crypto_symbol=crypto_symbol,
143 fiat_symbol=fiat_symbol,
144 price=self._find_price(
145 crypto_symbol=crypto_symbol,
146 fiat_symbol=fiat_symbol
147 )
148 )
149
150 def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float:
151 """
152 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
153 :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
154 :return: price in FIAT
155 """
156 self._pairs.append(
157 CryptoFiat(
158 crypto_symbol=crypto_symbol,
159 fiat_symbol=fiat_symbol,
160 price=price
161 )
162 )
163
164 return price
165
166 def _is_supported_fiat(self, fiat: str) -> bool:
167 """
168 Check if the FIAT your want to convert to is supported
169 :param fiat: FIAT to check (e.g USD)
170 :return: bool, True supported, False not supported
171 """
172
173 fiat = fiat.upper()
174
175 return fiat in SUPPORTED_FIAT
176
177 def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float:
178 """
179 Call CoinMarketCap API to retrieve the price in the FIAT
180 :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)
181 :param fiat_symbol: FIAT currency you want to convert to (e.g USD)
182 :return: float, price of the crypto-currency in Fiat
183 """
184 # Check if the fiat convertion you want is supported
185 if not self._is_supported_fiat(fiat=fiat_symbol):
186 raise ValueError(f'The fiat {fiat_symbol} is not supported.')
187
188 # No need to convert if both crypto and fiat are the same
189 if crypto_symbol == fiat_symbol:
190 return 1.0
191
192 if crypto_symbol not in self._cryptomap:
193 # return 0 for unsupported stake currencies (fiat-convert should not break the bot)
194 logger.warning("unsupported crypto-symbol %s - returning 0.0", crypto_symbol)
195 return 0.0
196
197 try:
198 return float(
199 self._coinmarketcap.ticker(
200 currency=self._cryptomap[crypto_symbol],
201 convert=fiat_symbol
202 )['data']['quotes'][fiat_symbol.upper()]['price']
203 )
204 except BaseException as exception:
205 logger.error("Error in _find_price: %s", exception)
206 return 0.0
207
[end of freqtrade/rpc/fiat_convert.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/freqtrade/rpc/fiat_convert.py b/freqtrade/rpc/fiat_convert.py
--- a/freqtrade/rpc/fiat_convert.py
+++ b/freqtrade/rpc/fiat_convert.py
@@ -104,7 +104,7 @@
:return: float, value in fiat of the crypto-currency amount
"""
if crypto_symbol == fiat_symbol:
- return crypto_amount
+ return float(crypto_amount)
price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)
return float(crypto_amount) * float(price)
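For context, a minimal standalone sketch (plain Python, not freqtrade code) of why this one-line coercion matters: when the crypto and fiat symbols match, the old `convert_amount` returned the amount as-is, so a non-float (for example a string) could reach the float formatting in `_rpc_daily_profit` and raise exactly the `ValueError` from the traceback. The `"0.05"` amount and the `'{:.8f}'` spec below are invented for illustration; the real format spec in `rpc.py` is not shown in this record.

```
# Standalone sketch (not freqtrade code): reproduces the ValueError from the
# issue and shows why coercing to float in convert_amount() avoids it.

def convert_amount(crypto_amount, crypto_symbol, fiat_symbol, price=1.0):
    """Simplified stand-in for CryptoToFiatConverter.convert_amount()."""
    if crypto_symbol == fiat_symbol:
        return float(crypto_amount)   # the fix: always return a float
    return float(crypto_amount) * float(price)


amount = "0.05"  # an amount that happens to arrive as a string

# Without the float() coercion the value stays a str, and formatting it
# with a float format code fails exactly as in the traceback:
try:
    print("{:.8f}".format(amount))
except ValueError as exc:
    print("ValueError:", exc)  # Unknown format code 'f' for object of type 'str'

# With the coercion the daily-profit style formatting works:
value = convert_amount(amount, "BTC", "BTC")
print("{:.8f}".format(value))  # 0.05000000
```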
| {"golden_diff": "diff --git a/freqtrade/rpc/fiat_convert.py b/freqtrade/rpc/fiat_convert.py\n--- a/freqtrade/rpc/fiat_convert.py\n+++ b/freqtrade/rpc/fiat_convert.py\n@@ -104,7 +104,7 @@\n :return: float, value in fiat of the crypto-currency amount\n \"\"\"\n if crypto_symbol == fiat_symbol:\n- return crypto_amount\n+ return float(crypto_amount)\n price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)\n return float(crypto_amount) * float(price)\n", "issue": "Telegram Daily Is not Working on Freqtrade for coinbasepro\n## Step 1: Have you search for this issue before posting it?\r\nYes\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). \r\nIf it hasn't been reported, please create a new issue.\r\n\r\n## Step 2: Describe your environment\r\n\r\n * Operating system: _Ubuntu___\r\n * Python Version: ___3.6__ (`python -V`)\r\n * CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`)\r\n * Branch: Master | Develop\r\n * Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format=\"%H\" -n 1`)\r\n \r\n## Step 3: Describe the problem:\r\nRunning dry run mode, i am trying to check the /daily on telegram, i get the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 49, in wrapper\r\n return command_handler(self, *args, **kwargs)\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 247, in _daily\r\n fiat_disp_cur\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in _rpc_daily_profit\r\n for key, value in profit_days.items()\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in <listcomp>\r\n for key, value in profit_days.items()\r\nValueError: Unknown format code 'f' for object of type 'str'\r\n```\r\n*Explain the problem you have encountered*\r\n\r\n### Steps to reproduce:\r\n\r\n 1. _connect to coinbase pro____\r\n 2. _run strategy____\r\n 3. ___on telegram, hit /daily__\r\n \r\n### Observed Results:\r\n\r\n * What happened? output error\r\n * What did you expect to happen? Result of daily profit/loss\r\n\r\n### Relevant code exceptions or logs:\r\n\r\n ```\r\n // paste your log here\r\n ```\r\n\nTelegram Daily Is not Working on Freqtrade for coinbasepro\n## Step 1: Have you search for this issue before posting it?\r\nYes\r\nIf you have discovered a bug in the bot, please [search our issue tracker](https://github.com/freqtrade/freqtrade/issues?q=is%3Aissue). 
\r\nIf it hasn't been reported, please create a new issue.\r\n\r\n## Step 2: Describe your environment\r\n\r\n * Operating system: _Ubuntu___\r\n * Python Version: ___3.6__ (`python -V`)\r\n * CCXT version: __ccxt==1.17.542___ (`pip freeze | grep ccxt`)\r\n * Branch: Master | Develop\r\n * Last Commit ID: a0a9c9c21cdec0d50eb1597264414ed0e4aeb98a(`git log --format=\"%H\" -n 1`)\r\n \r\n## Step 3: Describe the problem:\r\nRunning dry run mode, i am trying to check the /daily on telegram, i get the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 49, in wrapper\r\n return command_handler(self, *args, **kwargs)\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/telegram.py\", line 247, in _daily\r\n fiat_disp_cur\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in _rpc_daily_profit\r\n for key, value in profit_days.items()\r\n File \"/home/mtg/freqtrade/freqtrade/rpc/rpc.py\", line 188, in <listcomp>\r\n for key, value in profit_days.items()\r\nValueError: Unknown format code 'f' for object of type 'str'\r\n```\r\n*Explain the problem you have encountered*\r\n\r\n### Steps to reproduce:\r\n\r\n 1. _connect to coinbase pro____\r\n 2. _run strategy____\r\n 3. ___on telegram, hit /daily__\r\n \r\n### Observed Results:\r\n\r\n * What happened? output error\r\n * What did you expect to happen? Result of daily profit/loss\r\n\r\n### Relevant code exceptions or logs:\r\n\r\n ```\r\n // paste your log here\r\n ```\r\n\n", "before_files": [{"content": "\"\"\"\nModule that define classes to convert Crypto-currency to FIAT\ne.g BTC to USD\n\"\"\"\n\nimport logging\nimport time\nfrom typing import Dict, List\n\nfrom coinmarketcap import Market\n\nfrom freqtrade.constants import SUPPORTED_FIAT\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CryptoFiat(object):\n \"\"\"\n Object to describe what is the price of Crypto-currency in a FIAT\n \"\"\"\n # Constants\n CACHE_DURATION = 6 * 60 * 60 # 6 hours\n\n def __init__(self, crypto_symbol: str, fiat_symbol: str, price: float) -> None:\n \"\"\"\n Create an object that will contains the price for a crypto-currency in fiat\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :param price: Price in FIAT\n \"\"\"\n\n # Public attributes\n self.crypto_symbol = None\n self.fiat_symbol = None\n self.price = 0.0\n\n # Private attributes\n self._expiration = 0.0\n\n self.crypto_symbol = crypto_symbol.upper()\n self.fiat_symbol = fiat_symbol.upper()\n self.set_price(price=price)\n\n def set_price(self, price: float) -> None:\n \"\"\"\n Set the price of the Crypto-currency in FIAT and set the expiration time\n :param price: Price of the current Crypto currency in the fiat\n :return: None\n \"\"\"\n self.price = price\n self._expiration = time.time() + self.CACHE_DURATION\n\n def is_expired(self) -> bool:\n \"\"\"\n Return if the current price is still valid or needs to be refreshed\n :return: bool, true the price is expired and needs to be refreshed, false the price is\n still valid\n \"\"\"\n return self._expiration - time.time() <= 0\n\n\nclass CryptoToFiatConverter(object):\n \"\"\"\n Main class to initiate Crypto to FIAT.\n This object contains a list of pair Crypto, FIAT\n This object is also a Singleton\n \"\"\"\n __instance = None\n _coinmarketcap: Market = None\n\n _cryptomap: Dict = {}\n\n def __new__(cls):\n if CryptoToFiatConverter.__instance is None:\n 
CryptoToFiatConverter.__instance = object.__new__(cls)\n try:\n CryptoToFiatConverter._coinmarketcap = Market()\n except BaseException:\n CryptoToFiatConverter._coinmarketcap = None\n return CryptoToFiatConverter.__instance\n\n def __init__(self) -> None:\n self._pairs: List[CryptoFiat] = []\n self._load_cryptomap()\n\n def _load_cryptomap(self) -> None:\n try:\n coinlistings = self._coinmarketcap.listings()\n self._cryptomap = dict(map(lambda coin: (coin[\"symbol\"], str(coin[\"id\"])),\n coinlistings[\"data\"]))\n except (BaseException) as exception:\n logger.error(\n \"Could not load FIAT Cryptocurrency map for the following problem: %s\",\n type(exception).__name__\n )\n\n def convert_amount(self, crypto_amount: float, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Convert an amount of crypto-currency to fiat\n :param crypto_amount: amount of crypto-currency to convert\n :param crypto_symbol: crypto-currency used\n :param fiat_symbol: fiat to convert to\n :return: float, value in fiat of the crypto-currency amount\n \"\"\"\n if crypto_symbol == fiat_symbol:\n return crypto_amount\n price = self.get_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol)\n return float(crypto_amount) * float(price)\n\n def get_price(self, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Return the price of the Crypto-currency in Fiat\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: Price in FIAT\n \"\"\"\n crypto_symbol = crypto_symbol.upper()\n fiat_symbol = fiat_symbol.upper()\n\n # Check if the fiat convertion you want is supported\n if not self._is_supported_fiat(fiat=fiat_symbol):\n raise ValueError(f'The fiat {fiat_symbol} is not supported.')\n\n # Get the pair that interest us and return the price in fiat\n for pair in self._pairs:\n if pair.crypto_symbol == crypto_symbol and pair.fiat_symbol == fiat_symbol:\n # If the price is expired we refresh it, avoid to call the API all the time\n if pair.is_expired():\n pair.set_price(\n price=self._find_price(\n crypto_symbol=pair.crypto_symbol,\n fiat_symbol=pair.fiat_symbol\n )\n )\n\n # return the last price we have for this pair\n return pair.price\n\n # The pair does not exist, so we create it and return the price\n return self._add_pair(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol,\n price=self._find_price(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol\n )\n )\n\n def _add_pair(self, crypto_symbol: str, fiat_symbol: str, price: float) -> float:\n \"\"\"\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: price in FIAT\n \"\"\"\n self._pairs.append(\n CryptoFiat(\n crypto_symbol=crypto_symbol,\n fiat_symbol=fiat_symbol,\n price=price\n )\n )\n\n return price\n\n def _is_supported_fiat(self, fiat: str) -> bool:\n \"\"\"\n Check if the FIAT your want to convert to is supported\n :param fiat: FIAT to check (e.g USD)\n :return: bool, True supported, False not supported\n \"\"\"\n\n fiat = fiat.upper()\n\n return fiat in SUPPORTED_FIAT\n\n def _find_price(self, crypto_symbol: str, fiat_symbol: str) -> float:\n \"\"\"\n Call CoinMarketCap API to retrieve the price in the FIAT\n :param crypto_symbol: Crypto-currency you want to convert (e.g BTC)\n :param fiat_symbol: FIAT currency you want to convert to (e.g USD)\n :return: float, price of the crypto-currency in Fiat\n \"\"\"\n # Check if the fiat convertion you 
want is supported\n if not self._is_supported_fiat(fiat=fiat_symbol):\n raise ValueError(f'The fiat {fiat_symbol} is not supported.')\n\n # No need to convert if both crypto and fiat are the same\n if crypto_symbol == fiat_symbol:\n return 1.0\n\n if crypto_symbol not in self._cryptomap:\n # return 0 for unsupported stake currencies (fiat-convert should not break the bot)\n logger.warning(\"unsupported crypto-symbol %s - returning 0.0\", crypto_symbol)\n return 0.0\n\n try:\n return float(\n self._coinmarketcap.ticker(\n currency=self._cryptomap[crypto_symbol],\n convert=fiat_symbol\n )['data']['quotes'][fiat_symbol.upper()]['price']\n )\n except BaseException as exception:\n logger.error(\"Error in _find_price: %s\", exception)\n return 0.0\n", "path": "freqtrade/rpc/fiat_convert.py"}]} | 3,756 | 133 |
gh_patches_debug_24208 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2236 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
changing group of plan in django does not work
I cannot change the group of a plan in the Django admin. The dropdown is there, but changes won't be saved.
</issue>
<code>
[start of meinberlin/apps/plans/admin.py]
1 from django.contrib import admin
2
3 from . import models
4
5
6 @admin.register(models.Plan)
7 class OfflineEventAdmin(admin.ModelAdmin):
8 list_display = ('__str__', 'organisation', 'created')
9
10 date_hierarchy = 'created'
11 search_fields = ('title',)
12 readonly_fields = ('creator', )
13
[end of meinberlin/apps/plans/admin.py]
[start of meinberlin/apps/plans/forms.py]
1 from django import forms
2 from django.conf import settings
3 from django.utils.translation import ugettext_lazy as _
4
5 from adhocracy4.dashboard.components.forms import ProjectDashboardForm
6 from adhocracy4.maps import widgets as maps_widgets
7 from adhocracy4.projects import models as project_models
8
9 from . import models
10
11
12 class PlanForm(forms.ModelForm):
13
14 class Meta:
15 model = models.Plan
16 fields = [
17 'title',
18 'description_image',
19 'contact',
20 'point',
21 'point_label',
22 'district',
23 'cost',
24 'description',
25 'topics',
26 'status',
27 'participation']
28 widgets = {
29 'point': maps_widgets.MapChoosePointWidget(
30 polygon=settings.BERLIN_POLYGON)
31 }
32 error_messages = {
33 'point': {
34 'required': _('Please locate the plan on the map.')
35 }
36 }
37
38 def __init__(self, *args, **kwargs):
39 super().__init__(*args, **kwargs)
40 self.fields['district'].empty_label = _('City wide')
41
42
43 class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):
44
45 widget = forms.Select
46
47 def clean(self, value):
48 if not value:
49 return super().clean([])
50 return super().clean([value])
51
52
53 class ProjectPlansDashboardForm(ProjectDashboardForm):
54 plans = CustomMultipleChoiceField(queryset=None,
55 label=_('Plans'))
56
57 class Meta:
58 model = project_models.Project
59 fields = ['plans']
60 required = False
61
62 def save(self, commit=False):
63 plans = self.cleaned_data['plans']
64 self.instance.plans.set(plans)
65
66 def __init__(self, *args, **kwargs):
67 super().__init__(*args, **kwargs)
68 self.initial['plans'] = self.instance.plans.all()
69 self.fields['plans'].required = False
70 self.fields['plans'].empty_label = '----------'
71 self.fields['plans'].queryset = \
72 self.instance.organisation.plan_set.all()
73
[end of meinberlin/apps/plans/forms.py]
[start of meinberlin/apps/plans/models.py]
1 from ckeditor.fields import RichTextField
2 from django.conf import settings
3 from django.contrib.auth.models import Group
4 from django.core.urlresolvers import reverse
5 from django.db import models
6 from django.utils.functional import cached_property
7 from django.utils.translation import ugettext_lazy as _
8
9 from adhocracy4 import transforms
10 from adhocracy4.administrative_districts.models import AdministrativeDistrict
11 from adhocracy4.images.fields import ConfiguredImageField
12 from adhocracy4.maps import fields as map_fields
13 from adhocracy4.models.base import UserGeneratedContentModel
14 from adhocracy4.phases.models import Phase
15 from adhocracy4.projects import models as project_models
16 from adhocracy4.projects.fields import TopicField
17
18
19 class Plan(UserGeneratedContentModel):
20
21 PARTICIPATION_YES = 0
22 PARTICIPATION_NO = 1
23 PARTICIPATION_UNDECIDED = 2
24 PARTICIPATION_CHOICES = (
25 (PARTICIPATION_YES, _('with')),
26 (PARTICIPATION_NO, _('without')),
27 (PARTICIPATION_UNDECIDED, _('undecided')),
28 )
29
30 STATUS_ONGOING = 0
31 STATUS_DONE = 1
32
33 STATUS_CHOICES = (
34 (STATUS_ONGOING, _('running')),
35 (STATUS_DONE, _('done'))
36 )
37
38 title = models.CharField(max_length=120, verbose_name=_('Title'))
39 organisation = models.ForeignKey(
40 settings.A4_ORGANISATIONS_MODEL,
41 on_delete=models.CASCADE,
42 verbose_name=_('Organisation'))
43 projects = models.ManyToManyField(
44 project_models.Project,
45 related_name='plans',
46 blank=True
47 )
48 group = models.ForeignKey(
49 Group,
50 on_delete=models.SET_NULL,
51 blank=True,
52 null=True)
53 point = map_fields.PointField(
54 verbose_name=_('Where can the plan be located on a map?'),
55 help_text=_('Click inside the marked area '
56 'or type in an address to set the marker. A set '
57 'marker can be dragged when pressed.'))
58 point_label = models.CharField(
59 default='',
60 max_length=255,
61 verbose_name=_('Label of the location'),
62 help_text=_('The label of the location is '
63 'displayed in the detail view of the plan'),
64 )
65 district = models.ForeignKey(
66 AdministrativeDistrict,
67 verbose_name=_('District'),
68 null=True,
69 blank=True
70 )
71 contact = models.TextField(max_length=1000, verbose_name=_('Contact'))
72 cost = models.CharField(
73 blank=True,
74 null=True,
75 max_length=255,
76 verbose_name=_('Cost'))
77 description = RichTextField(verbose_name=_('Description'))
78 description_image = ConfiguredImageField(
79 'plan_image',
80 verbose_name=_('Add image'),
81 upload_to='plan/description_image',
82 blank=True,
83 help_prefix=_(
84 'Visualize your plan.'
85 ),
86 )
87 topics = TopicField(
88 verbose_name=_('Topics'),
89 help_text=_('Add topics to your project.'))
90 status = models.SmallIntegerField(
91 choices=STATUS_CHOICES,
92 verbose_name=_('Status')
93 )
94 participation = models.SmallIntegerField(
95 choices=PARTICIPATION_CHOICES,
96 verbose_name=_('Participation')
97 )
98
99 class Meta:
100 ordering = ['-created']
101
102 @property
103 def reference_number(self):
104 return '{:d}-{:05d}'.format(self.created.year, self.pk)
105
106 @property
107 def administrative_district(self):
108 return self.district
109
110 @property
111 def topic_names(self):
112 if hasattr(settings, 'A4_PROJECT_TOPICS'):
113 choices = dict(settings.A4_PROJECT_TOPICS)
114 return [choices[topic] for topic in self.topics]
115 return []
116
117 @cached_property
118 def published_projects(self):
119 return self.projects.filter(
120 is_draft=False, is_public=True, is_archived=False)
121
122 @cached_property
123 def participation_string(self):
124 project_list = self.published_projects.values_list('id', flat=True)
125 phases_in_plan = Phase.objects\
126 .select_related('module__project')\
127 .filter(module__project_id__in=project_list)\
128 .order_by('-start_date')
129
130 if phases_in_plan.active_phases():
131 return _('running')
132
133 future_phases_with_start_date = phases_in_plan.future_phases()\
134 .exclude(start_date__isnull=True)
135
136 if future_phases_with_start_date:
137 future_phase = future_phases_with_start_date.first()
138 return _('starts at {}')\
139 .format(future_phase.start_date.strftime('%d.%m.%Y'))
140
141 def __str__(self):
142 return self.title
143
144 def get_absolute_url(self):
145 return reverse('meinberlin_plans:plan-detail',
146 kwargs=dict(pk='{:05d}'.format(self.pk),
147 year=self.created.year))
148
149 def save(self, *args, **kwargs):
150 self.description = transforms.clean_html_field(self.description)
151 self.group = self._get_group(self.creator, self.organisation)
152 super().save(*args, **kwargs)
153
154 def _get_group(self, user, organisation):
155 user_groups = user.groups.all()
156 org_groups = organisation.groups.all()
157 shared_groups = user_groups & org_groups
158 return shared_groups.distinct().first()
159
160 def is_group_member(self, user):
161 if self.group:
162 return user.groups.filter(id=self.group.id).exists()
163 return False
164
[end of meinberlin/apps/plans/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/plans/admin.py b/meinberlin/apps/plans/admin.py
--- a/meinberlin/apps/plans/admin.py
+++ b/meinberlin/apps/plans/admin.py
@@ -4,7 +4,7 @@
@admin.register(models.Plan)
-class OfflineEventAdmin(admin.ModelAdmin):
+class PlanAdmin(admin.ModelAdmin):
list_display = ('__str__', 'organisation', 'created')
date_hierarchy = 'created'
diff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py
--- a/meinberlin/apps/plans/forms.py
+++ b/meinberlin/apps/plans/forms.py
@@ -39,6 +39,14 @@
super().__init__(*args, **kwargs)
self.fields['district'].empty_label = _('City wide')
+ def save(self, commit=True):
+ plan = super().save(commit=False)
+ group = plan._get_group(plan.creator, plan.organisation)
+ plan.group = group
+ if commit:
+ plan.save()
+ return plan
+
class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):
diff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py
--- a/meinberlin/apps/plans/models.py
+++ b/meinberlin/apps/plans/models.py
@@ -148,7 +148,6 @@
def save(self, *args, **kwargs):
self.description = transforms.clean_html_field(self.description)
- self.group = self._get_group(self.creator, self.organisation)
super().save(*args, **kwargs)
def _get_group(self, user, organisation):
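The patch works because the group assignment moves out of `Plan.save()` (which ran on every save, including edits made in the Django admin, and overwrote the selected group) and into the creation form. Below is a plain-Python sketch of that behaviour change; the classes are stand-ins that do not import Django or meinberlin, and the group names are invented.

```
# Plain-Python sketch (no Django): why assigning the group inside save()
# clobbered the value chosen in the admin, and how moving the assignment
# into the form fixes it.

class PlanBefore:
    def __init__(self, creator_group, group=None):
        self.creator_group = creator_group
        self.group = group

    def save(self):
        # old behaviour: recompute the group on every save,
        # discarding whatever an admin selected in the form
        self.group = self.creator_group


class PlanAfter(PlanBefore):
    def save(self):
        # new behaviour: save() leaves self.group alone;
        # only the creation form sets it (see PlanForm.save in the patch)
        pass


plan = PlanBefore(creator_group="initiators", group="moderators")
plan.save()
print(plan.group)  # initiators  -> the admin change was overwritten

plan = PlanAfter(creator_group="initiators", group="moderators")
plan.save()
print(plan.group)  # moderators  -> the admin change persists
```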
| {"golden_diff": "diff --git a/meinberlin/apps/plans/admin.py b/meinberlin/apps/plans/admin.py\n--- a/meinberlin/apps/plans/admin.py\n+++ b/meinberlin/apps/plans/admin.py\n@@ -4,7 +4,7 @@\n \n \n @admin.register(models.Plan)\n-class OfflineEventAdmin(admin.ModelAdmin):\n+class PlanAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'organisation', 'created')\n \n date_hierarchy = 'created'\ndiff --git a/meinberlin/apps/plans/forms.py b/meinberlin/apps/plans/forms.py\n--- a/meinberlin/apps/plans/forms.py\n+++ b/meinberlin/apps/plans/forms.py\n@@ -39,6 +39,14 @@\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n \n+ def save(self, commit=True):\n+ plan = super().save(commit=False)\n+ group = plan._get_group(plan.creator, plan.organisation)\n+ plan.group = group\n+ if commit:\n+ plan.save()\n+ return plan\n+\n \n class CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n \ndiff --git a/meinberlin/apps/plans/models.py b/meinberlin/apps/plans/models.py\n--- a/meinberlin/apps/plans/models.py\n+++ b/meinberlin/apps/plans/models.py\n@@ -148,7 +148,6 @@\n \n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n- self.group = self._get_group(self.creator, self.organisation)\n super().save(*args, **kwargs)\n \n def _get_group(self, user, organisation):\n", "issue": "changing group of plan in django does not work\nI cannot change the group of a plan in django. Dropdown is there but changes won't be saved.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom . import models\n\n\[email protected](models.Plan)\nclass OfflineEventAdmin(admin.ModelAdmin):\n list_display = ('__str__', 'organisation', 'created')\n\n date_hierarchy = 'created'\n search_fields = ('title',)\n readonly_fields = ('creator', )\n", "path": "meinberlin/apps/plans/admin.py"}, {"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.components.forms import ProjectDashboardForm\nfrom adhocracy4.maps import widgets as maps_widgets\nfrom adhocracy4.projects import models as project_models\n\nfrom . 
import models\n\n\nclass PlanForm(forms.ModelForm):\n\n class Meta:\n model = models.Plan\n fields = [\n 'title',\n 'description_image',\n 'contact',\n 'point',\n 'point_label',\n 'district',\n 'cost',\n 'description',\n 'topics',\n 'status',\n 'participation']\n widgets = {\n 'point': maps_widgets.MapChoosePointWidget(\n polygon=settings.BERLIN_POLYGON)\n }\n error_messages = {\n 'point': {\n 'required': _('Please locate the plan on the map.')\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['district'].empty_label = _('City wide')\n\n\nclass CustomMultipleChoiceField(forms.ModelMultipleChoiceField):\n\n widget = forms.Select\n\n def clean(self, value):\n if not value:\n return super().clean([])\n return super().clean([value])\n\n\nclass ProjectPlansDashboardForm(ProjectDashboardForm):\n plans = CustomMultipleChoiceField(queryset=None,\n label=_('Plans'))\n\n class Meta:\n model = project_models.Project\n fields = ['plans']\n required = False\n\n def save(self, commit=False):\n plans = self.cleaned_data['plans']\n self.instance.plans.set(plans)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.initial['plans'] = self.instance.plans.all()\n self.fields['plans'].required = False\n self.fields['plans'].empty_label = '----------'\n self.fields['plans'].queryset = \\\n self.instance.organisation.plan_set.all()\n", "path": "meinberlin/apps/plans/forms.py"}, {"content": "from ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.administrative_districts.models import AdministrativeDistrict\nfrom adhocracy4.images.fields import ConfiguredImageField\nfrom adhocracy4.maps import fields as map_fields\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.phases.models import Phase\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.fields import TopicField\n\n\nclass Plan(UserGeneratedContentModel):\n\n PARTICIPATION_YES = 0\n PARTICIPATION_NO = 1\n PARTICIPATION_UNDECIDED = 2\n PARTICIPATION_CHOICES = (\n (PARTICIPATION_YES, _('with')),\n (PARTICIPATION_NO, _('without')),\n (PARTICIPATION_UNDECIDED, _('undecided')),\n )\n\n STATUS_ONGOING = 0\n STATUS_DONE = 1\n\n STATUS_CHOICES = (\n (STATUS_ONGOING, _('running')),\n (STATUS_DONE, _('done'))\n )\n\n title = models.CharField(max_length=120, verbose_name=_('Title'))\n organisation = models.ForeignKey(\n settings.A4_ORGANISATIONS_MODEL,\n on_delete=models.CASCADE,\n verbose_name=_('Organisation'))\n projects = models.ManyToManyField(\n project_models.Project,\n related_name='plans',\n blank=True\n )\n group = models.ForeignKey(\n Group,\n on_delete=models.SET_NULL,\n blank=True,\n null=True)\n point = map_fields.PointField(\n verbose_name=_('Where can the plan be located on a map?'),\n help_text=_('Click inside the marked area '\n 'or type in an address to set the marker. 
A set '\n 'marker can be dragged when pressed.'))\n point_label = models.CharField(\n default='',\n max_length=255,\n verbose_name=_('Label of the location'),\n help_text=_('The label of the location is '\n 'displayed in the detail view of the plan'),\n )\n district = models.ForeignKey(\n AdministrativeDistrict,\n verbose_name=_('District'),\n null=True,\n blank=True\n )\n contact = models.TextField(max_length=1000, verbose_name=_('Contact'))\n cost = models.CharField(\n blank=True,\n null=True,\n max_length=255,\n verbose_name=_('Cost'))\n description = RichTextField(verbose_name=_('Description'))\n description_image = ConfiguredImageField(\n 'plan_image',\n verbose_name=_('Add image'),\n upload_to='plan/description_image',\n blank=True,\n help_prefix=_(\n 'Visualize your plan.'\n ),\n )\n topics = TopicField(\n verbose_name=_('Topics'),\n help_text=_('Add topics to your project.'))\n status = models.SmallIntegerField(\n choices=STATUS_CHOICES,\n verbose_name=_('Status')\n )\n participation = models.SmallIntegerField(\n choices=PARTICIPATION_CHOICES,\n verbose_name=_('Participation')\n )\n\n class Meta:\n ordering = ['-created']\n\n @property\n def reference_number(self):\n return '{:d}-{:05d}'.format(self.created.year, self.pk)\n\n @property\n def administrative_district(self):\n return self.district\n\n @property\n def topic_names(self):\n if hasattr(settings, 'A4_PROJECT_TOPICS'):\n choices = dict(settings.A4_PROJECT_TOPICS)\n return [choices[topic] for topic in self.topics]\n return []\n\n @cached_property\n def published_projects(self):\n return self.projects.filter(\n is_draft=False, is_public=True, is_archived=False)\n\n @cached_property\n def participation_string(self):\n project_list = self.published_projects.values_list('id', flat=True)\n phases_in_plan = Phase.objects\\\n .select_related('module__project')\\\n .filter(module__project_id__in=project_list)\\\n .order_by('-start_date')\n\n if phases_in_plan.active_phases():\n return _('running')\n\n future_phases_with_start_date = phases_in_plan.future_phases()\\\n .exclude(start_date__isnull=True)\n\n if future_phases_with_start_date:\n future_phase = future_phases_with_start_date.first()\n return _('starts at {}')\\\n .format(future_phase.start_date.strftime('%d.%m.%Y'))\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('meinberlin_plans:plan-detail',\n kwargs=dict(pk='{:05d}'.format(self.pk),\n year=self.created.year))\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(self.description)\n self.group = self._get_group(self.creator, self.organisation)\n super().save(*args, **kwargs)\n\n def _get_group(self, user, organisation):\n user_groups = user.groups.all()\n org_groups = organisation.groups.all()\n shared_groups = user_groups & org_groups\n return shared_groups.distinct().first()\n\n def is_group_member(self, user):\n if self.group:\n return user.groups.filter(id=self.group.id).exists()\n return False\n", "path": "meinberlin/apps/plans/models.py"}]} | 2,810 | 380 |
gh_patches_debug_34435 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-923 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add UUID objects support for uuid()
- [x] Add parameter `as_object`
</issue>
<code>
[start of mimesis/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 """Mimesis is a Python library, which helps generate fake data.
4
5 Copyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)
6 Repository: https://github.com/lk-geimfari/mimesis
7 Email: <[email protected]>
8 """
9
10 from mimesis.providers import (
11 Address,
12 BaseDataProvider,
13 BaseProvider,
14 Business,
15 Choice,
16 Clothing,
17 Code,
18 Cryptographic,
19 Datetime,
20 Development,
21 File,
22 Food,
23 Generic,
24 Hardware,
25 Internet,
26 Numbers,
27 Path,
28 Payment,
29 Person,
30 Science,
31 Structure,
32 Text,
33 Transport,
34 UnitSystem,
35 )
36
37 __all__ = [
38 'Address',
39 'BaseDataProvider',
40 'BaseProvider',
41 'Business',
42 'Clothing',
43 'Code',
44 'Choice',
45 'Datetime',
46 'Development',
47 'File',
48 'Food',
49 'Hardware',
50 'Internet',
51 'Numbers',
52 'Path',
53 'Payment',
54 'Person',
55 'Science',
56 'Structure',
57 'Text',
58 'Transport',
59 'UnitSystem',
60 'Cryptographic',
61
62 # Has all:
63 'Generic',
64
65 # Meta:
66 '__version__',
67 '__title__',
68 '__description__',
69 '__url__',
70 '__author__',
71 '__author_email__',
72 '__license__',
73 ]
74
75 __version__ = '4.1.0'
76 __title__ = 'mimesis'
77 __description__ = 'Mimesis: fake data generator.'
78 __url__ = 'https://github.com/lk-geimfari/mimesis'
79 __author__ = 'Isaak Uchakaev (Likid Geimfari)'
80 __author_email__ = '[email protected]'
81 __license__ = 'MIT License'
82
[end of mimesis/__init__.py]
[start of mimesis/providers/cryptographic.py]
1 # -*- coding: utf-8 -*-
2
3 """Cryptographic data provider."""
4
5 import hashlib
6 import secrets
7 from typing import Optional, Union
8 from uuid import UUID
9
10 from mimesis.enums import Algorithm
11 from mimesis.providers.base import BaseProvider
12 from mimesis.providers.text import Text
13
14 __all__ = ['Cryptographic']
15
16
17 class Cryptographic(BaseProvider):
18 """Class that provides cryptographic data."""
19
20 def __init__(self, *args, **kwargs) -> None:
21 """Initialize attributes.
22
23 :param seed: Seed.
24 """
25 super().__init__(*args, **kwargs)
26 self.__words = Text('en')._data.get('words', {})
27
28 class Meta:
29 """Class for metadata."""
30
31 name = 'cryptographic'
32
33 def uuid(self, version: int = None,
34 as_object: bool = False) -> Union[UUID, str]:
35 """Generate random UUID.
36
37 This method returns string by default,
38 but yoy can make it return uuid.UUID object using
39 parameter **as_object**
40
41 :param as_object: Returns uuid.UUID object instead of string.
42 :param version: UUID version.
43 :return: UUID.
44 """
45 bits = self.random.getrandbits(128)
46 uuid_obj = UUID(int=bits, version=version)
47
48 if not as_object:
49 return str(uuid_obj)
50
51 return uuid_obj
52
53 def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003
54 """Generate random hash.
55
56 To change hashing algorithm, pass parameter ``algorithm``
57 with needed value of the enum object :class:`~mimesis.enums.Algorithm`
58
59 :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.
60 :return: Hash.
61 :raises NonEnumerableError: When algorithm is unsupported.
62 """
63 key = self._validate_enum(algorithm, Algorithm)
64
65 if hasattr(hashlib, key):
66 fn = getattr(hashlib, key)
67 return fn(self.uuid().encode()).hexdigest()
68
69 @staticmethod
70 def token_bytes(entropy: int = 32) -> bytes:
71 """Generate byte string containing ``entropy`` bytes.
72
73 The string has ``entropy`` random bytes, each byte
74 converted to two hex digits.
75
76 .. warning:: Seed is not applicable to this method,
77 because of its cryptographic-safe nature.
78
79 :param entropy: Number of bytes (default: 32).
80 :return: Random bytes.
81 """
82 return secrets.token_bytes(entropy)
83
84 @staticmethod
85 def token_hex(entropy: int = 32) -> str:
86 """Return a random text string, in hexadecimal.
87
88 The string has *entropy* random bytes, each byte converted to two
89 hex digits. If *entropy* is ``None`` or not supplied, a reasonable
90 default is used.
91
92 .. warning:: Seed is not applicable to this method,
93 because of its cryptographic-safe nature.
94
95 :param entropy: Number of bytes (default: 32).
96 :return: Token.
97 """
98 return secrets.token_hex(entropy)
99
100 @staticmethod
101 def token_urlsafe(entropy: int = 32):
102 """Return a random URL-safe text string, in Base64 encoding.
103
104 The string has *entropy* random bytes. If *entropy* is ``None``
105 or not supplied, a reasonable default is used.
106
107 .. warning:: Seed is not applicable to this method,
108 because of its cryptographic-safe nature.
109
110 :param entropy: Number of bytes (default: 32).
111 :return: URL-safe token.
112 """
113 return secrets.token_urlsafe(entropy)
114
115 def mnemonic_phrase(self, length: int = 12,
116 separator: Optional[str] = None) -> str:
117 """Generate pseudo mnemonic phrase.
118
119 Please, keep in mind that this method generates
120 crypto-insecure values.
121
122 :param separator: Separator of phrases (Default is " ").
123 :param length: Number of words.
124 :return: Mnemonic phrase.
125 """
126 if not separator:
127 separator = ' '
128
129 words = self.__words['normal']
130 words_generator = (self.random.choice(words) for _ in range(length))
131 return '{}'.format(separator).join(words_generator)
132
[end of mimesis/providers/cryptographic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mimesis/__init__.py b/mimesis/__init__.py
--- a/mimesis/__init__.py
+++ b/mimesis/__init__.py
@@ -72,7 +72,7 @@
'__license__',
]
-__version__ = '4.1.0'
+__version__ = '4.1.1'
__title__ = 'mimesis'
__description__ = 'Mimesis: fake data generator.'
__url__ = 'https://github.com/lk-geimfari/mimesis'
diff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py
--- a/mimesis/providers/cryptographic.py
+++ b/mimesis/providers/cryptographic.py
@@ -5,7 +5,7 @@
import hashlib
import secrets
from typing import Optional, Union
-from uuid import UUID
+from uuid import UUID, uuid4
from mimesis.enums import Algorithm
from mimesis.providers.base import BaseProvider
@@ -30,25 +30,23 @@
name = 'cryptographic'
- def uuid(self, version: int = None,
- as_object: bool = False) -> Union[UUID, str]:
- """Generate random UUID.
+ @staticmethod
+ def uuid(as_object: bool = False) -> Union[UUID, str]:
+ """Generate random UUID4.
This method returns string by default,
- but yoy can make it return uuid.UUID object using
+ but you can make it return uuid.UUID object using
parameter **as_object**
- :param as_object: Returns uuid.UUID object instead of string.
- :param version: UUID version.
+ :param as_object: Returns uuid.UUID.
:return: UUID.
"""
- bits = self.random.getrandbits(128)
- uuid_obj = UUID(int=bits, version=version)
+ _uuid = uuid4()
if not as_object:
- return str(uuid_obj)
+ return str(_uuid)
- return uuid_obj
+ return _uuid
def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003
"""Generate random hash.
@@ -64,7 +62,7 @@
if hasattr(hashlib, key):
fn = getattr(hashlib, key)
- return fn(self.uuid().encode()).hexdigest()
+ return fn(self.uuid().encode()).hexdigest() # type: ignore
@staticmethod
def token_bytes(entropy: int = 32) -> bytes:
| {"golden_diff": "diff --git a/mimesis/__init__.py b/mimesis/__init__.py\n--- a/mimesis/__init__.py\n+++ b/mimesis/__init__.py\n@@ -72,7 +72,7 @@\n '__license__',\n ]\n \n-__version__ = '4.1.0'\n+__version__ = '4.1.1'\n __title__ = 'mimesis'\n __description__ = 'Mimesis: fake data generator.'\n __url__ = 'https://github.com/lk-geimfari/mimesis'\ndiff --git a/mimesis/providers/cryptographic.py b/mimesis/providers/cryptographic.py\n--- a/mimesis/providers/cryptographic.py\n+++ b/mimesis/providers/cryptographic.py\n@@ -5,7 +5,7 @@\n import hashlib\n import secrets\n from typing import Optional, Union\n-from uuid import UUID\n+from uuid import UUID, uuid4\n \n from mimesis.enums import Algorithm\n from mimesis.providers.base import BaseProvider\n@@ -30,25 +30,23 @@\n \n name = 'cryptographic'\n \n- def uuid(self, version: int = None,\n- as_object: bool = False) -> Union[UUID, str]:\n- \"\"\"Generate random UUID.\n+ @staticmethod\n+ def uuid(as_object: bool = False) -> Union[UUID, str]:\n+ \"\"\"Generate random UUID4.\n \n This method returns string by default,\n- but yoy can make it return uuid.UUID object using\n+ but you can make it return uuid.UUID object using\n parameter **as_object**\n \n- :param as_object: Returns uuid.UUID object instead of string.\n- :param version: UUID version.\n+ :param as_object: Returns uuid.UUID.\n :return: UUID.\n \"\"\"\n- bits = self.random.getrandbits(128)\n- uuid_obj = UUID(int=bits, version=version)\n+ _uuid = uuid4()\n \n if not as_object:\n- return str(uuid_obj)\n+ return str(_uuid)\n \n- return uuid_obj\n+ return _uuid\n \n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n@@ -64,7 +62,7 @@\n \n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n- return fn(self.uuid().encode()).hexdigest()\n+ return fn(self.uuid().encode()).hexdigest() # type: ignore\n \n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n", "issue": "Add UUID objects support for uuid()\n- [x] Add parameter `as_object` \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Mimesis is a Python library, which helps generate fake data.\n\nCopyright (c) 2016 - 2020 Isaak Uchakaev (Likid Geimfari)\nRepository: https://github.com/lk-geimfari/mimesis\nEmail: <[email protected]>\n\"\"\"\n\nfrom mimesis.providers import (\n Address,\n BaseDataProvider,\n BaseProvider,\n Business,\n Choice,\n Clothing,\n Code,\n Cryptographic,\n Datetime,\n Development,\n File,\n Food,\n Generic,\n Hardware,\n Internet,\n Numbers,\n Path,\n Payment,\n Person,\n Science,\n Structure,\n Text,\n Transport,\n UnitSystem,\n)\n\n__all__ = [\n 'Address',\n 'BaseDataProvider',\n 'BaseProvider',\n 'Business',\n 'Clothing',\n 'Code',\n 'Choice',\n 'Datetime',\n 'Development',\n 'File',\n 'Food',\n 'Hardware',\n 'Internet',\n 'Numbers',\n 'Path',\n 'Payment',\n 'Person',\n 'Science',\n 'Structure',\n 'Text',\n 'Transport',\n 'UnitSystem',\n 'Cryptographic',\n\n # Has all:\n 'Generic',\n\n # Meta:\n '__version__',\n '__title__',\n '__description__',\n '__url__',\n '__author__',\n '__author_email__',\n '__license__',\n]\n\n__version__ = '4.1.0'\n__title__ = 'mimesis'\n__description__ = 'Mimesis: fake data generator.'\n__url__ = 'https://github.com/lk-geimfari/mimesis'\n__author__ = 'Isaak Uchakaev (Likid Geimfari)'\n__author_email__ = '[email protected]'\n__license__ = 'MIT License'\n", "path": "mimesis/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Cryptographic data provider.\"\"\"\n\nimport hashlib\nimport secrets\nfrom typing import 
Optional, Union\nfrom uuid import UUID\n\nfrom mimesis.enums import Algorithm\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.text import Text\n\n__all__ = ['Cryptographic']\n\n\nclass Cryptographic(BaseProvider):\n \"\"\"Class that provides cryptographic data.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initialize attributes.\n\n :param seed: Seed.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__words = Text('en')._data.get('words', {})\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'cryptographic'\n\n def uuid(self, version: int = None,\n as_object: bool = False) -> Union[UUID, str]:\n \"\"\"Generate random UUID.\n\n This method returns string by default,\n but yoy can make it return uuid.UUID object using\n parameter **as_object**\n\n :param as_object: Returns uuid.UUID object instead of string.\n :param version: UUID version.\n :return: UUID.\n \"\"\"\n bits = self.random.getrandbits(128)\n uuid_obj = UUID(int=bits, version=version)\n\n if not as_object:\n return str(uuid_obj)\n\n return uuid_obj\n\n def hash(self, algorithm: Algorithm = None) -> str: # noqa: A003\n \"\"\"Generate random hash.\n\n To change hashing algorithm, pass parameter ``algorithm``\n with needed value of the enum object :class:`~mimesis.enums.Algorithm`\n\n :param algorithm: Enum object :class:`~mimesis.enums.Algorithm`.\n :return: Hash.\n :raises NonEnumerableError: When algorithm is unsupported.\n \"\"\"\n key = self._validate_enum(algorithm, Algorithm)\n\n if hasattr(hashlib, key):\n fn = getattr(hashlib, key)\n return fn(self.uuid().encode()).hexdigest()\n\n @staticmethod\n def token_bytes(entropy: int = 32) -> bytes:\n \"\"\"Generate byte string containing ``entropy`` bytes.\n\n The string has ``entropy`` random bytes, each byte\n converted to two hex digits.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Random bytes.\n \"\"\"\n return secrets.token_bytes(entropy)\n\n @staticmethod\n def token_hex(entropy: int = 32) -> str:\n \"\"\"Return a random text string, in hexadecimal.\n\n The string has *entropy* random bytes, each byte converted to two\n hex digits. If *entropy* is ``None`` or not supplied, a reasonable\n default is used.\n\n .. warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: Token.\n \"\"\"\n return secrets.token_hex(entropy)\n\n @staticmethod\n def token_urlsafe(entropy: int = 32):\n \"\"\"Return a random URL-safe text string, in Base64 encoding.\n\n The string has *entropy* random bytes. If *entropy* is ``None``\n or not supplied, a reasonable default is used.\n\n .. 
warning:: Seed is not applicable to this method,\n because of its cryptographic-safe nature.\n\n :param entropy: Number of bytes (default: 32).\n :return: URL-safe token.\n \"\"\"\n return secrets.token_urlsafe(entropy)\n\n def mnemonic_phrase(self, length: int = 12,\n separator: Optional[str] = None) -> str:\n \"\"\"Generate pseudo mnemonic phrase.\n\n Please, keep in mind that this method generates\n crypto-insecure values.\n\n :param separator: Separator of phrases (Default is \" \").\n :param length: Number of words.\n :return: Mnemonic phrase.\n \"\"\"\n if not separator:\n separator = ' '\n\n words = self.__words['normal']\n words_generator = (self.random.choice(words) for _ in range(length))\n return '{}'.format(separator).join(words_generator)\n", "path": "mimesis/providers/cryptographic.py"}]} | 2,373 | 576 |
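The patched method essentially wraps `uuid.uuid4()`. A standalone sketch of the resulting behaviour is shown below; it does not import mimesis, and the free function only mirrors what the static method does after the patch.

```
# Standalone sketch of the behaviour the patch gives Cryptographic.uuid():
# return a string by default, or a uuid.UUID instance when as_object=True.
from uuid import UUID, uuid4


def uuid(as_object: bool = False):
    _uuid = uuid4()
    if not as_object:
        return str(_uuid)
    return _uuid


print(type(uuid()))                # <class 'str'>
print(type(uuid(as_object=True)))  # <class 'uuid.UUID'>
assert isinstance(uuid(as_object=True), UUID)
```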
gh_patches_debug_506 | rasdani/github-patches | git_diff | liqd__a4-opin-765 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add test for email sending after moderator was added to project
</issue>
<code>
[start of euth/projects/emails.py]
1 from euth.contrib.emails import OpinEmail
2
3
4 class ModeratorAddedEmail(OpinEmail):
5 template_name = 'notify_new_moderator'
6
7 def get_recipients(self):
8 self.kwargs['user']
9
[end of euth/projects/emails.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/projects/emails.py b/euth/projects/emails.py
--- a/euth/projects/emails.py
+++ b/euth/projects/emails.py
@@ -4,5 +4,5 @@
class ModeratorAddedEmail(OpinEmail):
template_name = 'notify_new_moderator'
- def get_recipients(self):
- self.kwargs['user']
+ def get_receivers(self):
+ return [self.kwargs['user']]
| {"golden_diff": "diff --git a/euth/projects/emails.py b/euth/projects/emails.py\n--- a/euth/projects/emails.py\n+++ b/euth/projects/emails.py\n@@ -4,5 +4,5 @@\n class ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n \n- def get_recipients(self):\n- self.kwargs['user']\n+ def get_receivers(self):\n+ return [self.kwargs['user']]\n", "issue": "add test for email sending after moderator was added to project\n\n", "before_files": [{"content": "from euth.contrib.emails import OpinEmail\n\n\nclass ModeratorAddedEmail(OpinEmail):\n template_name = 'notify_new_moderator'\n\n def get_recipients(self):\n self.kwargs['user']\n", "path": "euth/projects/emails.py"}]} | 601 | 99 |
gh_patches_debug_14938 | rasdani/github-patches | git_diff | espnet__espnet-2230 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Could not find a version that satisfies the requirement cupy-cuda102==6.0.0
```
ERROR: Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 (from versions: 7.2.0, 7.3.0, 7.4.0, 7.5.0, 7.6.0, 8.0.0a1, 8.0.0b1, 8.0.0b2, 8.0.0b3, 8.0.0b4)
```
cupy-cuda102 doesn't have a version 6.0.0, so please upgrade ```CHAINER_VERSION := 6.0.0``` to ```CHAINER_VERSION := 7.4.0```.
</issue>
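Worth noting: the patch that was eventually applied (reproduced in full further down) does not bump the Chainer/cupy pin as the issue suggests; instead it makes the cupy check in `tools/check_install.py` skippable on its own. A condensed, standalone view of that change:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    "--no-cuda", action="store_true", default=False, help="Disable cuda-related tests"
)
# the fix adds a separate switch so the pinned cupy check can be skipped
parser.add_argument(
    "--no-cupy", action="store_true", default=False, help="Disable cupy test"
)
args = parser.parse_args()

library_list = []
if args.no_cuda:
    args.no_cupy = True  # no CUDA implies skipping the cupy check as well

if not args.no_cupy:  # was: if not args.no_cuda
    library_list.append(("cupy", ("6.0.0")))
```

This sidesteps the missing `cupy-cuda102==6.0.0` wheel for users who simply want the install check to pass without cupy.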
<code>
[start of tools/check_install.py]
1 #!/usr/bin/env python3
2
3 """Script to check whether the installation is done correctly."""
4
5 # Copyright 2018 Nagoya University (Tomoki Hayashi)
6 # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
7
8 import argparse
9 import importlib
10 import logging
11 import sys
12
13 from distutils.version import LooseVersion
14
15
16 # NOTE: add the libraries which are not included in setup.py
17 MANUALLY_INSTALLED_LIBRARIES = [
18 ("espnet", None),
19 ("kaldiio", None),
20 ("matplotlib", None),
21 ("chainer", ("6.0.0")),
22 ("chainer_ctc", None),
23 ("warprnnt_pytorch", ("0.1")),
24 ]
25
26 # NOTE: list all torch versions which are compatible with espnet
27 COMPATIBLE_TORCH_VERSIONS = (
28 "0.4.1",
29 "1.0.0",
30 "1.0.1",
31 "1.0.1.post2",
32 "1.1.0",
33 "1.2.0",
34 "1.3.0",
35 "1.3.1",
36 "1.4.0",
37 "1.5.0",
38 "1.5.1",
39 )
40
41
42 def main(args):
43 """Check the installation."""
44 parser = argparse.ArgumentParser()
45 parser.add_argument(
46 "--no-cuda",
47 action="store_true",
48 default=False,
49 help="Disable cuda-related tests",
50 )
51 args = parser.parse_args(args)
52
53 logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
54 logging.info(f"python version = {sys.version}")
55
56 library_list = []
57
58 if not args.no_cuda:
59 library_list.append(("cupy", ("6.0.0")))
60
61 # check torch installation at first
62 try:
63 import torch
64
65 logging.info(f"pytorch version = {torch.__version__}")
66 if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:
67 logging.warning(f"{torch.__version__} is not tested. please be careful.")
68 except ImportError:
69 logging.warning("torch is not installed.")
70 logging.warning("please try to setup again and then re-run this script.")
71 sys.exit(1)
72
73 # warpctc can be installed only for pytorch < 1.2
74 if LooseVersion(torch.__version__) < LooseVersion("1.2.0"):
75 library_list.append(("warpctc_pytorch", ("0.1.1", "0.1.3")))
76
77 library_list.extend(MANUALLY_INSTALLED_LIBRARIES)
78
79 # check library availableness
80 logging.info("library availableness check start.")
81 logging.info("# libraries to be checked = %d" % len(library_list))
82 is_correct_installed_list = []
83 for idx, (name, version) in enumerate(library_list):
84 try:
85 importlib.import_module(name)
86 logging.info("--> %s is installed." % name)
87 is_correct_installed_list.append(True)
88 except ImportError:
89 logging.warning("--> %s is not installed." % name)
90 is_correct_installed_list.append(False)
91 logging.info("library availableness check done.")
92 logging.info(
93 "%d / %d libraries are correctly installed."
94 % (sum(is_correct_installed_list), len(library_list))
95 )
96
97 if len(library_list) != sum(is_correct_installed_list):
98 logging.warning("please try to setup again and then re-run this script.")
99 sys.exit(1)
100
101 # check library version
102 num_version_specified = sum(
103 [True if v is not None else False for n, v in library_list]
104 )
105 logging.info("library version check start.")
106 logging.info("# libraries to be checked = %d" % num_version_specified)
107 is_correct_version_list = []
108 for idx, (name, version) in enumerate(library_list):
109 if version is not None:
110 # Note: temp. fix for warprnnt_pytorch
111 # not found version with importlib
112 if name == "warprnnt_pytorch":
113 import pkg_resources
114
115 vers = pkg_resources.get_distribution(name).version
116 else:
117 vers = importlib.import_module(name).__version__
118 if vers is not None:
119 is_correct = vers in version
120 if is_correct:
121 logging.info("--> %s version is matched (%s)." % (name, vers))
122 is_correct_version_list.append(True)
123 else:
124 logging.warning(
125 "--> %s version is incorrect (%s is not in %s)."
126 % (name, vers, str(version))
127 )
128 is_correct_version_list.append(False)
129 else:
130 logging.info(
131 "--> %s has no version info, but version is specified." % name
132 )
133 logging.info("--> maybe it is better to reinstall the latest version.")
134 is_correct_version_list.append(False)
135 logging.info("library version check done.")
136 logging.info(
137 "%d / %d libraries are correct version."
138 % (sum(is_correct_version_list), num_version_specified)
139 )
140
141 if sum(is_correct_version_list) != num_version_specified:
142 logging.info("please try to setup again and then re-run this script.")
143 sys.exit(1)
144
145 # check cuda availableness
146 if args.no_cuda:
147 logging.info("cuda availableness check skipped.")
148 else:
149 logging.info("cuda availableness check start.")
150 import chainer
151 import torch
152
153 try:
154 assert torch.cuda.is_available()
155 logging.info("--> cuda is available in torch.")
156 except AssertionError:
157 logging.warning("--> it seems that cuda is not available in torch.")
158 try:
159 assert torch.backends.cudnn.is_available()
160 logging.info("--> cudnn is available in torch.")
161 except AssertionError:
162 logging.warning("--> it seems that cudnn is not available in torch.")
163 try:
164 assert chainer.backends.cuda.available
165 logging.info("--> cuda is available in chainer.")
166 except AssertionError:
167 logging.warning("--> it seems that cuda is not available in chainer.")
168 try:
169 assert chainer.backends.cuda.cudnn_enabled
170 logging.info("--> cudnn is available in chainer.")
171 except AssertionError:
172 logging.warning("--> it seems that cudnn is not available in chainer.")
173 try:
174 from cupy.cuda import nccl # NOQA
175
176 logging.info("--> nccl is installed.")
177 except ImportError:
178 logging.warning(
179 "--> it seems that nccl is not installed. multi-gpu is not enabled."
180 )
181 logging.warning(
182 "--> if you want to use multi-gpu, please install it and then re-setup."
183 )
184 try:
185 assert torch.cuda.device_count() > 1
186 logging.info(
187 f"--> multi-gpu is available (#gpus={torch.cuda.device_count()})."
188 )
189 except AssertionError:
190 logging.warning("--> it seems that only single gpu is available.")
191 logging.warning("--> maybe your machine has only one gpu.")
192 logging.info("cuda availableness check done.")
193
194 logging.info("installation check is done.")
195
196
197 if __name__ == "__main__":
198 main(sys.argv[1:])
199
[end of tools/check_install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/check_install.py b/tools/check_install.py
--- a/tools/check_install.py
+++ b/tools/check_install.py
@@ -48,14 +48,22 @@
default=False,
help="Disable cuda-related tests",
)
+ parser.add_argument(
+ "--no-cupy",
+ action="store_true",
+ default=False,
+ help="Disable cupy test",
+ )
args = parser.parse_args(args)
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logging.info(f"python version = {sys.version}")
library_list = []
+ if args.no_cuda:
+ args.no_cupy = True
- if not args.no_cuda:
+ if not args.no_cupy:
library_list.append(("cupy", ("6.0.0")))
# check torch installation at first
| {"golden_diff": "diff --git a/tools/check_install.py b/tools/check_install.py\n--- a/tools/check_install.py\n+++ b/tools/check_install.py\n@@ -48,14 +48,22 @@\n default=False,\n help=\"Disable cuda-related tests\",\n )\n+ parser.add_argument(\n+ \"--no-cupy\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Disable cupy test\",\n+ )\n args = parser.parse_args(args)\n \n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n \n library_list = []\n+ if args.no_cuda:\n+ args.no_cupy = True\n \n- if not args.no_cuda:\n+ if not args.no_cupy:\n library_list.append((\"cupy\", (\"6.0.0\")))\n \n # check torch installation at first\n", "issue": "Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 \n```\r\nERROR: Could not find a version that satisfies the requirement cupy-cuda102==6.0.0 (from versions: 7.2.0, 7.3.0, 7.4.0, 7.5.0, 7.6.0, 8.0.0a1, 8.0.0b1, 8.0.0b2, 8.0.0b3, 8.0.0b4)\r\n``` \r\ncupy-cuda102 doesn't have version 6.0.0 , upgrade ```CHAINER_VERSION := 6.0.0``` to ```CHAINER_VERSION := 7.4.0```, please\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Script to check whether the installation is done correctly.\"\"\"\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport importlib\nimport logging\nimport sys\n\nfrom distutils.version import LooseVersion\n\n\n# NOTE: add the libraries which are not included in setup.py\nMANUALLY_INSTALLED_LIBRARIES = [\n (\"espnet\", None),\n (\"kaldiio\", None),\n (\"matplotlib\", None),\n (\"chainer\", (\"6.0.0\")),\n (\"chainer_ctc\", None),\n (\"warprnnt_pytorch\", (\"0.1\")),\n]\n\n# NOTE: list all torch versions which are compatible with espnet\nCOMPATIBLE_TORCH_VERSIONS = (\n \"0.4.1\",\n \"1.0.0\",\n \"1.0.1\",\n \"1.0.1.post2\",\n \"1.1.0\",\n \"1.2.0\",\n \"1.3.0\",\n \"1.3.1\",\n \"1.4.0\",\n \"1.5.0\",\n \"1.5.1\",\n)\n\n\ndef main(args):\n \"\"\"Check the installation.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--no-cuda\",\n action=\"store_true\",\n default=False,\n help=\"Disable cuda-related tests\",\n )\n args = parser.parse_args(args)\n\n logging.basicConfig(level=logging.INFO, format=\"%(levelname)s: %(message)s\")\n logging.info(f\"python version = {sys.version}\")\n\n library_list = []\n\n if not args.no_cuda:\n library_list.append((\"cupy\", (\"6.0.0\")))\n\n # check torch installation at first\n try:\n import torch\n\n logging.info(f\"pytorch version = {torch.__version__}\")\n if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:\n logging.warning(f\"{torch.__version__} is not tested. 
please be careful.\")\n except ImportError:\n logging.warning(\"torch is not installed.\")\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # warpctc can be installed only for pytorch < 1.2\n if LooseVersion(torch.__version__) < LooseVersion(\"1.2.0\"):\n library_list.append((\"warpctc_pytorch\", (\"0.1.1\", \"0.1.3\")))\n\n library_list.extend(MANUALLY_INSTALLED_LIBRARIES)\n\n # check library availableness\n logging.info(\"library availableness check start.\")\n logging.info(\"# libraries to be checked = %d\" % len(library_list))\n is_correct_installed_list = []\n for idx, (name, version) in enumerate(library_list):\n try:\n importlib.import_module(name)\n logging.info(\"--> %s is installed.\" % name)\n is_correct_installed_list.append(True)\n except ImportError:\n logging.warning(\"--> %s is not installed.\" % name)\n is_correct_installed_list.append(False)\n logging.info(\"library availableness check done.\")\n logging.info(\n \"%d / %d libraries are correctly installed.\"\n % (sum(is_correct_installed_list), len(library_list))\n )\n\n if len(library_list) != sum(is_correct_installed_list):\n logging.warning(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check library version\n num_version_specified = sum(\n [True if v is not None else False for n, v in library_list]\n )\n logging.info(\"library version check start.\")\n logging.info(\"# libraries to be checked = %d\" % num_version_specified)\n is_correct_version_list = []\n for idx, (name, version) in enumerate(library_list):\n if version is not None:\n # Note: temp. fix for warprnnt_pytorch\n # not found version with importlib\n if name == \"warprnnt_pytorch\":\n import pkg_resources\n\n vers = pkg_resources.get_distribution(name).version\n else:\n vers = importlib.import_module(name).__version__\n if vers is not None:\n is_correct = vers in version\n if is_correct:\n logging.info(\"--> %s version is matched (%s).\" % (name, vers))\n is_correct_version_list.append(True)\n else:\n logging.warning(\n \"--> %s version is incorrect (%s is not in %s).\"\n % (name, vers, str(version))\n )\n is_correct_version_list.append(False)\n else:\n logging.info(\n \"--> %s has no version info, but version is specified.\" % name\n )\n logging.info(\"--> maybe it is better to reinstall the latest version.\")\n is_correct_version_list.append(False)\n logging.info(\"library version check done.\")\n logging.info(\n \"%d / %d libraries are correct version.\"\n % (sum(is_correct_version_list), num_version_specified)\n )\n\n if sum(is_correct_version_list) != num_version_specified:\n logging.info(\"please try to setup again and then re-run this script.\")\n sys.exit(1)\n\n # check cuda availableness\n if args.no_cuda:\n logging.info(\"cuda availableness check skipped.\")\n else:\n logging.info(\"cuda availableness check start.\")\n import chainer\n import torch\n\n try:\n assert torch.cuda.is_available()\n logging.info(\"--> cuda is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in torch.\")\n try:\n assert torch.backends.cudnn.is_available()\n logging.info(\"--> cudnn is available in torch.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in torch.\")\n try:\n assert chainer.backends.cuda.available\n logging.info(\"--> cuda is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cuda is not available in chainer.\")\n try:\n assert 
chainer.backends.cuda.cudnn_enabled\n logging.info(\"--> cudnn is available in chainer.\")\n except AssertionError:\n logging.warning(\"--> it seems that cudnn is not available in chainer.\")\n try:\n from cupy.cuda import nccl # NOQA\n\n logging.info(\"--> nccl is installed.\")\n except ImportError:\n logging.warning(\n \"--> it seems that nccl is not installed. multi-gpu is not enabled.\"\n )\n logging.warning(\n \"--> if you want to use multi-gpu, please install it and then re-setup.\"\n )\n try:\n assert torch.cuda.device_count() > 1\n logging.info(\n f\"--> multi-gpu is available (#gpus={torch.cuda.device_count()}).\"\n )\n except AssertionError:\n logging.warning(\"--> it seems that only single gpu is available.\")\n logging.warning(\"--> maybe your machine has only one gpu.\")\n logging.info(\"cuda availableness check done.\")\n\n logging.info(\"installation check is done.\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "path": "tools/check_install.py"}]} | 2,764 | 194 |
gh_patches_debug_20006 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1104 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create and populate folder for static discovery documents
`googleapiclient` downloads the discovery docs for a Google API service at runtime and then constructs a client. Issue #1063 captures the need to move to a static package where the discovery docs are not fetched at run time. An incremental step towards satisfying #1063 would be to create a folder with static Google API discovery documents in this repo. The [discoveries folder](https://github.com/googleapis/discovery-artifact-manager/tree/master/discoveries) in the googleapis/discovery-artifact-manager repo has an up to date copy of the discovery documents. One approach would be to modify [synth.py](https://github.com/googleapis/google-api-python-client/blob/master/synth.py) to copy the discovery documents from discovery-artifact-manager similar to how it is done [here](https://github.com/googleapis/python-api-common-protos/blob/master/synth.py#L38).
</issue>
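The synthtool pattern the issue points at (and which the patch further down implements) amounts to cloning discovery-artifact-manager and copying its `discoveries` folder into the client library. Condensed from the applied change:

```python
import synthtool as s
from synthtool.sources import git

DISCOVERY_ARTIFACT_MANAGER_REPO = "googleapis/discovery-artifact-manager"

url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO)
discovery_artifacts = git.clone(url) / "discoveries"

# Copy the static discovery documents into the package, skipping Bazel files.
s.copy(
    discovery_artifacts,
    "googleapiclient/discovery_cache/documents",
    excludes=["**/BUILD.bazel"],
)
```

The target folder `googleapiclient/discovery_cache/documents` is where the runtime can later look up discovery documents without a network fetch, which is the incremental step toward #1063 described above.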
<code>
[start of synth.py]
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import synthtool as s
16 from synthtool import gcp
17
18 common = gcp.CommonTemplates()
19
20 # ----------------------------------------------------------------------------
21 # Add templated files
22 # ----------------------------------------------------------------------------
23 templated_files = common.py_library()
24
25 # Copy kokoro configs.
26 # Docs are excluded as repo docs cannot currently be generated using sphinx.
27 s.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])
28
29 # Also move issue templates
30 s.move(templated_files / '.github')
31
32 s.shell.run(["nox", "-s", "docs"], hide_output=False)
[end of synth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synth.py b/synth.py
--- a/synth.py
+++ b/synth.py
@@ -14,6 +14,9 @@
import synthtool as s
from synthtool import gcp
+from synthtool.sources import git
+
+DISCOVERY_ARTIFACT_MANAGER_REPO = "googleapis/discovery-artifact-manager"
common = gcp.CommonTemplates()
@@ -29,4 +32,20 @@
# Also move issue templates
s.move(templated_files / '.github')
-s.shell.run(["nox", "-s", "docs"], hide_output=False)
\ No newline at end of file
+# ----------------------------------------------------------------------------
+# Copy discoveries folder from discovery-artifact-manager repo
+# ----------------------------------------------------------------------------
+
+discovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO)
+discovery_artifacts = git.clone(discovery_artifact_manager_url) / "discoveries"
+
+excludes = [
+ "**/BUILD.bazel",
+]
+s.copy(discovery_artifacts,
+ "googleapiclient/discovery_cache/documents", excludes=excludes)
+
+# ----------------------------------------------------------------------------
+# Generate docs
+# ----------------------------------------------------------------------------
+s.shell.run(["nox", "-s", "docs"], hide_output=False)
| {"golden_diff": "diff --git a/synth.py b/synth.py\n--- a/synth.py\n+++ b/synth.py\n@@ -14,6 +14,9 @@\n \n import synthtool as s\n from synthtool import gcp\n+from synthtool.sources import git\n+\n+DISCOVERY_ARTIFACT_MANAGER_REPO = \"googleapis/discovery-artifact-manager\"\n \n common = gcp.CommonTemplates()\n \n@@ -29,4 +32,20 @@\n # Also move issue templates\n s.move(templated_files / '.github')\n \n-s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n\\ No newline at end of file\n+# ----------------------------------------------------------------------------\n+# Copy discoveries folder from discovery-artifact-manager repo\n+# ----------------------------------------------------------------------------\n+\n+discovery_artifact_manager_url = git.make_repo_clone_url(DISCOVERY_ARTIFACT_MANAGER_REPO)\n+discovery_artifacts = git.clone(discovery_artifact_manager_url) / \"discoveries\"\n+\n+excludes = [\n+ \"**/BUILD.bazel\",\n+]\n+s.copy(discovery_artifacts,\n+ \"googleapiclient/discovery_cache/documents\", excludes=excludes)\n+\n+# ----------------------------------------------------------------------------\n+# Generate docs\n+# ----------------------------------------------------------------------------\n+s.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)\n", "issue": "Create and populate folder for static discovery documents \n`googleapiclient` downloads the discovery docs for a Google API service at runtime and then constructs a client. Issue #1063 captures the need to move to a static package where the discovery docs are not fetched at run time. An incremental step towards satisfying #1063 would be to create a folder with static Google API discovery documents in this repo. The [discoveries folder](https://github.com/googleapis/discovery-artifact-manager/tree/master/discoveries) in the googleapis/discovery-artifact-manager repo has an up to date copy of the discovery documents. One approach would be to modify [synth.py](https://github.com/googleapis/google-api-python-client/blob/master/synth.py) to copy the discovery documents from discovery-artifact-manager similar to how it is done [here](https://github.com/googleapis/python-api-common-protos/blob/master/synth.py#L38).\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport synthtool as s\nfrom synthtool import gcp\n\ncommon = gcp.CommonTemplates()\n\n# ----------------------------------------------------------------------------\n# Add templated files\n# ----------------------------------------------------------------------------\ntemplated_files = common.py_library()\n\n# Copy kokoro configs.\n# Docs are excluded as repo docs cannot currently be generated using sphinx.\ns.move(templated_files / '.kokoro', excludes=['**/docs/*', 'publish-docs.sh'])\n\n# Also move issue templates\ns.move(templated_files / '.github')\n\ns.shell.run([\"nox\", \"-s\", \"docs\"], hide_output=False)", "path": "synth.py"}]} | 1,030 | 262 |
gh_patches_debug_37059 | rasdani/github-patches | git_diff | mantl__mantl-377 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use Consul DNS instead of .novalocal
Templatizing the hosts file to include `.novalocal` worked when we were only on OpenStack, but now that we have multiple providers we need a better way to do service discovery. Fortunately, we have Consul DNS, which will probably work.
- [ ] change Mesos to use DNS name
- [ ] change Marathon to use DNS name
</issue>
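For reference, with Consul DNS a registered service or node resolves under the `.consul` domain instead of a provider-specific hostname such as `.novalocal`. A quick resolution check might look like the sketch below; it assumes a local Consul agent answering DNS on its default port 8600 and the default `.consul` domain, both of which are deployment assumptions rather than facts from this repo, and it uses the third-party `dnspython` package purely for illustration.

```python
import dns.resolver  # pip install dnspython

resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ["127.0.0.1"]  # local Consul agent
resolver.port = 8600                  # Consul's default DNS port

# Service lookups follow <service>.service[.<datacenter>].consul
for name in ("marathon.service.consul", "mesos.service.consul"):
    answer = resolver.resolve(name, "A")
    print(name, [rr.address for rr in answer])
```

Pointing Mesos and Marathon at names like these, rather than at the OpenStack-generated hostnames, is the provider-agnostic discovery the issue asks for.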
<code>
[start of plugins/inventory/terraform.py]
1 #!/usr/bin/env python
2 """\
3 Dynamic inventory for Terraform - finds all `.tfstate` files below the working
4 directory and generates an inventory based on them.
5 """
6 from __future__ import unicode_literals, print_function
7 import argparse
8 from collections import defaultdict
9 from functools import wraps
10 import json
11 import os
12 import re
13
14
15 def tfstates(root=None):
16 root = root or os.getcwd()
17 for dirpath, _, filenames in os.walk(root):
18 for name in filenames:
19 if os.path.splitext(name)[-1] == '.tfstate':
20 yield os.path.join(dirpath, name)
21
22
23 def iterresources(filenames):
24 for filename in filenames:
25 with open(filename, 'r') as json_file:
26 state = json.load(json_file)
27 for module in state['modules']:
28 for key, resource in module['resources'].items():
29 yield key, resource
30
31 ## READ RESOURCES
32 PARSERS = {}
33
34
35 def _clean_dc(dcname):
36 # Consul DCs are strictly alphanumeric with underscores and hyphens -
37 # ensure that the consul_dc attribute meets these requirements.
38 return re.sub('[^\w_\-]', '-', dcname)
39
40
41 def iterhosts(resources):
42 '''yield host tuples of (name, attributes, groups)'''
43 for key, resource in resources:
44 resource_type, name = key.split('.', 1)
45 try:
46 parser = PARSERS[resource_type]
47 except KeyError:
48 continue
49
50 yield parser(resource)
51
52
53 def parses(prefix):
54 def inner(func):
55 PARSERS[prefix] = func
56 return func
57
58 return inner
59
60
61 def calculate_mi_vars(func):
62 """calculate microservices-infrastructure vars"""
63
64 @wraps(func)
65 def inner(*args, **kwargs):
66 name, attrs, groups = func(*args, **kwargs)
67
68 # attrs
69 if attrs['role'] == 'control':
70 attrs['consul_is_server'] = True
71 elif attrs['role'] == 'worker':
72 attrs['consul_is_server'] = False
73
74 # groups
75 if attrs.get('publicly_routable', False):
76 groups.append('publicly_routable')
77
78 return name, attrs, groups
79
80 return inner
81
82
83 def _parse_prefix(source, prefix, sep='.'):
84 for compkey, value in source.items():
85 try:
86 curprefix, rest = compkey.split(sep, 1)
87 except ValueError:
88 continue
89
90 if curprefix != prefix or rest == '#':
91 continue
92
93 yield rest, value
94
95
96 def parse_attr_list(source, prefix, sep='.'):
97 size_key = '%s%s#' % (prefix, sep)
98 try:
99 size = int(source[size_key])
100 except KeyError:
101 return []
102
103 attrs = [{} for _ in range(size)]
104 for compkey, value in _parse_prefix(source, prefix, sep):
105 nth, key = compkey.split(sep, 1)
106 attrs[int(nth)][key] = value
107
108 return attrs
109
110
111 def parse_dict(source, prefix, sep='.'):
112 return dict(_parse_prefix(source, prefix, sep))
113
114
115 def parse_list(source, prefix, sep='.'):
116 return [value for _, value in _parse_prefix(source, prefix, sep)]
117
118
119 @parses('openstack_compute_instance_v2')
120 @calculate_mi_vars
121 def openstack_host(resource, tfvars=None):
122 raw_attrs = resource['primary']['attributes']
123 name = raw_attrs['name']
124 groups = []
125
126 attrs = {
127 'access_ip_v4': raw_attrs['access_ip_v4'],
128 'access_ip_v6': raw_attrs['access_ip_v6'],
129 'flavor': parse_dict(raw_attrs, 'flavor',
130 sep='_'),
131 'id': raw_attrs['id'],
132 'image': parse_dict(raw_attrs, 'image',
133 sep='_'),
134 'key_pair': raw_attrs['key_pair'],
135 'metadata': parse_dict(raw_attrs, 'metadata'),
136 'network': parse_attr_list(raw_attrs, 'network'),
137 'region': raw_attrs['region'],
138 'security_groups': parse_list(raw_attrs, 'security_groups'),
139 #ansible
140 'ansible_ssh_port': 22,
141 'ansible_ssh_user': 'centos',
142 }
143
144 try:
145 attrs.update({
146 'ansible_ssh_host': raw_attrs['access_ip_v4'],
147 'publicly_routable': True,
148 })
149 except (KeyError, ValueError):
150 attrs.update({'ansible_ssh_host': '', 'publicly_routable': False, })
151
152 # attrs specific to microservices-infrastructure
153 attrs.update({
154 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
155 'role': attrs['metadata'].get('role', 'none')
156 })
157
158 # add groups based on attrs
159 groups.append('os_image=' + attrs['image']['name'])
160 groups.append('os_flavor=' + attrs['flavor']['name'])
161 groups.extend('os_metadata_%s=%s' % item
162 for item in attrs['metadata'].items())
163 groups.append('os_region=' + attrs['region'])
164
165 # groups specific to microservices-infrastructure
166 groups.append('role=' + attrs['metadata'].get('role', 'none'))
167 groups.append('dc=' + attrs['consul_dc'])
168
169 return name, attrs, groups
170
171
172 @parses('google_compute_instance')
173 @calculate_mi_vars
174 def gce_host(resource, tfvars=None):
175 name = resource['primary']['id']
176 raw_attrs = resource['primary']['attributes']
177 groups = []
178
179 # network interfaces
180 interfaces = parse_attr_list(raw_attrs, 'network_interface')
181 for interface in interfaces:
182 interface['access_config'] = parse_attr_list(interface,
183 'access_config')
184 for key in interface.keys():
185 if '.' in key:
186 del interface[key]
187
188 # general attrs
189 attrs = {
190 'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
191 'disks': parse_attr_list(raw_attrs, 'disk'),
192 'machine_type': raw_attrs['machine_type'],
193 'metadata': parse_dict(raw_attrs, 'metadata'),
194 'network': parse_attr_list(raw_attrs, 'network'),
195 'network_interface': interfaces,
196 'self_link': raw_attrs['self_link'],
197 'service_account': parse_attr_list(raw_attrs, 'service_account'),
198 'tags': parse_list(raw_attrs, 'tags'),
199 'zone': raw_attrs['zone'],
200 # ansible
201 'ansible_ssh_port': 22,
202 'ansible_ssh_user': 'deploy',
203 }
204
205 # attrs specific to microservices-infrastructure
206 attrs.update({
207 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['zone'])),
208 'role': attrs['metadata'].get('role', 'none')
209 })
210
211 try:
212 attrs.update({
213 'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],
214 'publicly_routable': True,
215 })
216 except (KeyError, ValueError):
217 attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
218
219 # add groups based on attrs
220 groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
221 groups.append('gce_machine_type=' + attrs['machine_type'])
222 groups.extend('gce_metadata_%s=%s' % (key, value)
223 for (key, value) in attrs['metadata'].items()
224 if key not in set(['sshKeys']))
225 groups.extend('gce_tag=' + tag for tag in attrs['tags'])
226 groups.append('gce_zone=' + attrs['zone'])
227
228 if attrs['can_ip_forward']:
229 groups.append('gce_ip_forward')
230 if attrs['publicly_routable']:
231 groups.append('gce_publicly_routable')
232
233 # groups specific to microservices-infrastructure
234 groups.append('role=' + attrs['metadata'].get('role', 'none'))
235 groups.append('dc=' + attrs['consul_dc'])
236
237 return name, attrs, groups
238
239
240 ## QUERY TYPES
241 def query_host(hosts, target):
242 for name, attrs, _ in hosts:
243 if name == target:
244 return attrs
245
246 return {}
247
248
249 def query_list(hosts):
250 groups = defaultdict(dict)
251 meta = {}
252
253 for name, attrs, hostgroups in hosts:
254 for group in set(hostgroups):
255 groups[group].setdefault('hosts', [])
256 groups[group]['hosts'].append(name)
257
258 meta[name] = attrs
259
260 groups['_meta'] = {'hostvars': meta}
261 return groups
262
263
264 def main():
265
266 parser = argparse.ArgumentParser(__file__, __doc__)
267 modes = parser.add_mutually_exclusive_group(required=True)
268 modes.add_argument('--list',
269 action='store_true',
270 help='list all variables')
271 modes.add_argument('--host', help='list variables for a single host')
272 parser.add_argument('--pretty',
273 action='store_true',
274 help='pretty-print output JSON')
275 parser.add_argument('--nometa',
276 action='store_true',
277 help='with --list, exclude hostvars')
278
279 args = parser.parse_args()
280
281 hosts = iterhosts(iterresources(tfstates()))
282 if args.list:
283 output = query_list(hosts)
284 if args.nometa:
285 del output['_meta']
286 else:
287 output = query_host(hosts, args.host)
288
289 print(json.dumps(output, indent=4 if args.pretty else None))
290 parser.exit()
291
292
293 if __name__ == '__main__':
294 main()
295
[end of plugins/inventory/terraform.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/inventory/terraform.py b/plugins/inventory/terraform.py
--- a/plugins/inventory/terraform.py
+++ b/plugins/inventory/terraform.py
@@ -116,6 +116,17 @@
return [value for _, value in _parse_prefix(source, prefix, sep)]
+def parse_bool(string_form):
+ token = string_form.lower()[0]
+
+ if token == 't':
+ return True
+ elif token == 'f':
+ return False
+ else:
+ raise ValueError('could not convert %r to a bool' % string_form)
+
+
@parses('openstack_compute_instance_v2')
@calculate_mi_vars
def openstack_host(resource, tfvars=None):
@@ -169,6 +180,65 @@
return name, attrs, groups
+@parses('aws_instance')
+@calculate_mi_vars
+def aws_host(resource, tfvars=None):
+ name = resource['primary']['attributes']['tags.Name']
+ raw_attrs = resource['primary']['attributes']
+
+ groups = []
+
+ attrs = {
+ 'ami': raw_attrs['ami'],
+ 'availability_zone': raw_attrs['availability_zone'],
+ 'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
+ 'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
+ 'ephemeral_block_device': parse_attr_list(raw_attrs,
+ 'ephemeral_block_device'),
+ 'id': raw_attrs['id'],
+ 'key_name': raw_attrs['key_name'],
+ 'private': parse_dict(raw_attrs, 'private',
+ sep='_'),
+ 'public': parse_dict(raw_attrs, 'public',
+ sep='_'),
+ 'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
+ 'security_groups': parse_attr_list(raw_attrs, 'security_groups'),
+ 'subnet': parse_dict(raw_attrs, 'subnet',
+ sep='_'),
+ 'tags': parse_dict(raw_attrs, 'tags'),
+ 'tenancy': raw_attrs['tenancy'],
+ 'vpc_security_group_ids': parse_list(raw_attrs,
+ 'vpc_security_group_ids'),
+ # ansible-specific
+ 'ansible_ssh_port': 22,
+ 'ansible_ssh_user': raw_attrs['tags.sshUser'],
+ 'ansible_ssh_host': raw_attrs['public_ip'],
+ }
+
+ # attrs specific to microservices-infrastructure
+ attrs.update({
+ 'consul_dc': attrs['tags'].get('dc'),
+ 'role': attrs['tags'].get('role', 'none')
+ })
+
+ # groups specific to microservices-infrastructure
+ groups.extend(['aws_ami=' + attrs['ami'],
+ 'aws_az=' + attrs['availability_zone'],
+ 'aws_key_name=' + attrs['key_name'],
+ 'aws_tenancy=' + attrs['tenancy']])
+ groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
+ groups.extend('aws_vpc_security_group=' + group
+ for group in attrs['vpc_security_group_ids'])
+ groups.extend('aws_subnet_%s=%s' % subnet
+ for subnet in attrs['subnet'].items())
+
+ # groups specific to microservices-infrastructure
+ groups.append('role=' + attrs['role'])
+ groups.append('dc=' + attrs['consul_dc'])
+
+ return name, attrs, groups
+
+
@parses('google_compute_instance')
@calculate_mi_vars
def gce_host(resource, tfvars=None):
| {"golden_diff": "diff --git a/plugins/inventory/terraform.py b/plugins/inventory/terraform.py\n--- a/plugins/inventory/terraform.py\n+++ b/plugins/inventory/terraform.py\n@@ -116,6 +116,17 @@\n return [value for _, value in _parse_prefix(source, prefix, sep)]\n \n \n+def parse_bool(string_form):\n+ token = string_form.lower()[0]\n+\n+ if token == 't':\n+ return True\n+ elif token == 'f':\n+ return False\n+ else:\n+ raise ValueError('could not convert %r to a bool' % string_form)\n+\n+\n @parses('openstack_compute_instance_v2')\n @calculate_mi_vars\n def openstack_host(resource, tfvars=None):\n@@ -169,6 +180,65 @@\n return name, attrs, groups\n \n \n+@parses('aws_instance')\n+@calculate_mi_vars\n+def aws_host(resource, tfvars=None):\n+ name = resource['primary']['attributes']['tags.Name']\n+ raw_attrs = resource['primary']['attributes']\n+\n+ groups = []\n+\n+ attrs = {\n+ 'ami': raw_attrs['ami'],\n+ 'availability_zone': raw_attrs['availability_zone'],\n+ 'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),\n+ 'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),\n+ 'ephemeral_block_device': parse_attr_list(raw_attrs,\n+ 'ephemeral_block_device'),\n+ 'id': raw_attrs['id'],\n+ 'key_name': raw_attrs['key_name'],\n+ 'private': parse_dict(raw_attrs, 'private',\n+ sep='_'),\n+ 'public': parse_dict(raw_attrs, 'public',\n+ sep='_'),\n+ 'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),\n+ 'security_groups': parse_attr_list(raw_attrs, 'security_groups'),\n+ 'subnet': parse_dict(raw_attrs, 'subnet',\n+ sep='_'),\n+ 'tags': parse_dict(raw_attrs, 'tags'),\n+ 'tenancy': raw_attrs['tenancy'],\n+ 'vpc_security_group_ids': parse_list(raw_attrs,\n+ 'vpc_security_group_ids'),\n+ # ansible-specific\n+ 'ansible_ssh_port': 22,\n+ 'ansible_ssh_user': raw_attrs['tags.sshUser'],\n+ 'ansible_ssh_host': raw_attrs['public_ip'],\n+ }\n+\n+ # attrs specific to microservices-infrastructure\n+ attrs.update({\n+ 'consul_dc': attrs['tags'].get('dc'),\n+ 'role': attrs['tags'].get('role', 'none')\n+ })\n+\n+ # groups specific to microservices-infrastructure\n+ groups.extend(['aws_ami=' + attrs['ami'],\n+ 'aws_az=' + attrs['availability_zone'],\n+ 'aws_key_name=' + attrs['key_name'],\n+ 'aws_tenancy=' + attrs['tenancy']])\n+ groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())\n+ groups.extend('aws_vpc_security_group=' + group\n+ for group in attrs['vpc_security_group_ids'])\n+ groups.extend('aws_subnet_%s=%s' % subnet\n+ for subnet in attrs['subnet'].items())\n+\n+ # groups specific to microservices-infrastructure\n+ groups.append('role=' + attrs['role'])\n+ groups.append('dc=' + attrs['consul_dc'])\n+\n+ return name, attrs, groups\n+\n+\n @parses('google_compute_instance')\n @calculate_mi_vars\n def gce_host(resource, tfvars=None):\n", "issue": "Use Consul DNS instead of .novalocal\nTemplatizing the hosts file to include `.novalocal` worked when we were only on OpenStack, but now that we have multiple providers we need a better way to do service discovery. 
Fortunately, we have Consul DNS, which will probably work.\n- [ ] change Mesos to use DNS name\n- [ ] change Marathon to use DNS name\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\\\nDynamic inventory for Terraform - finds all `.tfstate` files below the working\ndirectory and generates an inventory based on them.\n\"\"\"\nfrom __future__ import unicode_literals, print_function\nimport argparse\nfrom collections import defaultdict\nfrom functools import wraps\nimport json\nimport os\nimport re\n\n\ndef tfstates(root=None):\n root = root or os.getcwd()\n for dirpath, _, filenames in os.walk(root):\n for name in filenames:\n if os.path.splitext(name)[-1] == '.tfstate':\n yield os.path.join(dirpath, name)\n\n\ndef iterresources(filenames):\n for filename in filenames:\n with open(filename, 'r') as json_file:\n state = json.load(json_file)\n for module in state['modules']:\n for key, resource in module['resources'].items():\n yield key, resource\n\n## READ RESOURCES\nPARSERS = {}\n\n\ndef _clean_dc(dcname):\n # Consul DCs are strictly alphanumeric with underscores and hyphens -\n # ensure that the consul_dc attribute meets these requirements.\n return re.sub('[^\\w_\\-]', '-', dcname)\n\n\ndef iterhosts(resources):\n '''yield host tuples of (name, attributes, groups)'''\n for key, resource in resources:\n resource_type, name = key.split('.', 1)\n try:\n parser = PARSERS[resource_type]\n except KeyError:\n continue\n\n yield parser(resource)\n\n\ndef parses(prefix):\n def inner(func):\n PARSERS[prefix] = func\n return func\n\n return inner\n\n\ndef calculate_mi_vars(func):\n \"\"\"calculate microservices-infrastructure vars\"\"\"\n\n @wraps(func)\n def inner(*args, **kwargs):\n name, attrs, groups = func(*args, **kwargs)\n\n # attrs\n if attrs['role'] == 'control':\n attrs['consul_is_server'] = True\n elif attrs['role'] == 'worker':\n attrs['consul_is_server'] = False\n\n # groups\n if attrs.get('publicly_routable', False):\n groups.append('publicly_routable')\n\n return name, attrs, groups\n\n return inner\n\n\ndef _parse_prefix(source, prefix, sep='.'):\n for compkey, value in source.items():\n try:\n curprefix, rest = compkey.split(sep, 1)\n except ValueError:\n continue\n\n if curprefix != prefix or rest == '#':\n continue\n\n yield rest, value\n\n\ndef parse_attr_list(source, prefix, sep='.'):\n size_key = '%s%s#' % (prefix, sep)\n try:\n size = int(source[size_key])\n except KeyError:\n return []\n\n attrs = [{} for _ in range(size)]\n for compkey, value in _parse_prefix(source, prefix, sep):\n nth, key = compkey.split(sep, 1)\n attrs[int(nth)][key] = value\n\n return attrs\n\n\ndef parse_dict(source, prefix, sep='.'):\n return dict(_parse_prefix(source, prefix, sep))\n\n\ndef parse_list(source, prefix, sep='.'):\n return [value for _, value in _parse_prefix(source, prefix, sep)]\n\n\n@parses('openstack_compute_instance_v2')\n@calculate_mi_vars\ndef openstack_host(resource, tfvars=None):\n raw_attrs = resource['primary']['attributes']\n name = raw_attrs['name']\n groups = []\n\n attrs = {\n 'access_ip_v4': raw_attrs['access_ip_v4'],\n 'access_ip_v6': raw_attrs['access_ip_v6'],\n 'flavor': parse_dict(raw_attrs, 'flavor',\n sep='_'),\n 'id': raw_attrs['id'],\n 'image': parse_dict(raw_attrs, 'image',\n sep='_'),\n 'key_pair': raw_attrs['key_pair'],\n 'metadata': parse_dict(raw_attrs, 'metadata'),\n 'network': parse_attr_list(raw_attrs, 'network'),\n 'region': raw_attrs['region'],\n 'security_groups': parse_list(raw_attrs, 'security_groups'),\n #ansible\n 
'ansible_ssh_port': 22,\n 'ansible_ssh_user': 'centos',\n }\n\n try:\n attrs.update({\n 'ansible_ssh_host': raw_attrs['access_ip_v4'],\n 'publicly_routable': True,\n })\n except (KeyError, ValueError):\n attrs.update({'ansible_ssh_host': '', 'publicly_routable': False, })\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),\n 'role': attrs['metadata'].get('role', 'none')\n })\n\n # add groups based on attrs\n groups.append('os_image=' + attrs['image']['name'])\n groups.append('os_flavor=' + attrs['flavor']['name'])\n groups.extend('os_metadata_%s=%s' % item\n for item in attrs['metadata'].items())\n groups.append('os_region=' + attrs['region'])\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['metadata'].get('role', 'none'))\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n@parses('google_compute_instance')\n@calculate_mi_vars\ndef gce_host(resource, tfvars=None):\n name = resource['primary']['id']\n raw_attrs = resource['primary']['attributes']\n groups = []\n\n # network interfaces\n interfaces = parse_attr_list(raw_attrs, 'network_interface')\n for interface in interfaces:\n interface['access_config'] = parse_attr_list(interface,\n 'access_config')\n for key in interface.keys():\n if '.' in key:\n del interface[key]\n\n # general attrs\n attrs = {\n 'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',\n 'disks': parse_attr_list(raw_attrs, 'disk'),\n 'machine_type': raw_attrs['machine_type'],\n 'metadata': parse_dict(raw_attrs, 'metadata'),\n 'network': parse_attr_list(raw_attrs, 'network'),\n 'network_interface': interfaces,\n 'self_link': raw_attrs['self_link'],\n 'service_account': parse_attr_list(raw_attrs, 'service_account'),\n 'tags': parse_list(raw_attrs, 'tags'),\n 'zone': raw_attrs['zone'],\n # ansible\n 'ansible_ssh_port': 22,\n 'ansible_ssh_user': 'deploy',\n }\n\n # attrs specific to microservices-infrastructure\n attrs.update({\n 'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['zone'])),\n 'role': attrs['metadata'].get('role', 'none')\n })\n\n try:\n attrs.update({\n 'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],\n 'publicly_routable': True,\n })\n except (KeyError, ValueError):\n attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})\n\n # add groups based on attrs\n groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])\n groups.append('gce_machine_type=' + attrs['machine_type'])\n groups.extend('gce_metadata_%s=%s' % (key, value)\n for (key, value) in attrs['metadata'].items()\n if key not in set(['sshKeys']))\n groups.extend('gce_tag=' + tag for tag in attrs['tags'])\n groups.append('gce_zone=' + attrs['zone'])\n\n if attrs['can_ip_forward']:\n groups.append('gce_ip_forward')\n if attrs['publicly_routable']:\n groups.append('gce_publicly_routable')\n\n # groups specific to microservices-infrastructure\n groups.append('role=' + attrs['metadata'].get('role', 'none'))\n groups.append('dc=' + attrs['consul_dc'])\n\n return name, attrs, groups\n\n\n## QUERY TYPES\ndef query_host(hosts, target):\n for name, attrs, _ in hosts:\n if name == target:\n return attrs\n\n return {}\n\n\ndef query_list(hosts):\n groups = defaultdict(dict)\n meta = {}\n\n for name, attrs, hostgroups in hosts:\n for group in set(hostgroups):\n groups[group].setdefault('hosts', [])\n groups[group]['hosts'].append(name)\n\n meta[name] = attrs\n\n groups['_meta'] = {'hostvars': meta}\n return 
groups\n\n\ndef main():\n\n parser = argparse.ArgumentParser(__file__, __doc__)\n modes = parser.add_mutually_exclusive_group(required=True)\n modes.add_argument('--list',\n action='store_true',\n help='list all variables')\n modes.add_argument('--host', help='list variables for a single host')\n parser.add_argument('--pretty',\n action='store_true',\n help='pretty-print output JSON')\n parser.add_argument('--nometa',\n action='store_true',\n help='with --list, exclude hostvars')\n\n args = parser.parse_args()\n\n hosts = iterhosts(iterresources(tfstates()))\n if args.list:\n output = query_list(hosts)\n if args.nometa:\n del output['_meta']\n else:\n output = query_host(hosts, args.host)\n\n print(json.dumps(output, indent=4 if args.pretty else None))\n parser.exit()\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/inventory/terraform.py"}]} | 3,473 | 801 |
gh_patches_debug_24701 | rasdani/github-patches | git_diff | ietf-tools__datatracker-6328 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Migrate DocHistory objects for RFCs
### Describe the issue
`DocHistory` objects that pertain to the RFC rather than the I-D need to be migrated to hang off the new `type_id="rfc"` `Document`. The `name` and `rfc_number` fields should be updated to match.
### Code of Conduct
- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)
</issue>
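The existing migration (shown below) already re-points the affected `DocHistory` rows at the rfc-type `Document`; the piece this issue adds is copying `rfc_number` across so each history row matches the document it now hangs off. Condensed from the applied patch, as a fragment of the migration's `forward()` (the models are the historical ones obtained via `apps.get_model`):

```python
from django.db.models import F, OuterRef, Subquery


def forward(apps, schema_editor):
    DocHistory = apps.get_model("doc", "DocHistory")
    Document = apps.get_model("doc", "Document")

    # Copy rfc_number from the rfc Document onto its history rows.
    DocHistory.objects.filter(type_id="rfc").annotate(
        rfcno=Subquery(
            Document.objects.filter(pk=OuterRef("doc_id")).values_list(
                "rfc_number", flat=True
            )[:1]
        )
    ).update(rfc_number=F("rfcno"))
```

The patch then asserts that no rfc-type history row is left with a null `rfc_number`, mirroring the existing assertion about stray draft-type rows.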
<code>
[start of ietf/doc/migrations/0017_move_dochistory.py]
1 # Generated by Django 4.2.5 on 2023-09-11 17:52
2
3 from django.db import migrations
4
5 from django.db.models import Subquery, OuterRef, F
6
7
8 def forward(apps, schema_editor):
9 DocHistory = apps.get_model("doc", "DocHistory")
10 RelatedDocument = apps.get_model("doc", "RelatedDocument")
11 DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc")
12 DocHistory.objects.filter(
13 type_id="draft", doc__type_id="draft", name__startswith="rfc"
14 ).annotate(
15 rfc_id=Subquery(
16 RelatedDocument.objects.filter(
17 source_id=OuterRef("doc_id"), relationship_id="became_rfc"
18 ).values_list("target_id", flat=True)[:1]
19 )
20 ).update(
21 doc_id=F("rfc_id"), type_id="rfc"
22 )
23 assert not DocHistory.objects.filter(
24 name__startswith="rfc", type_id="draft"
25 ).exists()
26
27
28 class Migration(migrations.Migration):
29 dependencies = [
30 ("doc", "0016_delete_docalias"),
31 ]
32
33 # There is no going back
34 operations = [migrations.RunPython(forward)]
35
[end of ietf/doc/migrations/0017_move_dochistory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/doc/migrations/0017_move_dochistory.py b/ietf/doc/migrations/0017_move_dochistory.py
--- a/ietf/doc/migrations/0017_move_dochistory.py
+++ b/ietf/doc/migrations/0017_move_dochistory.py
@@ -8,6 +8,7 @@
def forward(apps, schema_editor):
DocHistory = apps.get_model("doc", "DocHistory")
RelatedDocument = apps.get_model("doc", "RelatedDocument")
+ Document = apps.get_model("doc", "Document")
DocHistory.objects.filter(type_id="draft", doc__type_id="rfc").update(type_id="rfc")
DocHistory.objects.filter(
type_id="draft", doc__type_id="draft", name__startswith="rfc"
@@ -20,9 +21,19 @@
).update(
doc_id=F("rfc_id"), type_id="rfc"
)
+ DocHistory.objects.filter(type_id="rfc").annotate(
+ rfcno=Subquery(
+ Document.objects.filter(pk=OuterRef("doc_id")).values_list(
+ "rfc_number", flat=True
+ )[:1]
+ )
+ ).update(rfc_number=F("rfcno"))
assert not DocHistory.objects.filter(
name__startswith="rfc", type_id="draft"
).exists()
+ assert not DocHistory.objects.filter(
+ type_id="rfc", rfc_number__isnull=True
+ ).exists()
class Migration(migrations.Migration):
| {"golden_diff": "diff --git a/ietf/doc/migrations/0017_move_dochistory.py b/ietf/doc/migrations/0017_move_dochistory.py\n--- a/ietf/doc/migrations/0017_move_dochistory.py\n+++ b/ietf/doc/migrations/0017_move_dochistory.py\n@@ -8,6 +8,7 @@\n def forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n+ Document = apps.get_model(\"doc\", \"Document\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n@@ -20,9 +21,19 @@\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n+ DocHistory.objects.filter(type_id=\"rfc\").annotate(\n+ rfcno=Subquery(\n+ Document.objects.filter(pk=OuterRef(\"doc_id\")).values_list(\n+ \"rfc_number\", flat=True\n+ )[:1]\n+ )\n+ ).update(rfc_number=F(\"rfcno\"))\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n+ assert not DocHistory.objects.filter(\n+ type_id=\"rfc\", rfc_number__isnull=True\n+ ).exists()\n \n \n class Migration(migrations.Migration):\n", "issue": "Migrate DocHistory objects for RFCs\n### Describe the issue\n\n`DocHistory` objects that pertain to the RFC rather than the I-D need to be migrated to hang off the new `type_id=\"rfc\"` `Document`. The `name` and `rfc_number` fields should be updated to match.\n\n### Code of Conduct\n\n- [X] I agree to follow the [IETF's Code of Conduct](https://github.com/ietf-tools/.github/blob/main/CODE_OF_CONDUCT.md)\n", "before_files": [{"content": "# Generated by Django 4.2.5 on 2023-09-11 17:52\n\nfrom django.db import migrations\n\nfrom django.db.models import Subquery, OuterRef, F\n\n\ndef forward(apps, schema_editor):\n DocHistory = apps.get_model(\"doc\", \"DocHistory\")\n RelatedDocument = apps.get_model(\"doc\", \"RelatedDocument\")\n DocHistory.objects.filter(type_id=\"draft\", doc__type_id=\"rfc\").update(type_id=\"rfc\")\n DocHistory.objects.filter(\n type_id=\"draft\", doc__type_id=\"draft\", name__startswith=\"rfc\"\n ).annotate(\n rfc_id=Subquery(\n RelatedDocument.objects.filter(\n source_id=OuterRef(\"doc_id\"), relationship_id=\"became_rfc\"\n ).values_list(\"target_id\", flat=True)[:1]\n )\n ).update(\n doc_id=F(\"rfc_id\"), type_id=\"rfc\"\n )\n assert not DocHistory.objects.filter(\n name__startswith=\"rfc\", type_id=\"draft\"\n ).exists()\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (\"doc\", \"0016_delete_docalias\"),\n ]\n\n # There is no going back\n operations = [migrations.RunPython(forward)]\n", "path": "ietf/doc/migrations/0017_move_dochistory.py"}]} | 1,009 | 350 |
gh_patches_debug_11776 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-273 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
It's possible to create a FollowRequest for a Follow that already exists
Send a follow request.
User accepts.
Send a second follow request (e.g. using an old tab)
Because requests and follows are stored in separate tables, it's not possible to enforce this with a constraint. Pre-checking would leave a race condition, so I guess we either need to mess with table locks or create the follow request and then delete it if it's unneeded. Can we enforce this in the pre-save of the model, perhaps?
</issue>
<code>
[start of bookwyrm/models/relationship.py]
1 ''' defines relationships between users '''
2 from django.db import models
3
4 from bookwyrm import activitypub
5 from .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel
6
7
8 class UserRelationship(ActivitypubMixin, BookWyrmModel):
9 ''' many-to-many through table for followers '''
10 user_subject = models.ForeignKey(
11 'User',
12 on_delete=models.PROTECT,
13 related_name='%(class)s_user_subject'
14 )
15 user_object = models.ForeignKey(
16 'User',
17 on_delete=models.PROTECT,
18 related_name='%(class)s_user_object'
19 )
20
21 class Meta:
22 ''' relationships should be unique '''
23 abstract = True
24 constraints = [
25 models.UniqueConstraint(
26 fields=['user_subject', 'user_object'],
27 name='%(class)s_unique'
28 ),
29 models.CheckConstraint(
30 check=~models.Q(user_subject=models.F('user_object')),
31 name='%(class)s_no_self'
32 )
33 ]
34
35 activity_mappings = [
36 ActivityMapping('id', 'remote_id'),
37 ActivityMapping('actor', 'user_subject'),
38 ActivityMapping('object', 'user_object'),
39 ]
40 activity_serializer = activitypub.Follow
41
42 def get_remote_id(self, status=None):
43 ''' use shelf identifier in remote_id '''
44 status = status or 'follows'
45 base_path = self.user_subject.remote_id
46 return '%s#%s/%d' % (base_path, status, self.id)
47
48
49 def to_accept_activity(self):
50 ''' generate an Accept for this follow request '''
51 return activitypub.Accept(
52 id=self.get_remote_id(status='accepts'),
53 actor=self.user_object.remote_id,
54 object=self.to_activity()
55 ).serialize()
56
57
58 def to_reject_activity(self):
59 ''' generate an Accept for this follow request '''
60 return activitypub.Reject(
61 id=self.get_remote_id(status='rejects'),
62 actor=self.user_object.remote_id,
63 object=self.to_activity()
64 ).serialize()
65
66
67 class UserFollows(UserRelationship):
68 ''' Following a user '''
69 status = 'follows'
70
71 @classmethod
72 def from_request(cls, follow_request):
73 ''' converts a follow request into a follow relationship '''
74 return cls(
75 user_subject=follow_request.user_subject,
76 user_object=follow_request.user_object,
77 remote_id=follow_request.remote_id,
78 )
79
80
81 class UserFollowRequest(UserRelationship):
82 ''' following a user requires manual or automatic confirmation '''
83 status = 'follow_request'
84
85
86 class UserBlocks(UserRelationship):
87 ''' prevent another user from following you and seeing your posts '''
88 # TODO: not implemented
89 status = 'blocks'
90
[end of bookwyrm/models/relationship.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py
--- a/bookwyrm/models/relationship.py
+++ b/bookwyrm/models/relationship.py
@@ -82,6 +82,17 @@
''' following a user requires manual or automatic confirmation '''
status = 'follow_request'
+ def save(self, *args, **kwargs):
+ ''' make sure the follow relationship doesn't already exist '''
+ try:
+ UserFollows.objects.get(
+ user_subject=self.user_subject,
+ user_object=self.user_object
+ )
+ return None
+ except UserFollows.DoesNotExist:
+ return super().save(*args, **kwargs)
+
class UserBlocks(UserRelationship):
''' prevent another user from following you and seeing your posts '''
| {"golden_diff": "diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py\n--- a/bookwyrm/models/relationship.py\n+++ b/bookwyrm/models/relationship.py\n@@ -82,6 +82,17 @@\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n \n+ def save(self, *args, **kwargs):\n+ ''' make sure the follow relationship doesn't already exist '''\n+ try:\n+ UserFollows.objects.get(\n+ user_subject=self.user_subject,\n+ user_object=self.user_object\n+ )\n+ return None\n+ except UserFollows.DoesNotExist:\n+ return super().save(*args, **kwargs)\n+\n \n class UserBlocks(UserRelationship):\n ''' prevent another user from following you and seeing your posts '''\n", "issue": "It's possible to create a FollowRequest for a Follow that already exists\nSend a follow request.\r\nUser accepts.\r\nSend a second follow request (eg. using an old tab)\r\n\r\nBecause requests and follows are stored in separate tables it's not possible to enforce this with a constraint. Pre-checking would leave a race condition, so I guess we either need to mess with table locks or create the follow request, and then delete it if its unneeded. Can we enforce this in the pre save of the model, perhaps?\n", "before_files": [{"content": "''' defines relationships between users '''\nfrom django.db import models\n\nfrom bookwyrm import activitypub\nfrom .base_model import ActivitypubMixin, ActivityMapping, BookWyrmModel\n\n\nclass UserRelationship(ActivitypubMixin, BookWyrmModel):\n ''' many-to-many through table for followers '''\n user_subject = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_subject'\n )\n user_object = models.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_object'\n )\n\n class Meta:\n ''' relationships should be unique '''\n abstract = True\n constraints = [\n models.UniqueConstraint(\n fields=['user_subject', 'user_object'],\n name='%(class)s_unique'\n ),\n models.CheckConstraint(\n check=~models.Q(user_subject=models.F('user_object')),\n name='%(class)s_no_self'\n )\n ]\n\n activity_mappings = [\n ActivityMapping('id', 'remote_id'),\n ActivityMapping('actor', 'user_subject'),\n ActivityMapping('object', 'user_object'),\n ]\n activity_serializer = activitypub.Follow\n\n def get_remote_id(self, status=None):\n ''' use shelf identifier in remote_id '''\n status = status or 'follows'\n base_path = self.user_subject.remote_id\n return '%s#%s/%d' % (base_path, status, self.id)\n\n\n def to_accept_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Accept(\n id=self.get_remote_id(status='accepts'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\n def to_reject_activity(self):\n ''' generate an Accept for this follow request '''\n return activitypub.Reject(\n id=self.get_remote_id(status='rejects'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n\n\nclass UserFollows(UserRelationship):\n ''' Following a user '''\n status = 'follows'\n\n @classmethod\n def from_request(cls, follow_request):\n ''' converts a follow request into a follow relationship '''\n return cls(\n user_subject=follow_request.user_subject,\n user_object=follow_request.user_object,\n remote_id=follow_request.remote_id,\n )\n\n\nclass UserFollowRequest(UserRelationship):\n ''' following a user requires manual or automatic confirmation '''\n status = 'follow_request'\n\n\nclass UserBlocks(UserRelationship):\n ''' prevent 
another user from following you and seeing your posts '''\n # TODO: not implemented\n status = 'blocks'\n", "path": "bookwyrm/models/relationship.py"}]} | 1,371 | 174 |
gh_patches_debug_3212 | rasdani/github-patches | git_diff | scikit-image__scikit-image-6007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
installation issues with setuptools 58.5.x
## Description
I observed some failures on CI in a recent PR that appear to be related to use of setuptools >= 58.5.0. Example: https://github.com/scikit-image/scikit-image/runs/4104921276?check_suite_focus=true
I was able to reproduce the issue locally with 58.5.0, but it does not occur for 58.4.0. A temporary workaround would be to pin setuptools <58.5 on CI.
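
Purely as an illustration of one direction a fix could take (an assumption on my part — the root cause isn't diagnosed here, and this is not the project's actual patch), the build script could take its command classes from setuptools and only fall back to distutils:

```python
# Hedged sketch: prefer the setuptools command classes over the distutils ones,
# falling back for environments where the setuptools layout differs.
try:
    from setuptools.command.build_py import build_py
    from setuptools.command.sdist import sdist
except ImportError:
    from distutils.command.build_py import build_py
    from distutils.command.sdist import sdist
```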
## Way to reproduce
build from source with setuptools >= 58.5
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2
3 import os
4 import sys
5 import tempfile
6 import shutil
7 import builtins
8 import textwrap
9
10 import setuptools
11 from distutils.command.build_py import build_py
12 from distutils.command.sdist import sdist
13 from distutils.errors import CompileError, LinkError
14 from numpy.distutils.command.build_ext import build_ext
15
16
17 DISTNAME = 'scikit-image'
18 DESCRIPTION = 'Image processing in Python'
19 MAINTAINER = 'Stefan van der Walt'
20 MAINTAINER_EMAIL = '[email protected]'
21 URL = 'https://scikit-image.org'
22 LICENSE = 'Modified BSD'
23 DOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'
24 PROJECT_URLS = {
25 "Bug Tracker": 'https://github.com/scikit-image/scikit-image/issues',
26 "Documentation": 'https://scikit-image.org/docs/stable/',
27 "Source Code": 'https://github.com/scikit-image/scikit-image'
28 }
29
30 with open('README.md', encoding='utf-8') as f:
31 LONG_DESCRIPTION = f.read()
32
33 if sys.version_info < (3, 6):
34
35 error = """Python {py} detected.
36
37 scikit-image 0.18+ supports only Python 3.7 and above.
38
39 For Python 2.7, please install the 0.14.x Long Term Support release using:
40
41 $ pip install 'scikit-image<0.15'
42 """.format(py='.'.join([str(v) for v in sys.version_info[:3]]))
43
44 sys.stderr.write(error + "\n")
45 sys.exit(1)
46
47 # This is a bit (!) hackish: we are setting a global variable so that the main
48 # skimage __init__ can detect if it is being loaded by the setup routine, to
49 # avoid attempting to load components that aren't built yet:
50 # the numpy distutils extensions that are used by scikit-image to recursively
51 # build the compiled extensions in sub-packages is based on the Python import
52 # machinery.
53 builtins.__SKIMAGE_SETUP__ = True
54
55
56 # Support for openmp
57
58 class ConditionalOpenMP(build_ext):
59
60 def can_compile_link(self, compile_flags, link_flags):
61
62 if "PYODIDE_PACKAGE_ABI" in os.environ:
63 # pyodide doesn't support OpenMP
64 return False
65
66 cc = self.compiler
67 fname = 'test.c'
68 cwd = os.getcwd()
69 tmpdir = tempfile.mkdtemp()
70
71 code = ("#include <omp.h>"
72 "int main(int argc, char** argv) { return(0); }")
73
74 if self.compiler.compiler_type == "msvc":
75 # make sure we build a DLL on Windows
76 local_link_flags = link_flags + ["/DLL"]
77 else:
78 local_link_flags = link_flags
79
80 try:
81 os.chdir(tmpdir)
82 with open(fname, 'wt') as fobj:
83 fobj.write(code)
84 try:
85 objects = cc.compile([fname],
86 extra_postargs=compile_flags)
87 except CompileError:
88 return False
89 try:
90 # Link shared lib rather then executable to avoid
91 # http://bugs.python.org/issue4431 with MSVC 10+
92 cc.link_shared_lib(objects, "testlib",
93 extra_postargs=local_link_flags)
94 except (LinkError, TypeError):
95 return False
96 finally:
97 os.chdir(cwd)
98 shutil.rmtree(tmpdir)
99 return True
100
101 def build_extensions(self):
102 """ Hook into extension building to set compiler flags """
103
104 compile_flags = list()
105 link_flags = list()
106
107 # check which compiler is being used
108 if self.compiler.compiler_type == "msvc":
109 # '-fopenmp' is called '/openmp' in msvc
110 compile_flags += ['/openmp']
111 else:
112 compile_flags += ['-fopenmp']
113 link_flags += ['-fopenmp']
114
115 if self.can_compile_link(compile_flags, link_flags):
116 for ext in self.extensions:
117 ext.extra_compile_args += compile_flags
118 ext.extra_link_args += link_flags
119
120 build_ext.build_extensions(self)
121
122
123 with open('skimage/__init__.py', encoding='utf-8') as fid:
124 for line in fid:
125 if line.startswith('__version__'):
126 VERSION = line.strip().split()[-1][1:-1]
127 break
128
129
130 def parse_requirements_file(filename):
131 with open(filename, encoding='utf-8') as fid:
132 requires = [line.strip() for line in fid.readlines() if line]
133
134 return requires
135
136
137 INSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')
138 # The `requirements/extras.txt` file is explicitely omitted because
139 # it contains requirements that do not have wheels uploaded to pip
140 # for the platforms we wish to support.
141 extras_require = {
142 dep: parse_requirements_file('requirements/' + dep + '.txt')
143 for dep in ['docs', 'optional', 'test', 'data']
144 }
145
146 # requirements for those browsing PyPI
147 REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
148 REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
149 REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
150
151
152 def configuration(parent_package='', top_path=None):
153 if os.path.exists('MANIFEST'):
154 os.remove('MANIFEST')
155
156 from numpy.distutils.misc_util import Configuration
157 config = Configuration(None, parent_package, top_path)
158
159 config.set_options(
160 ignore_setup_xxx_py=True,
161 assume_default_configuration=True,
162 delegate_options_to_subpackages=True,
163 quiet=True)
164
165 config.add_subpackage('skimage')
166
167 return config
168
169
170 if __name__ == "__main__":
171 cmdclass = {'build_py': build_py,
172 'sdist': sdist}
173 try:
174 # test if build dependencies exist.
175 # if not, some commands are still viable.
176 # note: this must be kept in sync with pyproject.toml
177 from numpy.distutils.core import setup
178 import cython
179 extra = {'configuration': configuration}
180 cmdclass['build_ext'] = ConditionalOpenMP
181 except ImportError:
182 if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
183 sys.argv[1] in ('--help-commands',
184 '--version',
185 'clean',
186 'egg_info',
187 'install_egg_info',
188 'rotate',
189 'sdist')):
190 # For these actions, compilation is not required.
191 #
192 # They are required to succeed for example when pip is
193 # used to install scikit-image when Numpy/cython are not
194 # yet present in the system.
195 from setuptools import setup
196 extra = {}
197 else:
198 print(textwrap.dedent("""
199 To install scikit-image from source, you will need NumPy
200 and Cython.
201 Install NumPy and Cython with your python package manager.
202 If you are using pip, the commands are:
203
204 pip install numpy cython
205
206 For more details, see:
207
208 https://scikit-image.org/docs/stable/install.html
209 """))
210 sys.exit(1)
211
212 setup(
213 name=DISTNAME,
214 description=DESCRIPTION,
215 long_description=LONG_DESCRIPTION,
216 long_description_content_type="text/markdown",
217 maintainer=MAINTAINER,
218 maintainer_email=MAINTAINER_EMAIL,
219 url=URL,
220 license=LICENSE,
221 download_url=DOWNLOAD_URL,
222 project_urls=PROJECT_URLS,
223 version=VERSION,
224 classifiers=[
225 'Development Status :: 4 - Beta',
226 'Environment :: Console',
227 'Intended Audience :: Developers',
228 'Intended Audience :: Science/Research',
229 'License :: OSI Approved :: BSD License',
230 'Programming Language :: C',
231 'Programming Language :: Python',
232 'Programming Language :: Python :: 3',
233 'Programming Language :: Python :: 3.7',
234 'Programming Language :: Python :: 3.8',
235 'Programming Language :: Python :: 3.9',
236 'Programming Language :: Python :: 3 :: Only',
237 'Topic :: Scientific/Engineering',
238 'Operating System :: Microsoft :: Windows',
239 'Operating System :: POSIX',
240 'Operating System :: Unix',
241 'Operating System :: MacOS',
242 ],
243 install_requires=INSTALL_REQUIRES,
244 requires=REQUIRES,
245 extras_require=extras_require,
246 python_requires='>=3.7',
247 packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),
248 include_package_data=True,
249 zip_safe=False, # the package can run out of an .egg file
250 entry_points={
251 'console_scripts': ['skivi = skimage.scripts.skivi:main'],
252 },
253 cmdclass=cmdclass,
254 **extra
255 )
256
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,8 +8,8 @@
import textwrap
import setuptools
-from distutils.command.build_py import build_py
-from distutils.command.sdist import sdist
+from setuptools.command.build_py import build_py
+from setuptools.command.sdist import sdist
from distutils.errors import CompileError, LinkError
from numpy.distutils.command.build_ext import build_ext
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,8 +8,8 @@\n import textwrap\n \n import setuptools\n-from distutils.command.build_py import build_py\n-from distutils.command.sdist import sdist\n+from setuptools.command.build_py import build_py\n+from setuptools.command.sdist import sdist\n from distutils.errors import CompileError, LinkError\n from numpy.distutils.command.build_ext import build_ext\n", "issue": "installation issues with setuptools 58.5.x\n## Description\r\n\r\nI observed some failures on CI in a recent PR that appear to be related to use of setuptools >= 58.5.0. Example: https://github.com/scikit-image/scikit-image/runs/4104921276?check_suite_focus=true\r\n\r\nI was able to reproduce the issue locally with 58.5.0, but it does not occur for 58.4.0. A temporary workaround would be to pin setuptools <58.5 on CI.\r\n\r\n\r\n\r\n## Way to reproduce\r\n\r\nbuild from source with setuptools >= 58.5\r\n\r\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\nimport os\nimport sys\nimport tempfile\nimport shutil\nimport builtins\nimport textwrap\n\nimport setuptools\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\nfrom distutils.errors import CompileError, LinkError\nfrom numpy.distutils.command.build_ext import build_ext\n\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing in Python'\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'https://scikit-image.org/docs/stable/install.html'\nPROJECT_URLS = {\n \"Bug Tracker\": 'https://github.com/scikit-image/scikit-image/issues',\n \"Documentation\": 'https://scikit-image.org/docs/stable/',\n \"Source Code\": 'https://github.com/scikit-image/scikit-image'\n}\n\nwith open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\nif sys.version_info < (3, 6):\n\n error = \"\"\"Python {py} detected.\n\nscikit-image 0.18+ supports only Python 3.7 and above.\n\nFor Python 2.7, please install the 0.14.x Long Term Support release using:\n\n $ pip install 'scikit-image<0.15'\n\"\"\".format(py='.'.join([str(v) for v in sys.version_info[:3]]))\n\n sys.stderr.write(error + \"\\n\")\n sys.exit(1)\n\n# This is a bit (!) 
hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\n# Support for openmp\n\nclass ConditionalOpenMP(build_ext):\n\n def can_compile_link(self, compile_flags, link_flags):\n\n if \"PYODIDE_PACKAGE_ABI\" in os.environ:\n # pyodide doesn't support OpenMP\n return False\n\n cc = self.compiler\n fname = 'test.c'\n cwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n\n code = (\"#include <omp.h>\"\n \"int main(int argc, char** argv) { return(0); }\")\n\n if self.compiler.compiler_type == \"msvc\":\n # make sure we build a DLL on Windows\n local_link_flags = link_flags + [\"/DLL\"]\n else:\n local_link_flags = link_flags\n\n try:\n os.chdir(tmpdir)\n with open(fname, 'wt') as fobj:\n fobj.write(code)\n try:\n objects = cc.compile([fname],\n extra_postargs=compile_flags)\n except CompileError:\n return False\n try:\n # Link shared lib rather then executable to avoid\n # http://bugs.python.org/issue4431 with MSVC 10+\n cc.link_shared_lib(objects, \"testlib\",\n extra_postargs=local_link_flags)\n except (LinkError, TypeError):\n return False\n finally:\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n return True\n\n def build_extensions(self):\n \"\"\" Hook into extension building to set compiler flags \"\"\"\n\n compile_flags = list()\n link_flags = list()\n\n # check which compiler is being used\n if self.compiler.compiler_type == \"msvc\":\n # '-fopenmp' is called '/openmp' in msvc\n compile_flags += ['/openmp']\n else:\n compile_flags += ['-fopenmp']\n link_flags += ['-fopenmp']\n\n if self.can_compile_link(compile_flags, link_flags):\n for ext in self.extensions:\n ext.extra_compile_args += compile_flags\n ext.extra_link_args += link_flags\n\n build_ext.build_extensions(self)\n\n\nwith open('skimage/__init__.py', encoding='utf-8') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\n\ndef parse_requirements_file(filename):\n with open(filename, encoding='utf-8') as fid:\n requires = [line.strip() for line in fid.readlines() if line]\n\n return requires\n\n\nINSTALL_REQUIRES = parse_requirements_file('requirements/default.txt')\n# The `requirements/extras.txt` file is explicitely omitted because\n# it contains requirements that do not have wheels uploaded to pip\n# for the platforms we wish to support.\nextras_require = {\n dep: parse_requirements_file('requirements/' + dep + '.txt')\n for dep in ['docs', 'optional', 'test', 'data']\n}\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n\n return config\n\n\nif __name__ == \"__main__\":\n cmdclass = {'build_py': build_py,\n 'sdist': sdist}\n try:\n # test if build 
dependencies exist.\n # if not, some commands are still viable.\n # note: this must be kept in sync with pyproject.toml\n from numpy.distutils.core import setup\n import cython\n extra = {'configuration': configuration}\n cmdclass['build_ext'] = ConditionalOpenMP\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate',\n 'sdist')):\n # For these actions, compilation is not required.\n #\n # They are required to succeed for example when pip is\n # used to install scikit-image when Numpy/cython are not\n # yet present in the system.\n from setuptools import setup\n extra = {}\n else:\n print(textwrap.dedent(\"\"\"\n To install scikit-image from source, you will need NumPy\n and Cython.\n Install NumPy and Cython with your python package manager.\n If you are using pip, the commands are:\n\n pip install numpy cython\n\n For more details, see:\n\n https://scikit-image.org/docs/stable/install.html\n \"\"\"))\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n project_urls=PROJECT_URLS,\n version=VERSION,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n requires=REQUIRES,\n extras_require=extras_require,\n python_requires='>=3.7',\n packages=setuptools.find_packages(exclude=['doc', 'benchmarks']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n cmdclass=cmdclass,\n **extra\n )\n", "path": "setup.py"}]} | 3,235 | 99 |
gh_patches_debug_36568 | rasdani/github-patches | git_diff | feast-dev__feast-2240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Redis can delete incorrect keys
I haven't tested this explicitly, but from the data model plus this function it looks as if, when two FeatureViews have the same Entities (but are different tables) and one view is deleted, the online keys for the other view will also be deleted. This seems like a bit of a flaw either in the Redis data model or in the deletion mechanism.
https://github.com/feast-dev/feast/blob/ec4165396f70ab20b42246b093f777dfcc9f5277/sdk/python/feast/infra/online_stores/redis.py#L75-L88
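
To make the overlap concrete, here is a small sketch (the entity name and view names are made up): two different feature views that share an entity list produce the same key prefix, so a prefix scan-and-delete for one of them also matches the other's rows.

```python
from feast.infra.online_stores.helpers import _redis_key_prefix

# Entities of two *different* feature views that happen to share a join key.
entities_of_stats_view = ["driver_id"]    # e.g. driver_hourly_stats (made up)
entities_of_ratings_view = ["driver_id"]  # e.g. driver_ratings (made up)

# The prefix is derived only from the entity names, never from the view name,
# so both views map onto the same set of Redis keys.
assert _redis_key_prefix(entities_of_stats_view) == _redis_key_prefix(entities_of_ratings_view)
```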
</issue>
<code>
[start of sdk/python/feast/infra/online_stores/redis.py]
1 # Copyright 2021 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import json
15 import logging
16 from datetime import datetime
17 from enum import Enum
18 from typing import (
19 Any,
20 ByteString,
21 Callable,
22 Dict,
23 List,
24 Optional,
25 Sequence,
26 Tuple,
27 Union,
28 )
29
30 from google.protobuf.timestamp_pb2 import Timestamp
31 from pydantic import StrictStr
32 from pydantic.typing import Literal
33
34 from feast import Entity, FeatureView, RepoConfig, utils
35 from feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix
36 from feast.infra.online_stores.online_store import OnlineStore
37 from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
38 from feast.protos.feast.types.Value_pb2 import Value as ValueProto
39 from feast.repo_config import FeastConfigBaseModel
40 from feast.usage import log_exceptions_and_usage, tracing_span
41
42 try:
43 from redis import Redis
44 from redis.cluster import RedisCluster
45 except ImportError as e:
46 from feast.errors import FeastExtrasDependencyImportError
47
48 raise FeastExtrasDependencyImportError("redis", str(e))
49
50 logger = logging.getLogger(__name__)
51
52
53 class RedisType(str, Enum):
54 redis = "redis"
55 redis_cluster = "redis_cluster"
56
57
58 class RedisOnlineStoreConfig(FeastConfigBaseModel):
59 """Online store config for Redis store"""
60
61 type: Literal["redis"] = "redis"
62 """Online store type selector"""
63
64 redis_type: RedisType = RedisType.redis
65 """Redis type: redis or redis_cluster"""
66
67 connection_string: StrictStr = "localhost:6379"
68 """Connection string containing the host, port, and configuration parameters for Redis
69 format: host:port,parameter1,parameter2 eg. redis:6379,db=0 """
70
71
72 class RedisOnlineStore(OnlineStore):
73 _client: Optional[Union[Redis, RedisCluster]] = None
74
75 def delete_table_values(self, config: RepoConfig, table: FeatureView):
76 client = self._get_client(config.online_store)
77 deleted_count = 0
78 pipeline = client.pipeline()
79 prefix = _redis_key_prefix(table.entities)
80
81 for _k in client.scan_iter(
82 b"".join([prefix, b"*", config.project.encode("utf8")])
83 ):
84 pipeline.delete(_k)
85 deleted_count += 1
86 pipeline.execute()
87
88 logger.debug(f"Deleted {deleted_count} keys for {table.name}")
89
90 @log_exceptions_and_usage(online_store="redis")
91 def update(
92 self,
93 config: RepoConfig,
94 tables_to_delete: Sequence[FeatureView],
95 tables_to_keep: Sequence[FeatureView],
96 entities_to_delete: Sequence[Entity],
97 entities_to_keep: Sequence[Entity],
98 partial: bool,
99 ):
100 """
101 We delete the keys in redis for tables/views being removed.
102 """
103 for table in tables_to_delete:
104 self.delete_table_values(config, table)
105
106 def teardown(
107 self,
108 config: RepoConfig,
109 tables: Sequence[FeatureView],
110 entities: Sequence[Entity],
111 ):
112 """
113 We delete the keys in redis for tables/views being removed.
114 """
115 for table in tables:
116 self.delete_table_values(config, table)
117
118 @staticmethod
119 def _parse_connection_string(connection_string: str):
120 """
121 Reads Redis connections string using format
122 for RedisCluster:
123 redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...
124 for Redis:
125 redis_master:6379,db=0,ssl=true,password=...
126 """
127 startup_nodes = [
128 dict(zip(["host", "port"], c.split(":")))
129 for c in connection_string.split(",")
130 if "=" not in c
131 ]
132 params = {}
133 for c in connection_string.split(","):
134 if "=" in c:
135 kv = c.split("=", 1)
136 try:
137 kv[1] = json.loads(kv[1])
138 except json.JSONDecodeError:
139 ...
140
141 it = iter(kv)
142 params.update(dict(zip(it, it)))
143
144 return startup_nodes, params
145
146 def _get_client(self, online_store_config: RedisOnlineStoreConfig):
147 """
148 Creates the Redis client RedisCluster or Redis depending on configuration
149 """
150 if not self._client:
151 startup_nodes, kwargs = self._parse_connection_string(
152 online_store_config.connection_string
153 )
154 if online_store_config.redis_type == RedisType.redis_cluster:
155 kwargs["startup_nodes"] = startup_nodes
156 self._client = RedisCluster(**kwargs)
157 else:
158 kwargs["host"] = startup_nodes[0]["host"]
159 kwargs["port"] = startup_nodes[0]["port"]
160 self._client = Redis(**kwargs)
161 return self._client
162
163 @log_exceptions_and_usage(online_store="redis")
164 def online_write_batch(
165 self,
166 config: RepoConfig,
167 table: FeatureView,
168 data: List[
169 Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
170 ],
171 progress: Optional[Callable[[int], Any]],
172 ) -> None:
173 online_store_config = config.online_store
174 assert isinstance(online_store_config, RedisOnlineStoreConfig)
175
176 client = self._get_client(online_store_config)
177 project = config.project
178
179 feature_view = table.name
180 ts_key = f"_ts:{feature_view}"
181 keys = []
182 # redis pipelining optimization: send multiple commands to redis server without waiting for every reply
183 with client.pipeline() as pipe:
184 # check if a previous record under the key bin exists
185 # TODO: investigate if check and set is a better approach rather than pulling all entity ts and then setting
186 # it may be significantly slower but avoids potential (rare) race conditions
187 for entity_key, _, _, _ in data:
188 redis_key_bin = _redis_key(project, entity_key)
189 keys.append(redis_key_bin)
190 pipe.hmget(redis_key_bin, ts_key)
191 prev_event_timestamps = pipe.execute()
192 # flattening the list of lists. `hmget` does the lookup assuming a list of keys in the key bin
193 prev_event_timestamps = [i[0] for i in prev_event_timestamps]
194
195 for redis_key_bin, prev_event_time, (_, values, timestamp, _) in zip(
196 keys, prev_event_timestamps, data
197 ):
198 event_time_seconds = int(utils.make_tzaware(timestamp).timestamp())
199
200 # ignore if event_timestamp is before the event features that are currently in the feature store
201 if prev_event_time:
202 prev_ts = Timestamp()
203 prev_ts.ParseFromString(prev_event_time)
204 if prev_ts.seconds and event_time_seconds <= prev_ts.seconds:
205 # TODO: somehow signal that it's not overwriting the current record?
206 if progress:
207 progress(1)
208 continue
209
210 ts = Timestamp()
211 ts.seconds = event_time_seconds
212 entity_hset = dict()
213 entity_hset[ts_key] = ts.SerializeToString()
214
215 for feature_name, val in values.items():
216 f_key = _mmh3(f"{feature_view}:{feature_name}")
217 entity_hset[f_key] = val.SerializeToString()
218
219 pipe.hset(redis_key_bin, mapping=entity_hset)
220 # TODO: support expiring the entity / features in Redis
221 # otherwise entity features remain in redis until cleaned up in separate process
222 # client.expire redis_key_bin based a ttl setting
223 results = pipe.execute()
224 if progress:
225 progress(len(results))
226
227 @log_exceptions_and_usage(online_store="redis")
228 def online_read(
229 self,
230 config: RepoConfig,
231 table: FeatureView,
232 entity_keys: List[EntityKeyProto],
233 requested_features: Optional[List[str]] = None,
234 ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
235 online_store_config = config.online_store
236 assert isinstance(online_store_config, RedisOnlineStoreConfig)
237
238 client = self._get_client(online_store_config)
239 feature_view = table.name
240 project = config.project
241
242 result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []
243
244 if not requested_features:
245 requested_features = [f.name for f in table.features]
246
247 hset_keys = [_mmh3(f"{feature_view}:{k}") for k in requested_features]
248
249 ts_key = f"_ts:{feature_view}"
250 hset_keys.append(ts_key)
251 requested_features.append(ts_key)
252
253 keys = []
254 for entity_key in entity_keys:
255 redis_key_bin = _redis_key(project, entity_key)
256 keys.append(redis_key_bin)
257 with client.pipeline() as pipe:
258 for redis_key_bin in keys:
259 pipe.hmget(redis_key_bin, hset_keys)
260 with tracing_span(name="remote_call"):
261 redis_values = pipe.execute()
262 for values in redis_values:
263 features = self._get_features_for_entity(
264 values, feature_view, requested_features
265 )
266 result.append(features)
267 return result
268
269 def _get_features_for_entity(
270 self,
271 values: List[ByteString],
272 feature_view: str,
273 requested_features: List[str],
274 ) -> Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]:
275 res_val = dict(zip(requested_features, values))
276
277 res_ts = Timestamp()
278 ts_val = res_val.pop(f"_ts:{feature_view}")
279 if ts_val:
280 res_ts.ParseFromString(bytes(ts_val))
281
282 res = {}
283 for feature_name, val_bin in res_val.items():
284 val = ValueProto()
285 if val_bin:
286 val.ParseFromString(bytes(val_bin))
287 res[feature_name] = val
288
289 if not res:
290 return None, None
291 else:
292 timestamp = datetime.fromtimestamp(res_ts.seconds)
293 return timestamp, res
294
[end of sdk/python/feast/infra/online_stores/redis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sdk/python/feast/infra/online_stores/redis.py b/sdk/python/feast/infra/online_stores/redis.py
--- a/sdk/python/feast/infra/online_stores/redis.py
+++ b/sdk/python/feast/infra/online_stores/redis.py
@@ -72,11 +72,11 @@
class RedisOnlineStore(OnlineStore):
_client: Optional[Union[Redis, RedisCluster]] = None
- def delete_table_values(self, config: RepoConfig, table: FeatureView):
+ def delete_entity_values(self, config: RepoConfig, join_keys: List[str]):
client = self._get_client(config.online_store)
deleted_count = 0
pipeline = client.pipeline()
- prefix = _redis_key_prefix(table.entities)
+ prefix = _redis_key_prefix(join_keys)
for _k in client.scan_iter(
b"".join([prefix, b"*", config.project.encode("utf8")])
@@ -85,7 +85,7 @@
deleted_count += 1
pipeline.execute()
- logger.debug(f"Deleted {deleted_count} keys for {table.name}")
+ logger.debug(f"Deleted {deleted_count} rows for entity {', '.join(join_keys)}")
@log_exceptions_and_usage(online_store="redis")
def update(
@@ -98,10 +98,16 @@
partial: bool,
):
"""
- We delete the keys in redis for tables/views being removed.
+ Look for join_keys (list of entities) that are not in use anymore
+ (usually this happens when the last feature view that was using specific compound key is deleted)
+ and remove all features attached to this "join_keys".
"""
- for table in tables_to_delete:
- self.delete_table_values(config, table)
+ join_keys_to_keep = set(tuple(table.entities) for table in tables_to_keep)
+
+ join_keys_to_delete = set(tuple(table.entities) for table in tables_to_delete)
+
+ for join_keys in join_keys_to_delete - join_keys_to_keep:
+ self.delete_entity_values(config, list(join_keys))
def teardown(
self,
@@ -112,8 +118,10 @@
"""
We delete the keys in redis for tables/views being removed.
"""
- for table in tables:
- self.delete_table_values(config, table)
+ join_keys_to_delete = set(tuple(table.entities) for table in tables)
+
+ for join_keys in join_keys_to_delete:
+ self.delete_entity_values(config, list(join_keys))
@staticmethod
def _parse_connection_string(connection_string: str):
| {"golden_diff": "diff --git a/sdk/python/feast/infra/online_stores/redis.py b/sdk/python/feast/infra/online_stores/redis.py\n--- a/sdk/python/feast/infra/online_stores/redis.py\n+++ b/sdk/python/feast/infra/online_stores/redis.py\n@@ -72,11 +72,11 @@\n class RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n \n- def delete_table_values(self, config: RepoConfig, table: FeatureView):\n+ def delete_entity_values(self, config: RepoConfig, join_keys: List[str]):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n- prefix = _redis_key_prefix(table.entities)\n+ prefix = _redis_key_prefix(join_keys)\n \n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n@@ -85,7 +85,7 @@\n deleted_count += 1\n pipeline.execute()\n \n- logger.debug(f\"Deleted {deleted_count} keys for {table.name}\")\n+ logger.debug(f\"Deleted {deleted_count} rows for entity {', '.join(join_keys)}\")\n \n @log_exceptions_and_usage(online_store=\"redis\")\n def update(\n@@ -98,10 +98,16 @@\n partial: bool,\n ):\n \"\"\"\n- We delete the keys in redis for tables/views being removed.\n+ Look for join_keys (list of entities) that are not in use anymore\n+ (usually this happens when the last feature view that was using specific compound key is deleted)\n+ and remove all features attached to this \"join_keys\".\n \"\"\"\n- for table in tables_to_delete:\n- self.delete_table_values(config, table)\n+ join_keys_to_keep = set(tuple(table.entities) for table in tables_to_keep)\n+\n+ join_keys_to_delete = set(tuple(table.entities) for table in tables_to_delete)\n+\n+ for join_keys in join_keys_to_delete - join_keys_to_keep:\n+ self.delete_entity_values(config, list(join_keys))\n \n def teardown(\n self,\n@@ -112,8 +118,10 @@\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n- for table in tables:\n- self.delete_table_values(config, table)\n+ join_keys_to_delete = set(tuple(table.entities) for table in tables)\n+\n+ for join_keys in join_keys_to_delete:\n+ self.delete_entity_values(config, list(join_keys))\n \n @staticmethod\n def _parse_connection_string(connection_string: str):\n", "issue": "Redis can delete incorrect keys\nI haven't tested this explicitly but from the data model + this function it looks to be as if two FeatureViews have the same Entities (but are different tables) and one view is deleted the online keys for the other view will also be deleted... This seems like a bit of a flaw either in the Redis data model or in the deletion mechanism. 
\r\n\r\nhttps://github.com/feast-dev/feast/blob/ec4165396f70ab20b42246b093f777dfcc9f5277/sdk/python/feast/infra/online_stores/redis.py#L75-L88\n", "before_files": [{"content": "# Copyright 2021 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport json\nimport logging\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import (\n Any,\n ByteString,\n Callable,\n Dict,\n List,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom pydantic import StrictStr\nfrom pydantic.typing import Literal\n\nfrom feast import Entity, FeatureView, RepoConfig, utils\nfrom feast.infra.online_stores.helpers import _mmh3, _redis_key, _redis_key_prefix\nfrom feast.infra.online_stores.online_store import OnlineStore\nfrom feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto\nfrom feast.protos.feast.types.Value_pb2 import Value as ValueProto\nfrom feast.repo_config import FeastConfigBaseModel\nfrom feast.usage import log_exceptions_and_usage, tracing_span\n\ntry:\n from redis import Redis\n from redis.cluster import RedisCluster\nexcept ImportError as e:\n from feast.errors import FeastExtrasDependencyImportError\n\n raise FeastExtrasDependencyImportError(\"redis\", str(e))\n\nlogger = logging.getLogger(__name__)\n\n\nclass RedisType(str, Enum):\n redis = \"redis\"\n redis_cluster = \"redis_cluster\"\n\n\nclass RedisOnlineStoreConfig(FeastConfigBaseModel):\n \"\"\"Online store config for Redis store\"\"\"\n\n type: Literal[\"redis\"] = \"redis\"\n \"\"\"Online store type selector\"\"\"\n\n redis_type: RedisType = RedisType.redis\n \"\"\"Redis type: redis or redis_cluster\"\"\"\n\n connection_string: StrictStr = \"localhost:6379\"\n \"\"\"Connection string containing the host, port, and configuration parameters for Redis\n format: host:port,parameter1,parameter2 eg. 
redis:6379,db=0 \"\"\"\n\n\nclass RedisOnlineStore(OnlineStore):\n _client: Optional[Union[Redis, RedisCluster]] = None\n\n def delete_table_values(self, config: RepoConfig, table: FeatureView):\n client = self._get_client(config.online_store)\n deleted_count = 0\n pipeline = client.pipeline()\n prefix = _redis_key_prefix(table.entities)\n\n for _k in client.scan_iter(\n b\"\".join([prefix, b\"*\", config.project.encode(\"utf8\")])\n ):\n pipeline.delete(_k)\n deleted_count += 1\n pipeline.execute()\n\n logger.debug(f\"Deleted {deleted_count} keys for {table.name}\")\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def update(\n self,\n config: RepoConfig,\n tables_to_delete: Sequence[FeatureView],\n tables_to_keep: Sequence[FeatureView],\n entities_to_delete: Sequence[Entity],\n entities_to_keep: Sequence[Entity],\n partial: bool,\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables_to_delete:\n self.delete_table_values(config, table)\n\n def teardown(\n self,\n config: RepoConfig,\n tables: Sequence[FeatureView],\n entities: Sequence[Entity],\n ):\n \"\"\"\n We delete the keys in redis for tables/views being removed.\n \"\"\"\n for table in tables:\n self.delete_table_values(config, table)\n\n @staticmethod\n def _parse_connection_string(connection_string: str):\n \"\"\"\n Reads Redis connections string using format\n for RedisCluster:\n redis1:6379,redis2:6379,decode_responses=true,skip_full_coverage_check=true,ssl=true,password=...\n for Redis:\n redis_master:6379,db=0,ssl=true,password=...\n \"\"\"\n startup_nodes = [\n dict(zip([\"host\", \"port\"], c.split(\":\")))\n for c in connection_string.split(\",\")\n if \"=\" not in c\n ]\n params = {}\n for c in connection_string.split(\",\"):\n if \"=\" in c:\n kv = c.split(\"=\", 1)\n try:\n kv[1] = json.loads(kv[1])\n except json.JSONDecodeError:\n ...\n\n it = iter(kv)\n params.update(dict(zip(it, it)))\n\n return startup_nodes, params\n\n def _get_client(self, online_store_config: RedisOnlineStoreConfig):\n \"\"\"\n Creates the Redis client RedisCluster or Redis depending on configuration\n \"\"\"\n if not self._client:\n startup_nodes, kwargs = self._parse_connection_string(\n online_store_config.connection_string\n )\n if online_store_config.redis_type == RedisType.redis_cluster:\n kwargs[\"startup_nodes\"] = startup_nodes\n self._client = RedisCluster(**kwargs)\n else:\n kwargs[\"host\"] = startup_nodes[0][\"host\"]\n kwargs[\"port\"] = startup_nodes[0][\"port\"]\n self._client = Redis(**kwargs)\n return self._client\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def online_write_batch(\n self,\n config: RepoConfig,\n table: FeatureView,\n data: List[\n Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]\n ],\n progress: Optional[Callable[[int], Any]],\n ) -> None:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n project = config.project\n\n feature_view = table.name\n ts_key = f\"_ts:{feature_view}\"\n keys = []\n # redis pipelining optimization: send multiple commands to redis server without waiting for every reply\n with client.pipeline() as pipe:\n # check if a previous record under the key bin exists\n # TODO: investigate if check and set is a better approach rather than pulling all entity ts and then setting\n # it may be significantly slower but avoids potential (rare) race conditions\n for entity_key, _, _, _ in data:\n 
redis_key_bin = _redis_key(project, entity_key)\n keys.append(redis_key_bin)\n pipe.hmget(redis_key_bin, ts_key)\n prev_event_timestamps = pipe.execute()\n # flattening the list of lists. `hmget` does the lookup assuming a list of keys in the key bin\n prev_event_timestamps = [i[0] for i in prev_event_timestamps]\n\n for redis_key_bin, prev_event_time, (_, values, timestamp, _) in zip(\n keys, prev_event_timestamps, data\n ):\n event_time_seconds = int(utils.make_tzaware(timestamp).timestamp())\n\n # ignore if event_timestamp is before the event features that are currently in the feature store\n if prev_event_time:\n prev_ts = Timestamp()\n prev_ts.ParseFromString(prev_event_time)\n if prev_ts.seconds and event_time_seconds <= prev_ts.seconds:\n # TODO: somehow signal that it's not overwriting the current record?\n if progress:\n progress(1)\n continue\n\n ts = Timestamp()\n ts.seconds = event_time_seconds\n entity_hset = dict()\n entity_hset[ts_key] = ts.SerializeToString()\n\n for feature_name, val in values.items():\n f_key = _mmh3(f\"{feature_view}:{feature_name}\")\n entity_hset[f_key] = val.SerializeToString()\n\n pipe.hset(redis_key_bin, mapping=entity_hset)\n # TODO: support expiring the entity / features in Redis\n # otherwise entity features remain in redis until cleaned up in separate process\n # client.expire redis_key_bin based a ttl setting\n results = pipe.execute()\n if progress:\n progress(len(results))\n\n @log_exceptions_and_usage(online_store=\"redis\")\n def online_read(\n self,\n config: RepoConfig,\n table: FeatureView,\n entity_keys: List[EntityKeyProto],\n requested_features: Optional[List[str]] = None,\n ) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:\n online_store_config = config.online_store\n assert isinstance(online_store_config, RedisOnlineStoreConfig)\n\n client = self._get_client(online_store_config)\n feature_view = table.name\n project = config.project\n\n result: List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]] = []\n\n if not requested_features:\n requested_features = [f.name for f in table.features]\n\n hset_keys = [_mmh3(f\"{feature_view}:{k}\") for k in requested_features]\n\n ts_key = f\"_ts:{feature_view}\"\n hset_keys.append(ts_key)\n requested_features.append(ts_key)\n\n keys = []\n for entity_key in entity_keys:\n redis_key_bin = _redis_key(project, entity_key)\n keys.append(redis_key_bin)\n with client.pipeline() as pipe:\n for redis_key_bin in keys:\n pipe.hmget(redis_key_bin, hset_keys)\n with tracing_span(name=\"remote_call\"):\n redis_values = pipe.execute()\n for values in redis_values:\n features = self._get_features_for_entity(\n values, feature_view, requested_features\n )\n result.append(features)\n return result\n\n def _get_features_for_entity(\n self,\n values: List[ByteString],\n feature_view: str,\n requested_features: List[str],\n ) -> Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]:\n res_val = dict(zip(requested_features, values))\n\n res_ts = Timestamp()\n ts_val = res_val.pop(f\"_ts:{feature_view}\")\n if ts_val:\n res_ts.ParseFromString(bytes(ts_val))\n\n res = {}\n for feature_name, val_bin in res_val.items():\n val = ValueProto()\n if val_bin:\n val.ParseFromString(bytes(val_bin))\n res[feature_name] = val\n\n if not res:\n return None, None\n else:\n timestamp = datetime.fromtimestamp(res_ts.seconds)\n return timestamp, res\n", "path": "sdk/python/feast/infra/online_stores/redis.py"}]} | 3,728 | 588 |
gh_patches_debug_31832 | rasdani/github-patches | git_diff | pypa__virtualenv-1521 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
20b1: problems with editable installs
I've decided to try running `tox` on [all the Python projects I maintain](https://projects.gedmin.as/) with virtualenv 20.0.0b1, to check for regressions in the new beta. Three of the projects failed, all on Python 2.7:
- gtimelog (python: No module named gtimelog)
- irclog2html (ImportError: No module named zope.testrunner)
- zodbbrowser (ImportError: No module named zope.testrunner)
Here's how to reproduce the gtimelog issue:
- git clone https://github.com/gtimelog/gtimelog
- virtualenv -p python2.7 /tmp/venv
- /tmp/venv/bin/pip install -e gtimelog --no-deps
- /tmp/venv/bin/python -c 'import gtimelog'
I expect no output; the import should succeed.
I get
```
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: No module named gtimelog
```
The `--no-deps` to pip install is there to make the test simpler and faster; I get the same failure if I do a full `pip install -e gtimelog`.
I'll create a separate bug for the irclog2html/zodbbrowser issue (they're the same issue): #1518.
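
For anyone trying to narrow this down, a small sketch run with `/tmp/venv/bin/python` (the file names below are assumptions about the usual develop-mode layout, not verified output) prints `sys.path` and looks for the `.egg-link` / `easy-install.pth` entries that an editable install normally leaves behind:

```python
# Hypothetical debugging helper, not part of the report above.
import os
import pprint
import sys

pprint.pprint(sys.path)

for entry in sys.path:
    if not os.path.isdir(entry):
        continue
    for name in os.listdir(entry):
        if name.endswith(".egg-link") or name == "easy-install.pth":
            print(os.path.join(entry, name))
```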
I'm on Ubuntu 19.10. I installed virtualenv 20.0.0b1 with `pipx install --pip-args=--pre` using my system Python 3.7. `pipx runpip virtualenv list` produces
```
Package Version
------------------ --------
appdirs 1.4.3
filelock 3.0.12
importlib-metadata 1.5.0
pip 20.0.2
pkg-resources 0.0.0
setuptools 45.1.0
six 1.14.0
virtualenv 20.0.0b1
wheel 0.34.1
zipp 2.1.0
```
</issue>
<code>
[start of src/virtualenv/create/via_global_ref/builtin/python2/site.py]
1 """
2 A simple shim module to fix up things on Python 2 only.
3
4 Note: until we setup correctly the paths we can only import built-ins.
5 """
6 import sys
7
8
9 def main():
10 """Patch what needed, and invoke the original site.py"""
11 config = read_pyvenv()
12 sys.real_prefix = sys.base_prefix = config["base-prefix"]
13 sys.base_exec_prefix = config["base-exec-prefix"]
14 sys.base_executable = config["base-executable"]
15 global_site_package_enabled = config.get("include-system-site-packages", False) == "true"
16 rewrite_standard_library_sys_path()
17 disable_user_site_package()
18 load_host_site()
19 if global_site_package_enabled:
20 add_global_site_package()
21
22
23 def load_host_site():
24 """trigger reload of site.py - now it will use the standard library instance that will take care of init"""
25 # the standard library will be the first element starting with the real prefix, not zip, must be present
26 custom_site_package_path = __file__
27 reload(sys.modules["site"]) # noqa
28
29 # ensure that our expected site packages is on the sys.path
30 import os
31
32 site_packages = r"""
33 ___EXPECTED_SITE_PACKAGES___
34 """
35 import json
36
37 for path in json.loads(site_packages):
38 full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode("utf-8")))
39 if full_path not in sys.path:
40 sys.path.append(full_path)
41
42
43 def read_pyvenv():
44 """read pyvenv.cfg"""
45 os_sep = "\\" if sys.platform == "win32" else "/" # no os module here yet - poor mans version
46 config_file = "{}{}pyvenv.cfg".format(sys.prefix, os_sep)
47 with open(config_file) as file_handler:
48 lines = file_handler.readlines()
49 config = {}
50 for line in lines:
51 try:
52 split_at = line.index("=")
53 except ValueError:
54 continue # ignore bad/empty lines
55 else:
56 config[line[:split_at].strip()] = line[split_at + 1 :].strip()
57 return config
58
59
60 def rewrite_standard_library_sys_path():
61 """Once this site file is loaded the standard library paths have already been set, fix them up"""
62 sep = "\\" if sys.platform == "win32" else "/"
63 exe_dir = sys.executable[: sys.executable.rfind(sep)]
64 for at, value in enumerate(sys.path):
65 # replace old sys prefix path starts with new
66 if value == exe_dir:
67 pass # don't fix the current executable location, notably on Windows this gets added
68 elif value.startswith(exe_dir):
69 # content inside the exe folder needs to remap to original executables folder
70 orig_exe_folder = sys.base_executable[: sys.base_executable.rfind(sep)]
71 value = "{}{}".format(orig_exe_folder, value[len(exe_dir) :])
72 elif value.startswith(sys.prefix):
73 value = "{}{}".format(sys.base_prefix, value[len(sys.prefix) :])
74 elif value.startswith(sys.exec_prefix):
75 value = "{}{}".format(sys.base_exec_prefix, value[len(sys.exec_prefix) :])
76 sys.path[at] = value
77
78
79 def disable_user_site_package():
80 """Flip the switch on enable user site package"""
81 # sys.flags is a c-extension type, so we cannot monkey patch it, replace it with a python class to flip it
82 sys.original_flags = sys.flags
83
84 class Flags(object):
85 def __init__(self):
86 self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not key.startswith("_")}
87
88 sys.flags = Flags()
89 sys.flags.no_user_site = 1
90
91
92 def add_global_site_package():
93 """add the global site package"""
94 import site
95
96 # add user site package
97 sys.flags = sys.original_flags # restore original
98 site.ENABLE_USER_SITE = None # reset user site check
99 # add the global site package to the path - use new prefix and delegate to site.py
100 orig_prefixes = None
101 try:
102 orig_prefixes = site.PREFIXES
103 site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]
104 site.main()
105 finally:
106 site.PREFIXES = orig_prefixes
107
108
109 main()
110
[end of src/virtualenv/create/via_global_ref/builtin/python2/site.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py
--- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py
+++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py
@@ -22,11 +22,24 @@
def load_host_site():
"""trigger reload of site.py - now it will use the standard library instance that will take care of init"""
- # the standard library will be the first element starting with the real prefix, not zip, must be present
- custom_site_package_path = __file__
+ # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies
+ # because this is what pip will be using; the host site.py though may contain it's own pattern for where the
+ # platform and pure library paths should exist
+
+ # notably on Ubuntu there's a patch for getsitepackages to point to
+ # - prefix + local/lib/pythonx.y/dist-packages
+ # - prefix + lib/pythonx.y/dist-packages
+ # while distutils.install.cmd still points both of these to
+ # - prefix + lib/python2.7/site-packages
+
+ # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,
+ # as this will ensure that initialization code within host site.py runs
+
+ here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here
reload(sys.modules["site"]) # noqa
- # ensure that our expected site packages is on the sys.path
+ # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add
+ # them by invoking add_site_dir to trigger the processing of pth files
import os
site_packages = r"""
@@ -34,10 +47,11 @@
"""
import json
+ add_site_dir = sys.modules["site"].addsitedir
for path in json.loads(site_packages):
- full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode("utf-8")))
+ full_path = os.path.abspath(os.path.join(here, path.encode("utf-8")))
if full_path not in sys.path:
- sys.path.append(full_path)
+ add_site_dir(full_path)
def read_pyvenv():
| {"golden_diff": "diff --git a/src/virtualenv/create/via_global_ref/builtin/python2/site.py b/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n--- a/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n+++ b/src/virtualenv/create/via_global_ref/builtin/python2/site.py\n@@ -22,11 +22,24 @@\n \n def load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n- # the standard library will be the first element starting with the real prefix, not zip, must be present\n- custom_site_package_path = __file__\n+ # we have a duality here, we generate the platform and pure library path based on what distutils.install specifies\n+ # because this is what pip will be using; the host site.py though may contain it's own pattern for where the\n+ # platform and pure library paths should exist\n+\n+ # notably on Ubuntu there's a patch for getsitepackages to point to\n+ # - prefix + local/lib/pythonx.y/dist-packages\n+ # - prefix + lib/pythonx.y/dist-packages\n+ # while distutils.install.cmd still points both of these to\n+ # - prefix + lib/python2.7/site-packages\n+\n+ # to facilitate when the two match, or not we first reload the site.py, now triggering the import of host site.py,\n+ # as this will ensure that initialization code within host site.py runs\n+\n+ here = __file__ # the distutils.install patterns will be injected relative to this site.py, save it here\n reload(sys.modules[\"site\"]) # noqa\n \n- # ensure that our expected site packages is on the sys.path\n+ # and then if the distutils site packages are not on the sys.path we add them via add_site_dir; note we must add\n+ # them by invoking add_site_dir to trigger the processing of pth files\n import os\n \n site_packages = r\"\"\"\n@@ -34,10 +47,11 @@\n \"\"\"\n import json\n \n+ add_site_dir = sys.modules[\"site\"].addsitedir\n for path in json.loads(site_packages):\n- full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode(\"utf-8\")))\n+ full_path = os.path.abspath(os.path.join(here, path.encode(\"utf-8\")))\n if full_path not in sys.path:\n- sys.path.append(full_path)\n+ add_site_dir(full_path)\n \n \n def read_pyvenv():\n", "issue": "20b1: problems with editable installs\nI've decided to try running `tox` on [all the Python projects I maintain](https://projects.gedmin.as/) with virtualenv 20.0.0b1, to check for regressions in the new beta. Three of the projects failed, all on Python 2.7:\r\n\r\n- gtimelog (python: No module named gtimelog)\r\n- irclog2html (ImportError: No module named zope.testrunner)\r\n- zodbbrowser (ImportError: No module named zope.testrunner)\r\n\r\nHere's how to reproduce the gtimelog issue:\r\n\r\n- git clone https://github.com/gtimelog/gtimelog\r\n- virtualenv -p python2.7 /tmp/venv\r\n- /tmp/venv/bin/pip install -e gtimelog --no-deps\r\n- /tmp/venv/bin/python -c 'import gtimelog'\r\n\r\nI expect: no output, the import should succeed.\r\n\r\nI get\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: No module named gtimelog\r\n```\r\n\r\nThe `--no-deps` to pip install is there to make the test simpler and faster; I get the same failure if I do a full `pip install -e gtimelog`.\r\n\r\nI'll create a separate bug for the irclog2html/zodbbrowser issue (they're the same issue): #1518.\r\n\r\n\r\nI'm on Ubuntu 19.10. I installed virtualenv 20.0.0b1 with `pipx install --pip-args=--pre` using my system Python 3.7. 
`pipx runpip virtualenv list` produces\r\n\r\n```\r\nPackage Version \r\n------------------ --------\r\nappdirs 1.4.3 \r\nfilelock 3.0.12 \r\nimportlib-metadata 1.5.0 \r\npip 20.0.2 \r\npkg-resources 0.0.0 \r\nsetuptools 45.1.0 \r\nsix 1.14.0 \r\nvirtualenv 20.0.0b1\r\nwheel 0.34.1 \r\nzipp 2.1.0 \r\n```\n", "before_files": [{"content": "\"\"\"\nA simple shim module to fix up things on Python 2 only.\n\nNote: until we setup correctly the paths we can only import built-ins.\n\"\"\"\nimport sys\n\n\ndef main():\n \"\"\"Patch what needed, and invoke the original site.py\"\"\"\n config = read_pyvenv()\n sys.real_prefix = sys.base_prefix = config[\"base-prefix\"]\n sys.base_exec_prefix = config[\"base-exec-prefix\"]\n sys.base_executable = config[\"base-executable\"]\n global_site_package_enabled = config.get(\"include-system-site-packages\", False) == \"true\"\n rewrite_standard_library_sys_path()\n disable_user_site_package()\n load_host_site()\n if global_site_package_enabled:\n add_global_site_package()\n\n\ndef load_host_site():\n \"\"\"trigger reload of site.py - now it will use the standard library instance that will take care of init\"\"\"\n # the standard library will be the first element starting with the real prefix, not zip, must be present\n custom_site_package_path = __file__\n reload(sys.modules[\"site\"]) # noqa\n\n # ensure that our expected site packages is on the sys.path\n import os\n\n site_packages = r\"\"\"\n ___EXPECTED_SITE_PACKAGES___\n \"\"\"\n import json\n\n for path in json.loads(site_packages):\n full_path = os.path.abspath(os.path.join(custom_site_package_path, path.encode(\"utf-8\")))\n if full_path not in sys.path:\n sys.path.append(full_path)\n\n\ndef read_pyvenv():\n \"\"\"read pyvenv.cfg\"\"\"\n os_sep = \"\\\\\" if sys.platform == \"win32\" else \"/\" # no os module here yet - poor mans version\n config_file = \"{}{}pyvenv.cfg\".format(sys.prefix, os_sep)\n with open(config_file) as file_handler:\n lines = file_handler.readlines()\n config = {}\n for line in lines:\n try:\n split_at = line.index(\"=\")\n except ValueError:\n continue # ignore bad/empty lines\n else:\n config[line[:split_at].strip()] = line[split_at + 1 :].strip()\n return config\n\n\ndef rewrite_standard_library_sys_path():\n \"\"\"Once this site file is loaded the standard library paths have already been set, fix them up\"\"\"\n sep = \"\\\\\" if sys.platform == \"win32\" else \"/\"\n exe_dir = sys.executable[: sys.executable.rfind(sep)]\n for at, value in enumerate(sys.path):\n # replace old sys prefix path starts with new\n if value == exe_dir:\n pass # don't fix the current executable location, notably on Windows this gets added\n elif value.startswith(exe_dir):\n # content inside the exe folder needs to remap to original executables folder\n orig_exe_folder = sys.base_executable[: sys.base_executable.rfind(sep)]\n value = \"{}{}\".format(orig_exe_folder, value[len(exe_dir) :])\n elif value.startswith(sys.prefix):\n value = \"{}{}\".format(sys.base_prefix, value[len(sys.prefix) :])\n elif value.startswith(sys.exec_prefix):\n value = \"{}{}\".format(sys.base_exec_prefix, value[len(sys.exec_prefix) :])\n sys.path[at] = value\n\n\ndef disable_user_site_package():\n \"\"\"Flip the switch on enable user site package\"\"\"\n # sys.flags is a c-extension type, so we cannot monkey patch it, replace it with a python class to flip it\n sys.original_flags = sys.flags\n\n class Flags(object):\n def __init__(self):\n self.__dict__ = {key: getattr(sys.flags, key) for key in dir(sys.flags) if not 
key.startswith(\"_\")}\n\n sys.flags = Flags()\n sys.flags.no_user_site = 1\n\n\ndef add_global_site_package():\n \"\"\"add the global site package\"\"\"\n import site\n\n # add user site package\n sys.flags = sys.original_flags # restore original\n site.ENABLE_USER_SITE = None # reset user site check\n # add the global site package to the path - use new prefix and delegate to site.py\n orig_prefixes = None\n try:\n orig_prefixes = site.PREFIXES\n site.PREFIXES = [sys.base_prefix, sys.base_exec_prefix]\n site.main()\n finally:\n site.PREFIXES = orig_prefixes\n\n\nmain()\n", "path": "src/virtualenv/create/via_global_ref/builtin/python2/site.py"}]} | 2,198 | 565 |
gh_patches_debug_63093 | rasdani/github-patches | git_diff | translate__pootle-4270 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PootleCommand handles obsolete translation projects
The `update_stores` command can process an obsolete TP directory many times.
It doesn't affect us much, except that we get extra, unnecessary log messages like this:
```
set(['get_last_updated', 'get_checks', 'get_mtime', 'get_suggestion_count', 'get_last_action', 'get_wordcount_stats']) deleted from /uk/android_announcements_evernote/ cache
```
which tells us that the cache for the TP has been deleted.
I think no `PootleCommand` should handle TPs with obsolete directories. The `update_stores` command will resurrect a TP directory when it is added back.
</issue>
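One way to read the requested change: management commands should simply skip translation projects whose directories are obsolete when they iterate over projects. The sketch below is illustrative only, not the project's final patch; it assumes the `TranslationProject` related manager exposes a `live()` queryset (analogous to the `tp.stores.live()` call already used in `PootleCommand.do_translation_project` below) that excludes obsolete rows.

```python
def iter_live_translation_projects(project_query, languages=None):
    """Yield only live (non-obsolete) translation projects.

    Sketch under the assumption that ``translationproject_set.live()``
    exists and filters out obsolete TPs; commands such as
    ``update_stores`` would then never touch obsolete directories.
    """
    for project in project_query.iterator():
        tp_query = project.translationproject_set.live() \
                          .order_by('language__code')
        if languages:
            tp_query = tp_query.filter(language__code__in=languages)
        for tp in tp_query.iterator():
            yield tp
```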
<code>
[start of pootle/apps/pootle_app/management/commands/__init__.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import datetime
11 import logging
12
13 from optparse import make_option
14
15 from django.core.management.base import BaseCommand, NoArgsCommand
16
17 from pootle.runner import set_sync_mode
18 from pootle_project.models import Project
19 from pootle_translationproject.models import TranslationProject
20
21
22 class PootleCommand(NoArgsCommand):
23 """Base class for handling recursive pootle store management commands."""
24
25 shared_option_list = (
26 make_option(
27 '--project',
28 action='append',
29 dest='projects',
30 help='Project to refresh',
31 ),
32 make_option(
33 '--language',
34 action='append',
35 dest='languages',
36 help='Language to refresh',
37 ),
38 make_option(
39 "--noinput",
40 action="store_true",
41 default=False,
42 help=u"Never prompt for input",
43 ),
44 make_option(
45 "--no-rq",
46 action="store_true",
47 default=False,
48 help=(u"Run all jobs in a single process, without "
49 "using rq workers"),
50 ),
51 )
52 option_list = NoArgsCommand.option_list + shared_option_list
53 process_disabled_projects = False
54
55 def __init__(self, *args, **kwargs):
56 self.languages = []
57 self.projects = []
58 super(PootleCommand, self).__init__(*args, **kwargs)
59
60 def do_translation_project(self, tp, **options):
61 process_stores = True
62
63 if hasattr(self, "handle_translation_project"):
64 logging.info(u"Running %s over %s", self.name, tp)
65 try:
66 process_stores = self.handle_translation_project(tp, **options)
67 except Exception:
68 logging.exception(u"Failed to run %s over %s", self.name, tp)
69 return
70
71 if not process_stores:
72 return
73
74 if hasattr(self, "handle_all_stores"):
75 logging.info(u"Running %s over %s's files", self.name, tp)
76 try:
77 self.handle_all_stores(tp, **options)
78 except Exception:
79 logging.exception(u"Failed to run %s over %s's files",
80 self.name, tp)
81 return
82 elif hasattr(self, "handle_store"):
83 store_query = tp.stores.live()
84 for store in store_query.iterator():
85 logging.info(u"Running %s over %s",
86 self.name, store.pootle_path)
87 try:
88 self.handle_store(store, **options)
89 except Exception:
90 logging.exception(u"Failed to run %s over %s",
91 self.name, store.pootle_path)
92
93 def handle_noargs(self, **options):
94 # adjust debug level to the verbosity option
95 verbosity = int(options.get('verbosity', 1))
96 debug_levels = {
97 0: logging.ERROR,
98 1: logging.WARNING,
99 2: logging.INFO,
100 3: logging.DEBUG
101 }
102 debug_level = debug_levels.get(verbosity, logging.DEBUG)
103 logging.getLogger().setLevel(debug_level)
104
105 # reduce size of parse pool early on
106 self.name = self.__class__.__module__.split('.')[-1]
107 from pootle_store.fields import TranslationStoreFieldFile
108 TranslationStoreFieldFile._store_cache.maxsize = 2
109 TranslationStoreFieldFile._store_cache.cullsize = 2
110 TranslationProject._non_db_state_cache.maxsize = 2
111 TranslationProject._non_db_state_cache.cullsize = 2
112
113 self.projects = options.pop('projects', [])
114 self.languages = options.pop('languages', [])
115
116 # info start
117 start = datetime.datetime.now()
118 logging.info('Start running of %s', self.name)
119
120 self.handle_all(**options)
121
122 # info finish
123 end = datetime.datetime.now()
124 logging.info('All done for %s in %s', self.name, end - start)
125
126 def handle_all(self, **options):
127 if options.get("no_rq", False):
128 set_sync_mode(options.get('noinput', False))
129
130 if self.process_disabled_projects:
131 project_query = Project.objects.all()
132 else:
133 project_query = Project.objects.enabled()
134
135 if self.projects:
136 project_query = project_query.filter(code__in=self.projects)
137
138 for project in project_query.iterator():
139 tp_query = project.translationproject_set \
140 .order_by('language__code')
141
142 if self.languages:
143 tp_query = tp_query.filter(language__code__in=self.languages)
144
145 for tp in tp_query.iterator():
146 self.do_translation_project(tp, **options)
147
148
149 class BaseRunCommand(BaseCommand):
150 """Base class to build new server runners.
151
152 Based on code from `django-shoes
153 <https://bitbucket.org/mlzboy/django-shoes/>`_.
154 """
155
156 hostport_option_list = (
157 make_option(
158 '--host',
159 action='store',
160 dest='host',
161 default='127.0.0.1',
162 help='Hostname to listen on.',
163 ),
164 make_option(
165 '--port',
166 action='store',
167 dest='port',
168 default=8000,
169 type=int,
170 help='The TCP port to listen on.',
171 ),
172 )
173
174 option_list = BaseCommand.option_list + hostport_option_list
175
176 def handle(self, *args, **options):
177 return self.serve_forever(*args, **options)
178
179 def get_app(self):
180 from django.contrib.staticfiles.handlers import StaticFilesHandler
181 from django.core.handlers.wsgi import WSGIHandler
182
183 app = StaticFilesHandler(WSGIHandler())
184 return app
185
186 def serve_forever(self, *args, **kwargs):
187 raise NotImplementedError
188
[end of pootle/apps/pootle_app/management/commands/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/management/commands/__init__.py b/pootle/apps/pootle_app/management/commands/__init__.py
--- a/pootle/apps/pootle_app/management/commands/__init__.py
+++ b/pootle/apps/pootle_app/management/commands/__init__.py
@@ -136,7 +136,7 @@
project_query = project_query.filter(code__in=self.projects)
for project in project_query.iterator():
- tp_query = project.translationproject_set \
+ tp_query = project.translationproject_set.live() \
.order_by('language__code')
if self.languages:
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/__init__.py b/pootle/apps/pootle_app/management/commands/__init__.py\n--- a/pootle/apps/pootle_app/management/commands/__init__.py\n+++ b/pootle/apps/pootle_app/management/commands/__init__.py\n@@ -136,7 +136,7 @@\n project_query = project_query.filter(code__in=self.projects)\n \n for project in project_query.iterator():\n- tp_query = project.translationproject_set \\\n+ tp_query = project.translationproject_set.live() \\\n .order_by('language__code')\n \n if self.languages:\n", "issue": "PootleCommand handles obsolete translation projects\n`update_stores` command can make obsolete TP directory many times. \nIt doesn't affect us much except we get extra unnecessary log messages like this:\n\n```\nset(['get_last_updated', 'get_checks', 'get_mtime', 'get_suggestion_count', 'get_last_action', 'get_wordcount_stats']) deleted from /uk/android_announcements_evernote/ cache\n```\n\nwhich tells us that cache for TP has been deleted.\n\nI think any `PootleCommand` shouldn't handle TP with obsolete directories. `update_stores` command will resurrect TP directory when it's added.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport datetime\nimport logging\n\nfrom optparse import make_option\n\nfrom django.core.management.base import BaseCommand, NoArgsCommand\n\nfrom pootle.runner import set_sync_mode\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\n\n\nclass PootleCommand(NoArgsCommand):\n \"\"\"Base class for handling recursive pootle store management commands.\"\"\"\n\n shared_option_list = (\n make_option(\n '--project',\n action='append',\n dest='projects',\n help='Project to refresh',\n ),\n make_option(\n '--language',\n action='append',\n dest='languages',\n help='Language to refresh',\n ),\n make_option(\n \"--noinput\",\n action=\"store_true\",\n default=False,\n help=u\"Never prompt for input\",\n ),\n make_option(\n \"--no-rq\",\n action=\"store_true\",\n default=False,\n help=(u\"Run all jobs in a single process, without \"\n \"using rq workers\"),\n ),\n )\n option_list = NoArgsCommand.option_list + shared_option_list\n process_disabled_projects = False\n\n def __init__(self, *args, **kwargs):\n self.languages = []\n self.projects = []\n super(PootleCommand, self).__init__(*args, **kwargs)\n\n def do_translation_project(self, tp, **options):\n process_stores = True\n\n if hasattr(self, \"handle_translation_project\"):\n logging.info(u\"Running %s over %s\", self.name, tp)\n try:\n process_stores = self.handle_translation_project(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s\", self.name, tp)\n return\n\n if not process_stores:\n return\n\n if hasattr(self, \"handle_all_stores\"):\n logging.info(u\"Running %s over %s's files\", self.name, tp)\n try:\n self.handle_all_stores(tp, **options)\n except Exception:\n logging.exception(u\"Failed to run %s over %s's files\",\n self.name, tp)\n return\n elif hasattr(self, \"handle_store\"):\n store_query = tp.stores.live()\n for store in store_query.iterator():\n logging.info(u\"Running %s over %s\",\n self.name, store.pootle_path)\n try:\n self.handle_store(store, **options)\n except 
Exception:\n logging.exception(u\"Failed to run %s over %s\",\n self.name, store.pootle_path)\n\n def handle_noargs(self, **options):\n # adjust debug level to the verbosity option\n verbosity = int(options.get('verbosity', 1))\n debug_levels = {\n 0: logging.ERROR,\n 1: logging.WARNING,\n 2: logging.INFO,\n 3: logging.DEBUG\n }\n debug_level = debug_levels.get(verbosity, logging.DEBUG)\n logging.getLogger().setLevel(debug_level)\n\n # reduce size of parse pool early on\n self.name = self.__class__.__module__.split('.')[-1]\n from pootle_store.fields import TranslationStoreFieldFile\n TranslationStoreFieldFile._store_cache.maxsize = 2\n TranslationStoreFieldFile._store_cache.cullsize = 2\n TranslationProject._non_db_state_cache.maxsize = 2\n TranslationProject._non_db_state_cache.cullsize = 2\n\n self.projects = options.pop('projects', [])\n self.languages = options.pop('languages', [])\n\n # info start\n start = datetime.datetime.now()\n logging.info('Start running of %s', self.name)\n\n self.handle_all(**options)\n\n # info finish\n end = datetime.datetime.now()\n logging.info('All done for %s in %s', self.name, end - start)\n\n def handle_all(self, **options):\n if options.get(\"no_rq\", False):\n set_sync_mode(options.get('noinput', False))\n\n if self.process_disabled_projects:\n project_query = Project.objects.all()\n else:\n project_query = Project.objects.enabled()\n\n if self.projects:\n project_query = project_query.filter(code__in=self.projects)\n\n for project in project_query.iterator():\n tp_query = project.translationproject_set \\\n .order_by('language__code')\n\n if self.languages:\n tp_query = tp_query.filter(language__code__in=self.languages)\n\n for tp in tp_query.iterator():\n self.do_translation_project(tp, **options)\n\n\nclass BaseRunCommand(BaseCommand):\n \"\"\"Base class to build new server runners.\n\n Based on code from `django-shoes\n <https://bitbucket.org/mlzboy/django-shoes/>`_.\n \"\"\"\n\n hostport_option_list = (\n make_option(\n '--host',\n action='store',\n dest='host',\n default='127.0.0.1',\n help='Hostname to listen on.',\n ),\n make_option(\n '--port',\n action='store',\n dest='port',\n default=8000,\n type=int,\n help='The TCP port to listen on.',\n ),\n )\n\n option_list = BaseCommand.option_list + hostport_option_list\n\n def handle(self, *args, **options):\n return self.serve_forever(*args, **options)\n\n def get_app(self):\n from django.contrib.staticfiles.handlers import StaticFilesHandler\n from django.core.handlers.wsgi import WSGIHandler\n\n app = StaticFilesHandler(WSGIHandler())\n return app\n\n def serve_forever(self, *args, **kwargs):\n raise NotImplementedError\n", "path": "pootle/apps/pootle_app/management/commands/__init__.py"}]} | 2,436 | 149 |
gh_patches_debug_22637 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1463 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "Following" tab to the user profile view
We recently removed some features from the user Profile view and now want to reimplement them using only Django.
This task will be to add a "Following" tab to the user profile view. The tab should link to a sub-path of the user profile, so it is clear to the viewer that they are viewing users that a particular user is following.
## Task
All of these tasks should be done in the `accounts` app.
- [ ] create a Django view called `UserFollowing` (in `accounts/views.py`)
- [ ] create a URL definition `profile/<str:username>/following` that renders the `UserFollowing` view ( in `accounts/urls.py`)
- [ ] ensure the `UserFollowing` view context contains a list of users that the relevant user is following (given via the `username` in the URL)
  - note: it should be possible to get the profiles that the user follows via `user.profile.following` or just `profile.following`
- [ ] create a template to render the followed usernames - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)
</issue>
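A rough sketch of what the task list above could translate to, following the pattern of the existing `UserProfileView` in `accounts/views.py` (included below). The class name and URL come from the issue text; the template path and context key are illustrative assumptions, not a prescribed implementation.

```python
# accounts/views.py (sketch only)
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.views import View

from accounts.models import Profile


class UserFollowing(LoginRequiredMixin, View):
    """Show the profiles that the user given by ``username`` is following."""

    def get(self, request, username=None):
        profile = get_object_or_404(Profile, user__username=username)
        return TemplateResponse(
            request,
            "accounts/user_following.html",  # illustrative template path
            {
                "profile": profile,
                "following": profile.following.all(),
            },
        )


# accounts/urls.py (sketch only)
# path("profile/<str:username>/following", UserFollowing.as_view(), name="user-following"),
```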
<code>
[start of project/accounts/views.py]
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from accounts.authentication import account_activation_token, send_activation_email
8 from accounts.forms import ProfileEditForm, UserRegistrationForm
9 from accounts.models import Profile
10 from django.conf import settings
11 from django.contrib.auth import get_user_model, login
12 from django.contrib.auth import views as auth_views
13 from django.contrib.auth.decorators import login_required
14 from django.contrib.auth.mixins import LoginRequiredMixin
15 from django.contrib.sites.shortcuts import get_current_site
16 from django.http import HttpResponseRedirect
17 from django.shortcuts import get_object_or_404, redirect
18 from django.template.response import TemplateResponse
19 from django.urls import reverse, reverse_lazy
20 from django.utils.encoding import force_str
21 from django.utils.http import urlsafe_base64_decode
22 from django.views import View
23 from django.views.generic.edit import FormView, UpdateView
24
25
26 class ProfileFollow(LoginRequiredMixin, View):
27 def get(self, request, *args, **kwargs):
28 # Prevent users from following themselves.
29 if request.user.username == kwargs["username"]:
30 pass
31 else:
32 following_profile = Profile.objects.get(user__username=kwargs["username"])
33
34 self.request.user.profile.following.add(following_profile)
35
36 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
37
38 return HttpResponseRedirect(redirect_to)
39
40
41 class ProfileUnfollow(LoginRequiredMixin, View):
42 def get(self, request, *args, **kwargs):
43 # Prevent users from following themselves.
44 if request.user.username == kwargs["username"]:
45 pass
46 else:
47 following_profile = Profile.objects.get(user__username=kwargs["username"])
48
49 self.request.user.profile.following.remove(following_profile)
50
51 redirect_to = reverse("profile", kwargs={"username": kwargs["username"]})
52
53 return HttpResponseRedirect(redirect_to)
54
55
56 class RegisterView(FormView):
57 """
58 A form view that handles user registration.
59 """
60
61 template_name = "accounts/register/register.html"
62 form_class = UserRegistrationForm
63 success_url = "/"
64
65 def _create_user(self, form):
66 username = form.cleaned_data["username"]
67 password = form.cleaned_data["password"]
68 email = form.cleaned_data["email"]
69 user = get_user_model().objects.create_user(username, email, password)
70 return user
71
72 def _send_email(self, user):
73 domain = get_current_site(self.request).domain
74 send_activation_email(user, domain)
75
76 def _login(self, user):
77 login(self.request, user)
78
79 def form_valid(self, form):
80 user = self._create_user(form)
81
82 self._send_email(user)
83 self._login(user)
84
85 return super(RegisterView, self).form_valid(form)
86
87
88 class ProfileActivationView(View):
89 """
90 This shows different views to the user when they are verifying
91 their account based on whether they are already verified or not.
92 """
93
94 def get(self, request, uidb64, token):
95
96 try:
97 uid = force_str(urlsafe_base64_decode(uidb64))
98 user = get_user_model().objects.get(pk=uid)
99
100 except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):
101 user = None
102
103 redirect_link = {"href": "/", "label": "Back to Main"}
104
105 template_var = {
106 "link": redirect_link,
107 }
108
109 if user is not None and account_activation_token.check_token(user, token):
110 profile = user.profile
111
112 if profile.is_verified:
113 template_var["title"] = "Email Already Verified"
114 template_var["content"] = "You have already verified your email."
115 else:
116 profile.is_verified = True
117 profile.save()
118
119 template_var["title"] = "Email Verification Successful"
120 template_var["content"] = "Thank you for verifying your email."
121 else:
122 # invalid link
123 template_var["title"] = "Email Verification Error"
124 template_var["content"] = "Email could not be verified"
125
126 return TemplateResponse(request, "general_message.html", template_var)
127
128
129 class PasswordResetView(auth_views.PasswordResetView):
130 template_name = "accounts/users/password_reset.html"
131 email_template_name = "accounts/users/password_reset_email.html"
132 subject_template_name = "accounts/users/password_reset_subject.txt"
133 from_email = settings.EMAIL_HOST_USER
134 success_url = reverse_lazy("accounts_password_reset_done")
135
136
137 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
138 template_name = "accounts/users/password_reset_done.html"
139
140
141 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
142 template_name = "accounts/users/password_reset_confirm.html"
143 success_url = reverse_lazy("accounts_password_reset_complete")
144
145
146 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
147 template_name = "accounts/users/password_reset_complete.html"
148
149
150 class SettingsView(LoginRequiredMixin, UpdateView):
151 """A form view to edit Profile"""
152
153 login_url = "accounts_login"
154 form_class = ProfileEditForm
155 success_url = reverse_lazy("accounts_settings")
156 template_name = "accounts/settings.html"
157
158 def get_object(self, queryset=None):
159 return Profile.objects.get(user=self.request.user)
160
161 def get_initial(self):
162 profile = Profile.objects.get(user=self.request.user)
163 self.initial.update(
164 {
165 "username": profile.user.username,
166 "email": profile.user.email,
167 "first_name": profile.first_name or None,
168 "last_name": profile.last_name or None,
169 "about_me": profile.about_me or None,
170 "profile_image": profile.profile_image or None,
171 }
172 )
173 return super(SettingsView, self).get_initial()
174
175
176 class UserProfileView(LoginRequiredMixin, View):
177 """A view that shows profile for authorized users"""
178
179 def get(self, request, username=None):
180 profile = get_object_or_404(Profile, user__username=username)
181
182 return TemplateResponse(
183 request,
184 "account.html",
185 {
186 "profile": profile,
187 },
188 )
189
190
191 @login_required
192 def expunge_user(request):
193 """
194 Delete User Information
195 """
196
197 user_model = get_user_model()
198 user = get_object_or_404(user_model, username=request.user.username)
199
200 profile = get_object_or_404(Profile, user=user)
201
202 # Expunge personally identifiable data in user
203 expunged_user_data = {
204 "is_active": False,
205 "email": "",
206 "first_name": "",
207 "last_name": "",
208 "username": f"expunged-{ user.id }",
209 }
210 user.__dict__.update(expunged_user_data)
211 user.save()
212
213 # Expunge personally identifiable data in profile
214 expunged_profile_data = {
215 "first_name": "",
216 "last_name": "",
217 "about_me": "",
218 }
219 profile.__dict__.update(expunged_profile_data)
220 profile.save()
221
222 return redirect("/")
223
[end of project/accounts/views.py]
[start of project/accounts/urls.py]
1 from accounts.views import (
2 PasswordResetCompleteView,
3 PasswordResetConfirmView,
4 PasswordResetDoneView,
5 PasswordResetView,
6 ProfileActivationView,
7 ProfileFollow,
8 ProfileUnfollow,
9 RegisterView,
10 SettingsView,
11 UserProfileView,
12 expunge_user,
13 )
14 from django.contrib.auth import views as auth_views
15 from django.urls import path
16
17 urlpatterns = [
18 path(
19 "login/",
20 auth_views.LoginView.as_view(template_name="accounts/register/login.html"),
21 name="accounts_login",
22 ),
23 path("logout/", auth_views.LogoutView.as_view(), name="accounts_logout"),
24 path("register/", RegisterView.as_view(), name="accounts_register"),
25 path("settings/", SettingsView.as_view(), name="accounts_settings"),
26 path(
27 "activate_account/<uidb64>/<token>/",
28 ProfileActivationView.as_view(),
29 name="accounts_activate",
30 ),
31 path("profile/<str:username>/", UserProfileView.as_view(), name="profile"),
32 path(
33 "profile/<str:username>/follow", ProfileFollow.as_view(), name="profile-follow"
34 ),
35 path(
36 "profile/<str:username>/unfollow",
37 ProfileUnfollow.as_view(),
38 name="profile-unfollow",
39 ),
40 path(
41 "accounts/password_reset/",
42 PasswordResetView.as_view(),
43 name="accounts_password_reset",
44 ),
45 path(
46 "accounts/password_reset_done/",
47 PasswordResetDoneView.as_view(),
48 name="accounts_password_reset_done",
49 ),
50 path(
51 "accounts/password_reset_confirm/<uidb64>/<token>/",
52 PasswordResetConfirmView.as_view(),
53 name="accounts_password_reset_confirm",
54 ),
55 path(
56 "accounts/password_reset_complete/",
57 PasswordResetCompleteView.as_view(),
58 name="accounts_password_reset_complete",
59 ),
60 path("accounts/expunge/", expunge_user, name="expunge_user"),
61 ]
62
[end of project/accounts/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/project/accounts/urls.py b/project/accounts/urls.py
--- a/project/accounts/urls.py
+++ b/project/accounts/urls.py
@@ -8,6 +8,7 @@
ProfileUnfollow,
RegisterView,
SettingsView,
+ ProfileFollowing,
UserProfileView,
expunge_user,
)
@@ -37,6 +38,11 @@
ProfileUnfollow.as_view(),
name="profile-unfollow",
),
+ path(
+ "profile/<str:username>/following",
+ ProfileFollowing.as_view(),
+ name="profile-following",
+ ),
path(
"accounts/password_reset/",
PasswordResetView.as_view(),
diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -188,6 +188,24 @@
)
+class ProfileFollowing(LoginRequiredMixin, View):
+ """
+ A view that shows list of profiles
+ that profile with given username is following
+ """
+
+ def get(self, request, username=None):
+ profile = get_object_or_404(Profile, user__username=username)
+
+ return TemplateResponse(
+ request,
+ "profile_following.html",
+ {
+ "profile": profile,
+ },
+ )
+
+
@login_required
def expunge_user(request):
"""
| {"golden_diff": "diff --git a/project/accounts/urls.py b/project/accounts/urls.py\n--- a/project/accounts/urls.py\n+++ b/project/accounts/urls.py\n@@ -8,6 +8,7 @@\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n+ ProfileFollowing,\n UserProfileView,\n expunge_user,\n )\n@@ -37,6 +38,11 @@\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n+ path(\n+ \"profile/<str:username>/following\",\n+ ProfileFollowing.as_view(),\n+ name=\"profile-following\",\n+ ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\ndiff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -188,6 +188,24 @@\n )\n \n \n+class ProfileFollowing(LoginRequiredMixin, View):\n+ \"\"\"\n+ A view that shows list of profiles\n+ that profile with given username is following\n+ \"\"\"\n+\n+ def get(self, request, username=None):\n+ profile = get_object_or_404(Profile, user__username=username)\n+\n+ return TemplateResponse(\n+ request,\n+ \"profile_following.html\",\n+ {\n+ \"profile\": profile,\n+ },\n+ )\n+\n+\n @login_required\n def expunge_user(request):\n \"\"\"\n", "issue": "Add \"Following\" tab to the user profile view\nWe recently removed some features from the user Profile view and now want to reimplement them using only Django.\r\n\r\nThis task will be to add a \"Following\" tab to the user profile view. The tab should link to a sub-path of the user profile, so it is clear to the viewer that they are viewing users that a particular user is following.\r\n\r\n## Task\r\nAll of these tasks should be done in the `accounts` app.\r\n\r\n- [ ] create a Django view called `UserFollowing` (in `accounts/views.py`)\r\n- [ ] create a URL definition `profile/<str:username>/following` that renders the `UserFollowing` view ( in `accounts/urls.py`)\r\n- [ ] ensure the `UserFollowing` view context contains a list of users that the relevant user is following (given via the `username` in the URL) \r\n - note, it should be possible to get profiles that the user follower via `user.profile.following` or just `profile.following`\r\n- [ ] create a template to render the followed usernames - for now, extending `base.html` so we have consistent branding (in `accounts/templates/accounts/user_civis.html`)\r\n\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UserRegistrationForm\nfrom accounts.models import Profile\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\n\nclass ProfileFollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n 
following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.add(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass ProfileUnfollow(LoginRequiredMixin, View):\n def get(self, request, *args, **kwargs):\n # Prevent users from following themselves.\n if request.user.username == kwargs[\"username\"]:\n pass\n else:\n following_profile = Profile.objects.get(user__username=kwargs[\"username\"])\n\n self.request.user.profile.following.remove(following_profile)\n\n redirect_to = reverse(\"profile\", kwargs={\"username\": kwargs[\"username\"]})\n\n return HttpResponseRedirect(redirect_to)\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = get_user_model().objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, get_user_model().DoesNotExist):\n user = None\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n\n template_var = {\n \"link\": redirect_link,\n }\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = user.profile\n\n if profile.is_verified:\n template_var[\"title\"] = \"Email Already Verified\"\n template_var[\"content\"] = \"You have already verified your email.\"\n else:\n profile.is_verified = True\n profile.save()\n\n template_var[\"title\"] = \"Email Verification Successful\"\n template_var[\"content\"] = \"Thank you for verifying your email.\"\n else:\n # invalid link\n template_var[\"title\"] = \"Email Verification Error\"\n template_var[\"content\"] = \"Email could not be verified\"\n\n return TemplateResponse(request, \"general_message.html\", template_var)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n 
template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n \"profile_image\": profile.profile_image or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass UserProfileView(LoginRequiredMixin, View):\n \"\"\"A view that shows profile for authorized users\"\"\"\n\n def get(self, request, username=None):\n profile = get_object_or_404(Profile, user__username=username)\n\n return TemplateResponse(\n request,\n \"account.html\",\n {\n \"profile\": profile,\n },\n )\n\n\n@login_required\ndef expunge_user(request):\n \"\"\"\n Delete User Information\n \"\"\"\n\n user_model = get_user_model()\n user = get_object_or_404(user_model, username=request.user.username)\n\n profile = get_object_or_404(Profile, user=user)\n\n # Expunge personally identifiable data in user\n expunged_user_data = {\n \"is_active\": False,\n \"email\": \"\",\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"username\": f\"expunged-{ user.id }\",\n }\n user.__dict__.update(expunged_user_data)\n user.save()\n\n # Expunge personally identifiable data in profile\n expunged_profile_data = {\n \"first_name\": \"\",\n \"last_name\": \"\",\n \"about_me\": \"\",\n }\n profile.__dict__.update(expunged_profile_data)\n profile.save()\n\n return redirect(\"/\")\n", "path": "project/accounts/views.py"}, {"content": "from accounts.views import (\n PasswordResetCompleteView,\n PasswordResetConfirmView,\n PasswordResetDoneView,\n PasswordResetView,\n ProfileActivationView,\n ProfileFollow,\n ProfileUnfollow,\n RegisterView,\n SettingsView,\n UserProfileView,\n expunge_user,\n)\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path\n\nurlpatterns = [\n path(\n \"login/\",\n auth_views.LoginView.as_view(template_name=\"accounts/register/login.html\"),\n name=\"accounts_login\",\n ),\n path(\"logout/\", auth_views.LogoutView.as_view(), name=\"accounts_logout\"),\n path(\"register/\", RegisterView.as_view(), name=\"accounts_register\"),\n path(\"settings/\", SettingsView.as_view(), name=\"accounts_settings\"),\n path(\n \"activate_account/<uidb64>/<token>/\",\n ProfileActivationView.as_view(),\n name=\"accounts_activate\",\n ),\n path(\"profile/<str:username>/\", UserProfileView.as_view(), name=\"profile\"),\n path(\n \"profile/<str:username>/follow\", ProfileFollow.as_view(), name=\"profile-follow\"\n ),\n path(\n \"profile/<str:username>/unfollow\",\n ProfileUnfollow.as_view(),\n name=\"profile-unfollow\",\n ),\n path(\n \"accounts/password_reset/\",\n PasswordResetView.as_view(),\n name=\"accounts_password_reset\",\n ),\n path(\n \"accounts/password_reset_done/\",\n PasswordResetDoneView.as_view(),\n name=\"accounts_password_reset_done\",\n ),\n path(\n \"accounts/password_reset_confirm/<uidb64>/<token>/\",\n PasswordResetConfirmView.as_view(),\n name=\"accounts_password_reset_confirm\",\n ),\n path(\n 
\"accounts/password_reset_complete/\",\n PasswordResetCompleteView.as_view(),\n name=\"accounts_password_reset_complete\",\n ),\n path(\"accounts/expunge/\", expunge_user, name=\"expunge_user\"),\n]\n", "path": "project/accounts/urls.py"}]} | 3,354 | 308 |
gh_patches_debug_19966 | rasdani/github-patches | git_diff | freedomofpress__securedrop-5116 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update UI/docs consistent with "New Identity" behavior change in Tor Browser 9.0
The "New Identity" behavior has changed once again in Tor Browser 9.0. There's now a dedicated button for it:
[Screenshot: Tor Browser 9.0 toolbar with the new dedicated "New Identity" button]
So we have to update these instructions:
> Thank you for exiting your session! Please select "New Identity" from the onion button in the Tor browser's toolbar to clear all history of your SecureDrop usage from this device.
And also the relevant portion in the [source guide](https://docs.securedrop.org/en/release-1.1.0/source.html).
</issue>
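For context, the quoted text is the flash message rendered from `logout_flashed_message.html` by the `/logout` view in `source_app/main.py` (included below), so the fix is essentially a copy change there plus the matching documentation update. Wording along the lines of "Please click the New Identity button in your Tor Browser's toolbar to clear all history of your SecureDrop usage from this device" is one possibility, but the final phrasing is for the patch to decide.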
<code>
[start of securedrop/source_app/main.py]
1 import operator
2 import os
3 import io
4
5 from base64 import urlsafe_b64encode
6 from datetime import datetime
7 from flask import (Blueprint, render_template, flash, redirect, url_for, g,
8 session, current_app, request, Markup, abort)
9 from flask_babel import gettext
10 from sqlalchemy.exc import IntegrityError
11
12 import store
13
14 from db import db
15 from models import Source, Submission, Reply, get_one_or_else
16 from source_app.decorators import login_required
17 from source_app.utils import (logged_in, generate_unique_codename,
18 async_genkey, normalize_timestamps,
19 valid_codename, get_entropy_estimate)
20 from source_app.forms import LoginForm
21
22
23 def make_blueprint(config):
24 view = Blueprint('main', __name__)
25
26 @view.route('/')
27 def index():
28 return render_template('index.html')
29
30 @view.route('/generate', methods=('GET', 'POST'))
31 def generate():
32 if logged_in():
33 flash(gettext(
34 "You were redirected because you are already logged in. "
35 "If you want to create a new account, you should log out "
36 "first."),
37 "notification")
38 return redirect(url_for('.lookup'))
39
40 codename = generate_unique_codename(config)
41
42 # Generate a unique id for each browser tab and associate the codename with this id.
43 # This will allow retrieval of the codename displayed in the tab from which the source has
44 # clicked to proceed to /generate (ref. issue #4458)
45 tab_id = urlsafe_b64encode(os.urandom(64)).decode()
46 codenames = session.get('codenames', {})
47 codenames[tab_id] = codename
48 session['codenames'] = codenames
49
50 session['new_user'] = True
51 return render_template('generate.html', codename=codename, tab_id=tab_id)
52
53 @view.route('/org-logo')
54 def select_logo():
55 if os.path.exists(os.path.join(current_app.static_folder, 'i',
56 'custom_logo.png')):
57 return redirect(url_for('static', filename='i/custom_logo.png'))
58 else:
59 return redirect(url_for('static', filename='i/logo.png'))
60
61 @view.route('/create', methods=['POST'])
62 def create():
63 if session.get('logged_in', False):
64 flash(gettext("You are already logged in. Please verify your codename below as it " +
65 "may differ from the one displayed on the previous page."),
66 'notification')
67 else:
68 tab_id = request.form['tab_id']
69 codename = session['codenames'][tab_id]
70 session['codename'] = codename
71
72 del session['codenames']
73
74 filesystem_id = current_app.crypto_util.hash_codename(codename)
75
76 source = Source(filesystem_id, current_app.crypto_util.display_id())
77 db.session.add(source)
78 try:
79 db.session.commit()
80 except IntegrityError as e:
81 db.session.rollback()
82 current_app.logger.error(
83 "Attempt to create a source with duplicate codename: %s" %
84 (e,))
85
86 # Issue 2386: don't log in on duplicates
87 del session['codename']
88
89 # Issue 4361: Delete 'logged_in' if it's in the session
90 try:
91 del session['logged_in']
92 except KeyError:
93 pass
94
95 abort(500)
96 else:
97 os.mkdir(current_app.storage.path(filesystem_id))
98
99 session['logged_in'] = True
100 return redirect(url_for('.lookup'))
101
102 @view.route('/lookup', methods=('GET',))
103 @login_required
104 def lookup():
105 replies = []
106 source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \
107 .filter(Reply.deleted_by_source == False).all() # noqa
108
109 for reply in source_inbox:
110 reply_path = current_app.storage.path(
111 g.filesystem_id,
112 reply.filename,
113 )
114 try:
115 with io.open(reply_path, "rb") as f:
116 contents = f.read()
117 reply_obj = current_app.crypto_util.decrypt(g.codename, contents)
118 reply.decrypted = reply_obj
119 except UnicodeDecodeError:
120 current_app.logger.error("Could not decode reply %s" %
121 reply.filename)
122 else:
123 reply.date = datetime.utcfromtimestamp(
124 os.stat(reply_path).st_mtime)
125 replies.append(reply)
126
127 # Sort the replies by date
128 replies.sort(key=operator.attrgetter('date'), reverse=True)
129
130 # Generate a keypair to encrypt replies from the journalist
131 # Only do this if the journalist has flagged the source as one
132 # that they would like to reply to. (Issue #140.)
133 if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \
134 g.source.flagged:
135 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
136 async_genkey(current_app.crypto_util,
137 db_uri,
138 g.filesystem_id,
139 g.codename)
140
141 return render_template(
142 'lookup.html',
143 allow_document_uploads=current_app.instance_config.allow_document_uploads,
144 codename=g.codename,
145 replies=replies,
146 flagged=g.source.flagged,
147 new_user=session.get('new_user', None),
148 haskey=current_app.crypto_util.get_fingerprint(
149 g.filesystem_id))
150
151 @view.route('/submit', methods=('POST',))
152 @login_required
153 def submit():
154 allow_document_uploads = current_app.instance_config.allow_document_uploads
155 msg = request.form['msg']
156 fh = None
157 if allow_document_uploads and 'fh' in request.files:
158 fh = request.files['fh']
159
160 # Don't submit anything if it was an "empty" submission. #878
161 if not (msg or fh):
162 if allow_document_uploads:
163 flash(gettext(
164 "You must enter a message or choose a file to submit."),
165 "error")
166 else:
167 flash(gettext("You must enter a message."), "error")
168 return redirect(url_for('main.lookup'))
169
170 fnames = []
171 journalist_filename = g.source.journalist_filename
172 first_submission = g.source.interaction_count == 0
173
174 if msg:
175 g.source.interaction_count += 1
176 fnames.append(
177 current_app.storage.save_message_submission(
178 g.filesystem_id,
179 g.source.interaction_count,
180 journalist_filename,
181 msg))
182 if fh:
183 g.source.interaction_count += 1
184 fnames.append(
185 current_app.storage.save_file_submission(
186 g.filesystem_id,
187 g.source.interaction_count,
188 journalist_filename,
189 fh.filename,
190 fh.stream))
191
192 if first_submission:
193 msg = render_template('first_submission_flashed_message.html')
194 flash(Markup(msg), "success")
195
196 else:
197 if msg and not fh:
198 html_contents = gettext('Thanks! We received your message.')
199 elif not msg and fh:
200 html_contents = gettext('Thanks! We received your document.')
201 else:
202 html_contents = gettext('Thanks! We received your message and '
203 'document.')
204
205 msg = render_template('next_submission_flashed_message.html',
206 html_contents=html_contents)
207 flash(Markup(msg), "success")
208
209 new_submissions = []
210 for fname in fnames:
211 submission = Submission(g.source, fname)
212 db.session.add(submission)
213 new_submissions.append(submission)
214
215 if g.source.pending:
216 g.source.pending = False
217
218 # Generate a keypair now, if there's enough entropy (issue #303)
219 # (gpg reads 300 bytes from /dev/random)
220 entropy_avail = get_entropy_estimate()
221 if entropy_avail >= 2400:
222 db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
223
224 async_genkey(current_app.crypto_util,
225 db_uri,
226 g.filesystem_id,
227 g.codename)
228 current_app.logger.info("generating key, entropy: {}".format(
229 entropy_avail))
230 else:
231 current_app.logger.warn(
232 "skipping key generation. entropy: {}".format(
233 entropy_avail))
234
235 g.source.last_updated = datetime.utcnow()
236 db.session.commit()
237
238 for sub in new_submissions:
239 store.async_add_checksum_for_file(sub)
240
241 normalize_timestamps(g.filesystem_id)
242
243 return redirect(url_for('main.lookup'))
244
245 @view.route('/delete', methods=('POST',))
246 @login_required
247 def delete():
248 """This deletes the reply from the source's inbox, but preserves
249 the history for journalists such that they can view conversation
250 history.
251 """
252
253 query = Reply.query.filter_by(
254 filename=request.form['reply_filename'],
255 source_id=g.source.id)
256 reply = get_one_or_else(query, current_app.logger, abort)
257 reply.deleted_by_source = True
258 db.session.add(reply)
259 db.session.commit()
260
261 flash(gettext("Reply deleted"), "notification")
262 return redirect(url_for('.lookup'))
263
264 @view.route('/delete-all', methods=('POST',))
265 @login_required
266 def batch_delete():
267 replies = Reply.query.filter(Reply.source_id == g.source.id) \
268 .filter(Reply.deleted_by_source == False).all() # noqa
269 if len(replies) == 0:
270 current_app.logger.error("Found no replies when at least one was "
271 "expected")
272 return redirect(url_for('.lookup'))
273
274 for reply in replies:
275 reply.deleted_by_source = True
276 db.session.add(reply)
277 db.session.commit()
278
279 flash(gettext("All replies have been deleted"), "notification")
280 return redirect(url_for('.lookup'))
281
282 @view.route('/login', methods=('GET', 'POST'))
283 def login():
284 form = LoginForm()
285 if form.validate_on_submit():
286 codename = request.form['codename'].strip()
287 if valid_codename(codename):
288 session.update(codename=codename, logged_in=True)
289 return redirect(url_for('.lookup', from_login='1'))
290 else:
291 current_app.logger.info(
292 "Login failed for invalid codename")
293 flash(gettext("Sorry, that is not a recognized codename."),
294 "error")
295 return render_template('login.html', form=form)
296
297 @view.route('/logout')
298 def logout():
299 if logged_in():
300 msg = render_template('logout_flashed_message.html')
301
302 # Clear the session after we render the message so it's localized
303 # If a user specified a locale, save it and restore it
304 user_locale = g.locale
305 session.clear()
306 session['locale'] = user_locale
307
308 flash(Markup(msg), "important hide-if-not-tor-browser")
309 return redirect(url_for('.index'))
310
311 return view
312
[end of securedrop/source_app/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py
--- a/securedrop/source_app/main.py
+++ b/securedrop/source_app/main.py
@@ -296,8 +296,12 @@
@view.route('/logout')
def logout():
+ """
+ If a user is logged in, show them a logout page that prompts them to
+ click the New Identity button in Tor Browser to complete their session.
+ Otherwise redirect to the main Source Interface page.
+ """
if logged_in():
- msg = render_template('logout_flashed_message.html')
# Clear the session after we render the message so it's localized
# If a user specified a locale, save it and restore it
@@ -305,7 +309,8 @@
session.clear()
session['locale'] = user_locale
- flash(Markup(msg), "important hide-if-not-tor-browser")
- return redirect(url_for('.index'))
+ return render_template('logout.html')
+ else:
+ return redirect(url_for('.index'))
return view
| {"golden_diff": "diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py\n--- a/securedrop/source_app/main.py\n+++ b/securedrop/source_app/main.py\n@@ -296,8 +296,12 @@\n \n @view.route('/logout')\n def logout():\n+ \"\"\"\n+ If a user is logged in, show them a logout page that prompts them to\n+ click the New Identity button in Tor Browser to complete their session.\n+ Otherwise redirect to the main Source Interface page.\n+ \"\"\"\n if logged_in():\n- msg = render_template('logout_flashed_message.html')\n \n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n@@ -305,7 +309,8 @@\n session.clear()\n session['locale'] = user_locale\n \n- flash(Markup(msg), \"important hide-if-not-tor-browser\")\n- return redirect(url_for('.index'))\n+ return render_template('logout.html')\n+ else:\n+ return redirect(url_for('.index'))\n \n return view\n", "issue": "Update UI/docs consistent with \"New Identity\" behavior change in Tor Browser 9.0\nThe \"New Identity\" behavior has changed once again in Tor Browser 9.0. There's now a dedicated button for it:\r\n\r\n\r\nSo we have to update these instructions:\r\n\r\n> Thank you for exiting your session! Please select \"New Identity\" from the onion button in the Tor browser's toolbar to clear all history of your SecureDrop usage from this device.\r\n\r\nAnd also the relevant portion in the [source guide](https://docs.securedrop.org/en/release-1.1.0/source.html).\n", "before_files": [{"content": "import operator\nimport os\nimport io\n\nfrom base64 import urlsafe_b64encode\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n\n # Generate a unique id for each browser tab and associate the codename with this id.\n # This will allow retrieval of the codename displayed in the tab from which the source has\n # clicked to proceed to /generate (ref. 
issue #4458)\n tab_id = urlsafe_b64encode(os.urandom(64)).decode()\n codenames = session.get('codenames', {})\n codenames[tab_id] = codename\n session['codenames'] = codenames\n\n session['new_user'] = True\n return render_template('generate.html', codename=codename, tab_id=tab_id)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n if session.get('logged_in', False):\n flash(gettext(\"You are already logged in. Please verify your codename below as it \" +\n \"may differ from the one displayed on the previous page.\"),\n 'notification')\n else:\n tab_id = request.form['tab_id']\n codename = session['codenames'][tab_id]\n session['codename'] = codename\n\n del session['codenames']\n\n filesystem_id = current_app.crypto_util.hash_codename(codename)\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. (Issue #140.)\n if not current_app.crypto_util.get_fingerprint(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n allow_document_uploads=current_app.instance_config.allow_document_uploads,\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.get_fingerprint(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n allow_document_uploads = current_app.instance_config.allow_document_uploads\n msg = request.form['msg']\n fh = None\n if allow_document_uploads and 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. 
#878\n if not (msg or fh):\n if allow_document_uploads:\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n else:\n flash(gettext(\"You must enter a message.\"), \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n msg = render_template('logout_flashed_message.html')\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n", "path": "securedrop/source_app/main.py"}]} | 3,828 | 247 |