problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_3993
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-382
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NY: Committees don't have members
Taken from bug #195
http://openstates.org/browse/ny/committees/
http://www.nysenate.gov/committee/state-native-american-relations
http://assembly.state.ny.us/comm/?sec=mem&id=60
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/ny/committees.py`
Content:
```
1 import re
2
3 from billy.scrape import NoDataForPeriod
4 from billy.scrape.committees import CommitteeScraper, Committee
5
6 import lxml.html
7
8
9 def parse_name(name):
10 """
11 Split a committee membership string into name and role.
12
13 >>> parse_name('Felix Ortiz')
14 ('Felix Ortiz', 'member')
15 >>> parse_name('Felix Ortiz (Chair)')
16 ('Felix Ortiz', 'chair')
17 >>> parse_name('Hon. Felix Ortiz, Co-Chair')
18 ('Felix Ortiz', 'co-chair')
19 >>> parse_name('Owen H.\\r\\nJohnson (Vice Chairperson)')
20 ('Owen H. Johnson', 'vice chairperson')
21 """
22 name = re.sub(r'^(Hon\.|Assemblyman|Assemblywoman)\s+', '', name)
23 name = re.sub(r'\s+', ' ', name)
24
25 roles = ["Chairwoman", "Chairperson", "Chair", "Secretary", "Treasurer",
26 "Parliamentarian", "Chaplain"]
27 match = re.match(
28 r'([^(]+),? \(?((Co|Vice)?-?\s*(%s))\)?' % '|'.join(roles),
29 name)
30
31 if match:
32 name = match.group(1).strip(' ,')
33 role = match.group(2).lower()
34 return (name, role)
35 return (name, 'member')
36
37
38 class NYCommitteeScraper(CommitteeScraper):
39 state = "ny"
40 latest_only = True
41
42 def scrape(self, chamber, term):
43 getattr(self, 'scrape_' + chamber)()
44
45 def scrape_lower(self, only_names=None):
46 committees = []
47 url = "http://assembly.state.ny.us/comm/"
48 page = self.urlopen(url)
49 page = lxml.html.fromstring(page)
50 page.make_links_absolute(url)
51
52 for link in page.xpath("//a[contains(@href, 'sec=mem')]"):
53 name = link.xpath("string(../strong)").strip()
54 if 'Caucus' in name:
55 continue
56
57 url = link.attrib['href']
58
59 committees.append(name)
60
61 self.scrape_lower_committee(name, url)
62 return committees
63
64 def scrape_lower_committee(self, name, url):
65 page = self.urlopen(url)
66 page = lxml.html.fromstring(page)
67
68 comm = Committee('lower', name)
69 comm.add_source(url)
70
71 for link in page.xpath("//a[contains(@href, 'mem?ad')]"):
72 member = link.text.strip()
73 member = re.sub(r'\s+', ' ', member)
74
75 name, role = parse_name(member)
76 comm.add_member(name, role)
77
78 self.save_committee(comm)
79
80 def scrape_upper(self):
81 committees = []
82 url = "http://www.nysenate.gov/committees"
83 page = self.urlopen(url)
84 page = lxml.html.fromstring(page)
85 page.make_links_absolute(url)
86
87 for link in page.xpath("//a[contains(@href, '/committee/')]"):
88 name = link.text.strip()
89
90 if name == 'New York State Conference of Black Senators':
91 # stop scraping once we reach the caucuses
92 break
93
94 committees.append(name)
95 self.scrape_upper_committee(name, link.attrib['href'])
96 return committees
97
98 def scrape_upper_committee(self, name, url):
99 page = self.urlopen(url)
100 page = lxml.html.fromstring(page)
101
102 comm = Committee('upper', name)
103 comm.add_source(url)
104
105 member_div = page.xpath("//div[@class = 'committee-members']")[0]
106
107 seen = set()
108 for link in member_div.xpath(".//a"):
109 if not link.text:
110 continue
111
112 member = link.text.strip()
113
114 next_elem = link.getnext()
115 if (next_elem is not None and
116 next_elem.tag == 'a' and
117 next_elem.attrib['href'] == link.attrib['href']):
118 # Sometimes NY is cool and splits names across a
119 # couple links
120 member = "%s %s" % (member, next_elem.text.strip())
121
122 member = re.sub(r'\s+', ' ', member)
123
124 if member in seen or not member:
125 continue
126 seen.add(member)
127
128 name, role = parse_name(member)
129 comm.add_member(name, role)
130
131 self.save_committee(comm)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/ny/committees.py b/openstates/ny/committees.py
--- a/openstates/ny/committees.py
+++ b/openstates/ny/committees.py
@@ -68,7 +68,7 @@
comm = Committee('lower', name)
comm.add_source(url)
- for link in page.xpath("//a[contains(@href, 'mem?ad')]"):
+ for link in page.xpath("//div[@class='commlinks']//a[contains(@href, 'mem')]"):
member = link.text.strip()
member = re.sub(r'\s+', ' ', member)
|
{"golden_diff": "diff --git a/openstates/ny/committees.py b/openstates/ny/committees.py\n--- a/openstates/ny/committees.py\n+++ b/openstates/ny/committees.py\n@@ -68,7 +68,7 @@\n comm = Committee('lower', name)\n comm.add_source(url)\n \n- for link in page.xpath(\"//a[contains(@href, 'mem?ad')]\"):\n+ for link in page.xpath(\"//div[@class='commlinks']//a[contains(@href, 'mem')]\"):\n member = link.text.strip()\n member = re.sub(r'\\s+', ' ', member)\n", "issue": "NY: Committees don't have members\nTaken from bug #195\n\nhttp://openstates.org/browse/ny/committees/\nhttp://www.nysenate.gov/committee/state-native-american-relations\nhttp://assembly.state.ny.us/comm/?sec=mem&id=60\n\n", "before_files": [{"content": "import re\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\ndef parse_name(name):\n \"\"\"\n Split a committee membership string into name and role.\n\n >>> parse_name('Felix Ortiz')\n ('Felix Ortiz', 'member')\n >>> parse_name('Felix Ortiz (Chair)')\n ('Felix Ortiz', 'chair')\n >>> parse_name('Hon. Felix Ortiz, Co-Chair')\n ('Felix Ortiz', 'co-chair')\n >>> parse_name('Owen H.\\\\r\\\\nJohnson (Vice Chairperson)')\n ('Owen H. Johnson', 'vice chairperson')\n \"\"\"\n name = re.sub(r'^(Hon\\.|Assemblyman|Assemblywoman)\\s+', '', name)\n name = re.sub(r'\\s+', ' ', name)\n\n roles = [\"Chairwoman\", \"Chairperson\", \"Chair\", \"Secretary\", \"Treasurer\",\n \"Parliamentarian\", \"Chaplain\"]\n match = re.match(\n r'([^(]+),? \\(?((Co|Vice)?-?\\s*(%s))\\)?' % '|'.join(roles),\n name)\n\n if match:\n name = match.group(1).strip(' ,')\n role = match.group(2).lower()\n return (name, role)\n return (name, 'member')\n\n\nclass NYCommitteeScraper(CommitteeScraper):\n state = \"ny\"\n latest_only = True\n\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber)()\n\n def scrape_lower(self, only_names=None):\n committees = []\n url = \"http://assembly.state.ny.us/comm/\"\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'sec=mem')]\"):\n name = link.xpath(\"string(../strong)\").strip()\n if 'Caucus' in name:\n continue\n\n url = link.attrib['href']\n\n committees.append(name)\n\n self.scrape_lower_committee(name, url)\n return committees\n\n def scrape_lower_committee(self, name, url):\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n\n comm = Committee('lower', name)\n comm.add_source(url)\n\n for link in page.xpath(\"//a[contains(@href, 'mem?ad')]\"):\n member = link.text.strip()\n member = re.sub(r'\\s+', ' ', member)\n\n name, role = parse_name(member)\n comm.add_member(name, role)\n\n self.save_committee(comm)\n\n def scrape_upper(self):\n committees = []\n url = \"http://www.nysenate.gov/committees\"\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, '/committee/')]\"):\n name = link.text.strip()\n\n if name == 'New York State Conference of Black Senators':\n # stop scraping once we reach the caucuses\n break\n\n committees.append(name)\n self.scrape_upper_committee(name, link.attrib['href'])\n return committees\n\n def scrape_upper_committee(self, name, url):\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n\n comm = Committee('upper', name)\n comm.add_source(url)\n\n member_div = page.xpath(\"//div[@class = 'committee-members']\")[0]\n\n seen = set()\n for link in 
member_div.xpath(\".//a\"):\n if not link.text:\n continue\n\n member = link.text.strip()\n\n next_elem = link.getnext()\n if (next_elem is not None and\n next_elem.tag == 'a' and\n next_elem.attrib['href'] == link.attrib['href']):\n # Sometimes NY is cool and splits names across a\n # couple links\n member = \"%s %s\" % (member, next_elem.text.strip())\n\n member = re.sub(r'\\s+', ' ', member)\n\n if member in seen or not member:\n continue\n seen.add(member)\n\n name, role = parse_name(member)\n comm.add_member(name, role)\n\n self.save_committee(comm)\n", "path": "openstates/ny/committees.py"}], "after_files": [{"content": "import re\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\ndef parse_name(name):\n \"\"\"\n Split a committee membership string into name and role.\n\n >>> parse_name('Felix Ortiz')\n ('Felix Ortiz', 'member')\n >>> parse_name('Felix Ortiz (Chair)')\n ('Felix Ortiz', 'chair')\n >>> parse_name('Hon. Felix Ortiz, Co-Chair')\n ('Felix Ortiz', 'co-chair')\n >>> parse_name('Owen H.\\\\r\\\\nJohnson (Vice Chairperson)')\n ('Owen H. Johnson', 'vice chairperson')\n \"\"\"\n name = re.sub(r'^(Hon\\.|Assemblyman|Assemblywoman)\\s+', '', name)\n name = re.sub(r'\\s+', ' ', name)\n\n roles = [\"Chairwoman\", \"Chairperson\", \"Chair\", \"Secretary\", \"Treasurer\",\n \"Parliamentarian\", \"Chaplain\"]\n match = re.match(\n r'([^(]+),? \\(?((Co|Vice)?-?\\s*(%s))\\)?' % '|'.join(roles),\n name)\n\n if match:\n name = match.group(1).strip(' ,')\n role = match.group(2).lower()\n return (name, role)\n return (name, 'member')\n\n\nclass NYCommitteeScraper(CommitteeScraper):\n state = \"ny\"\n latest_only = True\n\n def scrape(self, chamber, term):\n getattr(self, 'scrape_' + chamber)()\n\n def scrape_lower(self, only_names=None):\n committees = []\n url = \"http://assembly.state.ny.us/comm/\"\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, 'sec=mem')]\"):\n name = link.xpath(\"string(../strong)\").strip()\n if 'Caucus' in name:\n continue\n\n url = link.attrib['href']\n\n committees.append(name)\n\n self.scrape_lower_committee(name, url)\n return committees\n\n def scrape_lower_committee(self, name, url):\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n\n comm = Committee('lower', name)\n comm.add_source(url)\n\n for link in page.xpath(\"//div[@class='commlinks']//a[contains(@href, 'mem')]\"):\n member = link.text.strip()\n member = re.sub(r'\\s+', ' ', member)\n\n name, role = parse_name(member)\n comm.add_member(name, role)\n\n self.save_committee(comm)\n\n def scrape_upper(self):\n committees = []\n url = \"http://www.nysenate.gov/committees\"\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n for link in page.xpath(\"//a[contains(@href, '/committee/')]\"):\n name = link.text.strip()\n\n if name == 'New York State Conference of Black Senators':\n # stop scraping once we reach the caucuses\n break\n\n committees.append(name)\n self.scrape_upper_committee(name, link.attrib['href'])\n return committees\n\n def scrape_upper_committee(self, name, url):\n page = self.urlopen(url)\n page = lxml.html.fromstring(page)\n\n comm = Committee('upper', name)\n comm.add_source(url)\n\n member_div = page.xpath(\"//div[@class = 'committee-members']\")[0]\n\n seen = set()\n for link in member_div.xpath(\".//a\"):\n if not link.text:\n continue\n\n member = 
link.text.strip()\n\n next_elem = link.getnext()\n if (next_elem is not None and\n next_elem.tag == 'a' and\n next_elem.attrib['href'] == link.attrib['href']):\n # Sometimes NY is cool and splits names across a\n # couple links\n member = \"%s %s\" % (member, next_elem.text.strip())\n\n member = re.sub(r'\\s+', ' ', member)\n\n if member in seen or not member:\n continue\n seen.add(member)\n\n name, role = parse_name(member)\n comm.add_member(name, role)\n\n self.save_committee(comm)\n", "path": "openstates/ny/committees.py"}]}
| 1,560 | 137 |
gh_patches_debug_20787
|
rasdani/github-patches
|
git_diff
|
saulpw__visidata-2185
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
inputMultiple() displays a malformed empty status box
**Small description**
`search-keys` and `search-col` displays an empty box while waiting for the user to input a regex at the prompt. The box is shown where the usual status messages are shown.
**Expected result**
No box should be displayed.
**Actual result with screenshot**

For comparison, here is what the normal status messages look like:

**Steps to reproduce with sample data and a .vd**
Open any sheet and hit `r` or `/`.
**Additional context**
saul.pw/VisiData v3.0dev
Python 3.10.12
Ubuntu 22.04.3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2
3 from setuptools import setup
4 # tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96
5 #from visidata import __version__
6 __version__ = '3.0dev'
7
8 setup(name='visidata',
9 version=__version__,
10 description='terminal interface for exploring and arranging tabular data',
11 long_description=open('README.md').read(),
12 long_description_content_type='text/markdown',
13 author='Saul Pwanson',
14 python_requires='>=3.7',
15 author_email='[email protected]',
16 url='https://visidata.org',
17 download_url='https://github.com/saulpw/visidata/tarball/' + __version__,
18 scripts=['bin/vd'],
19 entry_points={'console_scripts': [
20 'visidata=visidata.main:vd_cli'
21 ],
22 },
23 py_modules=['visidata'],
24 install_requires=[
25 'python-dateutil',
26 'windows-curses != 2.3.1; platform_system == "Windows"', #1841
27 'importlib-metadata >= 3.6',
28 'importlib_resources; python_version<"3.9"'
29 ],
30 packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],
31 data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],
32 package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},
33 license='GPLv3',
34 classifiers=[
35 'Development Status :: 5 - Production/Stable',
36 'Environment :: Console',
37 'Environment :: Console :: Curses',
38 'Intended Audience :: Developers',
39 'Intended Audience :: Science/Research',
40 'Intended Audience :: System Administrators',
41 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
42 'Operating System :: OS Independent',
43 'Programming Language :: Python :: 3',
44 'Topic :: Database :: Front-Ends',
45 'Topic :: Scientific/Engineering',
46 'Topic :: Office/Business :: Financial :: Spreadsheet',
47 'Topic :: Scientific/Engineering :: Visualization',
48 'Topic :: Utilities',
49 ],
50 keywords=('console tabular data spreadsheet terminal viewer textpunk'
51 'curses csv hdf5 h5 xlsx excel tsv'),
52 )
53
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@
],
packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],
data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],
- package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},
+ package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw', 'regex.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},
license='GPLv3',
classifiers=[
'Development Status :: 5 - Production/Stable',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,7 @@\n ],\n packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],\n- package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n+ package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw', 'regex.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n license='GPLv3',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n", "issue": "inputMultiple() displays a malformed empty status box\n**Small description**\r\n`search-keys` and `search-col` displays an empty box while waiting for the user to input a regex at the prompt. The box is shown where the usual status messages are shown.\r\n\r\n**Expected result**\r\nNo box should be displayed.\r\n\r\n**Actual result with screenshot**\r\n\r\n\r\nFor comparison, here is what the normal status messages look like:\r\n\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\nOpen any sheet and hit `r` or `/`.\r\n\r\n**Additional context**\r\nsaul.pw/VisiData v3.0dev\r\nPython 3.10.12\r\nUbuntu 22.04.3\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom setuptools import setup\n# tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96\n#from visidata import __version__\n__version__ = '3.0dev'\n\nsetup(name='visidata',\n version=__version__,\n description='terminal interface for exploring and arranging tabular data',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='Saul Pwanson',\n python_requires='>=3.7',\n author_email='[email protected]',\n url='https://visidata.org',\n download_url='https://github.com/saulpw/visidata/tarball/' + __version__,\n scripts=['bin/vd'],\n entry_points={'console_scripts': [\n 'visidata=visidata.main:vd_cli'\n ],\n },\n py_modules=['visidata'],\n install_requires=[\n 'python-dateutil',\n 'windows-curses != 2.3.1; platform_system == \"Windows\"', #1841\n 'importlib-metadata >= 3.6',\n 'importlib_resources; python_version<\"3.9\"'\n ],\n packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],\n package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n license='GPLv3',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Topic :: 
Database :: Front-Ends',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Utilities',\n ],\n keywords=('console tabular data spreadsheet terminal viewer textpunk'\n 'curses csv hdf5 h5 xlsx excel tsv'),\n )\n\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nfrom setuptools import setup\n# tox can't actually run python3 setup.py: https://github.com/tox-dev/tox/issues/96\n#from visidata import __version__\n__version__ = '3.0dev'\n\nsetup(name='visidata',\n version=__version__,\n description='terminal interface for exploring and arranging tabular data',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='Saul Pwanson',\n python_requires='>=3.7',\n author_email='[email protected]',\n url='https://visidata.org',\n download_url='https://github.com/saulpw/visidata/tarball/' + __version__,\n scripts=['bin/vd'],\n entry_points={'console_scripts': [\n 'visidata=visidata.main:vd_cli'\n ],\n },\n py_modules=['visidata'],\n install_requires=[\n 'python-dateutil',\n 'windows-curses != 2.3.1; platform_system == \"Windows\"', #1841\n 'importlib-metadata >= 3.6',\n 'importlib_resources; python_version<\"3.9\"'\n ],\n packages=['visidata', 'visidata.loaders', 'visidata.vendor', 'visidata.tests', 'visidata.ddw', 'visidata.man', 'visidata.themes', 'visidata.features', 'visidata.experimental', 'visidata.apps', 'visidata.apps.vgit', 'visidata.apps.vdsql', 'visidata.desktop'],\n data_files=[('share/man/man1', ['visidata/man/vd.1', 'visidata/man/visidata.1']), ('share/applications', ['visidata/desktop/visidata.desktop'])],\n package_data={'visidata.man': ['vd.1', 'vd.txt'], 'visidata.ddw': ['input.ddw', 'regex.ddw'], 'visidata.tests': ['sample.tsv'], 'visidata.desktop': ['visidata.desktop']},\n license='GPLv3',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Console :: Curses',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Utilities',\n ],\n keywords=('console tabular data spreadsheet terminal viewer textpunk'\n 'curses csv hdf5 h5 xlsx excel tsv'),\n )\n\n", "path": "setup.py"}]}
| 1,221 | 275 |
gh_patches_debug_19917
|
rasdani/github-patches
|
git_diff
|
sbi-dev__sbi-646
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SNLE + multiple independent prior + MCMC with num_workers > 1 throws error
SNLE:
- multiple independent prior + num_workers=2 leads to an error (ValueError: Expected value argument to be within the support of the distribution Uniform)
- no error if either num_workers=1 or using a BoxUniform prior
SNPE:
- seems to work
```
from sbi.inference.snpe import SNPE_A
from sbi.inference.snle import SNLE_A
from sbi.utils import BoxUniform
from sbi.inference.posteriors import MCMCPosterior
from sbi.inference.potentials import posterior_estimator_based_potential, likelihood_estimator_based_potential
from sbi.utils.user_input_checks import process_prior
from sbi.analysis.sbc import run_sbc
import torch
default_params = torch.rand(size=(4, 3),dtype=torch.float32)
default_obs = torch.rand(size=(4, 1), dtype=torch.float32)
# multiple independent prior
prior = [torch.distributions.Uniform(torch.FloatTensor([0]), torch.FloatTensor([20])),
torch.distributions.Uniform(torch.FloatTensor([-10]), torch.FloatTensor([10])),
torch.distributions.Uniform(torch.FloatTensor([0.5]), torch.FloatTensor([3]))]
# box uniform prior
# prior = BoxUniform(torch.Tensor([0, -10, 0.5]), torch.Tensor([20, 10, 3]))
prior, _, _ = process_prior(prior)
# inference = SNPE_A()
# density_estimator = inference.append_simulations(default_params, default_obs).train()
# potential_fn, theta_transform = posterior_estimator_based_potential(density_estimator, prior, default_obs[0])
inference = SNLE_A()
density_estimator = inference.append_simulations(default_params, default_obs).train()
potential_fn, theta_transform = likelihood_estimator_based_potential(density_estimator, prior, default_obs[0])
posterior = MCMCPosterior(potential_fn, proposal=prior, theta_transform=theta_transform)
# this line throws an error
ranks, dap_samples = run_sbc(default_params, default_obs, posterior, num_posterior_samples=10, num_workers=2, sbc_batch_size=2)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sbi/samplers/mcmc/init_strategy.py`
Content:
```
1 # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed
2 # under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.
3
4 from typing import Any, Callable
5
6 import torch
7 import torch.distributions.transforms as torch_tf
8 from torch import Tensor
9
10
11 class IterateParameters:
12 """Iterates through parameters by rows"""
13
14 def __init__(self, parameters: torch.Tensor, **kwargs):
15 self.iter = self._make_iterator(parameters)
16
17 @staticmethod
18 def _make_iterator(t):
19 for i in range(t.shape[0]):
20 yield t[i, :].reshape(1, -1)
21
22 def __call__(self):
23 return next(self.iter)
24
25
26 def proposal_init(
27 proposal: Any, transform: torch_tf.Transform, **kwargs: Any
28 ) -> Tensor:
29 """Return a sample from the proposal."""
30 prior_samples = proposal.sample((1,)).detach()
31 transformed_prior_samples = transform(prior_samples)
32 return transformed_prior_samples
33
34
35 def sir(
36 proposal: Any,
37 potential_fn: Callable,
38 transform: torch_tf.Transform,
39 sir_num_batches: int = 10,
40 sir_batch_size: int = 1000,
41 **kwargs: Any,
42 ) -> Tensor:
43 r"""Return a sample obtained by sequential importance reweighting.
44
45 See Rubin 1988, "Using the sir algorithm to simulate posterior distributions."
46
47 This function can also do `SIR` on the conditional posterior
48 $p(\theta_i|\theta_j, x)$ when a `condition` and `dims_to_sample` are passed.
49
50 Args:
51 proposal: Proposal distribution, candidate samples are drawn from it.
52 potential_fn: Potential function that the candidate samples are weighted with.
53 Note that the function needs to return log probabilities.
54 sir_num_batches: Number of candidate batches drawn.
55 sir_batch_size: Batch size used for evaluating candidates.
56
57 Returns:
58 A single sample.
59 """
60
61 with torch.set_grad_enabled(False):
62 log_weights = []
63 init_param_candidates = []
64 for i in range(sir_num_batches):
65 batch_draws = proposal.sample((sir_batch_size,)).detach()
66 transformed_batch_draws = transform(batch_draws)
67 init_param_candidates.append(transformed_batch_draws)
68 log_weights.append(potential_fn(transformed_batch_draws).detach())
69 log_weights = torch.cat(log_weights)
70 init_param_candidates = torch.cat(init_param_candidates)
71
72 # Norm weights in log space
73 log_weights -= torch.logsumexp(log_weights, dim=0)
74 probs = torch.exp(log_weights.view(-1))
75 probs[torch.isnan(probs)] = 0.0
76 probs[torch.isinf(probs)] = 0.0
77 probs /= probs.sum()
78
79 idxs = torch.multinomial(probs, 1, replacement=False)
80 return init_param_candidates[idxs, :]
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sbi/samplers/mcmc/init_strategy.py b/sbi/samplers/mcmc/init_strategy.py
--- a/sbi/samplers/mcmc/init_strategy.py
+++ b/sbi/samplers/mcmc/init_strategy.py
@@ -63,9 +63,8 @@
init_param_candidates = []
for i in range(sir_num_batches):
batch_draws = proposal.sample((sir_batch_size,)).detach()
- transformed_batch_draws = transform(batch_draws)
- init_param_candidates.append(transformed_batch_draws)
- log_weights.append(potential_fn(transformed_batch_draws).detach())
+ init_param_candidates.append(batch_draws)
+ log_weights.append(potential_fn(batch_draws).detach())
log_weights = torch.cat(log_weights)
init_param_candidates = torch.cat(init_param_candidates)
@@ -77,4 +76,5 @@
probs /= probs.sum()
idxs = torch.multinomial(probs, 1, replacement=False)
- return init_param_candidates[idxs, :]
+ # Return transformed sample.
+ return transform(init_param_candidates[idxs, :])
|
{"golden_diff": "diff --git a/sbi/samplers/mcmc/init_strategy.py b/sbi/samplers/mcmc/init_strategy.py\n--- a/sbi/samplers/mcmc/init_strategy.py\n+++ b/sbi/samplers/mcmc/init_strategy.py\n@@ -63,9 +63,8 @@\n init_param_candidates = []\n for i in range(sir_num_batches):\n batch_draws = proposal.sample((sir_batch_size,)).detach()\n- transformed_batch_draws = transform(batch_draws)\n- init_param_candidates.append(transformed_batch_draws)\n- log_weights.append(potential_fn(transformed_batch_draws).detach())\n+ init_param_candidates.append(batch_draws)\n+ log_weights.append(potential_fn(batch_draws).detach())\n log_weights = torch.cat(log_weights)\n init_param_candidates = torch.cat(init_param_candidates)\n \n@@ -77,4 +76,5 @@\n probs /= probs.sum()\n \n idxs = torch.multinomial(probs, 1, replacement=False)\n- return init_param_candidates[idxs, :]\n+ # Return transformed sample.\n+ return transform(init_param_candidates[idxs, :])\n", "issue": "SNLE + multiple independent prior + MCMC with num_workers > 1 throws error\nSNLE:\r\n- multiple independent prior + num_workers=2 leads to an error (ValueError: Expected value argument to be within the support of the distribution Uniform)\r\n- no error if either num_workers=1 or using a BoxUniform prior\r\n\r\nSNPE:\r\n- seems to work\r\n\r\n```\r\nfrom sbi.inference.snpe import SNPE_A\r\nfrom sbi.inference.snle import SNLE_A\r\nfrom sbi.utils import BoxUniform\r\nfrom sbi.inference.posteriors import MCMCPosterior\r\nfrom sbi.inference.potentials import posterior_estimator_based_potential, likelihood_estimator_based_potential\r\nfrom sbi.utils.user_input_checks import process_prior\r\nfrom sbi.analysis.sbc import run_sbc\r\nimport torch\r\n\r\ndefault_params = torch.rand(size=(4, 3),dtype=torch.float32)\r\ndefault_obs = torch.rand(size=(4, 1), dtype=torch.float32)\r\n\r\n# multiple independent prior\r\nprior = [torch.distributions.Uniform(torch.FloatTensor([0]), torch.FloatTensor([20])),\r\n torch.distributions.Uniform(torch.FloatTensor([-10]), torch.FloatTensor([10])),\r\n torch.distributions.Uniform(torch.FloatTensor([0.5]), torch.FloatTensor([3]))]\r\n# box uniform prior\r\n# prior = BoxUniform(torch.Tensor([0, -10, 0.5]), torch.Tensor([20, 10, 3]))\r\nprior, _, _ = process_prior(prior)\r\n\r\n# inference = SNPE_A()\r\n# density_estimator = inference.append_simulations(default_params, default_obs).train()\r\n# potential_fn, theta_transform = posterior_estimator_based_potential(density_estimator, prior, default_obs[0])\r\n\r\ninference = SNLE_A()\r\ndensity_estimator = inference.append_simulations(default_params, default_obs).train()\r\npotential_fn, theta_transform = likelihood_estimator_based_potential(density_estimator, prior, default_obs[0])\r\n\r\nposterior = MCMCPosterior(potential_fn, proposal=prior, theta_transform=theta_transform)\r\n\r\n# this line throws an error\r\nranks, dap_samples = run_sbc(default_params, default_obs, posterior, num_posterior_samples=10, num_workers=2, sbc_batch_size=2)\r\n```\r\n\n", "before_files": [{"content": "# This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n\nfrom typing import Any, Callable\n\nimport torch\nimport torch.distributions.transforms as torch_tf\nfrom torch import Tensor\n\n\nclass IterateParameters:\n \"\"\"Iterates through parameters by rows\"\"\"\n\n def __init__(self, parameters: torch.Tensor, **kwargs):\n self.iter = self._make_iterator(parameters)\n\n @staticmethod\n def _make_iterator(t):\n for i in range(t.shape[0]):\n yield t[i, :].reshape(1, -1)\n\n def __call__(self):\n return next(self.iter)\n\n\ndef proposal_init(\n proposal: Any, transform: torch_tf.Transform, **kwargs: Any\n) -> Tensor:\n \"\"\"Return a sample from the proposal.\"\"\"\n prior_samples = proposal.sample((1,)).detach()\n transformed_prior_samples = transform(prior_samples)\n return transformed_prior_samples\n\n\ndef sir(\n proposal: Any,\n potential_fn: Callable,\n transform: torch_tf.Transform,\n sir_num_batches: int = 10,\n sir_batch_size: int = 1000,\n **kwargs: Any,\n) -> Tensor:\n r\"\"\"Return a sample obtained by sequential importance reweighting.\n\n See Rubin 1988, \"Using the sir algorithm to simulate posterior distributions.\"\n\n This function can also do `SIR` on the conditional posterior\n $p(\\theta_i|\\theta_j, x)$ when a `condition` and `dims_to_sample` are passed.\n\n Args:\n proposal: Proposal distribution, candidate samples are drawn from it.\n potential_fn: Potential function that the candidate samples are weighted with.\n Note that the function needs to return log probabilities.\n sir_num_batches: Number of candidate batches drawn.\n sir_batch_size: Batch size used for evaluating candidates.\n\n Returns:\n A single sample.\n \"\"\"\n\n with torch.set_grad_enabled(False):\n log_weights = []\n init_param_candidates = []\n for i in range(sir_num_batches):\n batch_draws = proposal.sample((sir_batch_size,)).detach()\n transformed_batch_draws = transform(batch_draws)\n init_param_candidates.append(transformed_batch_draws)\n log_weights.append(potential_fn(transformed_batch_draws).detach())\n log_weights = torch.cat(log_weights)\n init_param_candidates = torch.cat(init_param_candidates)\n\n # Norm weights in log space\n log_weights -= torch.logsumexp(log_weights, dim=0)\n probs = torch.exp(log_weights.view(-1))\n probs[torch.isnan(probs)] = 0.0\n probs[torch.isinf(probs)] = 0.0\n probs /= probs.sum()\n\n idxs = torch.multinomial(probs, 1, replacement=False)\n return init_param_candidates[idxs, :]\n", "path": "sbi/samplers/mcmc/init_strategy.py"}], "after_files": [{"content": "# This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n\nfrom typing import Any, Callable\n\nimport torch\nimport torch.distributions.transforms as torch_tf\nfrom torch import Tensor\n\n\nclass IterateParameters:\n \"\"\"Iterates through parameters by rows\"\"\"\n\n def __init__(self, parameters: torch.Tensor, **kwargs):\n self.iter = self._make_iterator(parameters)\n\n @staticmethod\n def _make_iterator(t):\n for i in range(t.shape[0]):\n yield t[i, :].reshape(1, -1)\n\n def __call__(self):\n return next(self.iter)\n\n\ndef proposal_init(\n proposal: Any, transform: torch_tf.Transform, **kwargs: Any\n) -> Tensor:\n \"\"\"Return a sample from the proposal.\"\"\"\n prior_samples = proposal.sample((1,)).detach()\n transformed_prior_samples = transform(prior_samples)\n return transformed_prior_samples\n\n\ndef sir(\n proposal: Any,\n potential_fn: Callable,\n transform: torch_tf.Transform,\n sir_num_batches: int = 10,\n sir_batch_size: int = 1000,\n **kwargs: Any,\n) -> Tensor:\n r\"\"\"Return a sample obtained by sequential importance reweighting.\n\n See Rubin 1988, \"Using the sir algorithm to simulate posterior distributions.\"\n\n This function can also do `SIR` on the conditional posterior\n $p(\\theta_i|\\theta_j, x)$ when a `condition` and `dims_to_sample` are passed.\n\n Args:\n proposal: Proposal distribution, candidate samples are drawn from it.\n potential_fn: Potential function that the candidate samples are weighted with.\n Note that the function needs to return log probabilities.\n sir_num_batches: Number of candidate batches drawn.\n sir_batch_size: Batch size used for evaluating candidates.\n\n Returns:\n A single sample.\n \"\"\"\n\n with torch.set_grad_enabled(False):\n log_weights = []\n init_param_candidates = []\n for i in range(sir_num_batches):\n batch_draws = proposal.sample((sir_batch_size,)).detach()\n init_param_candidates.append(batch_draws)\n log_weights.append(potential_fn(batch_draws).detach())\n log_weights = torch.cat(log_weights)\n init_param_candidates = torch.cat(init_param_candidates)\n\n # Norm weights in log space\n log_weights -= torch.logsumexp(log_weights, dim=0)\n probs = torch.exp(log_weights.view(-1))\n probs[torch.isnan(probs)] = 0.0\n probs[torch.isinf(probs)] = 0.0\n probs /= probs.sum()\n\n idxs = torch.multinomial(probs, 1, replacement=False)\n # Return transformed sample.\n return transform(init_param_candidates[idxs, :])\n", "path": "sbi/samplers/mcmc/init_strategy.py"}]}
| 1,523 | 248 |
gh_patches_debug_16978
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-3183
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mail: New Session Proposals not Sent out to Organizers and Co-organizers
After the submission of several people the new Session Proposals have not been sent out to Organizers and Co-organizers. One reason could be, that these people did not verify their email address.
Please check configuration and ensure all emails of all submissions are sent to organizers/co-organizers.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/helpers/notification_email_triggers.py`
Content:
```
1 from flask import url_for
2
3 from app.helpers.data_getter import DataGetter
4 from app.helpers.helpers import send_new_session_organizer, send_notif_new_session_organizer, \
5 send_notif_session_accept_reject, send_session_accept_reject, send_schedule_change, send_notif_session_schedule, \
6 send_email_for_after_purchase_organizers, send_notif_for_after_purchase_organizer
7 from app.models.mail import NEW_SESSION, SESSION_ACCEPT_REJECT, SESSION_SCHEDULE, TICKET_PURCHASED
8
9
10 def trigger_new_session_notifications(session_id, event_id=None, event=None):
11 if not event and not event_id:
12 raise Exception('event or event_id is required')
13 if not event:
14 event = DataGetter.get_event(event_id)
15
16 link = url_for('event_sessions.session_display_view',
17 event_id=event.id, session_id=session_id, _external=True)
18
19 admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)
20 organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')
21 for organizer in organizers:
22 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)
23 if not admin_msg_setting or \
24 (email_notification_setting and email_notification_setting.new_paper == 1 and
25 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
26
27 send_new_session_organizer(organizer.user.email, event.name, link)
28 # Send notification
29 send_notif_new_session_organizer(organizer.user, event.name, link)
30
31
32 def trigger_session_state_change_notifications(session, event_id, state=None, message=None, subject=None):
33 if not state:
34 state = session.state
35 link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)
36 admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_ACCEPT_REJECT)
37 for speaker in session.speakers:
38 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)
39 if not admin_msg_setting or \
40 (email_notification_setting and email_notification_setting.session_accept_reject == 1 and
41 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
42
43 if speaker.email:
44 send_session_accept_reject(speaker.email, session.title, state, link, subject=subject, message=message)
45 # Send notification
46 if speaker.user:
47 send_notif_session_accept_reject(speaker.user, session.title, state, link)
48 session.state_email_sent = True
49 from app.helpers.data import save_to_db
50 save_to_db(session)
51
52
53 def trigger_session_schedule_change_notifications(session, event_id):
54 link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)
55 admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_SCHEDULE)
56 for speaker in session.speakers:
57 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)
58 if not admin_msg_setting or \
59 (email_notification_setting and email_notification_setting.session_schedule == 1 and
60 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
61 if speaker.email:
62 send_schedule_change(speaker.email, session.title, link)
63 # Send notification
64 if speaker.user:
65 send_notif_session_schedule(speaker.user, session.title, link)
66
67
68 def trigger_after_purchase_notifications(buyer_email, event_id, event, invoice_id, order_url):
69 if not event and not event_id:
70 raise Exception('event or event_id is required')
71 if not event:
72 event = DataGetter.get_event(event_id)
73
74 admin_msg_setting = DataGetter.get_message_setting_by_action(TICKET_PURCHASED)
75 organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')
76 for organizer in organizers:
77 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)
78 if not admin_msg_setting or \
79 (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and
80 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
81 send_email_for_after_purchase_organizers(organizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)
82 send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)
83
84 coorganizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'coorganizer')
85 for coorganizer in coorganizers:
86 email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(coorganizer.user.id, event_id)
87 if not admin_msg_setting or \
88 (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and
89 admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
90 send_email_for_after_purchase_organizers(coorganizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)
91 send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/helpers/notification_email_triggers.py b/app/helpers/notification_email_triggers.py
--- a/app/helpers/notification_email_triggers.py
+++ b/app/helpers/notification_email_triggers.py
@@ -19,7 +19,7 @@
admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)
organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')
for organizer in organizers:
- email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)
+ email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event.id)
if not admin_msg_setting or \
(email_notification_setting and email_notification_setting.new_paper == 1 and
admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:
|
{"golden_diff": "diff --git a/app/helpers/notification_email_triggers.py b/app/helpers/notification_email_triggers.py\n--- a/app/helpers/notification_email_triggers.py\n+++ b/app/helpers/notification_email_triggers.py\n@@ -19,7 +19,7 @@\n admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n- email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n+ email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event.id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.new_paper == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n", "issue": "Mail: New Session Proposals not Sent out to Organizers and Co-organizers\nAfter the submission of several people the new Session Proposals have not been sent out to Organizers and Co-organizers. One reason could be, that these people did not verify their email address.\r\n\r\nPlease check configuration and ensure all emails of all submissions are sent to organizers/co-organizers.\n", "before_files": [{"content": "from flask import url_for\n\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_new_session_organizer, send_notif_new_session_organizer, \\\n send_notif_session_accept_reject, send_session_accept_reject, send_schedule_change, send_notif_session_schedule, \\\n send_email_for_after_purchase_organizers, send_notif_for_after_purchase_organizer\nfrom app.models.mail import NEW_SESSION, SESSION_ACCEPT_REJECT, SESSION_SCHEDULE, TICKET_PURCHASED\n\n\ndef trigger_new_session_notifications(session_id, event_id=None, event=None):\n if not event and not event_id:\n raise Exception('event or event_id is required')\n if not event:\n event = DataGetter.get_event(event_id)\n\n link = url_for('event_sessions.session_display_view',\n event_id=event.id, session_id=session_id, _external=True)\n\n admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.new_paper == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n\n send_new_session_organizer(organizer.user.email, event.name, link)\n # Send notification\n send_notif_new_session_organizer(organizer.user, event.name, link)\n\n\ndef trigger_session_state_change_notifications(session, event_id, state=None, message=None, subject=None):\n if not state:\n state = session.state\n link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)\n admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_ACCEPT_REJECT)\n for speaker in session.speakers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.session_accept_reject == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n\n if speaker.email:\n 
send_session_accept_reject(speaker.email, session.title, state, link, subject=subject, message=message)\n # Send notification\n if speaker.user:\n send_notif_session_accept_reject(speaker.user, session.title, state, link)\n session.state_email_sent = True\n from app.helpers.data import save_to_db\n save_to_db(session)\n\n\ndef trigger_session_schedule_change_notifications(session, event_id):\n link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)\n admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_SCHEDULE)\n for speaker in session.speakers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.session_schedule == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n if speaker.email:\n send_schedule_change(speaker.email, session.title, link)\n # Send notification\n if speaker.user:\n send_notif_session_schedule(speaker.user, session.title, link)\n\n\ndef trigger_after_purchase_notifications(buyer_email, event_id, event, invoice_id, order_url):\n if not event and not event_id:\n raise Exception('event or event_id is required')\n if not event:\n event = DataGetter.get_event(event_id)\n\n admin_msg_setting = DataGetter.get_message_setting_by_action(TICKET_PURCHASED)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n send_email_for_after_purchase_organizers(organizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)\n send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)\n\n coorganizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'coorganizer')\n for coorganizer in coorganizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(coorganizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n send_email_for_after_purchase_organizers(coorganizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)\n send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)\n", "path": "app/helpers/notification_email_triggers.py"}], "after_files": [{"content": "from flask import url_for\n\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_new_session_organizer, send_notif_new_session_organizer, \\\n send_notif_session_accept_reject, send_session_accept_reject, send_schedule_change, send_notif_session_schedule, \\\n send_email_for_after_purchase_organizers, send_notif_for_after_purchase_organizer\nfrom app.models.mail import NEW_SESSION, SESSION_ACCEPT_REJECT, SESSION_SCHEDULE, TICKET_PURCHASED\n\n\ndef trigger_new_session_notifications(session_id, event_id=None, event=None):\n if not event and not event_id:\n raise 
Exception('event or event_id is required')\n if not event:\n event = DataGetter.get_event(event_id)\n\n link = url_for('event_sessions.session_display_view',\n event_id=event.id, session_id=session_id, _external=True)\n\n admin_msg_setting = DataGetter.get_message_setting_by_action(NEW_SESSION)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event.id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.new_paper == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n\n send_new_session_organizer(organizer.user.email, event.name, link)\n # Send notification\n send_notif_new_session_organizer(organizer.user, event.name, link)\n\n\ndef trigger_session_state_change_notifications(session, event_id, state=None, message=None, subject=None):\n if not state:\n state = session.state\n link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)\n admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_ACCEPT_REJECT)\n for speaker in session.speakers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.session_accept_reject == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n\n if speaker.email:\n send_session_accept_reject(speaker.email, session.title, state, link, subject=subject, message=message)\n # Send notification\n if speaker.user:\n send_notif_session_accept_reject(speaker.user, session.title, state, link)\n session.state_email_sent = True\n from app.helpers.data import save_to_db\n save_to_db(session)\n\n\ndef trigger_session_schedule_change_notifications(session, event_id):\n link = url_for('event_sessions.session_display_view', event_id=event_id, session_id=session.id, _external=True)\n admin_msg_setting = DataGetter.get_message_setting_by_action(SESSION_SCHEDULE)\n for speaker in session.speakers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(speaker.user_id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.session_schedule == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n if speaker.email:\n send_schedule_change(speaker.email, session.title, link)\n # Send notification\n if speaker.user:\n send_notif_session_schedule(speaker.user, session.title, link)\n\n\ndef trigger_after_purchase_notifications(buyer_email, event_id, event, invoice_id, order_url):\n if not event and not event_id:\n raise Exception('event or event_id is required')\n if not event:\n event = DataGetter.get_event(event_id)\n\n admin_msg_setting = DataGetter.get_message_setting_by_action(TICKET_PURCHASED)\n organizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'organizer')\n for organizer in organizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(organizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n 
send_email_for_after_purchase_organizers(organizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)\n send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)\n\n coorganizers = DataGetter.get_user_event_roles_by_role_name(event.id, 'coorganizer')\n for coorganizer in coorganizers:\n email_notification_setting = DataGetter.get_email_notification_settings_by_event_id(coorganizer.user.id, event_id)\n if not admin_msg_setting or \\\n (email_notification_setting and email_notification_setting.after_ticket_purchase == 1 and\n admin_msg_setting.user_control_status == 1) or admin_msg_setting.user_control_status == 0:\n send_email_for_after_purchase_organizers(coorganizer.user.email, buyer_email, invoice_id, order_url, event.name, event.organizer_name)\n send_notif_for_after_purchase_organizer(organizer.user, invoice_id, order_url, event.name, buyer_email)\n", "path": "app/helpers/notification_email_triggers.py"}]}
| 1,662 | 187 |
gh_patches_debug_28782
|
rasdani/github-patches
|
git_diff
|
Pylons__pyramid-1024
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pyramid app frozen with py2exe fails because of Chameleon imports
If I freeze a Pyramid application with py2exe and try to run it, it fails during the startup, with the following traceback:
```
Traceback (most recent call last):
File "web_server.py", line 2, in <module>
File "pyramid\config\__init__.pyc", line 66, in <module>
File "pyramid\config\rendering.pyc", line 11, in <module>
File "pyramid\chameleon_text.pyc", line 3, in <module>
File "chameleon\__init__.pyc", line 1, in <module>
File "chameleon\zpt\template.pyc", line 10, in <module>
File "chameleon\tales.pyc", line 19, in <module>
File "chameleon\compiler.pyc", line 145, in <module>
File "chameleon\codegen.pyc", line 93, in template
File "inspect.pyc", line 701, in getsource
File "inspect.pyc", line 690, in getsourcelines
File "inspect.pyc", line 538, in findsource
IOError: could not get source code
```
My application doesn't use Chameleon, but the Pyramid modules still import it, which ultimately causes the .exe to fail to run.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/chameleon_zpt.py`
Content:
```
1 from zope.interface import implementer
2
3 from chameleon.zpt.template import PageTemplateFile
4
5 from pyramid.interfaces import ITemplateRenderer
6 from pyramid.decorator import reify
7 from pyramid import renderers
8
9 def renderer_factory(info):
10 return renderers.template_renderer_factory(info, ZPTTemplateRenderer)
11
12 @implementer(ITemplateRenderer)
13 class ZPTTemplateRenderer(object):
14 def __init__(self, path, lookup, macro=None):
15 self.path = path
16 self.lookup = lookup
17 self.macro = macro
18
19 @reify # avoid looking up reload_templates before manager pushed
20 def template(self):
21 tf = PageTemplateFile(
22 self.path,
23 auto_reload=self.lookup.auto_reload,
24 debug=self.lookup.debug,
25 translate=self.lookup.translate
26 )
27 if self.macro:
28 # render only the portion of the template included in a
29 # define-macro named the value of self.macro
30 macro_renderer = tf.macros[self.macro].include
31 tf._render = macro_renderer
32 return tf
33
34 def implementation(self):
35 return self.template
36
37 def __call__(self, value, system):
38 try:
39 system.update(value)
40 except (TypeError, ValueError):
41 raise ValueError('renderer was passed non-dictionary as value')
42 result = self.template(**system)
43 return result
44
45
```
Path: `pyramid/chameleon_text.py`
Content:
```
1 from zope.interface import implementer
2
3 from chameleon.zpt.template import PageTextTemplateFile
4
5 from pyramid.interfaces import ITemplateRenderer
6
7 from pyramid.decorator import reify
8 from pyramid import renderers
9
10 def renderer_factory(info):
11 return renderers.template_renderer_factory(info, TextTemplateRenderer)
12
13 @implementer(ITemplateRenderer)
14 class TextTemplateRenderer(object):
15 def __init__(self, path, lookup, macro=None):
16 self.path = path
17 self.lookup = lookup
18 # text template renderers have no macros, so we ignore the
19 # macro arg
20
21 @reify # avoid looking up reload_templates before manager pushed
22 def template(self):
23 return PageTextTemplateFile(self.path,
24 auto_reload=self.lookup.auto_reload,
25 debug=self.lookup.debug,
26 translate=self.lookup.translate)
27
28 def implementation(self):
29 return self.template
30
31 def __call__(self, value, system):
32 try:
33 system.update(value)
34 except (TypeError, ValueError):
35 raise ValueError('renderer was passed non-dictionary as value')
36 result = self.template(**system)
37 return result
38
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyramid/chameleon_text.py b/pyramid/chameleon_text.py
--- a/pyramid/chameleon_text.py
+++ b/pyramid/chameleon_text.py
@@ -1,7 +1,5 @@
from zope.interface import implementer
-from chameleon.zpt.template import PageTextTemplateFile
-
from pyramid.interfaces import ITemplateRenderer
from pyramid.decorator import reify
@@ -20,6 +18,7 @@
@reify # avoid looking up reload_templates before manager pushed
def template(self):
+ from chameleon.zpt.template import PageTextTemplateFile
return PageTextTemplateFile(self.path,
auto_reload=self.lookup.auto_reload,
debug=self.lookup.debug,
diff --git a/pyramid/chameleon_zpt.py b/pyramid/chameleon_zpt.py
--- a/pyramid/chameleon_zpt.py
+++ b/pyramid/chameleon_zpt.py
@@ -1,7 +1,5 @@
from zope.interface import implementer
-from chameleon.zpt.template import PageTemplateFile
-
from pyramid.interfaces import ITemplateRenderer
from pyramid.decorator import reify
from pyramid import renderers
@@ -18,6 +16,7 @@
@reify # avoid looking up reload_templates before manager pushed
def template(self):
+ from chameleon.zpt.template import PageTemplateFile
tf = PageTemplateFile(
self.path,
auto_reload=self.lookup.auto_reload,
|
{"golden_diff": "diff --git a/pyramid/chameleon_text.py b/pyramid/chameleon_text.py\n--- a/pyramid/chameleon_text.py\n+++ b/pyramid/chameleon_text.py\n@@ -1,7 +1,5 @@\n from zope.interface import implementer\n \n-from chameleon.zpt.template import PageTextTemplateFile\n-\n from pyramid.interfaces import ITemplateRenderer\n \n from pyramid.decorator import reify\n@@ -20,6 +18,7 @@\n \n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n+ from chameleon.zpt.template import PageTextTemplateFile\n return PageTextTemplateFile(self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\ndiff --git a/pyramid/chameleon_zpt.py b/pyramid/chameleon_zpt.py\n--- a/pyramid/chameleon_zpt.py\n+++ b/pyramid/chameleon_zpt.py\n@@ -1,7 +1,5 @@\n from zope.interface import implementer\n \n-from chameleon.zpt.template import PageTemplateFile\n-\n from pyramid.interfaces import ITemplateRenderer\n from pyramid.decorator import reify\n from pyramid import renderers\n@@ -18,6 +16,7 @@\n \n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n+ from chameleon.zpt.template import PageTemplateFile\n tf = PageTemplateFile(\n self.path,\n auto_reload=self.lookup.auto_reload,\n", "issue": "Pyramid app frozen with py2exe fails because of Chameleon imports\nIf I freeze a Pyramid application with py2exe and try to run it, it fails during the startup, with the following traceback:\n\n```\nTraceback (most recent call last):\n File \"web_server.py\", line 2, in <module>\n File \"pyramid\\config\\__init__.pyc\", line 66, in <module>\n File \"pyramid\\config\\rendering.pyc\", line 11, in <module>\n File \"pyramid\\chameleon_text.pyc\", line 3, in <module>\n File \"chameleon\\__init__.pyc\", line 1, in <module>\n File \"chameleon\\zpt\\template.pyc\", line 10, in <module>\n File \"chameleon\\tales.pyc\", line 19, in <module>\n File \"chameleon\\compiler.pyc\", line 145, in <module>\n File \"chameleon\\codegen.pyc\", line 93, in template\n File \"inspect.pyc\", line 701, in getsource\n File \"inspect.pyc\", line 690, in getsourcelines\n File \"inspect.pyc\", line 538, in findsource\nIOError: could not get source code\n```\n\nMy application doesn't use Chameleon, but the Pyramid modules still import it, which ultimately causes the .exe to fail to run.\n\n", "before_files": [{"content": "from zope.interface import implementer\n\nfrom chameleon.zpt.template import PageTemplateFile\n\nfrom pyramid.interfaces import ITemplateRenderer\nfrom pyramid.decorator import reify\nfrom pyramid import renderers\n\ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, ZPTTemplateRenderer)\n\n@implementer(ITemplateRenderer)\nclass ZPTTemplateRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n self.macro = macro\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n tf = PageTemplateFile(\n self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate\n )\n if self.macro:\n # render only the portion of the template included in a\n # define-macro named the value of self.macro\n macro_renderer = tf.macros[self.macro].include\n tf._render = macro_renderer\n return tf\n\n def implementation(self):\n return self.template\n \n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = 
self.template(**system)\n return result\n\n", "path": "pyramid/chameleon_zpt.py"}, {"content": "from zope.interface import implementer\n\nfrom chameleon.zpt.template import PageTextTemplateFile\n\nfrom pyramid.interfaces import ITemplateRenderer\n\nfrom pyramid.decorator import reify\nfrom pyramid import renderers\n\ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, TextTemplateRenderer)\n\n@implementer(ITemplateRenderer)\nclass TextTemplateRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n # text template renderers have no macros, so we ignore the\n # macro arg\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n return PageTextTemplateFile(self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate)\n\n def implementation(self):\n return self.template\n \n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = self.template(**system)\n return result\n\n", "path": "pyramid/chameleon_text.py"}], "after_files": [{"content": "from zope.interface import implementer\n\nfrom pyramid.interfaces import ITemplateRenderer\nfrom pyramid.decorator import reify\nfrom pyramid import renderers\n\ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, ZPTTemplateRenderer)\n\n@implementer(ITemplateRenderer)\nclass ZPTTemplateRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n self.macro = macro\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n from chameleon.zpt.template import PageTemplateFile\n tf = PageTemplateFile(\n self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate\n )\n if self.macro:\n # render only the portion of the template included in a\n # define-macro named the value of self.macro\n macro_renderer = tf.macros[self.macro].include\n tf._render = macro_renderer\n return tf\n\n def implementation(self):\n return self.template\n \n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = self.template(**system)\n return result\n\n", "path": "pyramid/chameleon_zpt.py"}, {"content": "from zope.interface import implementer\n\nfrom pyramid.interfaces import ITemplateRenderer\n\nfrom pyramid.decorator import reify\nfrom pyramid import renderers\n\ndef renderer_factory(info):\n return renderers.template_renderer_factory(info, TextTemplateRenderer)\n\n@implementer(ITemplateRenderer)\nclass TextTemplateRenderer(object):\n def __init__(self, path, lookup, macro=None):\n self.path = path\n self.lookup = lookup\n # text template renderers have no macros, so we ignore the\n # macro arg\n\n @reify # avoid looking up reload_templates before manager pushed\n def template(self):\n from chameleon.zpt.template import PageTextTemplateFile\n return PageTextTemplateFile(self.path,\n auto_reload=self.lookup.auto_reload,\n debug=self.lookup.debug,\n translate=self.lookup.translate)\n\n def implementation(self):\n return self.template\n \n def __call__(self, value, system):\n try:\n system.update(value)\n except (TypeError, ValueError):\n raise ValueError('renderer was passed non-dictionary as value')\n result = 
self.template(**system)\n return result\n\n", "path": "pyramid/chameleon_text.py"}]}
| 1,273 | 308 |
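The golden diff in the row above resolves the freeze failure by moving the `chameleon` imports from module level into the `template` properties, so the dependency is only loaded when a template is actually rendered. The snippet below is a minimal, self-contained sketch of that deferred-import pattern; `LazyZPTRenderer` and the use of `string.Template` are illustrative stand-ins and are not part of Pyramid or Chameleon.

```python
class LazyZPTRenderer:
    """Mirrors the patch above: the templating import happens on first use."""

    def __init__(self, path):
        self.path = path

    @property
    def template(self):
        # Deferred import: a frozen app that never renders a template never
        # executes this line, so the templating backend stays out of start-up.
        from string import Template  # stands in for chameleon's PageTemplateFile
        return Template("Hello, $name!")


renderer = LazyZPTRenderer("example.pt")            # no templating import yet
print(renderer.template.substitute(name="world"))   # the import runs only now
```

The trade-off is the usual one for deferred imports: the first render pays a small import cost, but applications that never touch the renderer never pull in the optional dependency.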
gh_patches_debug_2156
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-569
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Write tests for bokeh templating software
With the merge of #459, bokeh templating will be implemented for `jwql`. We should address the test coverage for this software.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/bokeh_templating/example/main.py`
Content:
```
1 """
2 This is a minimal example demonstrating how to create a Bokeh app using
3 the ``bokeh-templating`` package and the associated YAML template files.
4
5 Author
6 -------
7
8 - Graham Kanarek
9
10 Dependencies
11 ------------
12
13 The user must have PyYAML, Bokeh, and the ``bokeh-templating``
14 packages installed.
15 """
16
17 import os
18 import numpy as np
19
20 from jwql.bokeh_templating import BokehTemplate
21
22 file_dir = os.path.dirname(os.path.realpath(__file__))
23
24
25 class TestBokehApp(BokehTemplate):
26 """This is a minimal ``BokehTemplate`` app."""
27
28 def pre_init(self):
29 """Before creating the Bokeh interface (by parsing the interface
30 file), we must initialize our ``a`` and ``b`` variables, and set
31 the path to the interface file.
32 """
33
34 self.a, self.b = 4, 2
35
36 self.format_string = None
37 self.interface_file = os.path.join(file_dir, "example_interface.yaml")
38
39 # No post-initialization tasks are required.
40 post_init = None
41
42 @property
43 def x(self):
44 """The x-value of the Lissajous curves."""
45 return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500))
46
47 @property
48 def y(self):
49 """The y-value of the Lissajous curves."""
50 return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500))
51
52 def controller(self, attr, old, new):
53 """This is the controller function which is used to update the
54 curves when the sliders are adjusted. Note the use of the
55 ``self.refs`` dictionary for accessing the Bokeh object
56 attributes."""
57 self.a = self.refs["a_slider"].value
58 self.b = self.refs["b_slider"].value
59
60 self.refs["figure_source"].data = {'x': self.x, 'y': self.y}
61
62
63 TestBokehApp()
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jwql/bokeh_templating/example/main.py b/jwql/bokeh_templating/example/main.py
--- a/jwql/bokeh_templating/example/main.py
+++ b/jwql/bokeh_templating/example/main.py
@@ -24,6 +24,8 @@
class TestBokehApp(BokehTemplate):
"""This is a minimal ``BokehTemplate`` app."""
+
+ _embed = True
def pre_init(self):
"""Before creating the Bokeh interface (by parsing the interface
|
{"golden_diff": "diff --git a/jwql/bokeh_templating/example/main.py b/jwql/bokeh_templating/example/main.py\n--- a/jwql/bokeh_templating/example/main.py\n+++ b/jwql/bokeh_templating/example/main.py\n@@ -24,6 +24,8 @@\n \n class TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n+ \n+ _embed = True\n \n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n", "issue": "Write tests for bokeh templating software\nWith the merge of #459, bokeh templating will be implemented for `jwql`. We should address the test coverage for this software. \n", "before_files": [{"content": "\"\"\"\nThis is a minimal example demonstrating how to create a Bokeh app using\nthe ``bokeh-templating`` package and the associated YAML template files.\n\nAuthor\n-------\n\n - Graham Kanarek\n\nDependencies\n------------\n\n The user must have PyYAML, Bokeh, and the ``bokeh-templating``\n packages installed.\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom jwql.bokeh_templating import BokehTemplate\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n\n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n file), we must initialize our ``a`` and ``b`` variables, and set\n the path to the interface file.\n \"\"\"\n\n self.a, self.b = 4, 2\n\n self.format_string = None\n self.interface_file = os.path.join(file_dir, \"example_interface.yaml\")\n\n # No post-initialization tasks are required.\n post_init = None\n\n @property\n def x(self):\n \"\"\"The x-value of the Lissajous curves.\"\"\"\n return 4. * np.sin(self.a * np.linspace(0, 2 * np.pi, 500))\n\n @property\n def y(self):\n \"\"\"The y-value of the Lissajous curves.\"\"\"\n return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500))\n\n def controller(self, attr, old, new):\n \"\"\"This is the controller function which is used to update the\n curves when the sliders are adjusted. Note the use of the\n ``self.refs`` dictionary for accessing the Bokeh object\n attributes.\"\"\"\n self.a = self.refs[\"a_slider\"].value\n self.b = self.refs[\"b_slider\"].value\n\n self.refs[\"figure_source\"].data = {'x': self.x, 'y': self.y}\n\n\nTestBokehApp()\n", "path": "jwql/bokeh_templating/example/main.py"}], "after_files": [{"content": "\"\"\"\nThis is a minimal example demonstrating how to create a Bokeh app using\nthe ``bokeh-templating`` package and the associated YAML template files.\n\nAuthor\n-------\n\n - Graham Kanarek\n\nDependencies\n------------\n\n The user must have PyYAML, Bokeh, and the ``bokeh-templating``\n packages installed.\n\"\"\"\n\nimport os\nimport numpy as np\n\nfrom jwql.bokeh_templating import BokehTemplate\n\nfile_dir = os.path.dirname(os.path.realpath(__file__))\n\n\nclass TestBokehApp(BokehTemplate):\n \"\"\"This is a minimal ``BokehTemplate`` app.\"\"\"\n \n _embed = True\n\n def pre_init(self):\n \"\"\"Before creating the Bokeh interface (by parsing the interface\n file), we must initialize our ``a`` and ``b`` variables, and set\n the path to the interface file.\n \"\"\"\n\n self.a, self.b = 4, 2\n\n self.format_string = None\n self.interface_file = os.path.join(file_dir, \"example_interface.yaml\")\n\n # No post-initialization tasks are required.\n post_init = None\n\n @property\n def x(self):\n \"\"\"The x-value of the Lissajous curves.\"\"\"\n return 4. 
* np.sin(self.a * np.linspace(0, 2 * np.pi, 500))\n\n @property\n def y(self):\n \"\"\"The y-value of the Lissajous curves.\"\"\"\n return 3. * np.sin(self.b * np.linspace(0, 2 * np.pi, 500))\n\n def controller(self, attr, old, new):\n \"\"\"This is the controller function which is used to update the\n curves when the sliders are adjusted. Note the use of the\n ``self.refs`` dictionary for accessing the Bokeh object\n attributes.\"\"\"\n self.a = self.refs[\"a_slider\"].value\n self.b = self.refs[\"b_slider\"].value\n\n self.refs[\"figure_source\"].data = {'x': self.x, 'y': self.y}\n\n\nTestBokehApp()\n", "path": "jwql/bokeh_templating/example/main.py"}]}
| 880 | 125 |
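The golden diff in this row only adds an `_embed = True` class attribute to the example app, while the issue asks for test coverage of the templating package. Below is a minimal pytest-style sketch of such a test; it assumes the example module is importable in the test environment (importing `jwql.bokeh_templating.example.main` instantiates `TestBokehApp` at module level, so Bokeh and the YAML interface file must be available), which may not hold on every CI setup.

```python
def test_example_app_sets_embed_flag():
    # Assumption: the example module imports cleanly in the test environment.
    from jwql.bokeh_templating.example.main import TestBokehApp

    # The patch above adds this class attribute so the app can be embedded.
    assert TestBokehApp._embed is True
```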
gh_patches_debug_347
|
rasdani/github-patches
|
git_diff
|
kivy__python-for-android-2436
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"diff" files are ignored during "pip install ."
In `setup.py`, the "diff" extension is not listed:
https://github.com/kivy/python-for-android/blob/develop/setup.py
```python
package_data = {'': ['*.tmpl',
'*.patch', ], }
```
and therefore this `diff` patch:
https://github.com/kivy/python-for-android/blob/develop/pythonforandroid/recipes/python3/patches/reproducible-buildinfo.diff
is not installed during `pip` invocation:
```sh
cd /tmp
git clone --depth 1 https://github.com/kivy/python-for-android.git
cd python-for-android
pip install .
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1
2 import glob
3 from io import open # for open(..,encoding=...) parameter in python 2
4 from os import walk
5 from os.path import join, dirname, sep
6 import re
7 from setuptools import setup, find_packages
8
9 # NOTE: All package data should also be set in MANIFEST.in
10
11 packages = find_packages()
12
13 package_data = {'': ['*.tmpl',
14 '*.patch', ], }
15
16 data_files = []
17
18
19 # must be a single statement since buildozer is currently parsing it, refs:
20 # https://github.com/kivy/buildozer/issues/722
21 install_reqs = [
22 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',
23 'enum34; python_version<"3.4"', 'sh>=1.10; sys_platform!="nt"',
24 'pep517<0.7.0', 'toml',
25 ]
26 # (pep517 and toml are used by pythonpackage.py)
27
28
29 # By specifying every file manually, package_data will be able to
30 # include them in binary distributions. Note that we have to add
31 # everything as a 'pythonforandroid' rule, using '' apparently doesn't
32 # work.
33 def recursively_include(results, directory, patterns):
34 for root, subfolders, files in walk(directory):
35 for fn in files:
36 if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):
37 continue
38 filename = join(root, fn)
39 directory = 'pythonforandroid'
40 if directory not in results:
41 results[directory] = []
42 results[directory].append(join(*filename.split(sep)[1:]))
43
44
45 recursively_include(package_data, 'pythonforandroid/recipes',
46 ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',
47 '*.mk', '*.jam', ])
48 recursively_include(package_data, 'pythonforandroid/bootstraps',
49 ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',
50 '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',
51 '*.gradle', '.gitkeep', 'gradlew*', '*.jar', "*.patch", ])
52 recursively_include(package_data, 'pythonforandroid/bootstraps',
53 ['sdl-config', ])
54 recursively_include(package_data, 'pythonforandroid/bootstraps/webview',
55 ['*.html', ])
56 recursively_include(package_data, 'pythonforandroid',
57 ['liblink', 'biglink', 'liblink.sh'])
58
59 with open(join(dirname(__file__), 'README.md'),
60 encoding="utf-8",
61 errors="replace",
62 ) as fileh:
63 long_description = fileh.read()
64
65 init_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')
66 version = None
67 try:
68 with open(init_filen,
69 encoding="utf-8",
70 errors="replace"
71 ) as fileh:
72 lines = fileh.readlines()
73 except IOError:
74 pass
75 else:
76 for line in lines:
77 line = line.strip()
78 if line.startswith('__version__ = '):
79 matches = re.findall(r'["\'].+["\']', line)
80 if matches:
81 version = matches[0].strip("'").strip('"')
82 break
83 if version is None:
84 raise Exception('Error: version could not be loaded from {}'.format(init_filen))
85
86 setup(name='python-for-android',
87 version=version,
88 description='Android APK packager for Python scripts and apps',
89 long_description=long_description,
90 long_description_content_type='text/markdown',
91 python_requires=">=3.6.0",
92 author='The Kivy team',
93 author_email='[email protected]',
94 url='https://github.com/kivy/python-for-android',
95 license='MIT',
96 install_requires=install_reqs,
97 entry_points={
98 'console_scripts': [
99 'python-for-android = pythonforandroid.entrypoints:main',
100 'p4a = pythonforandroid.entrypoints:main',
101 ],
102 'distutils.commands': [
103 'apk = pythonforandroid.bdistapk:BdistAPK',
104 'aar = pythonforandroid.bdistapk:BdistAAR',
105 ],
106 },
107 classifiers=[
108 'Development Status :: 5 - Production/Stable',
109 'Intended Audience :: Developers',
110 'License :: OSI Approved :: MIT License',
111 'Operating System :: Microsoft :: Windows',
112 'Operating System :: OS Independent',
113 'Operating System :: POSIX :: Linux',
114 'Operating System :: MacOS :: MacOS X',
115 'Operating System :: Android',
116 'Programming Language :: C',
117 'Programming Language :: Python :: 3',
118 'Programming Language :: Python :: 3.6',
119 'Programming Language :: Python :: 3.7',
120 'Programming Language :: Python :: 3.8',
121 'Topic :: Software Development',
122 'Topic :: Utilities',
123 ],
124 packages=packages,
125 package_data=package_data,
126 )
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,8 @@
packages = find_packages()
package_data = {'': ['*.tmpl',
- '*.patch', ], }
+ '*.patch',
+ '*.diff', ], }
data_files = []
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,8 @@\n packages = find_packages()\n \n package_data = {'': ['*.tmpl',\n- '*.patch', ], }\n+ '*.patch',\n+ '*.diff', ], }\n \n data_files = []\n", "issue": "\"diff\" files are ignored during \"pip install .\"\nin `setup.py` the \"diff\" is not listed: \r\nhttps://github.com/kivy/python-for-android/blob/develop/setup.py\r\n```python\r\npackage_data = {'': ['*.tmpl',\r\n '*.patch', ], }\r\n```\r\n\r\nand therefore this `diff` patch:\r\nhttps://github.com/kivy/python-for-android/blob/develop/pythonforandroid/recipes/python3/patches/reproducible-buildinfo.diff\r\n\r\nis not installed during `pip` invocation:\r\n```sh\r\ncd /tmp\r\ngit clone --depth 1 https://github.com/kivy/python-for-android.git\r\ncd python-for-android\r\npip install .\r\n```\r\n\n", "before_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch', ], }\n\ndata_files = []\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0', 'toml',\n]\n# (pep517 and toml are used by pythonpackage.py)\n\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from 
{}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires=\">=3.6.0\",\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n 'aar = pythonforandroid.bdistapk:BdistAAR',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}], "after_files": [{"content": "\nimport glob\nfrom io import open # for open(..,encoding=...) parameter in python 2\nfrom os import walk\nfrom os.path import join, dirname, sep\nimport re\nfrom setuptools import setup, find_packages\n\n# NOTE: All package data should also be set in MANIFEST.in\n\npackages = find_packages()\n\npackage_data = {'': ['*.tmpl',\n '*.patch',\n '*.diff', ], }\n\ndata_files = []\n\n\n# must be a single statement since buildozer is currently parsing it, refs:\n# https://github.com/kivy/buildozer/issues/722\ninstall_reqs = [\n 'appdirs', 'colorama>=0.3.3', 'jinja2', 'six',\n 'enum34; python_version<\"3.4\"', 'sh>=1.10; sys_platform!=\"nt\"',\n 'pep517<0.7.0', 'toml',\n]\n# (pep517 and toml are used by pythonpackage.py)\n\n\n# By specifying every file manually, package_data will be able to\n# include them in binary distributions. 
Note that we have to add\n# everything as a 'pythonforandroid' rule, using '' apparently doesn't\n# work.\ndef recursively_include(results, directory, patterns):\n for root, subfolders, files in walk(directory):\n for fn in files:\n if not any(glob.fnmatch.fnmatch(fn, pattern) for pattern in patterns):\n continue\n filename = join(root, fn)\n directory = 'pythonforandroid'\n if directory not in results:\n results[directory] = []\n results[directory].append(join(*filename.split(sep)[1:]))\n\n\nrecursively_include(package_data, 'pythonforandroid/recipes',\n ['*.patch', 'Setup*', '*.pyx', '*.py', '*.c', '*.h',\n '*.mk', '*.jam', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['*.properties', '*.xml', '*.java', '*.tmpl', '*.txt', '*.png',\n '*.mk', '*.c', '*.h', '*.py', '*.sh', '*.jpg', '*.aidl',\n '*.gradle', '.gitkeep', 'gradlew*', '*.jar', \"*.patch\", ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps',\n ['sdl-config', ])\nrecursively_include(package_data, 'pythonforandroid/bootstraps/webview',\n ['*.html', ])\nrecursively_include(package_data, 'pythonforandroid',\n ['liblink', 'biglink', 'liblink.sh'])\n\nwith open(join(dirname(__file__), 'README.md'),\n encoding=\"utf-8\",\n errors=\"replace\",\n ) as fileh:\n long_description = fileh.read()\n\ninit_filen = join(dirname(__file__), 'pythonforandroid', '__init__.py')\nversion = None\ntry:\n with open(init_filen,\n encoding=\"utf-8\",\n errors=\"replace\"\n ) as fileh:\n lines = fileh.readlines()\nexcept IOError:\n pass\nelse:\n for line in lines:\n line = line.strip()\n if line.startswith('__version__ = '):\n matches = re.findall(r'[\"\\'].+[\"\\']', line)\n if matches:\n version = matches[0].strip(\"'\").strip('\"')\n break\nif version is None:\n raise Exception('Error: version could not be loaded from {}'.format(init_filen))\n\nsetup(name='python-for-android',\n version=version,\n description='Android APK packager for Python scripts and apps',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires=\">=3.6.0\",\n author='The Kivy team',\n author_email='[email protected]',\n url='https://github.com/kivy/python-for-android',\n license='MIT',\n install_requires=install_reqs,\n entry_points={\n 'console_scripts': [\n 'python-for-android = pythonforandroid.entrypoints:main',\n 'p4a = pythonforandroid.entrypoints:main',\n ],\n 'distutils.commands': [\n 'apk = pythonforandroid.bdistapk:BdistAPK',\n 'aar = pythonforandroid.bdistapk:BdistAAR',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: OS Independent',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Android',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n packages=packages,\n package_data=package_data,\n )\n", "path": "setup.py"}]}
| 1,741 | 71 |
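The one-line fix above adds `'*.diff'` to the default `package_data` globs so that `pip install .` ships those patch files. The sketch below shows the same pattern in isolation; the project name and version are placeholders, not python-for-android's real metadata. Note the repository's own reminder that `MANIFEST.in` must mirror whatever patterns are listed here for source distributions.

```python
# Minimal illustration of the fix: every extension used by packaged data
# files (including patches saved with a ".diff" suffix) must match a glob in
# package_data, otherwise built distributions silently omit those files.
from setuptools import setup, find_packages

package_data = {"": ["*.tmpl",
                     "*.patch",
                     "*.diff", ]}  # ".diff" is the addition from the golden diff

setup(
    name="example-project",   # placeholder metadata
    version="0.0.0",
    packages=find_packages(),
    package_data=package_data,
)
```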
gh_patches_debug_18046
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-3745
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting of PYTHONHASHSEED has no effect
<!--
### Common bugs:
1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79).
2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq)
-->
## 🐛 Bug
(Previously submitted here: https://github.com/PyTorchLightning/pytorch-lightning/issues/1939, but I didn't use the correct template, so now I'm resubmitting)
In https://github.com/PyTorchLightning/pytorch-lightning/blob/9045b6c599df3871da6aaaa310f62d3f1364c632/pytorch_lightning/trainer/seed.py#L32
, `PYTHONHASHSEED` is assigned a value in order to ensure reproducibility. However, this assignment has no effect. In fact, this assignment might mislead the user or any logging software into believing that `PYTHONHASHSEED` has a specific value, when in fact it has another.
To see that setting `PYTHONHASHSEED` inside the current program has no effect, run the following two commands:
```
PYTHONHASHSEED=1 python -c "import os; print(hash('a'))"
PYTHONHASHSEED=1 python -c "import os; os.environ['PYTHONHASHSEED']='2'; print(hash('a'))"
```
The commands should output the same value, meaning that setting `PYTHONHASHSEED` after the process has started has no effect.
The following commands will likely output different values, also indicating that setting `PYTHONHASHSEED` after the process has started has no effect:
```
unset PYTHONHASHSEED # make sure it is not already set
python -c "import os; os.environ['PYTHONHASHSEED']='2'; print(hash('a'))"
python -c "import os; os.environ['PYTHONHASHSEED']='2'; print(hash('a'))"
```
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior:
1. Start python terminal with `PYTHONHASHSEED=1 python`
2. Run
```python
import pytorch_lightning as pl
pl.seed_everything(100)
print(hash('a'))
# >>> 8432517439229126278
```
3. Start new python terminal with `PYTHONHASHSEED=2 python`
4. Run
```python
import pytorch_lightning as pl
pl.seed_everything(100)
print(hash('a'))
# >>> -8333094867672744108
```
### Expected behavior
Expect output of `hash` function to be the same in both cases. The examples demonstrate that this is not possible.
<!-- A clear and concise description of what you expected to happen. -->
### Environment
```
* CUDA:
- GPU:
- available: False
- version: 10.2
* Packages:
- numpy: 1.18.5
- pyTorch_debug: False
- pyTorch_version: 1.5.0
- pytorch-lightning: 0.7.6
- tensorboard: 2.2.2
- tqdm: 4.46.1
* System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor:
- python: 3.8.3
- version: #1 SMP PREEMPT Wed May 27 20:25:12 UTC 2020
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/utilities/seed.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helper functions to help with reproducibility of models. """
16
17 import os
18 import random
19 from typing import Optional
20
21 import numpy as np
22 import torch
23
24 from pytorch_lightning import _logger as log
25
26
27 def seed_everything(seed: Optional[int] = None) -> int:
28 """
29 Function that sets seed for pseudo-random number generators in:
30 pytorch, numpy, python.random and sets PYTHONHASHSEED environment variable.
31 In addition, sets the env variable `PL_GLOBAL_SEED` which will be passed to
32 spawned subprocesses (e.g. ddp_spawn backend).
33
34 Args:
35 seed: the integer value seed for global random state in Lightning.
36 If `None`, will read seed from `PL_GLOBAL_SEED` env variable
37 or select it randomly.
38 """
39 max_seed_value = np.iinfo(np.uint32).max
40 min_seed_value = np.iinfo(np.uint32).min
41
42 try:
43 if seed is None:
44 seed = os.environ.get("PL_GLOBAL_SEED", _select_seed_randomly(min_seed_value, max_seed_value))
45 seed = int(seed)
46 except (TypeError, ValueError):
47 seed = _select_seed_randomly(min_seed_value, max_seed_value)
48
49 if (seed > max_seed_value) or (seed < min_seed_value):
50 log.warning(
51 f"{seed} is not in bounds, \
52 numpy accepts from {min_seed_value} to {max_seed_value}"
53 )
54 seed = _select_seed_randomly(min_seed_value, max_seed_value)
55
56 os.environ["PYTHONHASHSEED"] = str(seed)
57 os.environ["PL_GLOBAL_SEED"] = str(seed)
58 random.seed(seed)
59 np.random.seed(seed)
60 torch.manual_seed(seed)
61 torch.cuda.manual_seed_all(seed)
62 return seed
63
64
65 def _select_seed_randomly(min_seed_value: int = 0, max_seed_value: int = 255) -> int:
66 seed = random.randint(min_seed_value, max_seed_value)
67 log.warning(f"No correct seed found, seed set to {seed}")
68 return seed
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/utilities/seed.py b/pytorch_lightning/utilities/seed.py
--- a/pytorch_lightning/utilities/seed.py
+++ b/pytorch_lightning/utilities/seed.py
@@ -27,7 +27,7 @@
def seed_everything(seed: Optional[int] = None) -> int:
"""
Function that sets seed for pseudo-random number generators in:
- pytorch, numpy, python.random and sets PYTHONHASHSEED environment variable.
+ pytorch, numpy, python.random
In addition, sets the env variable `PL_GLOBAL_SEED` which will be passed to
spawned subprocesses (e.g. ddp_spawn backend).
@@ -53,7 +53,6 @@
)
seed = _select_seed_randomly(min_seed_value, max_seed_value)
- os.environ["PYTHONHASHSEED"] = str(seed)
os.environ["PL_GLOBAL_SEED"] = str(seed)
random.seed(seed)
np.random.seed(seed)
|
{"golden_diff": "diff --git a/pytorch_lightning/utilities/seed.py b/pytorch_lightning/utilities/seed.py\n--- a/pytorch_lightning/utilities/seed.py\n+++ b/pytorch_lightning/utilities/seed.py\n@@ -27,7 +27,7 @@\n def seed_everything(seed: Optional[int] = None) -> int:\n \"\"\"\n Function that sets seed for pseudo-random number generators in:\n- pytorch, numpy, python.random and sets PYTHONHASHSEED environment variable.\n+ pytorch, numpy, python.random\n In addition, sets the env variable `PL_GLOBAL_SEED` which will be passed to\n spawned subprocesses (e.g. ddp_spawn backend).\n \n@@ -53,7 +53,6 @@\n )\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n \n- os.environ[\"PYTHONHASHSEED\"] = str(seed)\n os.environ[\"PL_GLOBAL_SEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n", "issue": "Setting of PYTHONHASHSEED has no effect\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\n(Previously submitted here: https://github.com/PyTorchLightning/pytorch-lightning/issues/1939, but I didn't use the correct template, so now I'm resubmitting)\r\n\r\nIn https://github.com/PyTorchLightning/pytorch-lightning/blob/9045b6c599df3871da6aaaa310f62d3f1364c632/pytorch_lightning/trainer/seed.py#L32\r\n, `PYTHONHASHSEED` is assigned a value in order to ensure reproducability. However, this assignment has no effect. In fact, this assignment might mislead the user or any logging software into believing that `PYTHONHASHSEED` has a specific value, when in fact it has another.\r\n\r\nTo see that setting `PYTHONHASHSEED` inside the current program has no effect, run the following two commands:\r\n```\r\nPYTHONHASHSEED=1 python -c \"import os; print(hash('a'))\"\r\nPYTHONHASHSEED=1 python -c \"import os; os.environ['PYTHONHASHSEED']='2'; print(hash('a'))\"\r\n```\r\nThe commands should output the same value, meaning that setting `PYTHONHASHSEED` after the process has started has no effect.\r\n\r\nThe following commands will likely output different values, also indicating that setting `PYTHONHASHSEED` after the process has started has no effect:\r\n```\r\nunset PYTHONHASHSEED # make sure it is not already set\r\npython -c \"import os; os.environ['PYTHONHASHSEED']='2'; print(hash('a'))\"\r\npython -c \"import os; os.environ['PYTHONHASHSEED']='2'; print(hash('a'))\"\r\n```\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Start python terminal with `PYTHONHASHSEED=1 python`\r\n2. Run \r\n```python\r\nimport pytorch_lightning as pl\r\npl.seed_everything(100)\r\nprint(hash('a'))\r\n# >>> 8432517439229126278\r\n```\r\n3. Start new python terminal with `PYTHONHASHSEED=2 python`\r\n4. Run\r\n```python\r\nimport pytorch_lightning as pl\r\npl.seed_everything(100)\r\nprint(hash('a'))\r\n# >>> -8333094867672744108\r\n```\r\n\r\n### Expected behavior\r\n\r\nExpect output of `hash` function to be the same in both cases. The examples demonstrate that this is not possible.\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n\r\n### Environment\r\n\r\n```\r\n* CUDA:\r\n\t- GPU:\r\n\t- available: False\r\n\t- version: 10.2\r\n* Packages:\r\n\t- numpy: 1.18.5\r\n\t- pyTorch_debug: False\r\n\t- pyTorch_version: 1.5.0\r\n\t- pytorch-lightning: 0.7.6\r\n\t- tensorboard: 2.2.2\r\n\t- tqdm: 4.46.1\r\n* System:\r\n\t- OS: Linux\r\n\t- architecture:\r\n\t\t- 64bit\r\n\t\t- ELF\r\n\t- processor: \r\n\t- python: 3.8.3\r\n\t- version: #1 SMP PREEMPT Wed May 27 20:25:12 UTC 2020\r\n\r\n```\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions to help with reproducibility of models. \"\"\"\n\nimport os\nimport random\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning import _logger as log\n\n\ndef seed_everything(seed: Optional[int] = None) -> int:\n \"\"\"\n Function that sets seed for pseudo-random number generators in:\n pytorch, numpy, python.random and sets PYTHONHASHSEED environment variable.\n In addition, sets the env variable `PL_GLOBAL_SEED` which will be passed to\n spawned subprocesses (e.g. ddp_spawn backend).\n\n Args:\n seed: the integer value seed for global random state in Lightning.\n If `None`, will read seed from `PL_GLOBAL_SEED` env variable\n or select it randomly.\n \"\"\"\n max_seed_value = np.iinfo(np.uint32).max\n min_seed_value = np.iinfo(np.uint32).min\n\n try:\n if seed is None:\n seed = os.environ.get(\"PL_GLOBAL_SEED\", _select_seed_randomly(min_seed_value, max_seed_value))\n seed = int(seed)\n except (TypeError, ValueError):\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n\n if (seed > max_seed_value) or (seed < min_seed_value):\n log.warning(\n f\"{seed} is not in bounds, \\\n numpy accepts from {min_seed_value} to {max_seed_value}\"\n )\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n os.environ[\"PL_GLOBAL_SEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n return seed\n\n\ndef _select_seed_randomly(min_seed_value: int = 0, max_seed_value: int = 255) -> int:\n seed = random.randint(min_seed_value, max_seed_value)\n log.warning(f\"No correct seed found, seed set to {seed}\")\n return seed\n", "path": "pytorch_lightning/utilities/seed.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helper functions to help with 
reproducibility of models. \"\"\"\n\nimport os\nimport random\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning import _logger as log\n\n\ndef seed_everything(seed: Optional[int] = None) -> int:\n \"\"\"\n Function that sets seed for pseudo-random number generators in:\n pytorch, numpy, python.random\n In addition, sets the env variable `PL_GLOBAL_SEED` which will be passed to\n spawned subprocesses (e.g. ddp_spawn backend).\n\n Args:\n seed: the integer value seed for global random state in Lightning.\n If `None`, will read seed from `PL_GLOBAL_SEED` env variable\n or select it randomly.\n \"\"\"\n max_seed_value = np.iinfo(np.uint32).max\n min_seed_value = np.iinfo(np.uint32).min\n\n try:\n if seed is None:\n seed = os.environ.get(\"PL_GLOBAL_SEED\", _select_seed_randomly(min_seed_value, max_seed_value))\n seed = int(seed)\n except (TypeError, ValueError):\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n\n if (seed > max_seed_value) or (seed < min_seed_value):\n log.warning(\n f\"{seed} is not in bounds, \\\n numpy accepts from {min_seed_value} to {max_seed_value}\"\n )\n seed = _select_seed_randomly(min_seed_value, max_seed_value)\n\n os.environ[\"PL_GLOBAL_SEED\"] = str(seed)\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n return seed\n\n\ndef _select_seed_randomly(min_seed_value: int = 0, max_seed_value: int = 255) -> int:\n seed = random.randint(min_seed_value, max_seed_value)\n log.warning(f\"No correct seed found, seed set to {seed}\")\n return seed\n", "path": "pytorch_lightning/utilities/seed.py"}]}
| 1,836 | 220 |
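The fix above simply stops setting `PYTHONHASHSEED` inside `seed_everything`, because Python reads that variable once at interpreter start-up. The snippet below is a small, self-contained demonstration of that start-up-only behaviour, restating the issue's shell commands in Python; it spawns child interpreters rather than inspecting the current process.

```python
import os
import subprocess
import sys

# Each child sets PYTHONHASHSEED to "2" *after* start-up, exactly like the
# removed line in seed_everything(); the value chosen at launch still wins.
code = "import os; os.environ['PYTHONHASHSEED'] = '2'; print(hash('a'))"


def child_hash(startup_seed):
    env = dict(os.environ, PYTHONHASHSEED=startup_seed)
    out = subprocess.run([sys.executable, "-c", code],
                         env=env, capture_output=True, text=True)
    return out.stdout.strip()


print(child_hash("1"))  # reproducible for seed 1
print(child_hash("1"))  # same value again: the start-up seed controls hash()
print(child_hash("3"))  # different value, despite the in-process '2'
```

If a truly reproducible `hash()` is required, the variable has to be set by the launching shell or process manager before the interpreter starts, which is outside what a library call can do.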
gh_patches_debug_21147
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-8565
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Banner edit fields should be larger
### NetBox version
v3.1.6
### Feature type
Change to existing functionality
### Proposed functionality
Currently the Login, Top and Bottom banner edit fields in Config revisions are small input fields: they only show a few words at a time, which makes editing the banners somewhat hard.
I propose that the input fields be shown larger, preferably as multi-line input fields, to make it easier to edit the HTML-containing banners.
### Use case
Communicating upcoming maintenances or specific login instructions is easier when the banner input fields are easier to edit.
### Database changes
None
### External dependencies
None
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/config/parameters.py`
Content:
```
1 from django import forms
2 from django.contrib.postgres.forms import SimpleArrayField
3
4
5 class ConfigParam:
6
7 def __init__(self, name, label, default, description='', field=None, field_kwargs=None):
8 self.name = name
9 self.label = label
10 self.default = default
11 self.field = field or forms.CharField
12 self.description = description
13 self.field_kwargs = field_kwargs or {}
14
15
16 PARAMS = (
17
18 # Banners
19 ConfigParam(
20 name='BANNER_LOGIN',
21 label='Login banner',
22 default='',
23 description="Additional content to display on the login page"
24 ),
25 ConfigParam(
26 name='BANNER_TOP',
27 label='Top banner',
28 default='',
29 description="Additional content to display at the top of every page"
30 ),
31 ConfigParam(
32 name='BANNER_BOTTOM',
33 label='Bottom banner',
34 default='',
35 description="Additional content to display at the bottom of every page"
36 ),
37
38 # IPAM
39 ConfigParam(
40 name='ENFORCE_GLOBAL_UNIQUE',
41 label='Globally unique IP space',
42 default=False,
43 description="Enforce unique IP addressing within the global table",
44 field=forms.BooleanField
45 ),
46 ConfigParam(
47 name='PREFER_IPV4',
48 label='Prefer IPv4',
49 default=False,
50 description="Prefer IPv4 addresses over IPv6",
51 field=forms.BooleanField
52 ),
53
54 # Racks
55 ConfigParam(
56 name='RACK_ELEVATION_DEFAULT_UNIT_HEIGHT',
57 label='Rack unit height',
58 default=22,
59 description="Default unit height for rendered rack elevations",
60 field=forms.IntegerField
61 ),
62 ConfigParam(
63 name='RACK_ELEVATION_DEFAULT_UNIT_WIDTH',
64 label='Rack unit width',
65 default=220,
66 description="Default unit width for rendered rack elevations",
67 field=forms.IntegerField
68 ),
69
70 # Security
71 ConfigParam(
72 name='ALLOWED_URL_SCHEMES',
73 label='Allowed URL schemes',
74 default=(
75 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc',
76 'xmpp',
77 ),
78 description="Permitted schemes for URLs in user-provided content",
79 field=SimpleArrayField,
80 field_kwargs={'base_field': forms.CharField()}
81 ),
82
83 # Pagination
84 ConfigParam(
85 name='PAGINATE_COUNT',
86 label='Default page size',
87 default=50,
88 field=forms.IntegerField
89 ),
90 ConfigParam(
91 name='MAX_PAGE_SIZE',
92 label='Maximum page size',
93 default=1000,
94 field=forms.IntegerField
95 ),
96
97 # Validation
98 ConfigParam(
99 name='CUSTOM_VALIDATORS',
100 label='Custom validators',
101 default={},
102 description="Custom validation rules (JSON)",
103 field=forms.JSONField
104 ),
105
106 # NAPALM
107 ConfigParam(
108 name='NAPALM_USERNAME',
109 label='NAPALM username',
110 default='',
111 description="Username to use when connecting to devices via NAPALM"
112 ),
113 ConfigParam(
114 name='NAPALM_PASSWORD',
115 label='NAPALM password',
116 default='',
117 description="Password to use when connecting to devices via NAPALM"
118 ),
119 ConfigParam(
120 name='NAPALM_TIMEOUT',
121 label='NAPALM timeout',
122 default=30,
123 description="NAPALM connection timeout (in seconds)",
124 field=forms.IntegerField
125 ),
126 ConfigParam(
127 name='NAPALM_ARGS',
128 label='NAPALM arguments',
129 default={},
130 description="Additional arguments to pass when invoking a NAPALM driver (as JSON data)",
131 field=forms.JSONField
132 ),
133
134 # Miscellaneous
135 ConfigParam(
136 name='MAINTENANCE_MODE',
137 label='Maintenance mode',
138 default=False,
139 description="Enable maintenance mode",
140 field=forms.BooleanField
141 ),
142 ConfigParam(
143 name='GRAPHQL_ENABLED',
144 label='GraphQL enabled',
145 default=True,
146 description="Enable the GraphQL API",
147 field=forms.BooleanField
148 ),
149 ConfigParam(
150 name='CHANGELOG_RETENTION',
151 label='Changelog retention',
152 default=90,
153 description="Days to retain changelog history (set to zero for unlimited)",
154 field=forms.IntegerField
155 ),
156 ConfigParam(
157 name='MAPS_URL',
158 label='Maps URL',
159 default='https://maps.google.com/?q=',
160 description="Base URL for mapping geographic locations"
161 ),
162
163 )
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/config/parameters.py b/netbox/netbox/config/parameters.py
--- a/netbox/netbox/config/parameters.py
+++ b/netbox/netbox/config/parameters.py
@@ -20,19 +20,28 @@
name='BANNER_LOGIN',
label='Login banner',
default='',
- description="Additional content to display on the login page"
+ description="Additional content to display on the login page",
+ field_kwargs={
+ 'widget': forms.Textarea(),
+ },
),
ConfigParam(
name='BANNER_TOP',
label='Top banner',
default='',
- description="Additional content to display at the top of every page"
+ description="Additional content to display at the top of every page",
+ field_kwargs={
+ 'widget': forms.Textarea(),
+ },
),
ConfigParam(
name='BANNER_BOTTOM',
label='Bottom banner',
default='',
- description="Additional content to display at the bottom of every page"
+ description="Additional content to display at the bottom of every page",
+ field_kwargs={
+ 'widget': forms.Textarea(),
+ },
),
# IPAM
|
{"golden_diff": "diff --git a/netbox/netbox/config/parameters.py b/netbox/netbox/config/parameters.py\n--- a/netbox/netbox/config/parameters.py\n+++ b/netbox/netbox/config/parameters.py\n@@ -20,19 +20,28 @@\n name='BANNER_LOGIN',\n label='Login banner',\n default='',\n- description=\"Additional content to display on the login page\"\n+ description=\"Additional content to display on the login page\",\n+ field_kwargs={\n+ 'widget': forms.Textarea(),\n+ },\n ),\n ConfigParam(\n name='BANNER_TOP',\n label='Top banner',\n default='',\n- description=\"Additional content to display at the top of every page\"\n+ description=\"Additional content to display at the top of every page\",\n+ field_kwargs={\n+ 'widget': forms.Textarea(),\n+ },\n ),\n ConfigParam(\n name='BANNER_BOTTOM',\n label='Bottom banner',\n default='',\n- description=\"Additional content to display at the bottom of every page\"\n+ description=\"Additional content to display at the bottom of every page\",\n+ field_kwargs={\n+ 'widget': forms.Textarea(),\n+ },\n ),\n \n # IPAM\n", "issue": "Banner edit fields should be larger\n### NetBox version\n\nv3.1.6\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nCurrently the Login, Top and Bottom banner edit fields in Config revisions are small input fields: they only show a few words at the time. It makes editing the banners somewhat hard.\r\n\r\nI propose that the input fields are shown larger, preferably as multi-line input fields, to make it easier to edit the HTML-containing banners.\n\n### Use case\n\nCommunicating upcoming maintenances or specific login instructions is easier when the banner input fields are easier to edit.\n\n### Database changes\n\nNone\n\n### External dependencies\n\nNone\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\n\nclass ConfigParam:\n\n def __init__(self, name, label, default, description='', field=None, field_kwargs=None):\n self.name = name\n self.label = label\n self.default = default\n self.field = field or forms.CharField\n self.description = description\n self.field_kwargs = field_kwargs or {}\n\n\nPARAMS = (\n\n # Banners\n ConfigParam(\n name='BANNER_LOGIN',\n label='Login banner',\n default='',\n description=\"Additional content to display on the login page\"\n ),\n ConfigParam(\n name='BANNER_TOP',\n label='Top banner',\n default='',\n description=\"Additional content to display at the top of every page\"\n ),\n ConfigParam(\n name='BANNER_BOTTOM',\n label='Bottom banner',\n default='',\n description=\"Additional content to display at the bottom of every page\"\n ),\n\n # IPAM\n ConfigParam(\n name='ENFORCE_GLOBAL_UNIQUE',\n label='Globally unique IP space',\n default=False,\n description=\"Enforce unique IP addressing within the global table\",\n field=forms.BooleanField\n ),\n ConfigParam(\n name='PREFER_IPV4',\n label='Prefer IPv4',\n default=False,\n description=\"Prefer IPv4 addresses over IPv6\",\n field=forms.BooleanField\n ),\n\n # Racks\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_HEIGHT',\n label='Rack unit height',\n default=22,\n description=\"Default unit height for rendered rack elevations\",\n field=forms.IntegerField\n ),\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_WIDTH',\n label='Rack unit width',\n default=220,\n description=\"Default unit width for rendered rack elevations\",\n field=forms.IntegerField\n ),\n\n # Security\n ConfigParam(\n name='ALLOWED_URL_SCHEMES',\n label='Allowed URL schemes',\n 
default=(\n 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc',\n 'xmpp',\n ),\n description=\"Permitted schemes for URLs in user-provided content\",\n field=SimpleArrayField,\n field_kwargs={'base_field': forms.CharField()}\n ),\n\n # Pagination\n ConfigParam(\n name='PAGINATE_COUNT',\n label='Default page size',\n default=50,\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAX_PAGE_SIZE',\n label='Maximum page size',\n default=1000,\n field=forms.IntegerField\n ),\n\n # Validation\n ConfigParam(\n name='CUSTOM_VALIDATORS',\n label='Custom validators',\n default={},\n description=\"Custom validation rules (JSON)\",\n field=forms.JSONField\n ),\n\n # NAPALM\n ConfigParam(\n name='NAPALM_USERNAME',\n label='NAPALM username',\n default='',\n description=\"Username to use when connecting to devices via NAPALM\"\n ),\n ConfigParam(\n name='NAPALM_PASSWORD',\n label='NAPALM password',\n default='',\n description=\"Password to use when connecting to devices via NAPALM\"\n ),\n ConfigParam(\n name='NAPALM_TIMEOUT',\n label='NAPALM timeout',\n default=30,\n description=\"NAPALM connection timeout (in seconds)\",\n field=forms.IntegerField\n ),\n ConfigParam(\n name='NAPALM_ARGS',\n label='NAPALM arguments',\n default={},\n description=\"Additional arguments to pass when invoking a NAPALM driver (as JSON data)\",\n field=forms.JSONField\n ),\n\n # Miscellaneous\n ConfigParam(\n name='MAINTENANCE_MODE',\n label='Maintenance mode',\n default=False,\n description=\"Enable maintenance mode\",\n field=forms.BooleanField\n ),\n ConfigParam(\n name='GRAPHQL_ENABLED',\n label='GraphQL enabled',\n default=True,\n description=\"Enable the GraphQL API\",\n field=forms.BooleanField\n ),\n ConfigParam(\n name='CHANGELOG_RETENTION',\n label='Changelog retention',\n default=90,\n description=\"Days to retain changelog history (set to zero for unlimited)\",\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAPS_URL',\n label='Maps URL',\n default='https://maps.google.com/?q=',\n description=\"Base URL for mapping geographic locations\"\n ),\n\n)\n", "path": "netbox/netbox/config/parameters.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.postgres.forms import SimpleArrayField\n\n\nclass ConfigParam:\n\n def __init__(self, name, label, default, description='', field=None, field_kwargs=None):\n self.name = name\n self.label = label\n self.default = default\n self.field = field or forms.CharField\n self.description = description\n self.field_kwargs = field_kwargs or {}\n\n\nPARAMS = (\n\n # Banners\n ConfigParam(\n name='BANNER_LOGIN',\n label='Login banner',\n default='',\n description=\"Additional content to display on the login page\",\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n ConfigParam(\n name='BANNER_TOP',\n label='Top banner',\n default='',\n description=\"Additional content to display at the top of every page\",\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n ConfigParam(\n name='BANNER_BOTTOM',\n label='Bottom banner',\n default='',\n description=\"Additional content to display at the bottom of every page\",\n field_kwargs={\n 'widget': forms.Textarea(),\n },\n ),\n\n # IPAM\n ConfigParam(\n name='ENFORCE_GLOBAL_UNIQUE',\n label='Globally unique IP space',\n default=False,\n description=\"Enforce unique IP addressing within the global table\",\n field=forms.BooleanField\n ),\n ConfigParam(\n name='PREFER_IPV4',\n label='Prefer IPv4',\n default=False,\n description=\"Prefer IPv4 
addresses over IPv6\",\n field=forms.BooleanField\n ),\n\n # Racks\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_HEIGHT',\n label='Rack unit height',\n default=22,\n description=\"Default unit height for rendered rack elevations\",\n field=forms.IntegerField\n ),\n ConfigParam(\n name='RACK_ELEVATION_DEFAULT_UNIT_WIDTH',\n label='Rack unit width',\n default=220,\n description=\"Default unit width for rendered rack elevations\",\n field=forms.IntegerField\n ),\n\n # Security\n ConfigParam(\n name='ALLOWED_URL_SCHEMES',\n label='Allowed URL schemes',\n default=(\n 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc',\n 'xmpp',\n ),\n description=\"Permitted schemes for URLs in user-provided content\",\n field=SimpleArrayField,\n field_kwargs={'base_field': forms.CharField()}\n ),\n\n # Pagination\n ConfigParam(\n name='PAGINATE_COUNT',\n label='Default page size',\n default=50,\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAX_PAGE_SIZE',\n label='Maximum page size',\n default=1000,\n field=forms.IntegerField\n ),\n\n # Validation\n ConfigParam(\n name='CUSTOM_VALIDATORS',\n label='Custom validators',\n default={},\n description=\"Custom validation rules (JSON)\",\n field=forms.JSONField\n ),\n\n # NAPALM\n ConfigParam(\n name='NAPALM_USERNAME',\n label='NAPALM username',\n default='',\n description=\"Username to use when connecting to devices via NAPALM\"\n ),\n ConfigParam(\n name='NAPALM_PASSWORD',\n label='NAPALM password',\n default='',\n description=\"Password to use when connecting to devices via NAPALM\"\n ),\n ConfigParam(\n name='NAPALM_TIMEOUT',\n label='NAPALM timeout',\n default=30,\n description=\"NAPALM connection timeout (in seconds)\",\n field=forms.IntegerField\n ),\n ConfigParam(\n name='NAPALM_ARGS',\n label='NAPALM arguments',\n default={},\n description=\"Additional arguments to pass when invoking a NAPALM driver (as JSON data)\",\n field=forms.JSONField\n ),\n\n # Miscellaneous\n ConfigParam(\n name='MAINTENANCE_MODE',\n label='Maintenance mode',\n default=False,\n description=\"Enable maintenance mode\",\n field=forms.BooleanField\n ),\n ConfigParam(\n name='GRAPHQL_ENABLED',\n label='GraphQL enabled',\n default=True,\n description=\"Enable the GraphQL API\",\n field=forms.BooleanField\n ),\n ConfigParam(\n name='CHANGELOG_RETENTION',\n label='Changelog retention',\n default=90,\n description=\"Days to retain changelog history (set to zero for unlimited)\",\n field=forms.IntegerField\n ),\n ConfigParam(\n name='MAPS_URL',\n label='Maps URL',\n default='https://maps.google.com/?q=',\n description=\"Base URL for mapping geographic locations\"\n ),\n\n)\n", "path": "netbox/netbox/config/parameters.py"}]}
| 1,778 | 263 |
gh_patches_debug_1073
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-98
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
EC2 resource detector hangs for a long time outside of an EC2 instance
**Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.
The environment I initially saw this in was a container running in Docker compose on an AWS EC2 instance but I've been able to reproduce it on my laptop as well. I think it will show up in anything not directly running in AWS.
**Steps to reproduce**
Describe exactly how to reproduce the error. Include a code sample if applicable.
The following code reproduced the issue on my laptop:
```python
from opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector
from opentelemetry.sdk.resources import get_aggregated_resources
resource = get_aggregated_resources(
detectors=[AwsEc2ResourceDetector()]
)
```
**What is the expected behavior?**
It should complete quickly (this is the behavior I see running on an EC2 instance).
**What is the actual behavior?**
What did you see instead?
On my laptop, it will hang ~indefinitely.
Note: one solution is just to remove the resource detector but we'd like to be able to include it and just have it fail silently, which is the behavior we've seen in other resource detectors.
**Additional context**
I think the problem is here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/80969a06da77d1e616124de178d12a1ebe3ffe7f/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py#L37
It looks like the request is using a 1000 _second_ timeout which I suspect is intended to be a 1000 _millisecond_ timeout. At least with the server program I've been working on that will block the startup of the program until the request completes.
You can verify by running:
```
curl http://169.254.169.254/latest/api/token
```
Which is one of the requests that the resource detector makes -- it should hang indefinitely as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 __version__ = "0.15.dev0"
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py
--- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py
+++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-__version__ = "0.15.dev0"
+__version__ = "0.15b0"
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py\n--- a/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py\n+++ b/instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py\n@@ -12,4 +12,4 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-__version__ = \"0.15.dev0\"\n+__version__ = \"0.15b0\"\n", "issue": "EC2 resource detector hangs for a long time outside of an EC2 instance\n**Describe your environment** Describe any aspect of your environment relevant to the problem, including your Python version, [platform](https://docs.python.org/3/library/platform.html), version numbers of installed dependencies, information about your cloud hosting provider, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main.\r\n\r\nThe environment I initially saw this in was a container running in Docker compose on an AWS EC2 instance but I've been able to reproduce it on my laptop as well. I think it will show up in anything not directly running in AWS.\r\n\r\n**Steps to reproduce**\r\nDescribe exactly how to reproduce the error. Include a code sample if applicable.\r\n\r\nThe following code reproduced the issue on my laptop:\r\n\r\n```python\r\nfrom opentelemetry.sdk.extension.aws.resource.ec2 import AwsEc2ResourceDetector\r\nfrom opentelemetry.sdk.resources import get_aggregated_resources\r\n\r\nresource = get_aggregated_resources(\r\n detectors=[AwsEc2ResourceDetector()]\r\n)\r\n```\r\n\r\n**What is the expected behavior?**\r\n\r\nIt should complete quickly (this is the behavior I see running on an EC2 instance).\r\n\r\n**What is the actual behavior?**\r\n\r\nWhat did you see instead?\r\n\r\nOn my laptop, it will hand ~indefinitely.\r\n\r\nNote: one solution is just to remove the resource detector but we'd like to be able to include it and just have it fail silently, which is the behavior we've seen in other resource detectors.\r\n\r\n**Additional context**\r\n\r\nI think the problem is here: https://github.com/open-telemetry/opentelemetry-python-contrib/blob/80969a06da77d1e616124de178d12a1ebe3ffe7f/sdk-extension/opentelemetry-sdk-extension-aws/src/opentelemetry/sdk/extension/aws/resource/ec2.py#L37\r\n\r\nIt looks like the request is using a 1000 _second_ timeout which I suspect is intended to be a 1000 _millisecond_ timeout. 
At least with the server program I've been working on that will block the startup of the program until the request completes.\r\n\r\nYou can verify by running:\r\n\r\n```\r\ncurl http://169.254.169.254/latest/api/token\r\n```\r\n\r\nWhich is one of the requests that the resource detector makes -- it should hang indefinitely as well.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version__ = \"0.15.dev0\"\n", "path": "instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version__ = \"0.15b0\"\n", "path": "instrumentation/opentelemetry-instrumentation-botocore/src/opentelemetry/instrumentation/botocore/version.py"}]}
| 962 | 169 |
gh_patches_debug_4517
|
rasdani/github-patches
|
git_diff
|
TileDB-Inc__TileDB-Py-309
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
array.query no longer works for string attributes
I have a case that looks basically like this:
```
array = tiledb.DenseArray(uri, mode="r")
q = array.query(attrs=["attrname"])
data = q[:]
```
This works when the type of the attribute is a float or an int.
But if the type of the attribute is a string, this fails with an exception:
```
File "tiledb/libtiledb.pyx", line 3874, in tiledb.libtiledb.Query.__getitem__
File "tiledb/libtiledb.pyx", line 4107, in tiledb.libtiledb.DenseArrayImpl.subarray
File "tiledb/libtiledb.pyx", line 4156, in tiledb.libtiledb.DenseArrayImpl._read_dense_subarray
File "tiledb/libtiledb.pyx", line 3760, in tiledb.libtiledb.Array._unpack_varlen_query
File "tiledb/libtiledb.pyx", line 3760, in tiledb.libtiledb.Array._unpack_varlen_query
File "tiledb/libtiledb.pyx", line 3805, in tiledb.libtiledb.Array._unpack_varlen_query
SystemError: Negative size passed to PyUnicode_FromStringAndSize
```
This worked in v1.7.6, and I ran into this problem when porting to 2.0.0.
If there is a better way to pull out just one attribute from the array, then
I'm open to suggestions.
[example.zip](https://github.com/TileDB-Inc/TileDB-Py/files/4589870/example.zip)
Attached is a script, output from 1.7.6, and output from 2.0.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tiledb/multirange_indexing.py`
Content:
```
1 import tiledb
2 from tiledb import Array, ArraySchema, TileDBError
3 import os, numpy as np
4 import sys, weakref
5 from collections import OrderedDict
6
7 def mr_dense_result_shape(ranges, base_shape = None):
8 # assumptions: len(ranges) matches number of dims
9 if base_shape is not None:
10 assert len(ranges) == len(base_shape), "internal error: mismatched shapes"
11
12 new_shape = list()
13 for i,rr in enumerate(ranges):
14 if rr != ():
15 m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))
16 new_shape.append(np.sum(m))
17 else:
18 if base_shape is None:
19 raise ValueError("Missing required base_shape for whole-dimension slices")
20 # empty range covers dimension
21 new_shape.append(base_shape[i])
22
23 return tuple(new_shape)
24
25 def mr_dense_result_numel(ranges):
26 return np.prod(mr_dense_result_shape(ranges))
27
28 def sel_to_subranges(dim_sel):
29 subranges = list()
30 for range in dim_sel:
31 if np.isscalar(range):
32 subranges.append( (range, range) )
33 elif isinstance(range, slice):
34 if range.step is not None:
35 raise ValueError("Stepped slice ranges are not supported")
36 elif range.start is None and range.stop is None:
37 # ':' full slice
38 pass
39 else:
40 subranges.append( (range.start, range.stop) )
41 elif isinstance(range, tuple):
42 subranges.extend((range,))
43 elif isinstance(range, list):
44 for el in range:
45 subranges.append( (el, el) )
46 else:
47 raise TypeError("Unsupported selection ")
48
49 return tuple(subranges)
50
51
52 class MultiRangeIndexer(object):
53 """
54 Implements multi-range / outer / orthogonal indexing.
55
56 """
57
58 def __init__(self, array, query = None):
59 if not issubclass(type(array), tiledb.Array):
60 raise ValueError("Internal error: MultiRangeIndexer expected tiledb.Array")
61 self.array_ref = weakref.ref(array)
62 self.schema = array.schema
63 self.query = query
64
65 @property
66 def array(self):
67 assert self.array_ref() is not None, \
68 "Internal error: invariant violation (indexing call w/ dead array_ref)"
69 return self.array_ref()
70
71 @classmethod
72 def __test_init__(cls, array):
73 """
74 Internal helper method for testing getitem range calculation.
75 :param array:
76 :return:
77 """
78 m = cls.__new__(cls)
79 m.array_ref = weakref.ref(array)
80 m.schema = array.schema
81 m.query = None
82 return m
83
84 def getitem_ranges(self, idx):
85 dom = self.schema.domain
86 ndim = dom.ndim
87
88 if isinstance(idx, tuple):
89 idx = list(idx)
90 else:
91 idx = [idx]
92
93 ranges = list()
94 for i,sel in enumerate(idx):
95 if not isinstance(sel, list):
96 sel = [sel]
97 subranges = sel_to_subranges(sel)
98 ranges.append(subranges)
99
100 # extend the list to ndim
101 if len(ranges) < ndim:
102 ranges.extend([ tuple() for _ in range(ndim-len(ranges))])
103
104 rval = tuple(ranges)
105 return rval
106
107 def __getitem__(self, idx):
108 # implements multi-range / outer / orthogonal indexing
109 ranges = self.getitem_ranges(idx)
110
111 schema = self.schema
112 dom = self.schema.domain
113 attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))
114
115 coords = None
116 if self.query is not None:
117 # if we are called via Query object, then we need to respect Query semantics
118 attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all
119 coords = self.query.coords
120
121 from tiledb.core import PyQuery
122 q = PyQuery(self.array._ctx_(), self.array, attr_names, coords)
123
124 q.set_ranges(ranges)
125 q.submit()
126
127 result_dict = OrderedDict(q.results())
128
129 for name, item in result_dict.items():
130 if len(item[1]) > 0:
131 arr = self.array._unpack_varlen_query(item, name)
132 else:
133 arr = item[0]
134 arr.dtype = schema.attr_or_dim_dtype(name)
135 result_dict[name] = arr
136
137 if self.schema.sparse:
138 return result_dict
139 else:
140 result_shape = mr_dense_result_shape(ranges, self.schema.shape)
141 for arr in result_dict.values():
142 # TODO check/test layout
143 arr.shape = result_shape
144 return result_dict
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py
--- a/tiledb/multirange_indexing.py
+++ b/tiledb/multirange_indexing.py
@@ -128,7 +128,7 @@
for name, item in result_dict.items():
if len(item[1]) > 0:
- arr = self.array._unpack_varlen_query(item, name)
+ arr = q.unpack_buffer(name, item[0], item[1])
else:
arr = item[0]
arr.dtype = schema.attr_or_dim_dtype(name)
|
{"golden_diff": "diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py\n--- a/tiledb/multirange_indexing.py\n+++ b/tiledb/multirange_indexing.py\n@@ -128,7 +128,7 @@\n \n for name, item in result_dict.items():\n if len(item[1]) > 0:\n- arr = self.array._unpack_varlen_query(item, name)\n+ arr = q.unpack_buffer(name, item[0], item[1])\n else:\n arr = item[0]\n arr.dtype = schema.attr_or_dim_dtype(name)\n", "issue": "array.query no longer works for string attributes\nI have a case that looks basically like this:\r\n\r\n```\r\narray = tiledb.DenseArray(uri, mode=\"r\")\r\nq = array.query(attrs=[\"attrname\"])\r\ndata = q[:]\r\n```\r\nThis works when the type of the attribute is a float or an int.\r\nBut if the type of the attribute is a string, this fails with an exception:\r\n\r\n```\r\n File \"tiledb/libtiledb.pyx\", line 3874, in tiledb.libtiledb.Query.__getitem__\r\n File \"tiledb/libtiledb.pyx\", line 4107, in tiledb.libtiledb.DenseArrayImpl.subarray\r\n File \"tiledb/libtiledb.pyx\", line 4156, in tiledb.libtiledb.DenseArrayImpl._read_dense_subarray\r\n File \"tiledb/libtiledb.pyx\", line 3760, in tiledb.libtiledb.Array._unpack_varlen_query\r\n File \"tiledb/libtiledb.pyx\", line 3760, in tiledb.libtiledb.Array._unpack_varlen_query\r\n File \"tiledb/libtiledb.pyx\", line 3805, in tiledb.libtiledb.Array._unpack_varlen_query\r\nSystemError: Negative size passed to PyUnicode_FromStringAndSize\r\n```\r\n\r\nThis worked in v1.7.6, and I ran into this problem when porting to 2.0.0.\r\nIf there is a better way to pull out just one attribute from the array, then\r\nI'm open to suggestions.\r\n\r\n[example.zip](https://github.com/TileDB-Inc/TileDB-Py/files/4589870/example.zip)\r\n\r\nAttached is a script, output from 1.7.6, and output from 2.0.0\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema, TileDBError\nimport os, numpy as np\nimport sys, weakref\nfrom collections import OrderedDict\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, tuple):\n subranges.extend((range,))\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected 
tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n\n if isinstance(idx, tuple):\n idx = list(idx)\n else:\n idx = [idx]\n\n ranges = list()\n for i,sel in enumerate(idx):\n if not isinstance(sel, list):\n sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n schema = self.schema\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n from tiledb.core import PyQuery\n q = PyQuery(self.array._ctx_(), self.array, attr_names, coords)\n\n q.set_ranges(ranges)\n q.submit()\n\n result_dict = OrderedDict(q.results())\n\n for name, item in result_dict.items():\n if len(item[1]) > 0:\n arr = self.array._unpack_varlen_query(item, name)\n else:\n arr = item[0]\n arr.dtype = schema.attr_or_dim_dtype(name)\n result_dict[name] = arr\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict\n", "path": "tiledb/multirange_indexing.py"}], "after_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema, TileDBError\nimport os, numpy as np\nimport sys, weakref\nfrom collections import OrderedDict\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, tuple):\n subranges.extend((range,))\n elif isinstance(range, list):\n for 
el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n\n if isinstance(idx, tuple):\n idx = list(idx)\n else:\n idx = [idx]\n\n ranges = list()\n for i,sel in enumerate(idx):\n if not isinstance(sel, list):\n sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n schema = self.schema\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n from tiledb.core import PyQuery\n q = PyQuery(self.array._ctx_(), self.array, attr_names, coords)\n\n q.set_ranges(ranges)\n q.submit()\n\n result_dict = OrderedDict(q.results())\n\n for name, item in result_dict.items():\n if len(item[1]) > 0:\n arr = q.unpack_buffer(name, item[0], item[1])\n else:\n arr = item[0]\n arr.dtype = schema.attr_or_dim_dtype(name)\n result_dict[name] = arr\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict\n", "path": "tiledb/multirange_indexing.py"}]}
| 2,001 | 141 |
gh_patches_debug_36022
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-6357
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
yettel_bg is broken
The spider does not return anything. It seems like they've moved the page for getting their store data. I wasn't able to find the new address or how the data reaches their store locator page.
Store locator: https://www.yettel.bg/faq/digital-customer-service/store-locator
Spider: https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/yettel_bg.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/yettel_bg.py`
Content:
```
1 from scrapy import Selector, Spider
2
3 from locations.items import Feature
4
5
6 class YettelBGSpider(Spider):
7 name = "yettel_bg"
8 item_attributes = {
9 "brand": "Yettel",
10 "brand_wikidata": "Q14915070",
11 "country": "BG",
12 }
13 start_urls = ["https://www.yettel.bg/store-locator/json"]
14
15 def parse(self, response):
16 for store in response.json()["features"]:
17 item = Feature()
18
19 item["lon"], item["lat"] = store["geometry"]["coordinates"]
20
21 item["ref"] = store["properties"]["title"]
22
23 address_block = Selector(text=store["properties"]["gsl_addressfield"])
24
25 item["street_address"] = address_block.xpath('//div[@class="thoroughfare"]/text()').get()
26 item["postcode"] = address_block.xpath('//span[@class="postal-code"]/text()').get()
27 item["city"] = address_block.xpath('//span[@class="locality"]/text()').get()
28
29 yield item
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/yettel_bg.py b/locations/spiders/yettel_bg.py
--- a/locations/spiders/yettel_bg.py
+++ b/locations/spiders/yettel_bg.py
@@ -1,5 +1,9 @@
-from scrapy import Selector, Spider
+import io
+from openpyxl import load_workbook
+from scrapy import Spider
+
+from locations.hours import OpeningHours, day_range
from locations.items import Feature
@@ -10,20 +14,50 @@
"brand_wikidata": "Q14915070",
"country": "BG",
}
- start_urls = ["https://www.yettel.bg/store-locator/json"]
+ start_urls = ["https://www.yettel.bg/faq/digital-customer-service/store-locator"]
+ no_refs = True
+ custom_settings = {"ROBOTSTXT_OBEY": False}
def parse(self, response):
- for store in response.json()["features"]:
- item = Feature()
-
- item["lon"], item["lat"] = store["geometry"]["coordinates"]
-
- item["ref"] = store["properties"]["title"]
-
- address_block = Selector(text=store["properties"]["gsl_addressfield"])
-
- item["street_address"] = address_block.xpath('//div[@class="thoroughfare"]/text()').get()
- item["postcode"] = address_block.xpath('//span[@class="postal-code"]/text()').get()
- item["city"] = address_block.xpath('//span[@class="locality"]/text()').get()
-
- yield item
+ yield response.follow(
+ url=response.xpath('//input[@id="hdnExcelFile"]/@value').get(), callback=self.parse_spreadsheet
+ )
+
+ def parse_spreadsheet(self, response):
+ if "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" in response.headers.get(
+ "Content-Type"
+ ).decode("utf-8"):
+ excel_file = response.body
+
+ excel_data = io.BytesIO(excel_file)
+ workbook = load_workbook(excel_data, read_only=True)
+
+ sheet = workbook.active
+
+ data = []
+ for row in sheet.iter_rows(values_only=True):
+ data.append(row)
+
+ headers = data[0]
+ json_data = []
+ for row in data[1:]:
+ json_data.append({headers[i]: cell for i, cell in enumerate(row)})
+
+ for store in json_data:
+ item = Feature()
+
+ item["lat"] = store["latitude"]
+ item["lon"] = store["longitude"]
+
+ item["street_address"] = store["address_loc"]
+ item["city"] = store["city_loc"]
+
+ item["opening_hours"] = OpeningHours()
+ item["opening_hours"].add_days_range(
+ day_range("Mo", "Fr"), *store["working_time_weekdays"].replace(" ", "").split("-")
+ )
+ if store["is_closed_on_saturday"] == "No":
+ item["opening_hours"].add_range("Sa", *store["working_time_saturday"].replace(" ", "").split("-"))
+ if store["is_closed_on_sunday"] == "No":
+ item["opening_hours"].add_range("Su", *store["working_time_sunday"].replace(" ", "").split("-"))
+ yield item
|
{"golden_diff": "diff --git a/locations/spiders/yettel_bg.py b/locations/spiders/yettel_bg.py\n--- a/locations/spiders/yettel_bg.py\n+++ b/locations/spiders/yettel_bg.py\n@@ -1,5 +1,9 @@\n-from scrapy import Selector, Spider\n+import io\n \n+from openpyxl import load_workbook\n+from scrapy import Spider\n+\n+from locations.hours import OpeningHours, day_range\n from locations.items import Feature\n \n \n@@ -10,20 +14,50 @@\n \"brand_wikidata\": \"Q14915070\",\n \"country\": \"BG\",\n }\n- start_urls = [\"https://www.yettel.bg/store-locator/json\"]\n+ start_urls = [\"https://www.yettel.bg/faq/digital-customer-service/store-locator\"]\n+ no_refs = True\n+ custom_settings = {\"ROBOTSTXT_OBEY\": False}\n \n def parse(self, response):\n- for store in response.json()[\"features\"]:\n- item = Feature()\n-\n- item[\"lon\"], item[\"lat\"] = store[\"geometry\"][\"coordinates\"]\n-\n- item[\"ref\"] = store[\"properties\"][\"title\"]\n-\n- address_block = Selector(text=store[\"properties\"][\"gsl_addressfield\"])\n-\n- item[\"street_address\"] = address_block.xpath('//div[@class=\"thoroughfare\"]/text()').get()\n- item[\"postcode\"] = address_block.xpath('//span[@class=\"postal-code\"]/text()').get()\n- item[\"city\"] = address_block.xpath('//span[@class=\"locality\"]/text()').get()\n-\n- yield item\n+ yield response.follow(\n+ url=response.xpath('//input[@id=\"hdnExcelFile\"]/@value').get(), callback=self.parse_spreadsheet\n+ )\n+\n+ def parse_spreadsheet(self, response):\n+ if \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" in response.headers.get(\n+ \"Content-Type\"\n+ ).decode(\"utf-8\"):\n+ excel_file = response.body\n+\n+ excel_data = io.BytesIO(excel_file)\n+ workbook = load_workbook(excel_data, read_only=True)\n+\n+ sheet = workbook.active\n+\n+ data = []\n+ for row in sheet.iter_rows(values_only=True):\n+ data.append(row)\n+\n+ headers = data[0]\n+ json_data = []\n+ for row in data[1:]:\n+ json_data.append({headers[i]: cell for i, cell in enumerate(row)})\n+\n+ for store in json_data:\n+ item = Feature()\n+\n+ item[\"lat\"] = store[\"latitude\"]\n+ item[\"lon\"] = store[\"longitude\"]\n+\n+ item[\"street_address\"] = store[\"address_loc\"]\n+ item[\"city\"] = store[\"city_loc\"]\n+\n+ item[\"opening_hours\"] = OpeningHours()\n+ item[\"opening_hours\"].add_days_range(\n+ day_range(\"Mo\", \"Fr\"), *store[\"working_time_weekdays\"].replace(\" \", \"\").split(\"-\")\n+ )\n+ if store[\"is_closed_on_saturday\"] == \"No\":\n+ item[\"opening_hours\"].add_range(\"Sa\", *store[\"working_time_saturday\"].replace(\" \", \"\").split(\"-\"))\n+ if store[\"is_closed_on_sunday\"] == \"No\":\n+ item[\"opening_hours\"].add_range(\"Su\", *store[\"working_time_sunday\"].replace(\" \", \"\").split(\"-\"))\n+ yield item\n", "issue": "yettel_bg is broken\nThe spider does not return anything. It seems like they've moved the page for getting their store data. 
I wasn't able to find the new address or how the data reaches their store locator page.\r\n\r\nStore locator: https://www.yettel.bg/faq/digital-customer-service/store-locator\r\n\r\nSpider: https://github.com/alltheplaces/alltheplaces/blob/master/locations/spiders/yettel_bg.py\n", "before_files": [{"content": "from scrapy import Selector, Spider\n\nfrom locations.items import Feature\n\n\nclass YettelBGSpider(Spider):\n name = \"yettel_bg\"\n item_attributes = {\n \"brand\": \"Yettel\",\n \"brand_wikidata\": \"Q14915070\",\n \"country\": \"BG\",\n }\n start_urls = [\"https://www.yettel.bg/store-locator/json\"]\n\n def parse(self, response):\n for store in response.json()[\"features\"]:\n item = Feature()\n\n item[\"lon\"], item[\"lat\"] = store[\"geometry\"][\"coordinates\"]\n\n item[\"ref\"] = store[\"properties\"][\"title\"]\n\n address_block = Selector(text=store[\"properties\"][\"gsl_addressfield\"])\n\n item[\"street_address\"] = address_block.xpath('//div[@class=\"thoroughfare\"]/text()').get()\n item[\"postcode\"] = address_block.xpath('//span[@class=\"postal-code\"]/text()').get()\n item[\"city\"] = address_block.xpath('//span[@class=\"locality\"]/text()').get()\n\n yield item\n", "path": "locations/spiders/yettel_bg.py"}], "after_files": [{"content": "import io\n\nfrom openpyxl import load_workbook\nfrom scrapy import Spider\n\nfrom locations.hours import OpeningHours, day_range\nfrom locations.items import Feature\n\n\nclass YettelBGSpider(Spider):\n name = \"yettel_bg\"\n item_attributes = {\n \"brand\": \"Yettel\",\n \"brand_wikidata\": \"Q14915070\",\n \"country\": \"BG\",\n }\n start_urls = [\"https://www.yettel.bg/faq/digital-customer-service/store-locator\"]\n no_refs = True\n custom_settings = {\"ROBOTSTXT_OBEY\": False}\n\n def parse(self, response):\n yield response.follow(\n url=response.xpath('//input[@id=\"hdnExcelFile\"]/@value').get(), callback=self.parse_spreadsheet\n )\n\n def parse_spreadsheet(self, response):\n if \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\" in response.headers.get(\n \"Content-Type\"\n ).decode(\"utf-8\"):\n excel_file = response.body\n\n excel_data = io.BytesIO(excel_file)\n workbook = load_workbook(excel_data, read_only=True)\n\n sheet = workbook.active\n\n data = []\n for row in sheet.iter_rows(values_only=True):\n data.append(row)\n\n headers = data[0]\n json_data = []\n for row in data[1:]:\n json_data.append({headers[i]: cell for i, cell in enumerate(row)})\n\n for store in json_data:\n item = Feature()\n\n item[\"lat\"] = store[\"latitude\"]\n item[\"lon\"] = store[\"longitude\"]\n\n item[\"street_address\"] = store[\"address_loc\"]\n item[\"city\"] = store[\"city_loc\"]\n\n item[\"opening_hours\"] = OpeningHours()\n item[\"opening_hours\"].add_days_range(\n day_range(\"Mo\", \"Fr\"), *store[\"working_time_weekdays\"].replace(\" \", \"\").split(\"-\")\n )\n if store[\"is_closed_on_saturday\"] == \"No\":\n item[\"opening_hours\"].add_range(\"Sa\", *store[\"working_time_saturday\"].replace(\" \", \"\").split(\"-\"))\n if store[\"is_closed_on_sunday\"] == \"No\":\n item[\"opening_hours\"].add_range(\"Su\", *store[\"working_time_sunday\"].replace(\" \", \"\").split(\"-\"))\n yield item\n", "path": "locations/spiders/yettel_bg.py"}]}
| 633 | 756 |
gh_patches_debug_1875
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6598
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Possibly to prefill Reason textarea on $delete
If you're going to have some human-helped automation that deletes the 20% or so non-en-US documents that aren't actually translated, it would be nice if you don't have to type in the same reason every time.
<img width="989" alt="Screen Shot 2020-02-26 at 11 56 40 AM" src="https://user-images.githubusercontent.com/26739/75367987-1be85500-588f-11ea-8ba1-f49e0db69cc7.png">
Would be neat if you could control it with something like `?reason=Sample%20reason`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/wiki/views/delete.py`
Content:
```
1 from django.db import IntegrityError
2 from django.shortcuts import get_object_or_404, redirect, render
3 from django.utils.translation import ugettext
4 from django.views.decorators.cache import never_cache
5
6 from kuma.core.decorators import (
7 block_user_agents,
8 ensure_wiki_domain,
9 login_required,
10 permission_required,
11 )
12 from kuma.core.urlresolvers import reverse
13
14 from ..decorators import check_readonly, process_document_path
15 from ..forms import DocumentDeletionForm
16 from ..models import Document, DocumentDeletionLog, Revision
17 from ..utils import locale_and_slug_from_path
18
19
20 @ensure_wiki_domain
21 @never_cache
22 @block_user_agents
23 @login_required
24 @check_readonly
25 def revert_document(request, document_path, revision_id):
26 """
27 Revert document to a specific revision.
28 """
29 document_locale, document_slug, needs_redirect = locale_and_slug_from_path(
30 document_path, request
31 )
32
33 revision = get_object_or_404(
34 Revision.objects.select_related("document"),
35 pk=revision_id,
36 document__slug=document_slug,
37 )
38
39 if request.method == "GET":
40 # Render the confirmation page
41 return render(
42 request,
43 "wiki/confirm_revision_revert.html",
44 {"revision": revision, "document": revision.document},
45 )
46 else:
47 comment = request.POST.get("comment")
48 document = revision.document
49 old_revision_pk = revision.pk
50 try:
51 new_revision = document.revert(revision, request.user, comment)
52 # schedule a rendering of the new revision if it really was saved
53 if new_revision.pk != old_revision_pk:
54 document.schedule_rendering("max-age=0")
55 except IntegrityError:
56 return render(
57 request,
58 "wiki/confirm_revision_revert.html",
59 {
60 "revision": revision,
61 "document": revision.document,
62 "error": ugettext(
63 "Document already exists. Note: You cannot "
64 "revert a document that has been moved until you "
65 "delete its redirect."
66 ),
67 },
68 )
69 return redirect("wiki.document_revisions", revision.document.slug)
70
71
72 @ensure_wiki_domain
73 @never_cache
74 @block_user_agents
75 @login_required
76 @permission_required("wiki.delete_document")
77 @check_readonly
78 @process_document_path
79 def delete_document(request, document_slug, document_locale):
80 """
81 Delete a Document.
82 """
83 document = get_object_or_404(Document, locale=document_locale, slug=document_slug)
84
85 # HACK: https://bugzil.la/972545 - Don't delete pages that have children
86 # TODO: https://bugzil.la/972541 - Deleting a page that has subpages
87 prevent = document.children.exists()
88
89 first_revision = document.revisions.all()[0]
90
91 if request.method == "POST":
92 form = DocumentDeletionForm(data=request.POST)
93 if form.is_valid():
94 DocumentDeletionLog.objects.create(
95 locale=document.locale,
96 slug=document.slug,
97 user=request.user,
98 reason=form.cleaned_data["reason"],
99 )
100 document.delete()
101 return redirect(document)
102 else:
103 form = DocumentDeletionForm()
104
105 context = {
106 "document": document,
107 "form": form,
108 "request": request,
109 "revision": first_revision,
110 "prevent": prevent,
111 }
112 return render(request, "wiki/confirm_document_delete.html", context)
113
114
115 @ensure_wiki_domain
116 @never_cache
117 @block_user_agents
118 @login_required
119 @permission_required("wiki.restore_document")
120 @check_readonly
121 @process_document_path
122 def restore_document(request, document_slug, document_locale):
123 """
124 Restore a deleted Document.
125 """
126 document = get_object_or_404(
127 Document.deleted_objects.all(), slug=document_slug, locale=document_locale
128 )
129 document.restore()
130 return redirect(document)
131
132
133 @ensure_wiki_domain
134 @never_cache
135 @block_user_agents
136 @login_required
137 @permission_required("wiki.purge_document")
138 @check_readonly
139 @process_document_path
140 def purge_document(request, document_slug, document_locale):
141 """
142 Permanently purge a deleted Document.
143 """
144 document = get_object_or_404(
145 Document.deleted_objects.all(), slug=document_slug, locale=document_locale
146 )
147 deletion_log_entries = DocumentDeletionLog.objects.filter(
148 locale=document_locale, slug=document_slug
149 )
150 if deletion_log_entries.exists():
151 deletion_log = deletion_log_entries.order_by("-pk")[0]
152 else:
153 deletion_log = {}
154
155 if request.method == "POST" and "confirm" in request.POST:
156 document.purge()
157 return redirect(
158 reverse("wiki.document", args=(document_slug,), locale=document_locale)
159 )
160 else:
161 return render(
162 request,
163 "wiki/confirm_purge.html",
164 {"document": document, "deletion_log": deletion_log},
165 )
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kuma/wiki/views/delete.py b/kuma/wiki/views/delete.py
--- a/kuma/wiki/views/delete.py
+++ b/kuma/wiki/views/delete.py
@@ -100,7 +100,8 @@
document.delete()
return redirect(document)
else:
- form = DocumentDeletionForm()
+
+ form = DocumentDeletionForm(initial={"reason": request.GET.get("reason", "")})
context = {
"document": document,
|
{"golden_diff": "diff --git a/kuma/wiki/views/delete.py b/kuma/wiki/views/delete.py\n--- a/kuma/wiki/views/delete.py\n+++ b/kuma/wiki/views/delete.py\n@@ -100,7 +100,8 @@\n document.delete()\n return redirect(document)\n else:\n- form = DocumentDeletionForm()\n+\n+ form = DocumentDeletionForm(initial={\"reason\": request.GET.get(\"reason\", \"\")})\n \n context = {\n \"document\": document,\n", "issue": "Possibly to prefill Reason textarea on $delete\nIf you're going to have some human-helped automation that deletes the 20% or so non-en-US documents that aren't actually translated, it would be nice if you don't have to type in the same reason every time. \r\n<img width=\"989\" alt=\"Screen Shot 2020-02-26 at 11 56 40 AM\" src=\"https://user-images.githubusercontent.com/26739/75367987-1be85500-588f-11ea-8ba1-f49e0db69cc7.png\">\r\n\r\nWould be neat if you could control it with something like `?reason=Sample%20reason`\n", "before_files": [{"content": "from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext\nfrom django.views.decorators.cache import never_cache\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n login_required,\n permission_required,\n)\nfrom kuma.core.urlresolvers import reverse\n\nfrom ..decorators import check_readonly, process_document_path\nfrom ..forms import DocumentDeletionForm\nfrom ..models import Document, DocumentDeletionLog, Revision\nfrom ..utils import locale_and_slug_from_path\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@check_readonly\ndef revert_document(request, document_path, revision_id):\n \"\"\"\n Revert document to a specific revision.\n \"\"\"\n document_locale, document_slug, needs_redirect = locale_and_slug_from_path(\n document_path, request\n )\n\n revision = get_object_or_404(\n Revision.objects.select_related(\"document\"),\n pk=revision_id,\n document__slug=document_slug,\n )\n\n if request.method == \"GET\":\n # Render the confirmation page\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\"revision\": revision, \"document\": revision.document},\n )\n else:\n comment = request.POST.get(\"comment\")\n document = revision.document\n old_revision_pk = revision.pk\n try:\n new_revision = document.revert(revision, request.user, comment)\n # schedule a rendering of the new revision if it really was saved\n if new_revision.pk != old_revision_pk:\n document.schedule_rendering(\"max-age=0\")\n except IntegrityError:\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\n \"revision\": revision,\n \"document\": revision.document,\n \"error\": ugettext(\n \"Document already exists. 
Note: You cannot \"\n \"revert a document that has been moved until you \"\n \"delete its redirect.\"\n ),\n },\n )\n return redirect(\"wiki.document_revisions\", revision.document.slug)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.delete_document\")\n@check_readonly\n@process_document_path\ndef delete_document(request, document_slug, document_locale):\n \"\"\"\n Delete a Document.\n \"\"\"\n document = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n\n # HACK: https://bugzil.la/972545 - Don't delete pages that have children\n # TODO: https://bugzil.la/972541 - Deleting a page that has subpages\n prevent = document.children.exists()\n\n first_revision = document.revisions.all()[0]\n\n if request.method == \"POST\":\n form = DocumentDeletionForm(data=request.POST)\n if form.is_valid():\n DocumentDeletionLog.objects.create(\n locale=document.locale,\n slug=document.slug,\n user=request.user,\n reason=form.cleaned_data[\"reason\"],\n )\n document.delete()\n return redirect(document)\n else:\n form = DocumentDeletionForm()\n\n context = {\n \"document\": document,\n \"form\": form,\n \"request\": request,\n \"revision\": first_revision,\n \"prevent\": prevent,\n }\n return render(request, \"wiki/confirm_document_delete.html\", context)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.restore_document\")\n@check_readonly\n@process_document_path\ndef restore_document(request, document_slug, document_locale):\n \"\"\"\n Restore a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n document.restore()\n return redirect(document)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.purge_document\")\n@check_readonly\n@process_document_path\ndef purge_document(request, document_slug, document_locale):\n \"\"\"\n Permanently purge a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n deletion_log_entries = DocumentDeletionLog.objects.filter(\n locale=document_locale, slug=document_slug\n )\n if deletion_log_entries.exists():\n deletion_log = deletion_log_entries.order_by(\"-pk\")[0]\n else:\n deletion_log = {}\n\n if request.method == \"POST\" and \"confirm\" in request.POST:\n document.purge()\n return redirect(\n reverse(\"wiki.document\", args=(document_slug,), locale=document_locale)\n )\n else:\n return render(\n request,\n \"wiki/confirm_purge.html\",\n {\"document\": document, \"deletion_log\": deletion_log},\n )\n", "path": "kuma/wiki/views/delete.py"}], "after_files": [{"content": "from django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext\nfrom django.views.decorators.cache import never_cache\n\nfrom kuma.core.decorators import (\n block_user_agents,\n ensure_wiki_domain,\n login_required,\n permission_required,\n)\nfrom kuma.core.urlresolvers import reverse\n\nfrom ..decorators import check_readonly, process_document_path\nfrom ..forms import DocumentDeletionForm\nfrom ..models import Document, DocumentDeletionLog, Revision\nfrom ..utils import locale_and_slug_from_path\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@check_readonly\ndef revert_document(request, document_path, revision_id):\n \"\"\"\n 
Revert document to a specific revision.\n \"\"\"\n document_locale, document_slug, needs_redirect = locale_and_slug_from_path(\n document_path, request\n )\n\n revision = get_object_or_404(\n Revision.objects.select_related(\"document\"),\n pk=revision_id,\n document__slug=document_slug,\n )\n\n if request.method == \"GET\":\n # Render the confirmation page\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\"revision\": revision, \"document\": revision.document},\n )\n else:\n comment = request.POST.get(\"comment\")\n document = revision.document\n old_revision_pk = revision.pk\n try:\n new_revision = document.revert(revision, request.user, comment)\n # schedule a rendering of the new revision if it really was saved\n if new_revision.pk != old_revision_pk:\n document.schedule_rendering(\"max-age=0\")\n except IntegrityError:\n return render(\n request,\n \"wiki/confirm_revision_revert.html\",\n {\n \"revision\": revision,\n \"document\": revision.document,\n \"error\": ugettext(\n \"Document already exists. Note: You cannot \"\n \"revert a document that has been moved until you \"\n \"delete its redirect.\"\n ),\n },\n )\n return redirect(\"wiki.document_revisions\", revision.document.slug)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.delete_document\")\n@check_readonly\n@process_document_path\ndef delete_document(request, document_slug, document_locale):\n \"\"\"\n Delete a Document.\n \"\"\"\n document = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n\n # HACK: https://bugzil.la/972545 - Don't delete pages that have children\n # TODO: https://bugzil.la/972541 - Deleting a page that has subpages\n prevent = document.children.exists()\n\n first_revision = document.revisions.all()[0]\n\n if request.method == \"POST\":\n form = DocumentDeletionForm(data=request.POST)\n if form.is_valid():\n DocumentDeletionLog.objects.create(\n locale=document.locale,\n slug=document.slug,\n user=request.user,\n reason=form.cleaned_data[\"reason\"],\n )\n document.delete()\n return redirect(document)\n else:\n\n form = DocumentDeletionForm(initial={\"reason\": request.GET.get(\"reason\", \"\")})\n\n context = {\n \"document\": document,\n \"form\": form,\n \"request\": request,\n \"revision\": first_revision,\n \"prevent\": prevent,\n }\n return render(request, \"wiki/confirm_document_delete.html\", context)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.restore_document\")\n@check_readonly\n@process_document_path\ndef restore_document(request, document_slug, document_locale):\n \"\"\"\n Restore a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n document.restore()\n return redirect(document)\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@permission_required(\"wiki.purge_document\")\n@check_readonly\n@process_document_path\ndef purge_document(request, document_slug, document_locale):\n \"\"\"\n Permanently purge a deleted Document.\n \"\"\"\n document = get_object_or_404(\n Document.deleted_objects.all(), slug=document_slug, locale=document_locale\n )\n deletion_log_entries = DocumentDeletionLog.objects.filter(\n locale=document_locale, slug=document_slug\n )\n if deletion_log_entries.exists():\n deletion_log = deletion_log_entries.order_by(\"-pk\")[0]\n else:\n deletion_log = {}\n\n if request.method == \"POST\" and 
\"confirm\" in request.POST:\n document.purge()\n return redirect(\n reverse(\"wiki.document\", args=(document_slug,), locale=document_locale)\n )\n else:\n return render(\n request,\n \"wiki/confirm_purge.html\",\n {\"document\": document, \"deletion_log\": deletion_log},\n )\n", "path": "kuma/wiki/views/delete.py"}]}
| 1,875 | 103 |
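The Kuma record above turns on Django's form `initial` argument. The snippet below is a standalone sketch of that mechanism, not Kuma code: the form and field names mirror the golden diff, while the settings bootstrap and the `build_form` helper are assumptions added so it runs outside a Django project.

```python
# Standalone sketch: prefill a Django form field from a GET parameter,
# the same mechanism the golden diff uses for the deletion "reason".
import django
from django.conf import settings

settings.configure()  # minimal settings so forms work outside a project
django.setup()

from django import forms


class DocumentDeletionForm(forms.Form):
    reason = forms.CharField(widget=forms.Textarea)


def build_form(query_params):
    # Mirrors: DocumentDeletionForm(initial={"reason": request.GET.get("reason", "")})
    return DocumentDeletionForm(initial={"reason": query_params.get("reason", "")})


form = build_form({"reason": "Sample reason"})
print(form.initial["reason"])            # -> "Sample reason"
print(build_form({}).initial["reason"])  # -> "" (textarea stays empty)
```

With that in place, linking to the delete view as `...$delete?reason=Sample%20reason` is enough for the patched view to render the textarea pre-populated.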
gh_patches_debug_10882
|
rasdani/github-patches
|
git_diff
|
statsmodels__statsmodels-6292
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nonparametric.KDEUnivariate returns zero bandwidth
Hi,
Does anyone have an idea why the following dataset makes KDEUnivariate return zero bandwidth?
```
data = np.array([1,1,1,1,2])
kde_sm = sm.nonparametric.KDEUnivariate(data.astype('double'))
kde_sm.fit(bw="silverman")
sup = kde_sm.support
den = kde_sm.density
print(kde_sm.bw) # prints 0
```
when `data = np.array([1,1,1,2])` the returned bandwidth is correct. When I add more 1s, the bandwidth stays zero.
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `statsmodels/nonparametric/bandwidths.py`
Content:
```
1 import numpy as np
2 from scipy.stats import scoreatpercentile as sap
3
4 from statsmodels.compat.pandas import Substitution
5 from statsmodels.sandbox.nonparametric import kernels
6
7 def _select_sigma(X):
8 """
9 Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.
10
11 References
12 ----------
13 Silverman (1986) p.47
14 """
15 # normalize = norm.ppf(.75) - norm.ppf(.25)
16 normalize = 1.349
17 # IQR = np.subtract.reduce(percentile(X, [75,25],
18 # axis=axis), axis=axis)/normalize
19 IQR = (sap(X, 75) - sap(X, 25))/normalize
20 return np.minimum(np.std(X, axis=0, ddof=1), IQR)
21
22
23 ## Univariate Rule of Thumb Bandwidths ##
24 def bw_scott(x, kernel=None):
25 """
26 Scott's Rule of Thumb
27
28 Parameters
29 ----------
30 x : array_like
31 Array for which to get the bandwidth
32 kernel : CustomKernel object
33 Unused
34
35 Returns
36 -------
37 bw : float
38 The estimate of the bandwidth
39
40 Notes
41 -----
42 Returns 1.059 * A * n ** (-1/5.) where ::
43
44 A = min(std(x, ddof=1), IQR/1.349)
45 IQR = np.subtract.reduce(np.percentile(x, [75,25]))
46
47 References
48 ----------
49
50 Scott, D.W. (1992) Multivariate Density Estimation: Theory, Practice, and
51 Visualization.
52 """
53 A = _select_sigma(x)
54 n = len(x)
55 return 1.059 * A * n ** (-0.2)
56
57 def bw_silverman(x, kernel=None):
58 """
59 Silverman's Rule of Thumb
60
61 Parameters
62 ----------
63 x : array_like
64 Array for which to get the bandwidth
65 kernel : CustomKernel object
66 Unused
67
68 Returns
69 -------
70 bw : float
71 The estimate of the bandwidth
72
73 Notes
74 -----
75 Returns .9 * A * n ** (-1/5.) where ::
76
77 A = min(std(x, ddof=1), IQR/1.349)
78 IQR = np.subtract.reduce(np.percentile(x, [75,25]))
79
80 References
81 ----------
82
83 Silverman, B.W. (1986) `Density Estimation.`
84 """
85 A = _select_sigma(x)
86 n = len(x)
87 return .9 * A * n ** (-0.2)
88
89
90 def bw_normal_reference(x, kernel=kernels.Gaussian):
91 """
92 Plug-in bandwidth with kernel specific constant based on normal reference.
93
94 This bandwidth minimizes the mean integrated square error if the true
95 distribution is the normal. This choice is an appropriate bandwidth for
96 single peaked distributions that are similar to the normal distribution.
97
98 Parameters
99 ----------
100 x : array_like
101 Array for which to get the bandwidth
102 kernel : CustomKernel object
103 Used to calculate the constant for the plug-in bandwidth.
104
105 Returns
106 -------
107 bw : float
108 The estimate of the bandwidth
109
110 Notes
111 -----
112 Returns C * A * n ** (-1/5.) where ::
113
114 A = min(std(x, ddof=1), IQR/1.349)
115 IQR = np.subtract.reduce(np.percentile(x, [75,25]))
116 C = constant from Hansen (2009)
117
118 When using a Gaussian kernel this is equivalent to the 'scott' bandwidth up
119 to two decimal places. This is the accuracy to which the 'scott' constant is
120 specified.
121
122 References
123 ----------
124
125 Silverman, B.W. (1986) `Density Estimation.`
126 Hansen, B.E. (2009) `Lecture Notes on Nonparametrics.`
127 """
128 C = kernel.normal_reference_constant
129 A = _select_sigma(x)
130 n = len(x)
131 return C * A * n ** (-0.2)
132
133 ## Plug-In Methods ##
134
135 ## Least Squares Cross-Validation ##
136
137 ## Helper Functions ##
138
139 bandwidth_funcs = {
140 "scott": bw_scott,
141 "silverman": bw_silverman,
142 "normal_reference": bw_normal_reference,
143 }
144
145
146 @Substitution(", ".join(sorted(bandwidth_funcs.keys())))
147 def select_bandwidth(x, bw, kernel):
148 """
149 Selects bandwidth for a selection rule bw
150
151 this is a wrapper around existing bandwidth selection rules
152
153 Parameters
154 ----------
155 x : array_like
156 Array for which to get the bandwidth
157 bw : str
158 name of bandwidth selection rule, currently supported are:
159 %s
160 kernel : not used yet
161
162 Returns
163 -------
164 bw : float
165 The estimate of the bandwidth
166
167 """
168 bw = bw.lower()
169 if bw not in bandwidth_funcs:
170 raise ValueError("Bandwidth %s not understood" % bw)
171 #TODO: uncomment checks when we have non-rule of thumb bandwidths for diff. kernels
172 # if kernel == "gauss":
173 return bandwidth_funcs[bw](x, kernel)
174 # else:
175 # raise ValueError("Only Gaussian Kernels are currently supported")
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/statsmodels/nonparametric/bandwidths.py b/statsmodels/nonparametric/bandwidths.py
--- a/statsmodels/nonparametric/bandwidths.py
+++ b/statsmodels/nonparametric/bandwidths.py
@@ -168,8 +168,10 @@
bw = bw.lower()
if bw not in bandwidth_funcs:
raise ValueError("Bandwidth %s not understood" % bw)
-#TODO: uncomment checks when we have non-rule of thumb bandwidths for diff. kernels
-# if kernel == "gauss":
- return bandwidth_funcs[bw](x, kernel)
-# else:
-# raise ValueError("Only Gaussian Kernels are currently supported")
+ bandwidth = bandwidth_funcs[bw](x, kernel)
+ if bandwidth == 0:
+ # eventually this can fall back on another selection criterion.
+ err = "Selected KDE bandwidth is 0. Cannot estiamte density."
+ raise RuntimeError(err)
+ else:
+ return bandwidth
|
{"golden_diff": "diff --git a/statsmodels/nonparametric/bandwidths.py b/statsmodels/nonparametric/bandwidths.py\n--- a/statsmodels/nonparametric/bandwidths.py\n+++ b/statsmodels/nonparametric/bandwidths.py\n@@ -168,8 +168,10 @@\n bw = bw.lower()\n if bw not in bandwidth_funcs:\n raise ValueError(\"Bandwidth %s not understood\" % bw)\n-#TODO: uncomment checks when we have non-rule of thumb bandwidths for diff. kernels\n-# if kernel == \"gauss\":\n- return bandwidth_funcs[bw](x, kernel)\n-# else:\n-# raise ValueError(\"Only Gaussian Kernels are currently supported\")\n+ bandwidth = bandwidth_funcs[bw](x, kernel)\n+ if bandwidth == 0:\n+ # eventually this can fall back on another selection criterion.\n+ err = \"Selected KDE bandwidth is 0. Cannot estiamte density.\"\n+ raise RuntimeError(err)\n+ else:\n+ return bandwidth\n", "issue": "nonparametric.KDEUnivariate returns zero bandwidth\nHi,\r\nDoes anyone have an idea why the following dataset makes KDEUnivariate return zero bandwidth?\r\n\r\n```\r\ndata = np.array([1,1,1,1,2])\r\nkde_sm = sm.nonparametric.KDEUnivariate(data.astype('double'))\r\nkde_sm.fit(bw=\"silverman\")\r\nsup = kde_sm.support\r\nden = kde_sm.density\r\nprint(kde_sm.bw) # prints 0\r\n```\r\nwhen `data = np.array([1,1,1,2])` the returned bandwidth is correct. When I add more 1s, the bandwidth stays zero.\r\n\r\nThank you!\n", "before_files": [{"content": "import numpy as np\nfrom scipy.stats import scoreatpercentile as sap\n\nfrom statsmodels.compat.pandas import Substitution\nfrom statsmodels.sandbox.nonparametric import kernels\n\ndef _select_sigma(X):\n \"\"\"\n Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.\n\n References\n ----------\n Silverman (1986) p.47\n \"\"\"\n# normalize = norm.ppf(.75) - norm.ppf(.25)\n normalize = 1.349\n# IQR = np.subtract.reduce(percentile(X, [75,25],\n# axis=axis), axis=axis)/normalize\n IQR = (sap(X, 75) - sap(X, 25))/normalize\n return np.minimum(np.std(X, axis=0, ddof=1), IQR)\n\n\n## Univariate Rule of Thumb Bandwidths ##\ndef bw_scott(x, kernel=None):\n \"\"\"\n Scott's Rule of Thumb\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n kernel : CustomKernel object\n Unused\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns 1.059 * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ----------\n\n Scott, D.W. (1992) Multivariate Density Estimation: Theory, Practice, and\n Visualization.\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return 1.059 * A * n ** (-0.2)\n\ndef bw_silverman(x, kernel=None):\n \"\"\"\n Silverman's Rule of Thumb\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n kernel : CustomKernel object\n Unused\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns .9 * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ----------\n\n Silverman, B.W. (1986) `Density Estimation.`\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return .9 * A * n ** (-0.2)\n\n\ndef bw_normal_reference(x, kernel=kernels.Gaussian):\n \"\"\"\n Plug-in bandwidth with kernel specific constant based on normal reference.\n\n This bandwidth minimizes the mean integrated square error if the true\n distribution is the normal. 
This choice is an appropriate bandwidth for\n single peaked distributions that are similar to the normal distribution.\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n kernel : CustomKernel object\n Used to calculate the constant for the plug-in bandwidth.\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns C * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n C = constant from Hansen (2009)\n\n When using a Gaussian kernel this is equivalent to the 'scott' bandwidth up\n to two decimal places. This is the accuracy to which the 'scott' constant is\n specified.\n\n References\n ----------\n\n Silverman, B.W. (1986) `Density Estimation.`\n Hansen, B.E. (2009) `Lecture Notes on Nonparametrics.`\n \"\"\"\n C = kernel.normal_reference_constant\n A = _select_sigma(x)\n n = len(x)\n return C * A * n ** (-0.2)\n\n## Plug-In Methods ##\n\n## Least Squares Cross-Validation ##\n\n## Helper Functions ##\n\nbandwidth_funcs = {\n \"scott\": bw_scott,\n \"silverman\": bw_silverman,\n \"normal_reference\": bw_normal_reference,\n}\n\n\n@Substitution(\", \".join(sorted(bandwidth_funcs.keys())))\ndef select_bandwidth(x, bw, kernel):\n \"\"\"\n Selects bandwidth for a selection rule bw\n\n this is a wrapper around existing bandwidth selection rules\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n bw : str\n name of bandwidth selection rule, currently supported are:\n %s\n kernel : not used yet\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n \"\"\"\n bw = bw.lower()\n if bw not in bandwidth_funcs:\n raise ValueError(\"Bandwidth %s not understood\" % bw)\n#TODO: uncomment checks when we have non-rule of thumb bandwidths for diff. kernels\n# if kernel == \"gauss\":\n return bandwidth_funcs[bw](x, kernel)\n# else:\n# raise ValueError(\"Only Gaussian Kernels are currently supported\")\n", "path": "statsmodels/nonparametric/bandwidths.py"}], "after_files": [{"content": "import numpy as np\nfrom scipy.stats import scoreatpercentile as sap\n\nfrom statsmodels.compat.pandas import Substitution\nfrom statsmodels.sandbox.nonparametric import kernels\n\ndef _select_sigma(X):\n \"\"\"\n Returns the smaller of std(X, ddof=1) or normalized IQR(X) over axis 0.\n\n References\n ----------\n Silverman (1986) p.47\n \"\"\"\n# normalize = norm.ppf(.75) - norm.ppf(.25)\n normalize = 1.349\n# IQR = np.subtract.reduce(percentile(X, [75,25],\n# axis=axis), axis=axis)/normalize\n IQR = (sap(X, 75) - sap(X, 25))/normalize\n return np.minimum(np.std(X, axis=0, ddof=1), IQR)\n\n\n## Univariate Rule of Thumb Bandwidths ##\ndef bw_scott(x, kernel=None):\n \"\"\"\n Scott's Rule of Thumb\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n kernel : CustomKernel object\n Unused\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns 1.059 * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ----------\n\n Scott, D.W. 
(1992) Multivariate Density Estimation: Theory, Practice, and\n Visualization.\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return 1.059 * A * n ** (-0.2)\n\ndef bw_silverman(x, kernel=None):\n \"\"\"\n Silverman's Rule of Thumb\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n kernel : CustomKernel object\n Unused\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns .9 * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n\n References\n ----------\n\n Silverman, B.W. (1986) `Density Estimation.`\n \"\"\"\n A = _select_sigma(x)\n n = len(x)\n return .9 * A * n ** (-0.2)\n\n\ndef bw_normal_reference(x, kernel=kernels.Gaussian):\n \"\"\"\n Plug-in bandwidth with kernel specific constant based on normal reference.\n\n This bandwidth minimizes the mean integrated square error if the true\n distribution is the normal. This choice is an appropriate bandwidth for\n single peaked distributions that are similar to the normal distribution.\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n kernel : CustomKernel object\n Used to calculate the constant for the plug-in bandwidth.\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n Notes\n -----\n Returns C * A * n ** (-1/5.) where ::\n\n A = min(std(x, ddof=1), IQR/1.349)\n IQR = np.subtract.reduce(np.percentile(x, [75,25]))\n C = constant from Hansen (2009)\n\n When using a Gaussian kernel this is equivalent to the 'scott' bandwidth up\n to two decimal places. This is the accuracy to which the 'scott' constant is\n specified.\n\n References\n ----------\n\n Silverman, B.W. (1986) `Density Estimation.`\n Hansen, B.E. (2009) `Lecture Notes on Nonparametrics.`\n \"\"\"\n C = kernel.normal_reference_constant\n A = _select_sigma(x)\n n = len(x)\n return C * A * n ** (-0.2)\n\n## Plug-In Methods ##\n\n## Least Squares Cross-Validation ##\n\n## Helper Functions ##\n\nbandwidth_funcs = {\n \"scott\": bw_scott,\n \"silverman\": bw_silverman,\n \"normal_reference\": bw_normal_reference,\n}\n\n\n@Substitution(\", \".join(sorted(bandwidth_funcs.keys())))\ndef select_bandwidth(x, bw, kernel):\n \"\"\"\n Selects bandwidth for a selection rule bw\n\n this is a wrapper around existing bandwidth selection rules\n\n Parameters\n ----------\n x : array_like\n Array for which to get the bandwidth\n bw : str\n name of bandwidth selection rule, currently supported are:\n %s\n kernel : not used yet\n\n Returns\n -------\n bw : float\n The estimate of the bandwidth\n\n \"\"\"\n bw = bw.lower()\n if bw not in bandwidth_funcs:\n raise ValueError(\"Bandwidth %s not understood\" % bw)\n bandwidth = bandwidth_funcs[bw](x, kernel)\n if bandwidth == 0:\n # eventually this can fall back on another selection criterion.\n err = \"Selected KDE bandwidth is 0. Cannot estiamte density.\"\n raise RuntimeError(err)\n else:\n return bandwidth\n", "path": "statsmodels/nonparametric/bandwidths.py"}]}
| 2,009 | 223 |
gh_patches_debug_2714
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-1397
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logger not work.
**Describe the bug**
Logger did not work at current master commit (https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18).
**Code snippet**
```python
from sanic import Sanic
from sanic.log import logger
from sanic.response import text
app = Sanic()
@app.listener('before_server_start')
async def setup(app, loop):
logger.info('INFO')
@app.get('/')
async def test(request):
return text('hello world')
if __name__ == '__main__':
app.run()
```
There is no any log/output now.
**Expected behavior**
At `0.8.3` release, it will logging/output some messages like:
```
[2018-11-05 17:34:47 +0800] [12112] [INFO] Goin' Fast @ http://127.0.0.1:8000
[2018-11-05 17:34:47 +0800] [12112] [INFO] INFO
[2018-11-05 17:34:47 +0800] [12112] [INFO] Starting worker [12112]
```
**Environment (please complete the following information):**
- OS: Ubuntu 18.04
- Version: https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18
**Additional context**
It seems that `getLogger()` does not get the correct logger at [line 56](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L56) in `log.py`. The logger is trying to get a logger named `sanic.root`, but it does not exist. Rename the logger `root` at [line 9](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L9) should fix this bug.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/log.py`
Content:
```
1 import logging
2 import sys
3
4
5 LOGGING_CONFIG_DEFAULTS = dict(
6 version=1,
7 disable_existing_loggers=False,
8 loggers={
9 "root": {"level": "INFO", "handlers": ["console"]},
10 "sanic.error": {
11 "level": "INFO",
12 "handlers": ["error_console"],
13 "propagate": True,
14 "qualname": "sanic.error",
15 },
16 "sanic.access": {
17 "level": "INFO",
18 "handlers": ["access_console"],
19 "propagate": True,
20 "qualname": "sanic.access",
21 },
22 },
23 handlers={
24 "console": {
25 "class": "logging.StreamHandler",
26 "formatter": "generic",
27 "stream": sys.stdout,
28 },
29 "error_console": {
30 "class": "logging.StreamHandler",
31 "formatter": "generic",
32 "stream": sys.stderr,
33 },
34 "access_console": {
35 "class": "logging.StreamHandler",
36 "formatter": "access",
37 "stream": sys.stdout,
38 },
39 },
40 formatters={
41 "generic": {
42 "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
43 "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
44 "class": "logging.Formatter",
45 },
46 "access": {
47 "format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
48 + "%(request)s %(message)s %(status)d %(byte)d",
49 "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
50 "class": "logging.Formatter",
51 },
52 },
53 )
54
55
56 logger = logging.getLogger("sanic.root")
57 error_logger = logging.getLogger("sanic.error")
58 access_logger = logging.getLogger("sanic.access")
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sanic/log.py b/sanic/log.py
--- a/sanic/log.py
+++ b/sanic/log.py
@@ -6,7 +6,7 @@
version=1,
disable_existing_loggers=False,
loggers={
- "root": {"level": "INFO", "handlers": ["console"]},
+ "sanic.root": {"level": "INFO", "handlers": ["console"]},
"sanic.error": {
"level": "INFO",
"handlers": ["error_console"],
|
{"golden_diff": "diff --git a/sanic/log.py b/sanic/log.py\n--- a/sanic/log.py\n+++ b/sanic/log.py\n@@ -6,7 +6,7 @@\n version=1,\n disable_existing_loggers=False,\n loggers={\n- \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n+ \"sanic.root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n", "issue": "Logger not work.\n**Describe the bug**\r\nLogger did not work at current master commit (https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18).\r\n\r\n\r\n**Code snippet**\r\n```python\r\nfrom sanic import Sanic\r\nfrom sanic.log import logger\r\nfrom sanic.response import text\r\n\r\n\r\napp = Sanic()\r\n\r\[email protected]('before_server_start')\r\nasync def setup(app, loop):\r\n logger.info('INFO')\r\n\r\n\r\[email protected]('/')\r\nasync def test(request):\r\n return text('hello world')\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n```\r\nThere is no any log/output now.\r\n\r\n\r\n\r\n**Expected behavior**\r\nAt `0.8.3` release, it will logging/output some messages like:\r\n```\r\n[2018-11-05 17:34:47 +0800] [12112] [INFO] Goin' Fast @ http://127.0.0.1:8000\r\n[2018-11-05 17:34:47 +0800] [12112] [INFO] INFO\r\n[2018-11-05 17:34:47 +0800] [12112] [INFO] Starting worker [12112]\r\n```\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Ubuntu 18.04\r\n - Version: https://github.com/huge-success/sanic/commit/7d79a86d4dc48de11cd34e8ba12e41f3a9f9ff18\r\n\r\n\r\n**Additional context**\r\nIt seems that `getLogger()` does not get the correct logger at [line 56](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L56) in `log.py`. The logger is trying to get a logger named `sanic.root`, but it does not exist. 
Rename the logger `root` at [line 9](https://github.com/huge-success/sanic/blob/master/sanic/log.py#L9) should fix this bug.\r\n\n", "before_files": [{"content": "import logging\nimport sys\n\n\nLOGGING_CONFIG_DEFAULTS = dict(\n version=1,\n disable_existing_loggers=False,\n loggers={\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.error\",\n },\n \"sanic.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"access_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.access\",\n },\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stdout,\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stderr,\n },\n \"access_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"access\",\n \"stream\": sys.stdout,\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n + \"%(request)s %(message)s %(status)d %(byte)d\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n },\n)\n\n\nlogger = logging.getLogger(\"sanic.root\")\nerror_logger = logging.getLogger(\"sanic.error\")\naccess_logger = logging.getLogger(\"sanic.access\")\n", "path": "sanic/log.py"}], "after_files": [{"content": "import logging\nimport sys\n\n\nLOGGING_CONFIG_DEFAULTS = dict(\n version=1,\n disable_existing_loggers=False,\n loggers={\n \"sanic.root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.error\",\n },\n \"sanic.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"access_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.access\",\n },\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stdout,\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stderr,\n },\n \"access_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"access\",\n \"stream\": sys.stdout,\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n + \"%(request)s %(message)s %(status)d %(byte)d\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n },\n)\n\n\nlogger = logging.getLogger(\"sanic.root\")\nerror_logger = logging.getLogger(\"sanic.error\")\naccess_logger = logging.getLogger(\"sanic.access\")\n", "path": "sanic/log.py"}]}
| 1,273 | 114 |
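The sanic record above is a pure naming mismatch: the dict config declared a logger called `root` while the code fetched `sanic.root`. The standard-library-only sketch below shows the fixed configuration shape; the handler set is trimmed to a single console handler for brevity.

```python
import logging
import logging.config
import sys

config = {
    "version": 1,
    "disable_existing_loggers": False,
    # "sanic.root" (not "root") so logging.getLogger("sanic.root") finds it
    "loggers": {"sanic.root": {"level": "INFO", "handlers": ["console"]}},
    "handlers": {
        "console": {"class": "logging.StreamHandler", "stream": sys.stdout},
    },
}
logging.config.dictConfig(config)

logging.getLogger("sanic.root").info("INFO")  # emitted to stdout again
```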
gh_patches_debug_1097
|
rasdani/github-patches
|
git_diff
|
openfun__richie-290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Person plugin form list every pages, not only Person pages
## Bug Report
**Expected behavior/code**
Select box in PersonPlugin form should list only extended page with Person model.
**Actual Behavior**
Currently the select box is listing every CMS pages.
**Steps to Reproduce**
1. Edit a page;
2. Try to add a Person plugin into a placeholder which allow it;
3. Click to open the select box from opened form for added Person plugin.
**Environment**
- Richie version: 0.1.0 (from my own branch synchronized from master 200c8a3)
- Platform: Ubuntu 18.04 LTS
**Possible Solution**
Adding a filter inside plugin form machinery to retain only the extend page with Person.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/richie/apps/persons/models.py`
Content:
```
1 """
2 Declare and configure the model for the person application
3 """
4 from django.db import models
5 from django.utils.translation import ugettext_lazy as _
6
7 from cms.api import Page
8 from cms.extensions import PageExtension
9 from cms.models.pluginmodel import CMSPlugin
10 from parler.models import TranslatableModel, TranslatedFields
11
12
13 class PersonTitle(TranslatableModel):
14 """
15 PersonTitle define i18ned list of people titles and there abbreviations
16 Instances of this models should only be created by CMS administrators
17 """
18
19 translations = TranslatedFields(
20 title=models.CharField(_("Title"), max_length=200),
21 abbreviation=models.CharField(_("Title abbreviation"), max_length=10),
22 )
23
24 class Meta:
25 verbose_name = _("person title")
26
27 def __str__(self):
28 """Human representation of a person title"""
29 return "{model}: {title} ({abbreviation})".format(
30 model=self._meta.verbose_name.title(),
31 title=self.title,
32 abbreviation=self.abbreviation,
33 )
34
35
36 class Person(PageExtension):
37 """
38 The person page extension represents and records people information.
39 It could be a course or news article author.
40
41 This model should be used to record structured data about the person whereas the
42 associated page object is where we record the less structured information to display on the
43 page to present the person.
44 """
45
46 first_name = models.CharField(max_length=200, verbose_name=_("First name"))
47 last_name = models.CharField(max_length=200, verbose_name=_("Last name"))
48
49 person_title = models.ForeignKey("PersonTitle", related_name="persons")
50
51 ROOT_REVERSE_ID = "persons"
52 TEMPLATE_DETAIL = "persons/cms/person_detail.html"
53
54 class Meta:
55 verbose_name = _("person")
56
57 def __str__(self):
58 """Human representation of a person"""
59 return "{model}: {title} ({full_name})".format(
60 model=self._meta.verbose_name.title(),
61 title=self.extended_object.get_title(),
62 full_name=self.get_full_name(),
63 )
64
65 def save(self, *args, **kwargs):
66 """
67 Enforce validation on each instance save
68 """
69 self.full_clean()
70 super().save(*args, **kwargs)
71
72 def get_full_name(self):
73 """
74 Return person's full name
75 """
76 return "{person_title} {first_name} {last_name}".format(
77 person_title=self.person_title.title,
78 first_name=self.first_name,
79 last_name=self.last_name,
80 )
81
82
83 class PersonPluginModel(CMSPlugin):
84 """
85 Person plugin model handles the relation from PersonPlugin
86 to their Person instance
87 """
88
89 page = models.ForeignKey(Page)
90
91 class Meta:
92 verbose_name = _("person plugin model")
93
94 def __str__(self):
95 """Human representation of a person plugin"""
96 return "{model:s}: {id:d}".format(
97 model=self._meta.verbose_name.title(), id=self.id
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/richie/apps/persons/models.py b/src/richie/apps/persons/models.py
--- a/src/richie/apps/persons/models.py
+++ b/src/richie/apps/persons/models.py
@@ -86,7 +86,7 @@
to their Person instance
"""
- page = models.ForeignKey(Page)
+ page = models.ForeignKey(Page, limit_choices_to={"person__isnull": False})
class Meta:
verbose_name = _("person plugin model")
|
{"golden_diff": "diff --git a/src/richie/apps/persons/models.py b/src/richie/apps/persons/models.py\n--- a/src/richie/apps/persons/models.py\n+++ b/src/richie/apps/persons/models.py\n@@ -86,7 +86,7 @@\n to their Person instance\n \"\"\"\n \n- page = models.ForeignKey(Page)\n+ page = models.ForeignKey(Page, limit_choices_to={\"person__isnull\": False})\n \n class Meta:\n verbose_name = _(\"person plugin model\")\n", "issue": "Person plugin form list every pages, not only Person pages\n## Bug Report\r\n\r\n**Expected behavior/code**\r\nSelect box in PersonPlugin form should list only extended page with Person model.\r\n\r\n**Actual Behavior**\r\nCurrently the select box is listing every CMS pages.\r\n\r\n**Steps to Reproduce**\r\n1. Edit a page;\r\n2. Try to add a Person plugin into a placeholder which allow it;\r\n3. Click to open the select box from opened form for added Person plugin.\r\n\r\n**Environment**\r\n- Richie version: 0.1.0 (from my own branch synchronized from master 200c8a3) \r\n- Platform: Ubuntu 18.04 LTS\r\n\r\n**Possible Solution**\r\nAdding a filter inside plugin form machinery to retain only the extend page with Person.\r\n\n", "before_files": [{"content": "\"\"\"\nDeclare and configure the model for the person application\n\"\"\"\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.api import Page\nfrom cms.extensions import PageExtension\nfrom cms.models.pluginmodel import CMSPlugin\nfrom parler.models import TranslatableModel, TranslatedFields\n\n\nclass PersonTitle(TranslatableModel):\n \"\"\"\n PersonTitle define i18ned list of people titles and there abbreviations\n Instances of this models should only be created by CMS administrators\n \"\"\"\n\n translations = TranslatedFields(\n title=models.CharField(_(\"Title\"), max_length=200),\n abbreviation=models.CharField(_(\"Title abbreviation\"), max_length=10),\n )\n\n class Meta:\n verbose_name = _(\"person title\")\n\n def __str__(self):\n \"\"\"Human representation of a person title\"\"\"\n return \"{model}: {title} ({abbreviation})\".format(\n model=self._meta.verbose_name.title(),\n title=self.title,\n abbreviation=self.abbreviation,\n )\n\n\nclass Person(PageExtension):\n \"\"\"\n The person page extension represents and records people information.\n It could be a course or news article author.\n\n This model should be used to record structured data about the person whereas the\n associated page object is where we record the less structured information to display on the\n page to present the person.\n \"\"\"\n\n first_name = models.CharField(max_length=200, verbose_name=_(\"First name\"))\n last_name = models.CharField(max_length=200, verbose_name=_(\"Last name\"))\n\n person_title = models.ForeignKey(\"PersonTitle\", related_name=\"persons\")\n\n ROOT_REVERSE_ID = \"persons\"\n TEMPLATE_DETAIL = \"persons/cms/person_detail.html\"\n\n class Meta:\n verbose_name = _(\"person\")\n\n def __str__(self):\n \"\"\"Human representation of a person\"\"\"\n return \"{model}: {title} ({full_name})\".format(\n model=self._meta.verbose_name.title(),\n title=self.extended_object.get_title(),\n full_name=self.get_full_name(),\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Enforce validation on each instance save\n \"\"\"\n self.full_clean()\n super().save(*args, **kwargs)\n\n def get_full_name(self):\n \"\"\"\n Return person's full name\n \"\"\"\n return \"{person_title} {first_name} {last_name}\".format(\n person_title=self.person_title.title,\n first_name=self.first_name,\n 
last_name=self.last_name,\n )\n\n\nclass PersonPluginModel(CMSPlugin):\n \"\"\"\n Person plugin model handles the relation from PersonPlugin\n to their Person instance\n \"\"\"\n\n page = models.ForeignKey(Page)\n\n class Meta:\n verbose_name = _(\"person plugin model\")\n\n def __str__(self):\n \"\"\"Human representation of a person plugin\"\"\"\n return \"{model:s}: {id:d}\".format(\n model=self._meta.verbose_name.title(), id=self.id\n )\n", "path": "src/richie/apps/persons/models.py"}], "after_files": [{"content": "\"\"\"\nDeclare and configure the model for the person application\n\"\"\"\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cms.api import Page\nfrom cms.extensions import PageExtension\nfrom cms.models.pluginmodel import CMSPlugin\nfrom parler.models import TranslatableModel, TranslatedFields\n\n\nclass PersonTitle(TranslatableModel):\n \"\"\"\n PersonTitle define i18ned list of people titles and there abbreviations\n Instances of this models should only be created by CMS administrators\n \"\"\"\n\n translations = TranslatedFields(\n title=models.CharField(_(\"Title\"), max_length=200),\n abbreviation=models.CharField(_(\"Title abbreviation\"), max_length=10),\n )\n\n class Meta:\n verbose_name = _(\"person title\")\n\n def __str__(self):\n \"\"\"Human representation of a person title\"\"\"\n return \"{model}: {title} ({abbreviation})\".format(\n model=self._meta.verbose_name.title(),\n title=self.title,\n abbreviation=self.abbreviation,\n )\n\n\nclass Person(PageExtension):\n \"\"\"\n The person page extension represents and records people information.\n It could be a course or news article author.\n\n This model should be used to record structured data about the person whereas the\n associated page object is where we record the less structured information to display on the\n page to present the person.\n \"\"\"\n\n first_name = models.CharField(max_length=200, verbose_name=_(\"First name\"))\n last_name = models.CharField(max_length=200, verbose_name=_(\"Last name\"))\n\n person_title = models.ForeignKey(\"PersonTitle\", related_name=\"persons\")\n\n ROOT_REVERSE_ID = \"persons\"\n TEMPLATE_DETAIL = \"persons/cms/person_detail.html\"\n\n class Meta:\n verbose_name = _(\"person\")\n\n def __str__(self):\n \"\"\"Human representation of a person\"\"\"\n return \"{model}: {title} ({full_name})\".format(\n model=self._meta.verbose_name.title(),\n title=self.extended_object.get_title(),\n full_name=self.get_full_name(),\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Enforce validation on each instance save\n \"\"\"\n self.full_clean()\n super().save(*args, **kwargs)\n\n def get_full_name(self):\n \"\"\"\n Return person's full name\n \"\"\"\n return \"{person_title} {first_name} {last_name}\".format(\n person_title=self.person_title.title,\n first_name=self.first_name,\n last_name=self.last_name,\n )\n\n\nclass PersonPluginModel(CMSPlugin):\n \"\"\"\n Person plugin model handles the relation from PersonPlugin\n to their Person instance\n \"\"\"\n\n page = models.ForeignKey(Page, limit_choices_to={\"person__isnull\": False})\n\n class Meta:\n verbose_name = _(\"person plugin model\")\n\n def __str__(self):\n \"\"\"Human representation of a person plugin\"\"\"\n return \"{model:s}: {id:d}\".format(\n model=self._meta.verbose_name.title(), id=self.id\n )\n", "path": "src/richie/apps/persons/models.py"}]}
| 1,238 | 110 |
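For the richie record above, `limit_choices_to` is what narrows the plugin form's page selector. The models below are simplified stand-ins (not richie's real `PageExtension` classes, and the `related_name` is an assumption) that show the effect of the one-line fix without needing a database.

```python
import django
from django.conf import settings

settings.configure()
django.setup()

from django.db import models


class Page(models.Model):
    title = models.CharField(max_length=200)

    class Meta:
        app_label = "sketch"


class Person(models.Model):
    # stands in for the PageExtension one-to-one link to Page
    extended_object = models.OneToOneField(
        Page, on_delete=models.CASCADE, related_name="person"
    )

    class Meta:
        app_label = "sketch"


class PersonPluginModel(models.Model):
    page = models.ForeignKey(
        Page, on_delete=models.CASCADE, limit_choices_to={"person__isnull": False}
    )

    class Meta:
        app_label = "sketch"


print(PersonPluginModel._meta.get_field("page").get_limit_choices_to())
# -> {'person__isnull': False}: the plugin form's queryset becomes
#    Page.objects.filter(person__isnull=False) instead of every CMS page.
```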
gh_patches_debug_9198
|
rasdani/github-patches
|
git_diff
|
sotetsuk__pgx-792
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Docs] Add PyPI description
Currently, it's empty.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3 setup(
4 name="pgx",
5 version="0.4.0",
6 long_description_content_type="text/markdown",
7 description="",
8 url="",
9 author="Sotetsu KOYAMADA",
10 author_email="[email protected]",
11 keywords="",
12 packages=find_packages(),
13 package_data={"": ["LICENSE", "*.svg"]},
14 include_package_data=True,
15 install_requires=[
16 "jax>=0.3.25", # JAX version on Colab (TPU)
17 "chex>=0.1.6",
18 "svgwrite",
19 "msgpack",
20 "typing_extensions"
21 ],
22 classifiers=[
23 "Programming Language :: Python :: 3.8",
24 "Programming Language :: Python :: 3.9",
25 "Programming Language :: Python :: 3.10",
26 ],
27 )
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,10 +1,14 @@
from setuptools import find_packages, setup
+from pathlib import Path
+
+long_description = (Path(__file__).parent / "README.md").read_text()
setup(
name="pgx",
version="0.4.0",
- long_description_content_type="text/markdown",
- description="",
+ description="GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)",
+ long_description=long_description,
+ long_description_content_type='text/markdown',
url="",
author="Sotetsu KOYAMADA",
author_email="[email protected]",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,10 +1,14 @@\n from setuptools import find_packages, setup\n+from pathlib import Path\n+\n+long_description = (Path(__file__).parent / \"README.md\").read_text()\n \n setup(\n name=\"pgx\",\n version=\"0.4.0\",\n- long_description_content_type=\"text/markdown\",\n- description=\"\",\n+ description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n url=\"\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n", "issue": "[Docs] Add PyPI description\nCurrently, it's empty.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nsetup(\n name=\"pgx\",\n version=\"0.4.0\",\n long_description_content_type=\"text/markdown\",\n description=\"\",\n url=\"\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\"\": [\"LICENSE\", \"*.svg\"]},\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"chex>=0.1.6\",\n \"svgwrite\",\n \"msgpack\",\n \"typing_extensions\"\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\nfrom pathlib import Path\n\nlong_description = (Path(__file__).parent / \"README.md\").read_text()\n\nsetup(\n name=\"pgx\",\n version=\"0.4.0\",\n description=\"GPU/TPU-accelerated parallel game simulators for reinforcement learning (RL)\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"\",\n author=\"Sotetsu KOYAMADA\",\n author_email=\"[email protected]\",\n keywords=\"\",\n packages=find_packages(),\n package_data={\"\": [\"LICENSE\", \"*.svg\"]},\n include_package_data=True,\n install_requires=[\n \"jax>=0.3.25\", # JAX version on Colab (TPU)\n \"chex>=0.1.6\",\n \"svgwrite\",\n \"msgpack\",\n \"typing_extensions\"\n ],\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}]}
| 513 | 171 |
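The pgx record above applies the standard PyPI-description pattern. The sketch below is a generic `setup.py` showing that pattern with placeholder metadata (the name, version and description are not pgx's values); it assumes a `README.md` sits next to the file.

```python
from pathlib import Path

from setuptools import find_packages, setup

# Render the repository README as the PyPI project page.
long_description = (Path(__file__).parent / "README.md").read_text(encoding="utf-8")

setup(
    name="example-package",
    version="0.0.1",
    description="One-line summary shown in PyPI search results",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
)
```

Building with `python -m build` produces a sdist/wheel whose metadata carries the full Markdown description, which PyPI renders on the project page.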
gh_patches_debug_27075
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-478
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: Reshape() with -1 fails
When calling the chainer function Reshape() with a -1 value (e.g. Reshape((-1, 10, 10))) I get an error:
```
InvalidType: Expect: prod(in_types[0].shape) == prod((-1, 10, 10))
Actual: 2800 != -100
```
The cupy function reshape does support -1 arguments, so the error seems to be just the check in chainer's Reshape. The problematic check is:
_type_check_prod(in_types[0].shape) == _type_check_prod(self.shape)
If a -1 argument is present, it should instead check if the size of in_types[0] is divisible by the product of the axis-lengths that are not -1.
I would send a PR but I'm not exactly clear on the correct usage of type_check.expect, and had some issues with it in the past. Should be simple enough to fix though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/reshape.py`
Content:
```
1 import numpy
2
3 from chainer import function
4 from chainer.utils import type_check
5
6
7 _type_check_prod = type_check.Variable(numpy.prod, 'prod')
8
9
10 class Reshape(function.Function):
11
12 """Reshapes an input array without copy."""
13
14 def __init__(self, shape):
15 self.shape = shape
16
17 def check_type_forward(self, in_types):
18 type_check.expect(
19 in_types.size() == 1,
20 _type_check_prod(in_types[0].shape) ==
21 _type_check_prod(self.shape)
22 )
23
24 def forward(self, x):
25 return x[0].reshape(self.shape),
26
27 def backward(self, x, gy):
28 return gy[0].reshape(x[0].shape),
29
30
31 def reshape(x, shape):
32 """Reshapes an input variable without copy.
33
34 Args:
35 x (~chainer.Variable): Input variable.
36 shape (tuple of ints): Target shape.
37
38 Returns:
39 ~chainer.Variable: Variable that holds a reshaped version of the input
40 variable.
41
42 """
43 return Reshape(shape)(x)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/functions/array/reshape.py b/chainer/functions/array/reshape.py
--- a/chainer/functions/array/reshape.py
+++ b/chainer/functions/array/reshape.py
@@ -1,10 +1,12 @@
-import numpy
-
from chainer import function
from chainer.utils import type_check
-_type_check_prod = type_check.Variable(numpy.prod, 'prod')
+def _count_unknown_dims(shape):
+ cnt = 0
+ for dim in shape:
+ cnt += dim < 0
+ return cnt
class Reshape(function.Function):
@@ -12,15 +14,32 @@
"""Reshapes an input array without copy."""
def __init__(self, shape):
+ cnt = _count_unknown_dims(shape)
+ assert cnt == 0 or cnt == 1
+
self.shape = shape
def check_type_forward(self, in_types):
type_check.expect(
in_types.size() == 1,
- _type_check_prod(in_types[0].shape) ==
- _type_check_prod(self.shape)
)
+ x_type, = in_types
+
+ cnt = _count_unknown_dims(self.shape)
+ if cnt == 0:
+ type_check.expect(
+ type_check.prod(x_type.shape) == type_check.prod(self.shape))
+ else:
+ known_size = 1
+ for s in self.shape:
+ if s > 0:
+ known_size *= s
+ size_var = type_check.Variable(known_size,
+ 'known_size(=%d)' % known_size)
+ type_check.expect(
+ type_check.prod(x_type.shape) % size_var == 0)
+
def forward(self, x):
return x[0].reshape(self.shape),
|
{"golden_diff": "diff --git a/chainer/functions/array/reshape.py b/chainer/functions/array/reshape.py\n--- a/chainer/functions/array/reshape.py\n+++ b/chainer/functions/array/reshape.py\n@@ -1,10 +1,12 @@\n-import numpy\n-\n from chainer import function\n from chainer.utils import type_check\n \n \n-_type_check_prod = type_check.Variable(numpy.prod, 'prod')\n+def _count_unknown_dims(shape):\n+ cnt = 0\n+ for dim in shape:\n+ cnt += dim < 0\n+ return cnt\n \n \n class Reshape(function.Function):\n@@ -12,15 +14,32 @@\n \"\"\"Reshapes an input array without copy.\"\"\"\n \n def __init__(self, shape):\n+ cnt = _count_unknown_dims(shape)\n+ assert cnt == 0 or cnt == 1\n+\n self.shape = shape\n \n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n- _type_check_prod(in_types[0].shape) ==\n- _type_check_prod(self.shape)\n )\n \n+ x_type, = in_types\n+\n+ cnt = _count_unknown_dims(self.shape)\n+ if cnt == 0:\n+ type_check.expect(\n+ type_check.prod(x_type.shape) == type_check.prod(self.shape))\n+ else:\n+ known_size = 1\n+ for s in self.shape:\n+ if s > 0:\n+ known_size *= s\n+ size_var = type_check.Variable(known_size,\n+ 'known_size(=%d)' % known_size)\n+ type_check.expect(\n+ type_check.prod(x_type.shape) % size_var == 0)\n+\n def forward(self, x):\n return x[0].reshape(self.shape),\n", "issue": "Bug: Reshape() with -1 fails\nWhen calling the chainer function Reshape() with a -1 value (e.g. Reshape((-1, 10, 10))) I get an error: \n\n```\nInvalidType: Expect: prod(in_types[0].shape) == prod((-1, 10, 10))\nActual: 2800 != -100\n```\n\nThe cupy function reshape does support -1 arguments, so the error seems to be just the check in chainer's Reshape. The problematic check is:\n _type_check_prod(in_types[0].shape) == _type_check_prod(self.shape)\n\nIf a -1 argument is present, it should instead check if the size of in_types[0] is divisible by the product of the axis-lengths that are not -1. \n\nI would send a PR but I'm not exactly clear on the correct usage of type_check.expect, and had some issues with it in the past. 
Should be simple enough to fix though.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\n_type_check_prod = type_check.Variable(numpy.prod, 'prod')\n\n\nclass Reshape(function.Function):\n\n \"\"\"Reshapes an input array without copy.\"\"\"\n\n def __init__(self, shape):\n self.shape = shape\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n _type_check_prod(in_types[0].shape) ==\n _type_check_prod(self.shape)\n )\n\n def forward(self, x):\n return x[0].reshape(self.shape),\n\n def backward(self, x, gy):\n return gy[0].reshape(x[0].shape),\n\n\ndef reshape(x, shape):\n \"\"\"Reshapes an input variable without copy.\n\n Args:\n x (~chainer.Variable): Input variable.\n shape (tuple of ints): Target shape.\n\n Returns:\n ~chainer.Variable: Variable that holds a reshaped version of the input\n variable.\n\n \"\"\"\n return Reshape(shape)(x)\n", "path": "chainer/functions/array/reshape.py"}], "after_files": [{"content": "from chainer import function\nfrom chainer.utils import type_check\n\n\ndef _count_unknown_dims(shape):\n cnt = 0\n for dim in shape:\n cnt += dim < 0\n return cnt\n\n\nclass Reshape(function.Function):\n\n \"\"\"Reshapes an input array without copy.\"\"\"\n\n def __init__(self, shape):\n cnt = _count_unknown_dims(shape)\n assert cnt == 0 or cnt == 1\n\n self.shape = shape\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n )\n\n x_type, = in_types\n\n cnt = _count_unknown_dims(self.shape)\n if cnt == 0:\n type_check.expect(\n type_check.prod(x_type.shape) == type_check.prod(self.shape))\n else:\n known_size = 1\n for s in self.shape:\n if s > 0:\n known_size *= s\n size_var = type_check.Variable(known_size,\n 'known_size(=%d)' % known_size)\n type_check.expect(\n type_check.prod(x_type.shape) % size_var == 0)\n\n def forward(self, x):\n return x[0].reshape(self.shape),\n\n def backward(self, x, gy):\n return gy[0].reshape(x[0].shape),\n\n\ndef reshape(x, shape):\n \"\"\"Reshapes an input variable without copy.\n\n Args:\n x (~chainer.Variable): Input variable.\n shape (tuple of ints): Target shape.\n\n Returns:\n ~chainer.Variable: Variable that holds a reshaped version of the input\n variable.\n\n \"\"\"\n return Reshape(shape)(x)\n", "path": "chainer/functions/array/reshape.py"}]}
| 795 | 394 |
gh_patches_debug_13895
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-2587
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Web3 instantiation from subclass throws error
* Version: 5.30.0
* Python: 3.8
* OS: osx
* pip freeze output is intentionally omitted.
### What was wrong?
We use a sub-class of Web3, for the purpose of this example, call it SubclassWeb3, as follows:
```
class SubclassWeb3(Web3):
def __init__(self, ....):
... do bunch of stuff...
super().__init__(*args, **kwargs)
... do bunch more stuff...
```
This setup worked fine with web3 version == 5.23.1. However, switching to web3==5.30.0 breaks this, with error
```
[rest of callstack omitted]
File “[redacted]/web3/main.py”, line 251, in __init__
self.attach_modules(modules)
File “[redacted]/web3/main.py”, line 340, in attach_modules
_attach_modules(self, modules)
File “[redacted]/web3/_utils/module.py”, line 68, in attach_modules
setattr(parent_module, module_name, module_class(w3))
File “[redacted]/web3/module.py”, line 93, in __init__
self.codec: ABICodec = web3.codec
AttributeError: ‘NoneType’ object has no attribute ‘codec’
```
This used to work in older versions because the parent_module is assumed to be a Web3 object and stored accordingly - [see logic](https://github.com/ethereum/web3.py/blob/2b229d2e16792e52bb47108e665c6a8937c188a2/web3/_utils/module.py#L33-L35).
Compare this to the somewhat brittle name checking that happens in 5.30.0 - [see logic](https://github.com/ethereum/web3.py/blob/ee5f0543ad91e4ba991997e35f50719ceb58135d/web3/_utils/module.py#L58-L60). This doesn't quite work because if this logic is being executed from a subclass of Web3, the name of the parent module appears as SubclassWeb3 (and not Web3).
### How can it be fixed?
Fill this section in if you know how this could or should be fixed.
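A minimal, self-contained sketch of the root cause (the `Web3` and `SubclassWeb3` classes below are stand-ins for illustration, not the real implementations): comparing the class *name* misses subclasses, while an `isinstance` check does not.
```
class Web3:                  # stand-in for web3.main.Web3
    pass

class SubclassWeb3(Web3):    # stand-in for the subclass described above
    pass

w3 = SubclassWeb3()
print(type(w3).__name__ == 'Web3')   # False -> the 5.30.0 name check misses subclasses
print(isinstance(w3, Web3))          # True  -> an isinstance-based check still matches
```
One possible direction, then, is to resolve the `w3` reference with `isinstance(parent_module, Web3)` (importing `Web3` locally inside the function to avoid a circular import) rather than comparing class names.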
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `web3/_utils/module.py`
Content:
```
1 import inspect
2 from io import (
3 UnsupportedOperation,
4 )
5 from typing import (
6 TYPE_CHECKING,
7 Any,
8 Dict,
9 List,
10 Optional,
11 Sequence,
12 Union,
13 )
14
15 from web3.exceptions import (
16 ValidationError,
17 )
18 from web3.module import (
19 Module,
20 )
21
22 if TYPE_CHECKING:
23 from web3 import Web3 # noqa: F401
24
25
26 def _validate_init_params_and_return_if_found(module_class: Any) -> List[str]:
27 init_params_raw = list(inspect.signature(module_class.__init__).parameters)
28 module_init_params = [
29 param for param in init_params_raw if param not in ['self', 'args', 'kwargs']
30 ]
31
32 if len(module_init_params) > 1:
33 raise UnsupportedOperation(
34 "A module class may accept a single `Web3` instance as the first argument of its "
35 f"__init__() method. More than one argument found for {module_class.__name__}: "
36 f"{module_init_params}"
37 )
38
39 return module_init_params
40
41
42 def attach_modules(
43 parent_module: Union["Web3", "Module"],
44 module_definitions: Dict[str, Any],
45 w3: Optional[Union["Web3", "Module"]] = None
46 ) -> None:
47 for module_name, module_info in module_definitions.items():
48 module_info_is_list_like = isinstance(module_info, Sequence)
49
50 module_class = module_info[0] if module_info_is_list_like else module_info
51
52 if hasattr(parent_module, module_name):
53 raise AttributeError(
54 f"Cannot set {parent_module} module named '{module_name}'. The web3 object "
55 "already has an attribute with that name"
56 )
57
58 # The parent module is the ``Web3`` instance on first run of the loop
59 if type(parent_module).__name__ == 'Web3':
60 w3 = parent_module
61
62 module_init_params = _validate_init_params_and_return_if_found(module_class)
63 if len(module_init_params) == 1:
64 # Modules that need access to the ``Web3`` instance may accept the instance as the first
65 # arg in their ``__init__()`` method. This is the case for any module that inherits from
66 # ``web3.module.Module``.
67 # e.g. def __init__(self, w3):
68 setattr(parent_module, module_name, module_class(w3))
69 else:
70 # Modules need not take in a ``Web3`` instance in their ``__init__()`` if not needed
71 setattr(parent_module, module_name, module_class())
72
73 if module_info_is_list_like:
74 if len(module_info) == 2:
75 submodule_definitions = module_info[1]
76 module = getattr(parent_module, module_name)
77 attach_modules(module, submodule_definitions, w3)
78 elif len(module_info) != 1:
79 raise ValidationError("Module definitions can only have 1 or 2 elements.")
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/web3/_utils/module.py b/web3/_utils/module.py
--- a/web3/_utils/module.py
+++ b/web3/_utils/module.py
@@ -55,9 +55,13 @@
"already has an attribute with that name"
)
- # The parent module is the ``Web3`` instance on first run of the loop
- if type(parent_module).__name__ == 'Web3':
- w3 = parent_module
+ # The parent module is the ``Web3`` instance on first run of the loop and w3 is
+ # None. Thus, set w3 to the parent_module. The import needs to happen locally
+ # due to circular import issues.
+ if w3 is None:
+ from web3 import Web3
+ if isinstance(parent_module, Web3):
+ w3 = parent_module
module_init_params = _validate_init_params_and_return_if_found(module_class)
if len(module_init_params) == 1:
|
{"golden_diff": "diff --git a/web3/_utils/module.py b/web3/_utils/module.py\n--- a/web3/_utils/module.py\n+++ b/web3/_utils/module.py\n@@ -55,9 +55,13 @@\n \"already has an attribute with that name\"\n )\n \n- # The parent module is the ``Web3`` instance on first run of the loop\n- if type(parent_module).__name__ == 'Web3':\n- w3 = parent_module\n+ # The parent module is the ``Web3`` instance on first run of the loop and w3 is\n+ # None. Thus, set w3 to the parent_module. The import needs to happen locally\n+ # due to circular import issues.\n+ if w3 is None:\n+ from web3 import Web3\n+ if isinstance(parent_module, Web3):\n+ w3 = parent_module\n \n module_init_params = _validate_init_params_and_return_if_found(module_class)\n if len(module_init_params) == 1:\n", "issue": "Web3 instantiation from subclass throws error\n* Version: 5.30.0\r\n* Python: 3.8\r\n* OS: osx\r\n* pip freeze output is intentionally omitted.\r\n\r\n### What was wrong?\r\nWe use a sub-class of Web3, for the purpose of this example, call it SubclassWeb3, as follows:\r\n\r\n```\r\nclass SubclassWeb3(Web3):\r\ndef __init__(self, ....):\r\n ... do bunch of stuff...\r\n super().__init__(*args, **kwargs)\r\n ... do bunch more stuff...\r\n```\r\n\r\nThis setup worked fine with web3 version == 5.23.1. However, switching to web3==5.30.0 breaks this, with error\r\n\r\n```\r\n[rest of callstack omitted]\r\n File \u201c[redacted]/web3/main.py\u201d, line 251, in __init__\r\n self.attach_modules(modules)\r\n File \u201c[redacted]/web3/main.py\u201d, line 340, in attach_modules\r\n _attach_modules(self, modules)\r\n File \u201c[redacted]/web3/_utils/module.py\u201d, line 68, in attach_modules\r\n setattr(parent_module, module_name, module_class(w3))\r\n File \u201c[redacted]/web3/module.py\u201d, line 93, in __init__\r\n self.codec: ABICodec = web3.codec\r\nAttributeError: \u2018NoneType\u2019 object has no attribute \u2018codec\u2019\r\n```\r\n\r\nThis used to work in older versions because the parent_module is assumed to be a Web3 object and stored accordingly - [see logic](https://github.com/ethereum/web3.py/blob/2b229d2e16792e52bb47108e665c6a8937c188a2/web3/_utils/module.py#L33-L35).\r\n\r\nCompare this to the somewhat brittle name checking that happens in 5.30.0 - [see logic](https://github.com/ethereum/web3.py/blob/ee5f0543ad91e4ba991997e35f50719ceb58135d/web3/_utils/module.py#L58-L60). This doesn't quite work because if this logic is being executed from a subclass of Web3, the name of the parent module appears as SubclassWeb3 (and not Web3).\r\n\r\n### How can it be fixed?\r\n\r\nFill this section in if you know how this could or should be fixed.\n", "before_files": [{"content": "import inspect\nfrom io import (\n UnsupportedOperation,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n Union,\n)\n\nfrom web3.exceptions import (\n ValidationError,\n)\nfrom web3.module import (\n Module,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\n\ndef _validate_init_params_and_return_if_found(module_class: Any) -> List[str]:\n init_params_raw = list(inspect.signature(module_class.__init__).parameters)\n module_init_params = [\n param for param in init_params_raw if param not in ['self', 'args', 'kwargs']\n ]\n\n if len(module_init_params) > 1:\n raise UnsupportedOperation(\n \"A module class may accept a single `Web3` instance as the first argument of its \"\n f\"__init__() method. 
More than one argument found for {module_class.__name__}: \"\n f\"{module_init_params}\"\n )\n\n return module_init_params\n\n\ndef attach_modules(\n parent_module: Union[\"Web3\", \"Module\"],\n module_definitions: Dict[str, Any],\n w3: Optional[Union[\"Web3\", \"Module\"]] = None\n) -> None:\n for module_name, module_info in module_definitions.items():\n module_info_is_list_like = isinstance(module_info, Sequence)\n\n module_class = module_info[0] if module_info_is_list_like else module_info\n\n if hasattr(parent_module, module_name):\n raise AttributeError(\n f\"Cannot set {parent_module} module named '{module_name}'. The web3 object \"\n \"already has an attribute with that name\"\n )\n\n # The parent module is the ``Web3`` instance on first run of the loop\n if type(parent_module).__name__ == 'Web3':\n w3 = parent_module\n\n module_init_params = _validate_init_params_and_return_if_found(module_class)\n if len(module_init_params) == 1:\n # Modules that need access to the ``Web3`` instance may accept the instance as the first\n # arg in their ``__init__()`` method. This is the case for any module that inherits from\n # ``web3.module.Module``.\n # e.g. def __init__(self, w3):\n setattr(parent_module, module_name, module_class(w3))\n else:\n # Modules need not take in a ``Web3`` instance in their ``__init__()`` if not needed\n setattr(parent_module, module_name, module_class())\n\n if module_info_is_list_like:\n if len(module_info) == 2:\n submodule_definitions = module_info[1]\n module = getattr(parent_module, module_name)\n attach_modules(module, submodule_definitions, w3)\n elif len(module_info) != 1:\n raise ValidationError(\"Module definitions can only have 1 or 2 elements.\")\n", "path": "web3/_utils/module.py"}], "after_files": [{"content": "import inspect\nfrom io import (\n UnsupportedOperation,\n)\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n List,\n Optional,\n Sequence,\n Union,\n)\n\nfrom web3.exceptions import (\n ValidationError,\n)\nfrom web3.module import (\n Module,\n)\n\nif TYPE_CHECKING:\n from web3 import Web3 # noqa: F401\n\n\ndef _validate_init_params_and_return_if_found(module_class: Any) -> List[str]:\n init_params_raw = list(inspect.signature(module_class.__init__).parameters)\n module_init_params = [\n param for param in init_params_raw if param not in ['self', 'args', 'kwargs']\n ]\n\n if len(module_init_params) > 1:\n raise UnsupportedOperation(\n \"A module class may accept a single `Web3` instance as the first argument of its \"\n f\"__init__() method. More than one argument found for {module_class.__name__}: \"\n f\"{module_init_params}\"\n )\n\n return module_init_params\n\n\ndef attach_modules(\n parent_module: Union[\"Web3\", \"Module\"],\n module_definitions: Dict[str, Any],\n w3: Optional[Union[\"Web3\", \"Module\"]] = None\n) -> None:\n for module_name, module_info in module_definitions.items():\n module_info_is_list_like = isinstance(module_info, Sequence)\n\n module_class = module_info[0] if module_info_is_list_like else module_info\n\n if hasattr(parent_module, module_name):\n raise AttributeError(\n f\"Cannot set {parent_module} module named '{module_name}'. The web3 object \"\n \"already has an attribute with that name\"\n )\n\n # The parent module is the ``Web3`` instance on first run of the loop and w3 is\n # None. Thus, set w3 to the parent_module. 
The import needs to happen locally\n # due to circular import issues.\n if w3 is None:\n from web3 import Web3\n if isinstance(parent_module, Web3):\n w3 = parent_module\n\n module_init_params = _validate_init_params_and_return_if_found(module_class)\n if len(module_init_params) == 1:\n # Modules that need access to the ``Web3`` instance may accept the instance as the first\n # arg in their ``__init__()`` method. This is the case for any module that inherits from\n # ``web3.module.Module``.\n # e.g. def __init__(self, w3):\n setattr(parent_module, module_name, module_class(w3))\n else:\n # Modules need not take in a ``Web3`` instance in their ``__init__()`` if not needed\n setattr(parent_module, module_name, module_class())\n\n if module_info_is_list_like:\n if len(module_info) == 2:\n submodule_definitions = module_info[1]\n module = getattr(parent_module, module_name)\n attach_modules(module, submodule_definitions, w3)\n elif len(module_info) != 1:\n raise ValidationError(\"Module definitions can only have 1 or 2 elements.\")\n", "path": "web3/_utils/module.py"}]}
| 1,574 | 221 |
gh_patches_debug_1968
|
rasdani/github-patches
|
git_diff
|
hylang__hy-358
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow macros to return None
```
(defmacro foo [])
(foo)
```
Will break as macros are not handling the NoneType yet
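A sketch of one way to handle this, reusing the `_wrappers` / `_wrap_value` machinery from `hy/macros.py` shown below: register a wrapper for `type(None)` so that a macro returning `None` expands to the `None` symbol instead of leaking a raw Python `None` into the compiler. (The import assumes `hy` is installed; the surrounding dict is abbreviated.)
```
from hy.models.symbol import HySymbol

_wrappers = {
    # ... existing entries (HyInteger, HyFloat, HyString, ...) ...
    type(None): lambda _: HySymbol("None"),
}

def _wrap_value(x):
    wrapper = _wrappers.get(type(x))
    return x if wrapper is None else wrapper(x)

print(repr(_wrap_value(None)))   # a HySymbol 'None', not a bare Python None
```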
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/macros.py`
Content:
```
1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 from hy.models.expression import HyExpression
22 from hy.models.string import HyString
23 from hy.models.symbol import HySymbol
24 from hy.models.list import HyList
25 from hy.models.integer import HyInteger
26 from hy.models.float import HyFloat
27 from hy.models.complex import HyComplex
28 from hy.models.dict import HyDict
29 from hy._compat import str_type
30
31 from collections import defaultdict
32
33
34 CORE_MACROS = [
35 "hy.core.bootstrap",
36 ]
37
38 EXTRA_MACROS = [
39 "hy.core.macros",
40 ]
41
42 _hy_macros = defaultdict(dict)
43
44
45 def macro(name):
46 """Decorator to define a macro called `name`.
47
48 This stores the macro `name` in the namespace for the module where it is
49 defined.
50
51 If the module where it is defined is in `hy.core`, then the macro is stored
52 in the default `None` namespace.
53
54 This function is called from the `defmacro` special form in the compiler.
55
56 """
57 def _(fn):
58 module_name = fn.__module__
59 if module_name.startswith("hy.core"):
60 module_name = None
61 _hy_macros[module_name][name] = fn
62 return fn
63 return _
64
65
66 def require(source_module, target_module):
67 """Load the macros from `source_module` in the namespace of
68 `target_module`.
69
70 This function is called from the `require` special form in the compiler.
71
72 """
73 macros = _hy_macros[source_module]
74 refs = _hy_macros[target_module]
75 for name, macro in macros.items():
76 refs[name] = macro
77
78
79 # type -> wrapping function mapping for _wrap_value
80 _wrappers = {
81 int: HyInteger,
82 bool: lambda x: HySymbol("True") if x else HySymbol("False"),
83 float: HyFloat,
84 complex: HyComplex,
85 str_type: HyString,
86 dict: lambda d: HyDict(_wrap_value(x) for x in sum(d.items(), ())),
87 list: lambda l: HyList(_wrap_value(x) for x in l)
88 }
89
90
91 def _wrap_value(x):
92 """Wrap `x` into the corresponding Hy type.
93
94 This allows a macro to return an unquoted expression transparently.
95
96 """
97 wrapper = _wrappers.get(type(x))
98 if wrapper is None:
99 return x
100 else:
101 return wrapper(x)
102
103
104 def load_macros(module_name):
105 """Load the hy builtin macros for module `module_name`.
106
107 Modules from `hy.core` can only use the macros from CORE_MACROS.
108 Other modules get the macros from CORE_MACROS and EXTRA_MACROS.
109
110 """
111
112 def _import(module, module_name=module_name):
113 "__import__ a module, avoiding recursions"
114 if module != module_name:
115 __import__(module)
116
117 for module in CORE_MACROS:
118 _import(module)
119
120 if module_name.startswith("hy.core"):
121 return
122
123 for module in EXTRA_MACROS:
124 _import(module)
125
126
127 def macroexpand(tree, module_name):
128 """Expand the toplevel macros for the `tree`.
129
130 Load the macros from the given `module_name`, then expand the (top-level)
131 macros in `tree` until it stops changing.
132
133 """
134 load_macros(module_name)
135 old = None
136 while old != tree:
137 old = tree
138 tree = macroexpand_1(tree, module_name)
139 return tree
140
141
142 def macroexpand_1(tree, module_name):
143 """Expand the toplevel macro from `tree` once, in the context of
144 `module_name`."""
145 if isinstance(tree, HyExpression):
146 if tree == []:
147 return tree
148
149 fn = tree[0]
150 if fn in ("quote", "quasiquote"):
151 return tree
152 ntree = HyExpression(tree[:])
153 ntree.replace(tree)
154
155 if isinstance(fn, HyString):
156 m = _hy_macros[module_name].get(fn)
157 if m is None:
158 m = _hy_macros[None].get(fn)
159 if m is not None:
160 obj = _wrap_value(m(*ntree[1:]))
161 obj.replace(tree)
162 return obj
163
164 return ntree
165 return tree
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/hy/macros.py b/hy/macros.py
--- a/hy/macros.py
+++ b/hy/macros.py
@@ -84,7 +84,8 @@
complex: HyComplex,
str_type: HyString,
dict: lambda d: HyDict(_wrap_value(x) for x in sum(d.items(), ())),
- list: lambda l: HyList(_wrap_value(x) for x in l)
+ list: lambda l: HyList(_wrap_value(x) for x in l),
+ type(None): lambda foo: HySymbol("None"),
}
|
{"golden_diff": "diff --git a/hy/macros.py b/hy/macros.py\n--- a/hy/macros.py\n+++ b/hy/macros.py\n@@ -84,7 +84,8 @@\n complex: HyComplex,\n str_type: HyString,\n dict: lambda d: HyDict(_wrap_value(x) for x in sum(d.items(), ())),\n- list: lambda l: HyList(_wrap_value(x) for x in l)\n+ list: lambda l: HyList(_wrap_value(x) for x in l),\n+ type(None): lambda foo: HySymbol(\"None\"),\n }\n", "issue": "Allow macros to return None\n```\n (defmacro foo [])\n (foo) \n```\n\nWill break as macros are not handling the NoneType yet\n\n", "before_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\nfrom hy.models.integer import HyInteger\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.dict import HyDict\nfrom hy._compat import str_type\n\nfrom collections import defaultdict\n\n\nCORE_MACROS = [\n \"hy.core.bootstrap\",\n]\n\nEXTRA_MACROS = [\n \"hy.core.macros\",\n]\n\n_hy_macros = defaultdict(dict)\n\n\ndef macro(name):\n \"\"\"Decorator to define a macro called `name`.\n\n This stores the macro `name` in the namespace for the module where it is\n defined.\n\n If the module where it is defined is in `hy.core`, then the macro is stored\n in the default `None` namespace.\n\n This function is called from the `defmacro` special form in the compiler.\n\n \"\"\"\n def _(fn):\n module_name = fn.__module__\n if module_name.startswith(\"hy.core\"):\n module_name = None\n _hy_macros[module_name][name] = fn\n return fn\n return _\n\n\ndef require(source_module, target_module):\n \"\"\"Load the macros from `source_module` in the namespace of\n `target_module`.\n\n This function is called from the `require` special form in the compiler.\n\n \"\"\"\n macros = _hy_macros[source_module]\n refs = _hy_macros[target_module]\n for name, macro in macros.items():\n refs[name] = macro\n\n\n# type -> wrapping function mapping for _wrap_value\n_wrappers = {\n int: HyInteger,\n bool: lambda x: HySymbol(\"True\") if x else HySymbol(\"False\"),\n float: HyFloat,\n complex: HyComplex,\n str_type: HyString,\n dict: lambda d: HyDict(_wrap_value(x) for x in sum(d.items(), ())),\n list: lambda l: HyList(_wrap_value(x) for x in l)\n}\n\n\ndef _wrap_value(x):\n \"\"\"Wrap `x` into the corresponding Hy type.\n\n This allows a macro to return an 
unquoted expression transparently.\n\n \"\"\"\n wrapper = _wrappers.get(type(x))\n if wrapper is None:\n return x\n else:\n return wrapper(x)\n\n\ndef load_macros(module_name):\n \"\"\"Load the hy builtin macros for module `module_name`.\n\n Modules from `hy.core` can only use the macros from CORE_MACROS.\n Other modules get the macros from CORE_MACROS and EXTRA_MACROS.\n\n \"\"\"\n\n def _import(module, module_name=module_name):\n \"__import__ a module, avoiding recursions\"\n if module != module_name:\n __import__(module)\n\n for module in CORE_MACROS:\n _import(module)\n\n if module_name.startswith(\"hy.core\"):\n return\n\n for module in EXTRA_MACROS:\n _import(module)\n\n\ndef macroexpand(tree, module_name):\n \"\"\"Expand the toplevel macros for the `tree`.\n\n Load the macros from the given `module_name`, then expand the (top-level)\n macros in `tree` until it stops changing.\n\n \"\"\"\n load_macros(module_name)\n old = None\n while old != tree:\n old = tree\n tree = macroexpand_1(tree, module_name)\n return tree\n\n\ndef macroexpand_1(tree, module_name):\n \"\"\"Expand the toplevel macro from `tree` once, in the context of\n `module_name`.\"\"\"\n if isinstance(tree, HyExpression):\n if tree == []:\n return tree\n\n fn = tree[0]\n if fn in (\"quote\", \"quasiquote\"):\n return tree\n ntree = HyExpression(tree[:])\n ntree.replace(tree)\n\n if isinstance(fn, HyString):\n m = _hy_macros[module_name].get(fn)\n if m is None:\n m = _hy_macros[None].get(fn)\n if m is not None:\n obj = _wrap_value(m(*ntree[1:]))\n obj.replace(tree)\n return obj\n\n return ntree\n return tree\n", "path": "hy/macros.py"}], "after_files": [{"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.models.expression import HyExpression\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\nfrom hy.models.list import HyList\nfrom hy.models.integer import HyInteger\nfrom hy.models.float import HyFloat\nfrom hy.models.complex import HyComplex\nfrom hy.models.dict import HyDict\nfrom hy._compat import str_type\n\nfrom collections import defaultdict\n\n\nCORE_MACROS = [\n \"hy.core.bootstrap\",\n]\n\nEXTRA_MACROS = [\n \"hy.core.macros\",\n]\n\n_hy_macros = defaultdict(dict)\n\n\ndef macro(name):\n \"\"\"Decorator to define a macro called `name`.\n\n This stores the macro `name` in the namespace for the module where it is\n defined.\n\n If the module where it is defined is in `hy.core`, then the macro is stored\n in the default `None` namespace.\n\n This function is called from the `defmacro` special form in the compiler.\n\n \"\"\"\n def _(fn):\n module_name = fn.__module__\n if module_name.startswith(\"hy.core\"):\n module_name = None\n _hy_macros[module_name][name] = fn\n return fn\n return _\n\n\ndef require(source_module, target_module):\n \"\"\"Load the macros from `source_module` in the namespace of\n `target_module`.\n\n This function is called from the `require` special form in the compiler.\n\n \"\"\"\n macros = _hy_macros[source_module]\n refs = _hy_macros[target_module]\n for name, macro in macros.items():\n refs[name] = macro\n\n\n# type -> wrapping function mapping for _wrap_value\n_wrappers = {\n int: HyInteger,\n bool: lambda x: HySymbol(\"True\") if x else HySymbol(\"False\"),\n float: HyFloat,\n complex: HyComplex,\n str_type: HyString,\n dict: lambda d: HyDict(_wrap_value(x) for x in sum(d.items(), ())),\n list: lambda l: HyList(_wrap_value(x) for x in l),\n type(None): lambda foo: HySymbol(\"None\"),\n}\n\n\ndef _wrap_value(x):\n \"\"\"Wrap `x` into the corresponding Hy type.\n\n This allows a macro to return an unquoted expression transparently.\n\n \"\"\"\n wrapper = _wrappers.get(type(x))\n if wrapper is None:\n return x\n else:\n return wrapper(x)\n\n\ndef load_macros(module_name):\n \"\"\"Load the hy builtin macros for module `module_name`.\n\n Modules from `hy.core` can only use the macros from CORE_MACROS.\n Other modules get the macros from CORE_MACROS and EXTRA_MACROS.\n\n \"\"\"\n\n def _import(module, module_name=module_name):\n \"__import__ a module, avoiding recursions\"\n if module != module_name:\n __import__(module)\n\n for module in CORE_MACROS:\n _import(module)\n\n if module_name.startswith(\"hy.core\"):\n return\n\n for module in EXTRA_MACROS:\n _import(module)\n\n\ndef macroexpand(tree, module_name):\n \"\"\"Expand the toplevel macros for the `tree`.\n\n Load the macros from the given `module_name`, then expand the (top-level)\n macros in `tree` until it stops changing.\n\n \"\"\"\n load_macros(module_name)\n old = None\n while old != tree:\n old = tree\n tree = macroexpand_1(tree, module_name)\n return tree\n\n\ndef macroexpand_1(tree, module_name):\n \"\"\"Expand the toplevel macro from `tree` once, in the context of\n `module_name`.\"\"\"\n if isinstance(tree, HyExpression):\n if tree == []:\n return tree\n\n fn = tree[0]\n if fn in (\"quote\", \"quasiquote\"):\n return tree\n ntree = HyExpression(tree[:])\n ntree.replace(tree)\n\n if 
isinstance(fn, HyString):\n m = _hy_macros[module_name].get(fn)\n if m is None:\n m = _hy_macros[None].get(fn)\n if m is not None:\n obj = _wrap_value(m(*ntree[1:]))\n obj.replace(tree)\n return obj\n\n return ntree\n return tree\n", "path": "hy/macros.py"}]}
| 1,829 | 131 |
gh_patches_debug_13579
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-2234
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong behavior of SerialIterator if dataset is numpy array.
In `serial_iterator.py` lines [66](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L66) and [68](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L68) contain `+=` to extend lists.
Unfortunately, this does not crash for `numpy` arrays but produces undesired behavior. The dataset itself gets modified (see for `6` in the second example):
```
import numpy as np
from chainer.iterators.serial_iterator import SerialIterator
dataset = [1, 2, 3, 4, 5]
it = SerialIterator(dataset, batch_size=2, shuffle=False)
for _ in range(6):
example = it.next()
print(example)
```
```
[1, 2]
[3, 4]
[5, 1]
[2, 3]
[4, 5]
[1, 2]
```
```
dataset = np.asarray([1, 2, 3, 4, 5])
it = SerialIterator(dataset, batch_size=2, shuffle=False)
for _ in range(6):
example = it.next()
print(example)
```
```
[1 2]
[3 4]
[6]
[2 3]
[4 6]
[1 2]
```
When the two lines get changed to
```
batch.extend(list(self.dataset[:rest]))
```
and
```
batch.extend([self.dataset[index] for index in self._order[:rest]])
```
a useful error is raised.
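For illustration, a self-contained sketch that reproduces the silent mutation outside the iterator and shows the error the suggested `.extend()` change would surface (it mirrors the `self._order is None` branch of `__next__`):
```
import numpy as np

dataset = np.asarray([1, 2, 3, 4, 5])
batch = dataset[4:6]            # array([5]) -- a view into the dataset
batch += list(dataset[:1])      # current `+=` path: element-wise add on the view
print(dataset)                  # [1 2 3 4 6] -- the dataset itself was modified

batch = dataset[4:6]
try:
    batch.extend(dataset[:1])   # with the suggested change: ndarray has no .extend()
except AttributeError as err:
    print(err)                  # a useful error instead of silent corruption
```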
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/iterators/serial_iterator.py`
Content:
```
1 from __future__ import division
2
3 import numpy
4
5 from chainer.dataset import iterator
6
7
8 class SerialIterator(iterator.Iterator):
9
10 """Dataset iterator that serially reads the examples.
11
12 This is a simple implementation of :class:`~chainer.dataset.Iterator`
13 that just visits each example in either the order of indexes or a shuffled
14 order.
15
16 To avoid unintentional performance degradation, the ``shuffle`` option is
17 set to ``True`` by default. For validation, it is better to set it to
18 ``False`` when the underlying dataset supports fast slicing. If the
19 order of examples has an important meaning and the updater depends on the
20 original order, this option should be set to ``False``.
21
22 Args:
23 dataset: Dataset to iterate.
24 batch_size (int): Number of examples within each batch.
25 repeat (bool): If ``True``, it infinitely loops over the dataset.
26 Otherwise, it stops iteration at the end of the first epoch.
27 shuffle (bool): If ``True``, the order of examples is shuffled at the
28 beginning of each epoch. Otherwise, examples are extracted in the
29 order of indexes.
30
31 """
32
33 def __init__(self, dataset, batch_size, repeat=True, shuffle=True):
34 self.dataset = dataset
35 self.batch_size = batch_size
36 self._repeat = repeat
37 if shuffle:
38 self._order = numpy.random.permutation(len(dataset))
39 else:
40 self._order = None
41
42 self.current_position = 0
43 self.epoch = 0
44 self.is_new_epoch = False
45
46 def __next__(self):
47 if not self._repeat and self.epoch > 0:
48 raise StopIteration
49
50 i = self.current_position
51 i_end = i + self.batch_size
52 N = len(self.dataset)
53
54 if self._order is None:
55 batch = self.dataset[i:i_end]
56 else:
57 batch = [self.dataset[index] for index in self._order[i:i_end]]
58
59 if i_end >= N:
60 if self._repeat:
61 rest = i_end - N
62 if self._order is not None:
63 numpy.random.shuffle(self._order)
64 if rest > 0:
65 if self._order is None:
66 batch += list(self.dataset[:rest])
67 else:
68 batch += [self.dataset[index]
69 for index in self._order[:rest]]
70 self.current_position = rest
71 else:
72 self.current_position = N
73
74 self.epoch += 1
75 self.is_new_epoch = True
76 else:
77 self.is_new_epoch = False
78 self.current_position = i_end
79
80 return batch
81
82 next = __next__
83
84 @property
85 def epoch_detail(self):
86 return self.epoch + self.current_position / len(self.dataset)
87
88 def serialize(self, serializer):
89 self.current_position = serializer('current_position',
90 self.current_position)
91 self.epoch = serializer('epoch', self.epoch)
92 self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
93 if self._order is not None:
94 serializer('_order', self._order)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/chainer/iterators/serial_iterator.py b/chainer/iterators/serial_iterator.py
--- a/chainer/iterators/serial_iterator.py
+++ b/chainer/iterators/serial_iterator.py
@@ -63,10 +63,10 @@
numpy.random.shuffle(self._order)
if rest > 0:
if self._order is None:
- batch += list(self.dataset[:rest])
+ batch.extend(self.dataset[:rest])
else:
- batch += [self.dataset[index]
- for index in self._order[:rest]]
+ batch.extend([self.dataset[index]
+ for index in self._order[:rest]])
self.current_position = rest
else:
self.current_position = N
|
{"golden_diff": "diff --git a/chainer/iterators/serial_iterator.py b/chainer/iterators/serial_iterator.py\n--- a/chainer/iterators/serial_iterator.py\n+++ b/chainer/iterators/serial_iterator.py\n@@ -63,10 +63,10 @@\n numpy.random.shuffle(self._order)\n if rest > 0:\n if self._order is None:\n- batch += list(self.dataset[:rest])\n+ batch.extend(self.dataset[:rest])\n else:\n- batch += [self.dataset[index]\n- for index in self._order[:rest]]\n+ batch.extend([self.dataset[index]\n+ for index in self._order[:rest]])\n self.current_position = rest\n else:\n self.current_position = N\n", "issue": "Wrong behavior of SerialIterator if dataset is numpy array.\nIn `serial_iterator.py` lines [66](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L66) and [68](https://github.com/pfnet/chainer/blob/master/chainer/iterators/serial_iterator.py#L68) contain `+=` to extend lists.\r\n\r\nUnfortunately, this does not crash for `numpy` arrays but produces undesired behavior. The dataset itself gets modified (see for `6` in the second example):\r\n\r\n```\r\nimport numpy as np\r\nfrom chainer.iterators.serial_iterator import SerialIterator\r\n\r\ndataset = [1, 2, 3, 4, 5]\r\nit = SerialIterator(dataset, batch_size=2, shuffle=False)\r\n\r\nfor _ in range(6):\r\n example = it.next()\r\n print(example)\r\n```\r\n\r\n```\r\n[1, 2]\r\n[3, 4]\r\n[5, 1]\r\n[2, 3]\r\n[4, 5]\r\n[1, 2]\r\n```\r\n\r\n```\r\ndataset = np.asarray([1, 2, 3, 4, 5])\r\nit = SerialIterator(dataset, batch_size=2, shuffle=False)\r\n\r\nfor _ in range(6):\r\n example = it.next()\r\n print(example)\r\n```\r\n\r\n```\r\n[1 2]\r\n[3 4]\r\n[6]\r\n[2 3]\r\n[4 6]\r\n[1 2]\r\n```\r\n\r\nWhen the two lines get changed to\r\n```\r\nbatch.extend(list(self.dataset[:rest]))\r\n```\r\n\r\nand\r\n```\r\nbatch.extend([self.dataset[index] for index in self._order[:rest]])\r\n```\r\n\r\na useful error is raised.\n", "before_files": [{"content": "from __future__ import division\n\nimport numpy\n\nfrom chainer.dataset import iterator\n\n\nclass SerialIterator(iterator.Iterator):\n\n \"\"\"Dataset iterator that serially reads the examples.\n\n This is a simple implementation of :class:`~chainer.dataset.Iterator`\n that just visits each example in either the order of indexes or a shuffled\n order.\n\n To avoid unintentional performance degradation, the ``shuffle`` option is\n set to ``True`` by default. For validation, it is better to set it to\n ``False`` when the underlying dataset supports fast slicing. If the\n order of examples has an important meaning and the updater depends on the\n original order, this option should be set to ``False``.\n\n Args:\n dataset: Dataset to iterate.\n batch_size (int): Number of examples within each batch.\n repeat (bool): If ``True``, it infinitely loops over the dataset.\n Otherwise, it stops iteration at the end of the first epoch.\n shuffle (bool): If ``True``, the order of examples is shuffled at the\n beginning of each epoch. 
Otherwise, examples are extracted in the\n order of indexes.\n\n \"\"\"\n\n def __init__(self, dataset, batch_size, repeat=True, shuffle=True):\n self.dataset = dataset\n self.batch_size = batch_size\n self._repeat = repeat\n if shuffle:\n self._order = numpy.random.permutation(len(dataset))\n else:\n self._order = None\n\n self.current_position = 0\n self.epoch = 0\n self.is_new_epoch = False\n\n def __next__(self):\n if not self._repeat and self.epoch > 0:\n raise StopIteration\n\n i = self.current_position\n i_end = i + self.batch_size\n N = len(self.dataset)\n\n if self._order is None:\n batch = self.dataset[i:i_end]\n else:\n batch = [self.dataset[index] for index in self._order[i:i_end]]\n\n if i_end >= N:\n if self._repeat:\n rest = i_end - N\n if self._order is not None:\n numpy.random.shuffle(self._order)\n if rest > 0:\n if self._order is None:\n batch += list(self.dataset[:rest])\n else:\n batch += [self.dataset[index]\n for index in self._order[:rest]]\n self.current_position = rest\n else:\n self.current_position = N\n\n self.epoch += 1\n self.is_new_epoch = True\n else:\n self.is_new_epoch = False\n self.current_position = i_end\n\n return batch\n\n next = __next__\n\n @property\n def epoch_detail(self):\n return self.epoch + self.current_position / len(self.dataset)\n\n def serialize(self, serializer):\n self.current_position = serializer('current_position',\n self.current_position)\n self.epoch = serializer('epoch', self.epoch)\n self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)\n if self._order is not None:\n serializer('_order', self._order)\n", "path": "chainer/iterators/serial_iterator.py"}], "after_files": [{"content": "from __future__ import division\n\nimport numpy\n\nfrom chainer.dataset import iterator\n\n\nclass SerialIterator(iterator.Iterator):\n\n \"\"\"Dataset iterator that serially reads the examples.\n\n This is a simple implementation of :class:`~chainer.dataset.Iterator`\n that just visits each example in either the order of indexes or a shuffled\n order.\n\n To avoid unintentional performance degradation, the ``shuffle`` option is\n set to ``True`` by default. For validation, it is better to set it to\n ``False`` when the underlying dataset supports fast slicing. If the\n order of examples has an important meaning and the updater depends on the\n original order, this option should be set to ``False``.\n\n Args:\n dataset: Dataset to iterate.\n batch_size (int): Number of examples within each batch.\n repeat (bool): If ``True``, it infinitely loops over the dataset.\n Otherwise, it stops iteration at the end of the first epoch.\n shuffle (bool): If ``True``, the order of examples is shuffled at the\n beginning of each epoch. 
Otherwise, examples are extracted in the\n order of indexes.\n\n \"\"\"\n\n def __init__(self, dataset, batch_size, repeat=True, shuffle=True):\n self.dataset = dataset\n self.batch_size = batch_size\n self._repeat = repeat\n if shuffle:\n self._order = numpy.random.permutation(len(dataset))\n else:\n self._order = None\n\n self.current_position = 0\n self.epoch = 0\n self.is_new_epoch = False\n\n def __next__(self):\n if not self._repeat and self.epoch > 0:\n raise StopIteration\n\n i = self.current_position\n i_end = i + self.batch_size\n N = len(self.dataset)\n\n if self._order is None:\n batch = self.dataset[i:i_end]\n else:\n batch = [self.dataset[index] for index in self._order[i:i_end]]\n\n if i_end >= N:\n if self._repeat:\n rest = i_end - N\n if self._order is not None:\n numpy.random.shuffle(self._order)\n if rest > 0:\n if self._order is None:\n batch.extend(self.dataset[:rest])\n else:\n batch.extend([self.dataset[index]\n for index in self._order[:rest]])\n self.current_position = rest\n else:\n self.current_position = N\n\n self.epoch += 1\n self.is_new_epoch = True\n else:\n self.is_new_epoch = False\n self.current_position = i_end\n\n return batch\n\n next = __next__\n\n @property\n def epoch_detail(self):\n return self.epoch + self.current_position / len(self.dataset)\n\n def serialize(self, serializer):\n self.current_position = serializer('current_position',\n self.current_position)\n self.epoch = serializer('epoch', self.epoch)\n self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)\n if self._order is not None:\n serializer('_order', self._order)\n", "path": "chainer/iterators/serial_iterator.py"}]}
| 1,474 | 164 |
gh_patches_debug_39730
|
rasdani/github-patches
|
git_diff
|
carpentries__amy-351
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deal with breadcrumbs
As Greg mentioned in #296, he hadn't liked the way breadcrumbs repeat current page title (or header) in their last element, for example: page "Event 2015-05-25-something" will have breadcrumbs "Amy / All events / Event 2015-05-25-something".
I took a look at other big websites and how they do breadcrumbs and @gvwilson was right. They don't repeat current site at the end of breadcrumbs.
This means we'd only have breadcrumbs at most 3 links long: Amy / All \* / \* [ / action ], for example:
Was:
- Amy / All events / Event 2015-05-25-something / Edit
Will be:
- Amy / All events / Event 2015-05-25-something
But this does not bug me. In case of `All *` pages, we can just as well drop breadcrumbs (because they'd look like "Amy / ").
So I don't really know what to do:
1. Display breadcrumbs on the same pages as now, but hide the last item.
2. Display breadcrumbs on the pages that would have more than 1 breadcrumbs.
3. Drop breadcrumbs completely.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `workshops/templatetags/breadcrumbs.py`
Content:
```
1 import logging
2
3 from django import template
4 from django.core.urlresolvers import reverse
5 from django.utils.encoding import force_text
6 from django.utils.html import escape
7
8 register = template.Library()
9 _LOG = logging.getLogger(__name__)
10
11
12 @register.simple_tag
13 def breadcrumb(title, url):
14 '''
15 Create a simple anchor with provided text and already-resolved URL.
16 Example usage:
17 {% breadcrumb "Title of breadcrumb" resolved_url %}
18 '''
19 return create_crumb(title, url)
20
21
22 @register.simple_tag
23 def breadcrumb_url(title, url_name):
24 '''
25 Add non-active breadcrumb with specified title. Second argument should be
26 a string name of URL that needs to be resolved.
27 Example usage:
28 {% breadcrumb_url "Title of breadcrumb" url_name %}
29 '''
30 url = reverse(url_name)
31 return create_crumb(title, url)
32
33
34 @register.simple_tag
35 def breadcrumb_active(title):
36 '''
37 Add active breadcrumb, but not in an anchor.
38 Example usage:
39 {% breadcrumb_active "Title of breadcrumb" %}
40 '''
41 return create_crumb(str(title), url=None, active=True)
42
43
44 @register.simple_tag
45 def breadcrumb_index_all_objects(model):
46 '''
47 Add breadcrumb linking to the listing of all objects of specific type.
48 This tag accepts both models or model instances as an argument.
49 Example usage:
50 {% breadcrumb_index_all_objects model %}
51 {% breadcrumb_index_all_objects person %}
52 '''
53 plural = force_text(model._meta.verbose_name_plural)
54 title = 'All {}'.format(plural)
55 url_name = 'all_{}'.format(plural)
56 url = reverse(url_name)
57 return create_crumb(title, url)
58
59
60 @register.simple_tag
61 def breadcrumb_edit_object(obj):
62 '''
63 Add an active breadcrumb with the title "Edit MODEL_NAME".
64 This tag accepts model instance as an argument.
65 Example usage:
66 {% breadcrumb_edit_object person %}
67 '''
68 singular = force_text(obj._meta.verbose_name)
69 title = 'Edit {}'.format(singular)
70 return create_crumb(title, url=None, active=True)
71
72
73 @register.simple_tag
74 def breadcrumb_new_object(model):
75 '''
76 Add an active breadcrumb with the title "Add new MODEL_NAME".
77 This tag accepts model class as an argument.
78 Example usage:
79 {% breadcrumb_new_object person %}
80 '''
81 singular = force_text(model._meta.verbose_name)
82 title = 'Add new {}'.format(singular)
83 return create_crumb(title, url=None, active=True)
84
85
86 @register.simple_tag
87 def breadcrumb_object(obj):
88 '''
89 Add non-active breadcrumb with the title "Add new MODEL_NAME".
90 This tag accepts model instance as an argument.
91 Example usage:
92 {% breadcrumb_object person %}
93 '''
94 title = str(obj)
95 url = obj.get_absolute_url()
96 return create_crumb(title, url, active=False)
97
98
99 @register.simple_tag
100 def breadcrumb_main_page():
101 '''
102 Special case of ``breadcrumb_url``. In all templates there's always a link
103 to the main page so I wanted to save everyone thinking & writing by
104 introducing this helper tag.
105 Example usage:
106 {% breadcrumb_main_page %}
107 '''
108 title = 'Amy'
109 url = reverse('index')
110 return create_crumb(title, url)
111
112
113 def create_crumb(title, url=None, active=False):
114 '''
115 Helper function that creates breadcrumb.
116 '''
117 active_str = ''
118 if active:
119 active_str = ' class="active"'
120
121 title = escape(title)
122 inner_str = title
123 if url:
124 inner_str = '<a href="{0}">{1}</a>'.format(url, title)
125
126 crumb = '<li{0}>{1}</li>'.format(active_str, inner_str)
127
128 return crumb
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/workshops/templatetags/breadcrumbs.py b/workshops/templatetags/breadcrumbs.py
deleted file mode 100644
--- a/workshops/templatetags/breadcrumbs.py
+++ /dev/null
@@ -1,128 +0,0 @@
-import logging
-
-from django import template
-from django.core.urlresolvers import reverse
-from django.utils.encoding import force_text
-from django.utils.html import escape
-
-register = template.Library()
-_LOG = logging.getLogger(__name__)
-
-
[email protected]_tag
-def breadcrumb(title, url):
- '''
- Create a simple anchor with provided text and already-resolved URL.
- Example usage:
- {% breadcrumb "Title of breadcrumb" resolved_url %}
- '''
- return create_crumb(title, url)
-
-
[email protected]_tag
-def breadcrumb_url(title, url_name):
- '''
- Add non-active breadcrumb with specified title. Second argument should be
- a string name of URL that needs to be resolved.
- Example usage:
- {% breadcrumb_url "Title of breadcrumb" url_name %}
- '''
- url = reverse(url_name)
- return create_crumb(title, url)
-
-
[email protected]_tag
-def breadcrumb_active(title):
- '''
- Add active breadcrumb, but not in an anchor.
- Example usage:
- {% breadcrumb_active "Title of breadcrumb" %}
- '''
- return create_crumb(str(title), url=None, active=True)
-
-
[email protected]_tag
-def breadcrumb_index_all_objects(model):
- '''
- Add breadcrumb linking to the listing of all objects of specific type.
- This tag accepts both models or model instances as an argument.
- Example usage:
- {% breadcrumb_index_all_objects model %}
- {% breadcrumb_index_all_objects person %}
- '''
- plural = force_text(model._meta.verbose_name_plural)
- title = 'All {}'.format(plural)
- url_name = 'all_{}'.format(plural)
- url = reverse(url_name)
- return create_crumb(title, url)
-
-
[email protected]_tag
-def breadcrumb_edit_object(obj):
- '''
- Add an active breadcrumb with the title "Edit MODEL_NAME".
- This tag accepts model instance as an argument.
- Example usage:
- {% breadcrumb_edit_object person %}
- '''
- singular = force_text(obj._meta.verbose_name)
- title = 'Edit {}'.format(singular)
- return create_crumb(title, url=None, active=True)
-
-
[email protected]_tag
-def breadcrumb_new_object(model):
- '''
- Add an active breadcrumb with the title "Add new MODEL_NAME".
- This tag accepts model class as an argument.
- Example usage:
- {% breadcrumb_new_object person %}
- '''
- singular = force_text(model._meta.verbose_name)
- title = 'Add new {}'.format(singular)
- return create_crumb(title, url=None, active=True)
-
-
[email protected]_tag
-def breadcrumb_object(obj):
- '''
- Add non-active breadcrumb with the title "Add new MODEL_NAME".
- This tag accepts model instance as an argument.
- Example usage:
- {% breadcrumb_object person %}
- '''
- title = str(obj)
- url = obj.get_absolute_url()
- return create_crumb(title, url, active=False)
-
-
[email protected]_tag
-def breadcrumb_main_page():
- '''
- Special case of ``breadcrumb_url``. In all templates there's always a link
- to the main page so I wanted to save everyone thinking & writing by
- introducing this helper tag.
- Example usage:
- {% breadcrumb_main_page %}
- '''
- title = 'Amy'
- url = reverse('index')
- return create_crumb(title, url)
-
-
-def create_crumb(title, url=None, active=False):
- '''
- Helper function that creates breadcrumb.
- '''
- active_str = ''
- if active:
- active_str = ' class="active"'
-
- title = escape(title)
- inner_str = title
- if url:
- inner_str = '<a href="{0}">{1}</a>'.format(url, title)
-
- crumb = '<li{0}>{1}</li>'.format(active_str, inner_str)
-
- return crumb
|
{"golden_diff": "diff --git a/workshops/templatetags/breadcrumbs.py b/workshops/templatetags/breadcrumbs.py\ndeleted file mode 100644\n--- a/workshops/templatetags/breadcrumbs.py\n+++ /dev/null\n@@ -1,128 +0,0 @@\n-import logging\n-\n-from django import template\n-from django.core.urlresolvers import reverse\n-from django.utils.encoding import force_text\n-from django.utils.html import escape\n-\n-register = template.Library()\n-_LOG = logging.getLogger(__name__)\n-\n-\[email protected]_tag\n-def breadcrumb(title, url):\n- '''\n- Create a simple anchor with provided text and already-resolved URL.\n- Example usage:\n- {% breadcrumb \"Title of breadcrumb\" resolved_url %}\n- '''\n- return create_crumb(title, url)\n-\n-\[email protected]_tag\n-def breadcrumb_url(title, url_name):\n- '''\n- Add non-active breadcrumb with specified title. Second argument should be\n- a string name of URL that needs to be resolved.\n- Example usage:\n- {% breadcrumb_url \"Title of breadcrumb\" url_name %}\n- '''\n- url = reverse(url_name)\n- return create_crumb(title, url)\n-\n-\[email protected]_tag\n-def breadcrumb_active(title):\n- '''\n- Add active breadcrumb, but not in an anchor.\n- Example usage:\n- {% breadcrumb_active \"Title of breadcrumb\" %}\n- '''\n- return create_crumb(str(title), url=None, active=True)\n-\n-\[email protected]_tag\n-def breadcrumb_index_all_objects(model):\n- '''\n- Add breadcrumb linking to the listing of all objects of specific type.\n- This tag accepts both models or model instances as an argument.\n- Example usage:\n- {% breadcrumb_index_all_objects model %}\n- {% breadcrumb_index_all_objects person %}\n- '''\n- plural = force_text(model._meta.verbose_name_plural)\n- title = 'All {}'.format(plural)\n- url_name = 'all_{}'.format(plural)\n- url = reverse(url_name)\n- return create_crumb(title, url)\n-\n-\[email protected]_tag\n-def breadcrumb_edit_object(obj):\n- '''\n- Add an active breadcrumb with the title \"Edit MODEL_NAME\".\n- This tag accepts model instance as an argument.\n- Example usage:\n- {% breadcrumb_edit_object person %}\n- '''\n- singular = force_text(obj._meta.verbose_name)\n- title = 'Edit {}'.format(singular)\n- return create_crumb(title, url=None, active=True)\n-\n-\[email protected]_tag\n-def breadcrumb_new_object(model):\n- '''\n- Add an active breadcrumb with the title \"Add new MODEL_NAME\".\n- This tag accepts model class as an argument.\n- Example usage:\n- {% breadcrumb_new_object person %}\n- '''\n- singular = force_text(model._meta.verbose_name)\n- title = 'Add new {}'.format(singular)\n- return create_crumb(title, url=None, active=True)\n-\n-\[email protected]_tag\n-def breadcrumb_object(obj):\n- '''\n- Add non-active breadcrumb with the title \"Add new MODEL_NAME\".\n- This tag accepts model instance as an argument.\n- Example usage:\n- {% breadcrumb_object person %}\n- '''\n- title = str(obj)\n- url = obj.get_absolute_url()\n- return create_crumb(title, url, active=False)\n-\n-\[email protected]_tag\n-def breadcrumb_main_page():\n- '''\n- Special case of ``breadcrumb_url``. 
In all templates there's always a link\n- to the main page so I wanted to save everyone thinking & writing by\n- introducing this helper tag.\n- Example usage:\n- {% breadcrumb_main_page %}\n- '''\n- title = 'Amy'\n- url = reverse('index')\n- return create_crumb(title, url)\n-\n-\n-def create_crumb(title, url=None, active=False):\n- '''\n- Helper function that creates breadcrumb.\n- '''\n- active_str = ''\n- if active:\n- active_str = ' class=\"active\"'\n-\n- title = escape(title)\n- inner_str = title\n- if url:\n- inner_str = '<a href=\"{0}\">{1}</a>'.format(url, title)\n-\n- crumb = '<li{0}>{1}</li>'.format(active_str, inner_str)\n-\n- return crumb\n", "issue": "Deal with breadcrumbs\nAs Greg mentioned in #296, he hadn't liked the way breadcrumbs repeat current page title (or header) in their last element, for example: page \"Event 2015-05-25-something\" will have breadcrumbs \"Amy / All events / Event 2015-05-25-something\".\n\nI took a look at other big websites and how they do breadcrumbs and @gvwilson was right. They don't repeat current site at the end of breadcrumbs.\n\nThis means we'd only have breadcrumbs at most 3 links long: Amy / All \\* / \\* [ / action ], for example:\n\nWas:\n- Amy / All events / Event 2015-05-25-something / Edit\n\nWill be:\n- Amy / All events / Event 2015-05-25-something\n\nBut this does not bug me. In case of `All *` pages, we can just as well drop breadcrumbs (because they'd look like \"Amy / \").\n\nSo I don't really know what to do:\n1. Display breadcrumbs on the same pages as now, but hide the last item.\n2. Display breadcrumbs on the pages that would have more than 1 breadcrumbs.\n3. Drop breadcrumbs completely.\n\n", "before_files": [{"content": "import logging\n\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.utils.encoding import force_text\nfrom django.utils.html import escape\n\nregister = template.Library()\n_LOG = logging.getLogger(__name__)\n\n\[email protected]_tag\ndef breadcrumb(title, url):\n '''\n Create a simple anchor with provided text and already-resolved URL.\n Example usage:\n {% breadcrumb \"Title of breadcrumb\" resolved_url %}\n '''\n return create_crumb(title, url)\n\n\[email protected]_tag\ndef breadcrumb_url(title, url_name):\n '''\n Add non-active breadcrumb with specified title. 
Second argument should be\n a string name of URL that needs to be resolved.\n Example usage:\n {% breadcrumb_url \"Title of breadcrumb\" url_name %}\n '''\n url = reverse(url_name)\n return create_crumb(title, url)\n\n\[email protected]_tag\ndef breadcrumb_active(title):\n '''\n Add active breadcrumb, but not in an anchor.\n Example usage:\n {% breadcrumb_active \"Title of breadcrumb\" %}\n '''\n return create_crumb(str(title), url=None, active=True)\n\n\[email protected]_tag\ndef breadcrumb_index_all_objects(model):\n '''\n Add breadcrumb linking to the listing of all objects of specific type.\n This tag accepts both models or model instances as an argument.\n Example usage:\n {% breadcrumb_index_all_objects model %}\n {% breadcrumb_index_all_objects person %}\n '''\n plural = force_text(model._meta.verbose_name_plural)\n title = 'All {}'.format(plural)\n url_name = 'all_{}'.format(plural)\n url = reverse(url_name)\n return create_crumb(title, url)\n\n\[email protected]_tag\ndef breadcrumb_edit_object(obj):\n '''\n Add an active breadcrumb with the title \"Edit MODEL_NAME\".\n This tag accepts model instance as an argument.\n Example usage:\n {% breadcrumb_edit_object person %}\n '''\n singular = force_text(obj._meta.verbose_name)\n title = 'Edit {}'.format(singular)\n return create_crumb(title, url=None, active=True)\n\n\[email protected]_tag\ndef breadcrumb_new_object(model):\n '''\n Add an active breadcrumb with the title \"Add new MODEL_NAME\".\n This tag accepts model class as an argument.\n Example usage:\n {% breadcrumb_new_object person %}\n '''\n singular = force_text(model._meta.verbose_name)\n title = 'Add new {}'.format(singular)\n return create_crumb(title, url=None, active=True)\n\n\[email protected]_tag\ndef breadcrumb_object(obj):\n '''\n Add non-active breadcrumb with the title \"Add new MODEL_NAME\".\n This tag accepts model instance as an argument.\n Example usage:\n {% breadcrumb_object person %}\n '''\n title = str(obj)\n url = obj.get_absolute_url()\n return create_crumb(title, url, active=False)\n\n\[email protected]_tag\ndef breadcrumb_main_page():\n '''\n Special case of ``breadcrumb_url``. In all templates there's always a link\n to the main page so I wanted to save everyone thinking & writing by\n introducing this helper tag.\n Example usage:\n {% breadcrumb_main_page %}\n '''\n title = 'Amy'\n url = reverse('index')\n return create_crumb(title, url)\n\n\ndef create_crumb(title, url=None, active=False):\n '''\n Helper function that creates breadcrumb.\n '''\n active_str = ''\n if active:\n active_str = ' class=\"active\"'\n\n title = escape(title)\n inner_str = title\n if url:\n inner_str = '<a href=\"{0}\">{1}</a>'.format(url, title)\n\n crumb = '<li{0}>{1}</li>'.format(active_str, inner_str)\n\n return crumb\n", "path": "workshops/templatetags/breadcrumbs.py"}], "after_files": [{"content": null, "path": "workshops/templatetags/breadcrumbs.py"}]}
| 1,608 | 950 |
gh_patches_debug_25587
|
rasdani/github-patches
|
git_diff
|
vllm-project__vllm-148
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation on running basic python server and FastAPI server
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vllm/outputs.py`
Content:
```
1 from typing import Dict, List, Optional
2
3 from vllm.sequence import SequenceGroup, SequenceStatus
4
5
6 class CompletionOutput:
7
8 def __init__(
9 self,
10 index: int,
11 text: str,
12 token_ids: List[int],
13 cumulative_logprob: float,
14 logprobs: List[Dict[int, float]],
15 finish_reason: Optional[str] = None,
16 ) -> None:
17 self.index = index
18 self.text = text
19 self.token_ids = token_ids
20 self.cumulative_logprob = cumulative_logprob
21 self.logprobs = logprobs
22 self.finish_reason = finish_reason
23
24 def finished(self) -> bool:
25 return self.finish_reason is not None
26
27 def __repr__(self) -> str:
28 return (f"CompletionOutput(index={self.index}, "
29 f"text={self.text!r}, "
30 f"token_ids={self.token_ids}, "
31 f"cumulative_logprob={self.cumulative_logprob}, "
32 f"logprobs={self.logprobs},"
33 f"finish_reason={self.finish_reason})")
34
35
36 class RequestOutput:
37
38 def __init__(
39 self,
40 request_id: str,
41 prompt: str,
42 prompt_token_ids: List[int],
43 outputs: List[CompletionOutput],
44 ) -> None:
45 self.request_id = request_id
46 self.prompt = prompt
47 self.prompt_token_ids = prompt_token_ids
48 self.outputs = outputs
49
50 @classmethod
51 def from_seq_group(cls, seq_group: SequenceGroup) -> "RequestOutput":
52 # Get the top-n sequences.
53 n = seq_group.sampling_params.n
54 seqs = seq_group.get_seqs()
55 assert n <= len(seqs)
56 sorted_seqs = sorted(
57 seqs, key=lambda seq: seq.get_cumulative_logprob(), reverse=True)
58 top_n_seqs = sorted_seqs[:n]
59
60 # Create the outputs.
61 outputs: List[CompletionOutput] = []
62 for seq in top_n_seqs:
63 logprobs = seq.output_logprobs
64 if seq_group.sampling_params.logprobs is None:
65 # NOTE: We need to take care of this case because the sequence
66 # always has the logprobs of the sampled tokens even if the
67 # logprobs are not requested.
68 logprobs = {}
69 finshed_reason = SequenceStatus.get_finished_reason(seq.status)
70 output = CompletionOutput(seqs.index(seq), seq.output_text,
71 seq.get_output_token_ids(),
72 seq.get_cumulative_logprob(), logprobs,
73 finshed_reason)
74 outputs.append(output)
75
76 # Every sequence in the sequence group should have the same prompt.
77 prompt = top_n_seqs[0].prompt
78 prompt_token_ids = top_n_seqs[0].data.prompt_token_ids
79 return cls(seq_group.request_id, prompt, prompt_token_ids, outputs)
80
81 def __repr__(self) -> str:
82 return (f"RequestOutput(request_id={self.request_id}, "
83 f"prompt={self.prompt!r}, "
84 f"prompt_token_ids={self.prompt_token_ids}, "
85 f"outputs={self.outputs})")
86
87 def finished(self) -> bool:
88 return all(output.finished() for output in self.outputs)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/vllm/outputs.py b/vllm/outputs.py
--- a/vllm/outputs.py
+++ b/vllm/outputs.py
@@ -4,6 +4,18 @@
class CompletionOutput:
+ """The output data of one completion output of a request.
+
+ Args:
+ index: The index of the output in the request.
+ text: The generated output text.
+ token_ids: The token IDs of the generated output text.
+ cumulative_logprob: The cumulative log probability of the generated
+ output text.
+ logprobs: The log probabilities of the top probability words at each
+ position if the logprobs are requested.
+ finish_reason: The reason why the sequence is finished.
+ """
def __init__(
self,
@@ -11,7 +23,7 @@
text: str,
token_ids: List[int],
cumulative_logprob: float,
- logprobs: List[Dict[int, float]],
+ logprobs: Optional[List[Dict[int, float]]],
finish_reason: Optional[str] = None,
) -> None:
self.index = index
@@ -34,7 +46,14 @@
class RequestOutput:
+ """The output data of a request to the LLM.
+ Args:
+ request_id: The unique ID of the request.
+ prompt: The prompt string of the request.
+ prompt_token_ids: The token IDs of the prompt.
+ outputs: The output sequences of the request.
+ """
def __init__(
self,
request_id: str,
|
{"golden_diff": "diff --git a/vllm/outputs.py b/vllm/outputs.py\n--- a/vllm/outputs.py\n+++ b/vllm/outputs.py\n@@ -4,6 +4,18 @@\n \n \n class CompletionOutput:\n+ \"\"\"The output data of one completion output of a request.\n+\n+ Args:\n+ index: The index of the output in the request.\n+ text: The generated output text.\n+ token_ids: The token IDs of the generated output text.\n+ cumulative_logprob: The cumulative log probability of the generated\n+ output text.\n+ logprobs: The log probabilities of the top probability words at each\n+ position if the logprobs are requested.\n+ finish_reason: The reason why the sequence is finished.\n+ \"\"\"\n \n def __init__(\n self,\n@@ -11,7 +23,7 @@\n text: str,\n token_ids: List[int],\n cumulative_logprob: float,\n- logprobs: List[Dict[int, float]],\n+ logprobs: Optional[List[Dict[int, float]]],\n finish_reason: Optional[str] = None,\n ) -> None:\n self.index = index\n@@ -34,7 +46,14 @@\n \n \n class RequestOutput:\n+ \"\"\"The output data of a request to the LLM.\n \n+ Args:\n+ request_id: The unique ID of the request.\n+ prompt: The prompt string of the request.\n+ prompt_token_ids: The token IDs of the prompt.\n+ outputs: The output sequences of the request.\n+ \"\"\"\n def __init__(\n self,\n request_id: str,\n", "issue": "Documentation on running basic python server and FastAPI server\n\n", "before_files": [{"content": "from typing import Dict, List, Optional\n\nfrom vllm.sequence import SequenceGroup, SequenceStatus\n\n\nclass CompletionOutput:\n\n def __init__(\n self,\n index: int,\n text: str,\n token_ids: List[int],\n cumulative_logprob: float,\n logprobs: List[Dict[int, float]],\n finish_reason: Optional[str] = None,\n ) -> None:\n self.index = index\n self.text = text\n self.token_ids = token_ids\n self.cumulative_logprob = cumulative_logprob\n self.logprobs = logprobs\n self.finish_reason = finish_reason\n\n def finished(self) -> bool:\n return self.finish_reason is not None\n\n def __repr__(self) -> str:\n return (f\"CompletionOutput(index={self.index}, \"\n f\"text={self.text!r}, \"\n f\"token_ids={self.token_ids}, \"\n f\"cumulative_logprob={self.cumulative_logprob}, \"\n f\"logprobs={self.logprobs},\"\n f\"finish_reason={self.finish_reason})\")\n\n\nclass RequestOutput:\n\n def __init__(\n self,\n request_id: str,\n prompt: str,\n prompt_token_ids: List[int],\n outputs: List[CompletionOutput],\n ) -> None:\n self.request_id = request_id\n self.prompt = prompt\n self.prompt_token_ids = prompt_token_ids\n self.outputs = outputs\n\n @classmethod\n def from_seq_group(cls, seq_group: SequenceGroup) -> \"RequestOutput\":\n # Get the top-n sequences.\n n = seq_group.sampling_params.n\n seqs = seq_group.get_seqs()\n assert n <= len(seqs)\n sorted_seqs = sorted(\n seqs, key=lambda seq: seq.get_cumulative_logprob(), reverse=True)\n top_n_seqs = sorted_seqs[:n]\n\n # Create the outputs.\n outputs: List[CompletionOutput] = []\n for seq in top_n_seqs:\n logprobs = seq.output_logprobs\n if seq_group.sampling_params.logprobs is None:\n # NOTE: We need to take care of this case because the sequence\n # always has the logprobs of the sampled tokens even if the\n # logprobs are not requested.\n logprobs = {}\n finshed_reason = SequenceStatus.get_finished_reason(seq.status)\n output = CompletionOutput(seqs.index(seq), seq.output_text,\n seq.get_output_token_ids(),\n seq.get_cumulative_logprob(), logprobs,\n finshed_reason)\n outputs.append(output)\n\n # Every sequence in the sequence group should have the same prompt.\n prompt = top_n_seqs[0].prompt\n 
prompt_token_ids = top_n_seqs[0].data.prompt_token_ids\n return cls(seq_group.request_id, prompt, prompt_token_ids, outputs)\n\n def __repr__(self) -> str:\n return (f\"RequestOutput(request_id={self.request_id}, \"\n f\"prompt={self.prompt!r}, \"\n f\"prompt_token_ids={self.prompt_token_ids}, \"\n f\"outputs={self.outputs})\")\n\n def finished(self) -> bool:\n return all(output.finished() for output in self.outputs)\n", "path": "vllm/outputs.py"}], "after_files": [{"content": "from typing import Dict, List, Optional\n\nfrom vllm.sequence import SequenceGroup, SequenceStatus\n\n\nclass CompletionOutput:\n \"\"\"The output data of one completion output of a request.\n\n Args:\n index: The index of the output in the request.\n text: The generated output text.\n token_ids: The token IDs of the generated output text.\n cumulative_logprob: The cumulative log probability of the generated\n output text.\n logprobs: The log probabilities of the top probability words at each\n position if the logprobs are requested.\n finish_reason: The reason why the sequence is finished.\n \"\"\"\n\n def __init__(\n self,\n index: int,\n text: str,\n token_ids: List[int],\n cumulative_logprob: float,\n logprobs: Optional[List[Dict[int, float]]],\n finish_reason: Optional[str] = None,\n ) -> None:\n self.index = index\n self.text = text\n self.token_ids = token_ids\n self.cumulative_logprob = cumulative_logprob\n self.logprobs = logprobs\n self.finish_reason = finish_reason\n\n def finished(self) -> bool:\n return self.finish_reason is not None\n\n def __repr__(self) -> str:\n return (f\"CompletionOutput(index={self.index}, \"\n f\"text={self.text!r}, \"\n f\"token_ids={self.token_ids}, \"\n f\"cumulative_logprob={self.cumulative_logprob}, \"\n f\"logprobs={self.logprobs},\"\n f\"finish_reason={self.finish_reason})\")\n\n\nclass RequestOutput:\n \"\"\"The output data of a request to the LLM.\n\n Args:\n request_id: The unique ID of the request.\n prompt: The prompt string of the request.\n prompt_token_ids: The token IDs of the prompt.\n outputs: The output sequences of the request.\n \"\"\"\n def __init__(\n self,\n request_id: str,\n prompt: str,\n prompt_token_ids: List[int],\n outputs: List[CompletionOutput],\n ) -> None:\n self.request_id = request_id\n self.prompt = prompt\n self.prompt_token_ids = prompt_token_ids\n self.outputs = outputs\n\n @classmethod\n def from_seq_group(cls, seq_group: SequenceGroup) -> \"RequestOutput\":\n # Get the top-n sequences.\n n = seq_group.sampling_params.n\n seqs = seq_group.get_seqs()\n assert n <= len(seqs)\n sorted_seqs = sorted(\n seqs, key=lambda seq: seq.get_cumulative_logprob(), reverse=True)\n top_n_seqs = sorted_seqs[:n]\n\n # Create the outputs.\n outputs: List[CompletionOutput] = []\n for seq in top_n_seqs:\n logprobs = seq.output_logprobs\n if seq_group.sampling_params.logprobs is None:\n # NOTE: We need to take care of this case because the sequence\n # always has the logprobs of the sampled tokens even if the\n # logprobs are not requested.\n logprobs = {}\n finshed_reason = SequenceStatus.get_finished_reason(seq.status)\n output = CompletionOutput(seqs.index(seq), seq.output_text,\n seq.get_output_token_ids(),\n seq.get_cumulative_logprob(), logprobs,\n finshed_reason)\n outputs.append(output)\n\n # Every sequence in the sequence group should have the same prompt.\n prompt = top_n_seqs[0].prompt\n prompt_token_ids = top_n_seqs[0].data.prompt_token_ids\n return cls(seq_group.request_id, prompt, prompt_token_ids, outputs)\n\n def __repr__(self) -> str:\n return 
(f\"RequestOutput(request_id={self.request_id}, \"\n f\"prompt={self.prompt!r}, \"\n f\"prompt_token_ids={self.prompt_token_ids}, \"\n f\"outputs={self.outputs})\")\n\n def finished(self) -> bool:\n return all(output.finished() for output in self.outputs)\n", "path": "vllm/outputs.py"}]}
| 1,137 | 360 |
gh_patches_debug_22017
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-563
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`statistics.py` wrong parameter name
## 🐛 Bug
The functions [here](https://github.com/facebookresearch/CompilerGym/blob/e248330d2475fbcdf473cc3df951f25b5eaf4945/compiler_gym/util/statistics.py#L8) says they take `iterable` as inputs. However, `np.asarray` actually take `array_like`.
[Quote:
](https://numpy.org/doc/stable/reference/generated/numpy.asarray.html)
> Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.
e.g.
```python
geometric_mean(i for i in range(10))
```
This will fail because though it's an `iterable`, it's not an `array_like`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/util/statistics.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 import numpy as np
6
7
8 def geometric_mean(iterable):
9 """Zero-length-safe geometric mean."""
10 values = np.asarray(iterable)
11 if not values.size:
12 return 0
13 # Shortcut to return 0 when any element of the input is not positive.
14 if not np.all(values > 0):
15 return 0
16 a = np.log(values)
17 return np.exp(a.sum() / len(a))
18
19
20 def arithmetic_mean(iterable):
21 """Zero-length-safe arithmetic mean."""
22 values = np.asarray(iterable)
23 if not values.size:
24 return 0
25 return values.mean()
26
27
28 def stdev(iterable):
29 """Zero-length-safe standard deviation."""
30 values = np.asarray(iterable)
31 if not values.size:
32 return 0
33 return values.std()
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/compiler_gym/util/statistics.py b/compiler_gym/util/statistics.py
--- a/compiler_gym/util/statistics.py
+++ b/compiler_gym/util/statistics.py
@@ -5,9 +5,9 @@
import numpy as np
-def geometric_mean(iterable):
+def geometric_mean(array_like):
"""Zero-length-safe geometric mean."""
- values = np.asarray(iterable)
+ values = np.asarray(array_like)
if not values.size:
return 0
# Shortcut to return 0 when any element of the input is not positive.
@@ -17,17 +17,17 @@
return np.exp(a.sum() / len(a))
-def arithmetic_mean(iterable):
+def arithmetic_mean(array_like):
"""Zero-length-safe arithmetic mean."""
- values = np.asarray(iterable)
+ values = np.asarray(array_like)
if not values.size:
return 0
return values.mean()
-def stdev(iterable):
+def stdev(array_like):
"""Zero-length-safe standard deviation."""
- values = np.asarray(iterable)
+ values = np.asarray(array_like)
if not values.size:
return 0
return values.std()
|
{"golden_diff": "diff --git a/compiler_gym/util/statistics.py b/compiler_gym/util/statistics.py\n--- a/compiler_gym/util/statistics.py\n+++ b/compiler_gym/util/statistics.py\n@@ -5,9 +5,9 @@\n import numpy as np\n \n \n-def geometric_mean(iterable):\n+def geometric_mean(array_like):\n \"\"\"Zero-length-safe geometric mean.\"\"\"\n- values = np.asarray(iterable)\n+ values = np.asarray(array_like)\n if not values.size:\n return 0\n # Shortcut to return 0 when any element of the input is not positive.\n@@ -17,17 +17,17 @@\n return np.exp(a.sum() / len(a))\n \n \n-def arithmetic_mean(iterable):\n+def arithmetic_mean(array_like):\n \"\"\"Zero-length-safe arithmetic mean.\"\"\"\n- values = np.asarray(iterable)\n+ values = np.asarray(array_like)\n if not values.size:\n return 0\n return values.mean()\n \n \n-def stdev(iterable):\n+def stdev(array_like):\n \"\"\"Zero-length-safe standard deviation.\"\"\"\n- values = np.asarray(iterable)\n+ values = np.asarray(array_like)\n if not values.size:\n return 0\n return values.std()\n", "issue": "`statistics.py` wrong parameter name\n## \ud83d\udc1b Bug\r\n\r\nThe functions [here](https://github.com/facebookresearch/CompilerGym/blob/e248330d2475fbcdf473cc3df951f25b5eaf4945/compiler_gym/util/statistics.py#L8) says they take `iterable` as inputs. However, `np.asarray` actually take `array_like`.\r\n\r\n[Quote:\r\n](https://numpy.org/doc/stable/reference/generated/numpy.asarray.html)\r\n\r\n> Input data, in any form that can be converted to an array. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and ndarrays.\r\n\r\ne.g.\r\n```python\r\ngeometric_mean(i for i in range(10))\r\n```\r\nThis will fail because though it's an `iterable`, it's not an `array_like`.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport numpy as np\n\n\ndef geometric_mean(iterable):\n \"\"\"Zero-length-safe geometric mean.\"\"\"\n values = np.asarray(iterable)\n if not values.size:\n return 0\n # Shortcut to return 0 when any element of the input is not positive.\n if not np.all(values > 0):\n return 0\n a = np.log(values)\n return np.exp(a.sum() / len(a))\n\n\ndef arithmetic_mean(iterable):\n \"\"\"Zero-length-safe arithmetic mean.\"\"\"\n values = np.asarray(iterable)\n if not values.size:\n return 0\n return values.mean()\n\n\ndef stdev(iterable):\n \"\"\"Zero-length-safe standard deviation.\"\"\"\n values = np.asarray(iterable)\n if not values.size:\n return 0\n return values.std()\n", "path": "compiler_gym/util/statistics.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport numpy as np\n\n\ndef geometric_mean(array_like):\n \"\"\"Zero-length-safe geometric mean.\"\"\"\n values = np.asarray(array_like)\n if not values.size:\n return 0\n # Shortcut to return 0 when any element of the input is not positive.\n if not np.all(values > 0):\n return 0\n a = np.log(values)\n return np.exp(a.sum() / len(a))\n\n\ndef arithmetic_mean(array_like):\n \"\"\"Zero-length-safe arithmetic mean.\"\"\"\n values = np.asarray(array_like)\n if not values.size:\n return 0\n return values.mean()\n\n\ndef stdev(array_like):\n \"\"\"Zero-length-safe standard deviation.\"\"\"\n values = np.asarray(array_like)\n if not values.size:\n return 0\n return values.std()\n", "path": "compiler_gym/util/statistics.py"}]}
| 723 | 260 |
gh_patches_debug_42303
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-1690
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Format JSON on save
Is there a way to get this LSP to format json files ons save?
It works for other LSPs but just not JSON,
Here are my configs:
Preferences
```
{
...
"lsp_format_on_save": true
...
}
```
LSP
```
"lsp_code_actions_on_save": {
"source.organizeImports": true,
"source.fixAll.eslint": true,
}
```
All LSP-JSON settings are default
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/formatting.py`
Content:
```
1 from .core.edit import parse_text_edit
2 from .core.protocol import TextEdit
3 from .core.registry import LspTextCommand
4 from .core.sessions import Session
5 from .core.settings import userprefs
6 from .core.typing import Any, Callable, List, Optional, Iterator
7 from .core.views import entire_content_region
8 from .core.views import first_selection_region
9 from .core.views import text_document_formatting
10 from .core.views import text_document_range_formatting
11 from .core.views import will_save_wait_until
12 from .save_command import LspSaveCommand, SaveTask
13 import sublime
14
15
16 def apply_response_to_view(response: Optional[List[TextEdit]], view: sublime.View) -> None:
17 edits = list(parse_text_edit(change) for change in response) if response else []
18 view.run_command('lsp_apply_document_edit', {'changes': edits})
19
20
21 class WillSaveWaitTask(SaveTask):
22 @classmethod
23 def is_applicable(cls, view: sublime.View) -> bool:
24 return bool(view.file_name())
25
26 def __init__(self, task_runner: LspTextCommand, on_complete: Callable[[], None]) -> None:
27 super().__init__(task_runner, on_complete)
28 self._session_iterator = None # type: Optional[Iterator[Session]]
29
30 def run_async(self) -> None:
31 super().run_async()
32 self._session_iterator = self._task_runner.sessions('textDocumentSync.willSaveWaitUntil')
33 self._handle_next_session_async()
34
35 def _handle_next_session_async(self) -> None:
36 session = next(self._session_iterator, None) if self._session_iterator else None
37 if session:
38 self._purge_changes_async()
39 self._will_save_wait_until_async(session)
40 else:
41 self._on_complete()
42
43 def _will_save_wait_until_async(self, session: Session) -> None:
44 session.send_request_async(
45 will_save_wait_until(self._task_runner.view, reason=1), # TextDocumentSaveReason.Manual
46 self._on_response,
47 lambda error: self._on_response(None))
48
49 def _on_response(self, response: Any) -> None:
50 if response and not self._cancelled:
51 apply_response_to_view(response, self._task_runner.view)
52 sublime.set_timeout_async(self._handle_next_session_async)
53
54
55 class FormattingTask(SaveTask):
56 @classmethod
57 def is_applicable(cls, view: sublime.View) -> bool:
58 settings = view.settings()
59 view_format_on_save = settings.get('lsp_format_on_save', None)
60 enabled = view_format_on_save if isinstance(view_format_on_save, bool) else userprefs().lsp_format_on_save
61 return enabled and bool(view.window()) and bool(view.file_name())
62
63 def run_async(self) -> None:
64 super().run_async()
65 self._purge_changes_async()
66 session = self._task_runner.best_session(LspFormatDocumentCommand.capability)
67 if session:
68 session.send_request_async(
69 text_document_formatting(self._task_runner.view), self._on_response,
70 lambda error: self._on_response(None))
71 else:
72 self._on_complete()
73
74 def _on_response(self, response: Any) -> None:
75 if response and not self._cancelled:
76 apply_response_to_view(response, self._task_runner.view)
77 sublime.set_timeout_async(self._on_complete)
78
79
80 LspSaveCommand.register_task(WillSaveWaitTask)
81 LspSaveCommand.register_task(FormattingTask)
82
83
84 class LspFormatDocumentCommand(LspTextCommand):
85
86 capability = 'documentFormattingProvider'
87
88 def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
89 return super().is_enabled() or bool(self.best_session(LspFormatDocumentRangeCommand.capability))
90
91 def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:
92 session = self.best_session(self.capability)
93 if session:
94 # Either use the documentFormattingProvider ...
95 session.send_request(text_document_formatting(self.view), self.on_result)
96 else:
97 session = self.best_session(LspFormatDocumentRangeCommand.capability)
98 if session:
99 # ... or use the documentRangeFormattingProvider and format the entire range.
100 req = text_document_range_formatting(self.view, entire_content_region(self.view))
101 session.send_request(req, self.on_result)
102
103 def on_result(self, params: Any) -> None:
104 apply_response_to_view(params, self.view)
105
106
107 class LspFormatDocumentRangeCommand(LspTextCommand):
108
109 capability = 'documentRangeFormattingProvider'
110
111 def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
112 if super().is_enabled(event, point):
113 if len(self.view.sel()) == 1:
114 region = self.view.sel()[0]
115 if region.begin() != region.end():
116 return True
117 return False
118
119 def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:
120 session = self.best_session(self.capability)
121 selection = first_selection_region(self.view)
122 if session and selection is not None:
123 req = text_document_range_formatting(self.view, selection)
124 session.send_request(req, lambda response: apply_response_to_view(response, self.view))
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/formatting.py b/plugin/formatting.py
--- a/plugin/formatting.py
+++ b/plugin/formatting.py
@@ -1,9 +1,11 @@
from .core.edit import parse_text_edit
+from .core.promise import Promise
+from .core.protocol import Error
from .core.protocol import TextEdit
from .core.registry import LspTextCommand
from .core.sessions import Session
from .core.settings import userprefs
-from .core.typing import Any, Callable, List, Optional, Iterator
+from .core.typing import Any, Callable, List, Optional, Iterator, Union
from .core.views import entire_content_region
from .core.views import first_selection_region
from .core.views import text_document_formatting
@@ -13,6 +15,22 @@
import sublime
+FormatResponse = Union[List[TextEdit], None, Error]
+
+
+def format_document(text_command: LspTextCommand) -> Promise[FormatResponse]:
+ view = text_command.view
+ session = text_command.best_session(LspFormatDocumentCommand.capability)
+ if session:
+ # Either use the documentFormattingProvider ...
+ return session.send_request_task(text_document_formatting(view))
+ session = text_command.best_session(LspFormatDocumentRangeCommand.capability)
+ if session:
+ # ... or use the documentRangeFormattingProvider and format the entire range.
+ return session.send_request_task(text_document_range_formatting(view, entire_content_region(view)))
+ return Promise.resolve(None)
+
+
def apply_response_to_view(response: Optional[List[TextEdit]], view: sublime.View) -> None:
edits = list(parse_text_edit(change) for change in response) if response else []
view.run_command('lsp_apply_document_edit', {'changes': edits})
@@ -63,16 +81,10 @@
def run_async(self) -> None:
super().run_async()
self._purge_changes_async()
- session = self._task_runner.best_session(LspFormatDocumentCommand.capability)
- if session:
- session.send_request_async(
- text_document_formatting(self._task_runner.view), self._on_response,
- lambda error: self._on_response(None))
- else:
- self._on_complete()
+ format_document(self._task_runner).then(self._on_response)
- def _on_response(self, response: Any) -> None:
- if response and not self._cancelled:
+ def _on_response(self, response: FormatResponse) -> None:
+ if response and not isinstance(response, Error) and not self._cancelled:
apply_response_to_view(response, self._task_runner.view)
sublime.set_timeout_async(self._on_complete)
@@ -89,19 +101,11 @@
return super().is_enabled() or bool(self.best_session(LspFormatDocumentRangeCommand.capability))
def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:
- session = self.best_session(self.capability)
- if session:
- # Either use the documentFormattingProvider ...
- session.send_request(text_document_formatting(self.view), self.on_result)
- else:
- session = self.best_session(LspFormatDocumentRangeCommand.capability)
- if session:
- # ... or use the documentRangeFormattingProvider and format the entire range.
- req = text_document_range_formatting(self.view, entire_content_region(self.view))
- session.send_request(req, self.on_result)
-
- def on_result(self, params: Any) -> None:
- apply_response_to_view(params, self.view)
+ format_document(self).then(self.on_result)
+
+ def on_result(self, result: FormatResponse) -> None:
+ if result and not isinstance(result, Error):
+ apply_response_to_view(result, self.view)
class LspFormatDocumentRangeCommand(LspTextCommand):
|
{"golden_diff": "diff --git a/plugin/formatting.py b/plugin/formatting.py\n--- a/plugin/formatting.py\n+++ b/plugin/formatting.py\n@@ -1,9 +1,11 @@\n from .core.edit import parse_text_edit\n+from .core.promise import Promise\n+from .core.protocol import Error\n from .core.protocol import TextEdit\n from .core.registry import LspTextCommand\n from .core.sessions import Session\n from .core.settings import userprefs\n-from .core.typing import Any, Callable, List, Optional, Iterator\n+from .core.typing import Any, Callable, List, Optional, Iterator, Union\n from .core.views import entire_content_region\n from .core.views import first_selection_region\n from .core.views import text_document_formatting\n@@ -13,6 +15,22 @@\n import sublime\n \n \n+FormatResponse = Union[List[TextEdit], None, Error]\n+\n+\n+def format_document(text_command: LspTextCommand) -> Promise[FormatResponse]:\n+ view = text_command.view\n+ session = text_command.best_session(LspFormatDocumentCommand.capability)\n+ if session:\n+ # Either use the documentFormattingProvider ...\n+ return session.send_request_task(text_document_formatting(view))\n+ session = text_command.best_session(LspFormatDocumentRangeCommand.capability)\n+ if session:\n+ # ... or use the documentRangeFormattingProvider and format the entire range.\n+ return session.send_request_task(text_document_range_formatting(view, entire_content_region(view)))\n+ return Promise.resolve(None)\n+\n+\n def apply_response_to_view(response: Optional[List[TextEdit]], view: sublime.View) -> None:\n edits = list(parse_text_edit(change) for change in response) if response else []\n view.run_command('lsp_apply_document_edit', {'changes': edits})\n@@ -63,16 +81,10 @@\n def run_async(self) -> None:\n super().run_async()\n self._purge_changes_async()\n- session = self._task_runner.best_session(LspFormatDocumentCommand.capability)\n- if session:\n- session.send_request_async(\n- text_document_formatting(self._task_runner.view), self._on_response,\n- lambda error: self._on_response(None))\n- else:\n- self._on_complete()\n+ format_document(self._task_runner).then(self._on_response)\n \n- def _on_response(self, response: Any) -> None:\n- if response and not self._cancelled:\n+ def _on_response(self, response: FormatResponse) -> None:\n+ if response and not isinstance(response, Error) and not self._cancelled:\n apply_response_to_view(response, self._task_runner.view)\n sublime.set_timeout_async(self._on_complete)\n \n@@ -89,19 +101,11 @@\n return super().is_enabled() or bool(self.best_session(LspFormatDocumentRangeCommand.capability))\n \n def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:\n- session = self.best_session(self.capability)\n- if session:\n- # Either use the documentFormattingProvider ...\n- session.send_request(text_document_formatting(self.view), self.on_result)\n- else:\n- session = self.best_session(LspFormatDocumentRangeCommand.capability)\n- if session:\n- # ... 
or use the documentRangeFormattingProvider and format the entire range.\n- req = text_document_range_formatting(self.view, entire_content_region(self.view))\n- session.send_request(req, self.on_result)\n-\n- def on_result(self, params: Any) -> None:\n- apply_response_to_view(params, self.view)\n+ format_document(self).then(self.on_result)\n+\n+ def on_result(self, result: FormatResponse) -> None:\n+ if result and not isinstance(result, Error):\n+ apply_response_to_view(result, self.view)\n \n \n class LspFormatDocumentRangeCommand(LspTextCommand):\n", "issue": "Format JSON on save\nIs there a way to get this LSP to format json files ons save?\r\n\r\nIt works for other LSPs but just not JSON,\r\n\r\nHere are my configs:\r\n\r\nPreferences\r\n```\r\n{\r\n ...\r\n\t\"lsp_format_on_save\": true\r\n ...\r\n}\r\n```\r\n\r\nLSP\r\n```\r\n\t\"lsp_code_actions_on_save\": {\r\n\t\t\"source.organizeImports\": true,\r\n\t\t\"source.fixAll.eslint\": true,\r\n\t}\r\n```\r\n\r\nAll LSP-JSON settings are default\n", "before_files": [{"content": "from .core.edit import parse_text_edit\nfrom .core.protocol import TextEdit\nfrom .core.registry import LspTextCommand\nfrom .core.sessions import Session\nfrom .core.settings import userprefs\nfrom .core.typing import Any, Callable, List, Optional, Iterator\nfrom .core.views import entire_content_region\nfrom .core.views import first_selection_region\nfrom .core.views import text_document_formatting\nfrom .core.views import text_document_range_formatting\nfrom .core.views import will_save_wait_until\nfrom .save_command import LspSaveCommand, SaveTask\nimport sublime\n\n\ndef apply_response_to_view(response: Optional[List[TextEdit]], view: sublime.View) -> None:\n edits = list(parse_text_edit(change) for change in response) if response else []\n view.run_command('lsp_apply_document_edit', {'changes': edits})\n\n\nclass WillSaveWaitTask(SaveTask):\n @classmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n return bool(view.file_name())\n\n def __init__(self, task_runner: LspTextCommand, on_complete: Callable[[], None]) -> None:\n super().__init__(task_runner, on_complete)\n self._session_iterator = None # type: Optional[Iterator[Session]]\n\n def run_async(self) -> None:\n super().run_async()\n self._session_iterator = self._task_runner.sessions('textDocumentSync.willSaveWaitUntil')\n self._handle_next_session_async()\n\n def _handle_next_session_async(self) -> None:\n session = next(self._session_iterator, None) if self._session_iterator else None\n if session:\n self._purge_changes_async()\n self._will_save_wait_until_async(session)\n else:\n self._on_complete()\n\n def _will_save_wait_until_async(self, session: Session) -> None:\n session.send_request_async(\n will_save_wait_until(self._task_runner.view, reason=1), # TextDocumentSaveReason.Manual\n self._on_response,\n lambda error: self._on_response(None))\n\n def _on_response(self, response: Any) -> None:\n if response and not self._cancelled:\n apply_response_to_view(response, self._task_runner.view)\n sublime.set_timeout_async(self._handle_next_session_async)\n\n\nclass FormattingTask(SaveTask):\n @classmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n settings = view.settings()\n view_format_on_save = settings.get('lsp_format_on_save', None)\n enabled = view_format_on_save if isinstance(view_format_on_save, bool) else userprefs().lsp_format_on_save\n return enabled and bool(view.window()) and bool(view.file_name())\n\n def run_async(self) -> None:\n super().run_async()\n 
self._purge_changes_async()\n session = self._task_runner.best_session(LspFormatDocumentCommand.capability)\n if session:\n session.send_request_async(\n text_document_formatting(self._task_runner.view), self._on_response,\n lambda error: self._on_response(None))\n else:\n self._on_complete()\n\n def _on_response(self, response: Any) -> None:\n if response and not self._cancelled:\n apply_response_to_view(response, self._task_runner.view)\n sublime.set_timeout_async(self._on_complete)\n\n\nLspSaveCommand.register_task(WillSaveWaitTask)\nLspSaveCommand.register_task(FormattingTask)\n\n\nclass LspFormatDocumentCommand(LspTextCommand):\n\n capability = 'documentFormattingProvider'\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n return super().is_enabled() or bool(self.best_session(LspFormatDocumentRangeCommand.capability))\n\n def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:\n session = self.best_session(self.capability)\n if session:\n # Either use the documentFormattingProvider ...\n session.send_request(text_document_formatting(self.view), self.on_result)\n else:\n session = self.best_session(LspFormatDocumentRangeCommand.capability)\n if session:\n # ... or use the documentRangeFormattingProvider and format the entire range.\n req = text_document_range_formatting(self.view, entire_content_region(self.view))\n session.send_request(req, self.on_result)\n\n def on_result(self, params: Any) -> None:\n apply_response_to_view(params, self.view)\n\n\nclass LspFormatDocumentRangeCommand(LspTextCommand):\n\n capability = 'documentRangeFormattingProvider'\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n if super().is_enabled(event, point):\n if len(self.view.sel()) == 1:\n region = self.view.sel()[0]\n if region.begin() != region.end():\n return True\n return False\n\n def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:\n session = self.best_session(self.capability)\n selection = first_selection_region(self.view)\n if session and selection is not None:\n req = text_document_range_formatting(self.view, selection)\n session.send_request(req, lambda response: apply_response_to_view(response, self.view))\n", "path": "plugin/formatting.py"}], "after_files": [{"content": "from .core.edit import parse_text_edit\nfrom .core.promise import Promise\nfrom .core.protocol import Error\nfrom .core.protocol import TextEdit\nfrom .core.registry import LspTextCommand\nfrom .core.sessions import Session\nfrom .core.settings import userprefs\nfrom .core.typing import Any, Callable, List, Optional, Iterator, Union\nfrom .core.views import entire_content_region\nfrom .core.views import first_selection_region\nfrom .core.views import text_document_formatting\nfrom .core.views import text_document_range_formatting\nfrom .core.views import will_save_wait_until\nfrom .save_command import LspSaveCommand, SaveTask\nimport sublime\n\n\nFormatResponse = Union[List[TextEdit], None, Error]\n\n\ndef format_document(text_command: LspTextCommand) -> Promise[FormatResponse]:\n view = text_command.view\n session = text_command.best_session(LspFormatDocumentCommand.capability)\n if session:\n # Either use the documentFormattingProvider ...\n return session.send_request_task(text_document_formatting(view))\n session = text_command.best_session(LspFormatDocumentRangeCommand.capability)\n if session:\n # ... 
or use the documentRangeFormattingProvider and format the entire range.\n return session.send_request_task(text_document_range_formatting(view, entire_content_region(view)))\n return Promise.resolve(None)\n\n\ndef apply_response_to_view(response: Optional[List[TextEdit]], view: sublime.View) -> None:\n edits = list(parse_text_edit(change) for change in response) if response else []\n view.run_command('lsp_apply_document_edit', {'changes': edits})\n\n\nclass WillSaveWaitTask(SaveTask):\n @classmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n return bool(view.file_name())\n\n def __init__(self, task_runner: LspTextCommand, on_complete: Callable[[], None]) -> None:\n super().__init__(task_runner, on_complete)\n self._session_iterator = None # type: Optional[Iterator[Session]]\n\n def run_async(self) -> None:\n super().run_async()\n self._session_iterator = self._task_runner.sessions('textDocumentSync.willSaveWaitUntil')\n self._handle_next_session_async()\n\n def _handle_next_session_async(self) -> None:\n session = next(self._session_iterator, None) if self._session_iterator else None\n if session:\n self._purge_changes_async()\n self._will_save_wait_until_async(session)\n else:\n self._on_complete()\n\n def _will_save_wait_until_async(self, session: Session) -> None:\n session.send_request_async(\n will_save_wait_until(self._task_runner.view, reason=1), # TextDocumentSaveReason.Manual\n self._on_response,\n lambda error: self._on_response(None))\n\n def _on_response(self, response: Any) -> None:\n if response and not self._cancelled:\n apply_response_to_view(response, self._task_runner.view)\n sublime.set_timeout_async(self._handle_next_session_async)\n\n\nclass FormattingTask(SaveTask):\n @classmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n settings = view.settings()\n view_format_on_save = settings.get('lsp_format_on_save', None)\n enabled = view_format_on_save if isinstance(view_format_on_save, bool) else userprefs().lsp_format_on_save\n return enabled and bool(view.window()) and bool(view.file_name())\n\n def run_async(self) -> None:\n super().run_async()\n self._purge_changes_async()\n format_document(self._task_runner).then(self._on_response)\n\n def _on_response(self, response: FormatResponse) -> None:\n if response and not isinstance(response, Error) and not self._cancelled:\n apply_response_to_view(response, self._task_runner.view)\n sublime.set_timeout_async(self._on_complete)\n\n\nLspSaveCommand.register_task(WillSaveWaitTask)\nLspSaveCommand.register_task(FormattingTask)\n\n\nclass LspFormatDocumentCommand(LspTextCommand):\n\n capability = 'documentFormattingProvider'\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n return super().is_enabled() or bool(self.best_session(LspFormatDocumentRangeCommand.capability))\n\n def run(self, edit: sublime.Edit, event: Optional[dict] = None) -> None:\n format_document(self).then(self.on_result)\n\n def on_result(self, result: FormatResponse) -> None:\n if result and not isinstance(result, Error):\n apply_response_to_view(result, self.view)\n\n\nclass LspFormatDocumentRangeCommand(LspTextCommand):\n\n capability = 'documentRangeFormattingProvider'\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n if super().is_enabled(event, point):\n if len(self.view.sel()) == 1:\n region = self.view.sel()[0]\n if region.begin() != region.end():\n return True\n return False\n\n def run(self, edit: sublime.Edit, event: Optional[dict] = None) 
-> None:\n session = self.best_session(self.capability)\n selection = first_selection_region(self.view)\n if session and selection is not None:\n req = text_document_range_formatting(self.view, selection)\n session.send_request(req, lambda response: apply_response_to_view(response, self.view))\n", "path": "plugin/formatting.py"}]}
| 1,775 | 847 |
gh_patches_debug_23027
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-172
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unbound enum values are None when used in arguments
When used as a mutation input, enum parameter should be `str`, but actually is `None`.
```python
def test_executing_mutation_takes_enum():
type_defs = """
type Query {
_: String
}
type Mutation {
eat(meal: Meal!): Int!
}
enum Meal {
SPAM
}
"""
mutation = MutationType()
@mutation.field("eat")
def resolve_eat(*_, meal): # pylint: disable=unused-variable
assert meal == "SPAM"
return 42
schema = make_executable_schema(type_defs, mutation)
result = graphql_sync(schema, 'mutation { eat(meal: SPAM) }')
assert result.errors is None
assert result.data == {"eat": 42}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ariadne/enums.py`
Content:
```
1 import enum
2
3 from typing import Any, Dict, Optional, Union, cast
4
5 from graphql.type import GraphQLEnumType, GraphQLNamedType, GraphQLSchema
6
7 from .types import SchemaBindable
8
9
10 class EnumType(SchemaBindable):
11 def __init__(
12 self, name: str, values=Union[Dict[str, Any], enum.Enum, enum.IntEnum]
13 ) -> None:
14 self.name = name
15 try:
16 self.values = values.__members__ # pylint: disable=no-member
17 except AttributeError:
18 self.values = values
19
20 def bind_to_schema(self, schema: GraphQLSchema) -> None:
21 graphql_type = schema.type_map.get(self.name)
22 self.validate_graphql_type(graphql_type)
23 graphql_type = cast(GraphQLEnumType, graphql_type)
24
25 for key, value in self.values.items():
26 if key not in graphql_type.values:
27 raise ValueError(
28 "Value %s is not defined on enum %s" % (key, self.name)
29 )
30 graphql_type.values[key].value = value
31
32 def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:
33 if not graphql_type:
34 raise ValueError("Enum %s is not defined in the schema" % self.name)
35 if not isinstance(graphql_type, GraphQLEnumType):
36 raise ValueError(
37 "%s is defined in the schema, but it is instance of %s (expected %s)"
38 % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)
39 )
40
```
Path: `ariadne/executable_schema.py`
Content:
```
1 from typing import List, Union
2
3 from graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse
4
5 from .types import SchemaBindable
6
7
8 def make_executable_schema(
9 type_defs: Union[str, List[str]],
10 bindables: Union[SchemaBindable, List[SchemaBindable], None] = None,
11 ) -> GraphQLSchema:
12 if isinstance(type_defs, list):
13 type_defs = join_type_defs(type_defs)
14
15 ast_document = parse(type_defs)
16 schema = build_and_extend_schema(ast_document)
17
18 if isinstance(bindables, list):
19 for obj in bindables:
20 obj.bind_to_schema(schema)
21 elif bindables:
22 bindables.bind_to_schema(schema)
23
24 return schema
25
26
27 def join_type_defs(type_defs: List[str]) -> str:
28 return "\n\n".join(t.strip() for t in type_defs)
29
30
31 def build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:
32 schema = build_ast_schema(ast)
33 extension_ast = extract_extensions(ast)
34
35 if extension_ast.definitions:
36 schema = extend_schema(schema, extension_ast)
37
38 return schema
39
40
41 EXTENSION_KINDS = [
42 "scalar_type_extension",
43 "object_type_extension",
44 "interface_type_extension",
45 "union_type_extension",
46 "enum_type_extension",
47 "input_object_type_extension",
48 ]
49
50
51 def extract_extensions(ast: DocumentNode) -> DocumentNode:
52 extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]
53 return DocumentNode(definitions=extensions)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ariadne/enums.py b/ariadne/enums.py
--- a/ariadne/enums.py
+++ b/ariadne/enums.py
@@ -37,3 +37,15 @@
"%s is defined in the schema, but it is instance of %s (expected %s)"
% (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)
)
+
+
+def set_default_enum_values_on_schema(schema: GraphQLSchema):
+ for type_object in schema.type_map.values():
+ if isinstance(type_object, GraphQLEnumType):
+ set_default_enum_values(type_object)
+
+
+def set_default_enum_values(graphql_type: GraphQLEnumType):
+ for key in graphql_type.values:
+ if graphql_type.values[key].value is None:
+ graphql_type.values[key].value = key
diff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py
--- a/ariadne/executable_schema.py
+++ b/ariadne/executable_schema.py
@@ -2,6 +2,7 @@
from graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse
+from .enums import set_default_enum_values_on_schema
from .types import SchemaBindable
@@ -21,6 +22,8 @@
elif bindables:
bindables.bind_to_schema(schema)
+ set_default_enum_values_on_schema(schema)
+
return schema
|
{"golden_diff": "diff --git a/ariadne/enums.py b/ariadne/enums.py\n--- a/ariadne/enums.py\n+++ b/ariadne/enums.py\n@@ -37,3 +37,15 @@\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)\n )\n+\n+\n+def set_default_enum_values_on_schema(schema: GraphQLSchema):\n+ for type_object in schema.type_map.values():\n+ if isinstance(type_object, GraphQLEnumType):\n+ set_default_enum_values(type_object)\n+\n+\n+def set_default_enum_values(graphql_type: GraphQLEnumType):\n+ for key in graphql_type.values:\n+ if graphql_type.values[key].value is None:\n+ graphql_type.values[key].value = key\ndiff --git a/ariadne/executable_schema.py b/ariadne/executable_schema.py\n--- a/ariadne/executable_schema.py\n+++ b/ariadne/executable_schema.py\n@@ -2,6 +2,7 @@\n \n from graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse\n \n+from .enums import set_default_enum_values_on_schema\n from .types import SchemaBindable\n \n \n@@ -21,6 +22,8 @@\n elif bindables:\n bindables.bind_to_schema(schema)\n \n+ set_default_enum_values_on_schema(schema)\n+\n return schema\n", "issue": "Unbound enum values are None when used in arguments\nWhen used as a mutation input, enum parameter should be `str`, but actually is `None`.\r\n\r\n```python\r\ndef test_executing_mutation_takes_enum():\r\n type_defs = \"\"\"\r\n type Query {\r\n _: String\r\n }\r\n\r\n type Mutation {\r\n eat(meal: Meal!): Int!\r\n }\r\n\r\n enum Meal {\r\n SPAM\r\n }\r\n \"\"\"\r\n\r\n mutation = MutationType()\r\n\r\n @mutation.field(\"eat\")\r\n def resolve_eat(*_, meal): # pylint: disable=unused-variable\r\n assert meal == \"SPAM\"\r\n return 42\r\n\r\n schema = make_executable_schema(type_defs, mutation)\r\n\r\n result = graphql_sync(schema, 'mutation { eat(meal: SPAM) }')\r\n assert result.errors is None\r\n assert result.data == {\"eat\": 42}\r\n```\n", "before_files": [{"content": "import enum\n\nfrom typing import Any, Dict, Optional, Union, cast\n\nfrom graphql.type import GraphQLEnumType, GraphQLNamedType, GraphQLSchema\n\nfrom .types import SchemaBindable\n\n\nclass EnumType(SchemaBindable):\n def __init__(\n self, name: str, values=Union[Dict[str, Any], enum.Enum, enum.IntEnum]\n ) -> None:\n self.name = name\n try:\n self.values = values.__members__ # pylint: disable=no-member\n except AttributeError:\n self.values = values\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLEnumType, graphql_type)\n\n for key, value in self.values.items():\n if key not in graphql_type.values:\n raise ValueError(\n \"Value %s is not defined on enum %s\" % (key, self.name)\n )\n graphql_type.values[key].value = value\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Enum %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLEnumType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)\n )\n", "path": "ariadne/enums.py"}, {"content": "from typing import List, Union\n\nfrom graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse\n\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n bindables: Union[SchemaBindable, 
List[SchemaBindable], None] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_and_extend_schema(ast_document)\n\n if isinstance(bindables, list):\n for obj in bindables:\n obj.bind_to_schema(schema)\n elif bindables:\n bindables.bind_to_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:\n schema = build_ast_schema(ast)\n extension_ast = extract_extensions(ast)\n\n if extension_ast.definitions:\n schema = extend_schema(schema, extension_ast)\n\n return schema\n\n\nEXTENSION_KINDS = [\n \"scalar_type_extension\",\n \"object_type_extension\",\n \"interface_type_extension\",\n \"union_type_extension\",\n \"enum_type_extension\",\n \"input_object_type_extension\",\n]\n\n\ndef extract_extensions(ast: DocumentNode) -> DocumentNode:\n extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]\n return DocumentNode(definitions=extensions)\n", "path": "ariadne/executable_schema.py"}], "after_files": [{"content": "import enum\n\nfrom typing import Any, Dict, Optional, Union, cast\n\nfrom graphql.type import GraphQLEnumType, GraphQLNamedType, GraphQLSchema\n\nfrom .types import SchemaBindable\n\n\nclass EnumType(SchemaBindable):\n def __init__(\n self, name: str, values=Union[Dict[str, Any], enum.Enum, enum.IntEnum]\n ) -> None:\n self.name = name\n try:\n self.values = values.__members__ # pylint: disable=no-member\n except AttributeError:\n self.values = values\n\n def bind_to_schema(self, schema: GraphQLSchema) -> None:\n graphql_type = schema.type_map.get(self.name)\n self.validate_graphql_type(graphql_type)\n graphql_type = cast(GraphQLEnumType, graphql_type)\n\n for key, value in self.values.items():\n if key not in graphql_type.values:\n raise ValueError(\n \"Value %s is not defined on enum %s\" % (key, self.name)\n )\n graphql_type.values[key].value = value\n\n def validate_graphql_type(self, graphql_type: Optional[GraphQLNamedType]) -> None:\n if not graphql_type:\n raise ValueError(\"Enum %s is not defined in the schema\" % self.name)\n if not isinstance(graphql_type, GraphQLEnumType):\n raise ValueError(\n \"%s is defined in the schema, but it is instance of %s (expected %s)\"\n % (self.name, type(graphql_type).__name__, GraphQLEnumType.__name__)\n )\n\n\ndef set_default_enum_values_on_schema(schema: GraphQLSchema):\n for type_object in schema.type_map.values():\n if isinstance(type_object, GraphQLEnumType):\n set_default_enum_values(type_object)\n\n\ndef set_default_enum_values(graphql_type: GraphQLEnumType):\n for key in graphql_type.values:\n if graphql_type.values[key].value is None:\n graphql_type.values[key].value = key\n", "path": "ariadne/enums.py"}, {"content": "from typing import List, Union\n\nfrom graphql import DocumentNode, GraphQLSchema, build_ast_schema, extend_schema, parse\n\nfrom .enums import set_default_enum_values_on_schema\nfrom .types import SchemaBindable\n\n\ndef make_executable_schema(\n type_defs: Union[str, List[str]],\n bindables: Union[SchemaBindable, List[SchemaBindable], None] = None,\n) -> GraphQLSchema:\n if isinstance(type_defs, list):\n type_defs = join_type_defs(type_defs)\n\n ast_document = parse(type_defs)\n schema = build_and_extend_schema(ast_document)\n\n if isinstance(bindables, list):\n for obj in bindables:\n obj.bind_to_schema(schema)\n elif bindables:\n 
bindables.bind_to_schema(schema)\n\n set_default_enum_values_on_schema(schema)\n\n return schema\n\n\ndef join_type_defs(type_defs: List[str]) -> str:\n return \"\\n\\n\".join(t.strip() for t in type_defs)\n\n\ndef build_and_extend_schema(ast: DocumentNode) -> GraphQLSchema:\n schema = build_ast_schema(ast)\n extension_ast = extract_extensions(ast)\n\n if extension_ast.definitions:\n schema = extend_schema(schema, extension_ast)\n\n return schema\n\n\nEXTENSION_KINDS = [\n \"scalar_type_extension\",\n \"object_type_extension\",\n \"interface_type_extension\",\n \"union_type_extension\",\n \"enum_type_extension\",\n \"input_object_type_extension\",\n]\n\n\ndef extract_extensions(ast: DocumentNode) -> DocumentNode:\n extensions = [node for node in ast.definitions if node.kind in EXTENSION_KINDS]\n return DocumentNode(definitions=extensions)\n", "path": "ariadne/executable_schema.py"}]}
| 1,296 | 329 |
| gh_patches_debug_11707 | rasdani/github-patches | git_diff | elastic__ecs-1164 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect output of the "tracing" fields in the Beats yml file
Just like the `base` fields, the `tracing` fields are not nested under the name of the field set. So it's not `base.@timestamp`, it's `@timestamp`, and it's not `tracing.trace.id`, it's `trace.id`.
In the Beats field yaml file the ECS project generates, the tracing fields are incorrectly nested under a `tracing` section, which means Beats interprets the field names incorrectly (`tracing.trace.id`).
This is a bug, these fields shouldn't be nested this way.
In order to fix this issue, we should remove this nesting in the Beats yml output. Just like `@timestamp` and other base fields are not nested under a field group.
I think this bug fix will at minimum be backported to 1.7. Thoughts welcome on this: is there a need to backport to 1.6 as well?
The Beats PR https://github.com/elastic/beats/pull/22571 to import ECS 1.7 should be adjusted with these changes, once the bug fix is ready. cc @andrewstucki
--- END ISSUE ---
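For illustration only, the behaviour being asked for amounts to treating a root-level field set (such as `tracing`) the way `base` is treated: its fields are emitted at the top level rather than under a named group. The sketch below is not the generator code from this repository; it assumes a hypothetical `root` flag on the field set definition and a caller-supplied `emit_fields` helper.

```python
# Sketch: flatten root field sets (e.g. tracing) to the top level so Beats sees
# trace.id rather than tracing.trace.id; grouped field sets keep their nesting.
def flatten_fieldsets(ecs_nested, emit_fields):
    top_level, groups = [], []
    for name in sorted(ecs_nested):
        fieldset = ecs_nested[name]
        if name == "base" or fieldset.get("root", False):
            # Contribute fields directly, like @timestamp from the base field set.
            top_level.extend(emit_fields(fieldset["fields"], fieldset["prefix"]))
        else:
            groups.append({"name": name, "fields": emit_fields(fieldset["fields"], fieldset["prefix"])})
    return top_level + groups
```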
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/generators/beats.py`
Content:
```
1 from os.path import join
2 from collections import OrderedDict
3 from generators import ecs_helpers
4
5
6 def generate(ecs_nested, ecs_version, out_dir):
7 # Load temporary whitelist for default_fields workaround.
8 df_whitelist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_whitelist.yml')
9
10 # base first
11 beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_whitelist, ecs_nested['base']['prefix'])
12
13 allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']
14 # other fieldsets
15 for fieldset_name in sorted(ecs_nested):
16 if 'base' == fieldset_name:
17 continue
18 fieldset = ecs_nested[fieldset_name]
19
20 beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)
21 beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])
22 beats_fields.append(beats_field)
23
24 beats_file = OrderedDict()
25 beats_file['key'] = 'ecs'
26 beats_file['title'] = 'ECS'
27 beats_file['description'] = 'ECS Fields.'
28 beats_file['fields'] = beats_fields
29
30 write_beats_yaml(beats_file, ecs_version, out_dir)
31
32
33 def fieldset_field_array(source_fields, df_whitelist, fieldset_prefix):
34 allowed_keys = ['name', 'level', 'required', 'type', 'object_type',
35 'ignore_above', 'multi_fields', 'format', 'input_format',
36 'output_format', 'output_precision', 'description',
37 'example', 'enabled', 'index', 'path', 'scaling_factor']
38 multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']
39
40 fields = []
41 for nested_field_name in source_fields:
42 ecs_field = source_fields[nested_field_name]
43 beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)
44 if '' == fieldset_prefix:
45 contextual_name = nested_field_name
46 else:
47 contextual_name = '.'.join(nested_field_name.split('.')[1:])
48
49 cleaned_multi_fields = []
50 if 'multi_fields' in ecs_field:
51 for mf in ecs_field['multi_fields']:
52 # Set default_field if necessary. Avoid adding the key if the parent
53 # field already is marked with default_field: false.
54 if not mf['flat_name'] in df_whitelist and ecs_field['flat_name'] in df_whitelist:
55 mf['default_field'] = False
56 cleaned_multi_fields.append(
57 ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))
58 beats_field['multi_fields'] = cleaned_multi_fields
59
60 beats_field['name'] = contextual_name
61
62 if not ecs_field['flat_name'] in df_whitelist:
63 beats_field['default_field'] = False
64
65 fields.append(beats_field)
66 return sorted(fields, key=lambda x: x['name'])
67
68 # Helpers
69
70
71 def write_beats_yaml(beats_file, ecs_version, out_dir):
72 ecs_helpers.make_dirs(join(out_dir, 'beats'))
73 warning = file_header().format(version=ecs_version)
74 ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)
75
76
77 # Templates
78
79
80 def file_header():
81 return '''
82 # WARNING! Do not edit this file directly, it was generated by the ECS project,
83 # based on ECS version {version}.
84 # Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.
85
86 '''.lstrip()
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py
--- a/scripts/generators/beats.py
+++ b/scripts/generators/beats.py
@@ -17,6 +17,11 @@
continue
fieldset = ecs_nested[fieldset_name]
+ # Handle when `root:true`
+ if fieldset.get('root', False):
+ beats_fields.extend(fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix']))
+ continue
+
beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)
beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])
beats_fields.append(beats_field)
|
{"golden_diff": "diff --git a/scripts/generators/beats.py b/scripts/generators/beats.py\n--- a/scripts/generators/beats.py\n+++ b/scripts/generators/beats.py\n@@ -17,6 +17,11 @@\n continue\n fieldset = ecs_nested[fieldset_name]\n \n+ # Handle when `root:true`\n+ if fieldset.get('root', False):\n+ beats_fields.extend(fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix']))\n+ continue\n+\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])\n beats_fields.append(beats_field)\n", "issue": "Incorrect output of the \"tracing\" fields in the Beats yml file\nJust like the `base` fields, the `tracing` fields are not nested under the name of the field set. So it's not `base.@timestamp`, it's `@timestamp`, and it's not `tracing.trace.id`, it's `trace.id`.\r\n\r\nIn the Beats field yaml file the ECS project generates, the tracing fields are incorrectly nested under a `tracing` section, which means Beats interprets the field names incorrectly (`tracing.trace.id`).\r\n\r\nThis is a bug, these fields shouldn't be nested this way.\r\n\r\nIn order to fix this issue, we should remove this nesting in the Beats yml output. Just like `@timestamp` and other base fields are not nested under a field group.\r\n\r\nI think this bug fix will be at minimum backported to 1.7. Thoughts welcome on this, is there a need to backport to 1.6 as well?\r\n\r\nThe Beats PR https://github.com/elastic/beats/pull/22571 to import ECS 1.7 should be adjusted with these changes, once the bug fix is ready. cc @andrewstucki \r\n\n", "before_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary whitelist for default_fields workaround.\n df_whitelist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_whitelist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_whitelist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_whitelist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'path', 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = 
'.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_whitelist and ecs_field['flat_name'] in df_whitelist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_whitelist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return '''\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n'''.lstrip()\n", "path": "scripts/generators/beats.py"}], "after_files": [{"content": "from os.path import join\nfrom collections import OrderedDict\nfrom generators import ecs_helpers\n\n\ndef generate(ecs_nested, ecs_version, out_dir):\n # Load temporary whitelist for default_fields workaround.\n df_whitelist = ecs_helpers.yaml_load('scripts/generators/beats_default_fields_whitelist.yml')\n\n # base first\n beats_fields = fieldset_field_array(ecs_nested['base']['fields'], df_whitelist, ecs_nested['base']['prefix'])\n\n allowed_fieldset_keys = ['name', 'title', 'group', 'description', 'footnote', 'type']\n # other fieldsets\n for fieldset_name in sorted(ecs_nested):\n if 'base' == fieldset_name:\n continue\n fieldset = ecs_nested[fieldset_name]\n\n # Handle when `root:true`\n if fieldset.get('root', False):\n beats_fields.extend(fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix']))\n continue\n\n beats_field = ecs_helpers.dict_copy_keys_ordered(fieldset, allowed_fieldset_keys)\n beats_field['fields'] = fieldset_field_array(fieldset['fields'], df_whitelist, fieldset['prefix'])\n beats_fields.append(beats_field)\n\n beats_file = OrderedDict()\n beats_file['key'] = 'ecs'\n beats_file['title'] = 'ECS'\n beats_file['description'] = 'ECS Fields.'\n beats_file['fields'] = beats_fields\n\n write_beats_yaml(beats_file, ecs_version, out_dir)\n\n\ndef fieldset_field_array(source_fields, df_whitelist, fieldset_prefix):\n allowed_keys = ['name', 'level', 'required', 'type', 'object_type',\n 'ignore_above', 'multi_fields', 'format', 'input_format',\n 'output_format', 'output_precision', 'description',\n 'example', 'enabled', 'index', 'path', 'scaling_factor']\n multi_fields_allowed_keys = ['name', 'type', 'norms', 'default_field', 'normalizer', 'ignore_above']\n\n fields = []\n for nested_field_name in source_fields:\n ecs_field = source_fields[nested_field_name]\n beats_field = ecs_helpers.dict_copy_keys_ordered(ecs_field, allowed_keys)\n if '' == fieldset_prefix:\n contextual_name = nested_field_name\n else:\n contextual_name = '.'.join(nested_field_name.split('.')[1:])\n\n cleaned_multi_fields = []\n if 'multi_fields' in ecs_field:\n for mf in ecs_field['multi_fields']:\n # Set default_field if necessary. 
Avoid adding the key if the parent\n # field already is marked with default_field: false.\n if not mf['flat_name'] in df_whitelist and ecs_field['flat_name'] in df_whitelist:\n mf['default_field'] = False\n cleaned_multi_fields.append(\n ecs_helpers.dict_copy_keys_ordered(mf, multi_fields_allowed_keys))\n beats_field['multi_fields'] = cleaned_multi_fields\n\n beats_field['name'] = contextual_name\n\n if not ecs_field['flat_name'] in df_whitelist:\n beats_field['default_field'] = False\n\n fields.append(beats_field)\n return sorted(fields, key=lambda x: x['name'])\n\n# Helpers\n\n\ndef write_beats_yaml(beats_file, ecs_version, out_dir):\n ecs_helpers.make_dirs(join(out_dir, 'beats'))\n warning = file_header().format(version=ecs_version)\n ecs_helpers.yaml_dump(join(out_dir, 'beats/fields.ecs.yml'), [beats_file], preamble=warning)\n\n\n# Templates\n\n\ndef file_header():\n return '''\n# WARNING! Do not edit this file directly, it was generated by the ECS project,\n# based on ECS version {version}.\n# Please visit https://github.com/elastic/ecs to suggest changes to ECS fields.\n\n'''.lstrip()\n", "path": "scripts/generators/beats.py"}]}
| 1,461 | 166 |
| gh_patches_debug_34022 | rasdani/github-patches | git_diff | scrapy__scrapy-594 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
engine status util references removed engine.slots attribute
```
Traceback (most recent call last): Less
File "/usr/lib/pymodules/python2.7/scrapy/xlib/pydispatch/robustapply.py", line 54, in robustApply
return receiver(*arguments, **named)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py", line 63, in engine_started
tsk.start(60.0, now=True)
File "/usr/lib/python2.7/dist-packages/twisted/internet/task.py", line 163, in start
self()
File "/usr/lib/python2.7/dist-packages/twisted/internet/task.py", line 208, in __call__
d = defer.maybeDeferred(self.f, *self.a, **self.kw)
--- <exception caught here> ---
File "/usr/lib/python2.7/dist-packages/twisted/internet/defer.py", line 134, in maybeDeferred
result = f(*args, **kw)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py", line 103, in _check_warning
self._send_report(self.notify_mails, subj)
File "/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py", line 116, in _send_report
s += pformat(get_engine_status(self.crawler.engine))
File "/usr/lib/pymodules/python2.7/scrapy/utils/engine.py", line 33, in get_engine_status
for spider in engine.slots.keys():
exceptions.AttributeError: 'ExecutionEngine' object has no attribute 'slots'
```
--- END ISSUE ---
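The traceback above comes down to the status helper still iterating a per-spider `slots` mapping that newer engines no longer expose; they keep a single `slot` and a single `spider` instead. As a rough, defensive sketch (the attribute names are assumptions based on the traceback, not a drop-in patch):

```python
# Sketch: yield whichever spiders the engine actually tracks, whether it uses
# the old per-spider `slots` dict or the newer single `slot`/`spider` pair.
def iter_engine_spiders(engine):
    if hasattr(engine, "slots"):            # older engines: {spider: slot}
        return list(engine.slots)
    if getattr(engine, "spider", None) is not None:
        return [engine.spider]              # newer engines: one active spider
    return []
```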
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/utils/engine.py`
Content:
```
1 """Some debugging functions for working with the Scrapy engine"""
2
3 from __future__ import print_function
4 from time import time # used in global tests code
5
6 def get_engine_status(engine):
7 """Return a report of the current engine status"""
8 global_tests = [
9 "time()-engine.start_time",
10 "engine.has_capacity()",
11 "len(engine.downloader.active)",
12 "engine.scraper.is_idle()",
13 ]
14 spider_tests = [
15 "engine.spider_is_idle(spider)",
16 "engine.slot.closing",
17 "len(engine.slot.inprogress)",
18 "len(engine.slot.scheduler.dqs or [])",
19 "len(engine.slot.scheduler.mqs)",
20 "len(engine.scraper.slot.queue)",
21 "len(engine.scraper.slot.active)",
22 "engine.scraper.slot.active_size",
23 "engine.scraper.slot.itemproc_size",
24 "engine.scraper.slot.needs_backout()",
25 ]
26
27 status = {'global': [], 'spiders': {}}
28 for test in global_tests:
29 try:
30 status['global'] += [(test, eval(test))]
31 except Exception as e:
32 status['global'] += [(test, "%s (exception)" % type(e).__name__)]
33 for spider in engine.slots.keys():
34 x = []
35 for test in spider_tests:
36 try:
37 x += [(test, eval(test))]
38 except Exception as e:
39 x += [(test, "%s (exception)" % type(e).__name__)]
40 status['spiders'][spider] = x
41 return status
42
43 def format_engine_status(engine=None):
44 status = get_engine_status(engine)
45 s = "Execution engine status\n\n"
46 for test, result in status['global']:
47 s += "%-47s : %s\n" % (test, result)
48 s += "\n"
49 for spider, tests in status['spiders'].items():
50 s += "Spider: %s\n" % spider
51 for test, result in tests:
52 s += " %-50s : %s\n" % (test, result)
53 return s
54
55 def print_engine_status(engine):
56 print(format_engine_status(engine))
57
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/utils/engine.py b/scrapy/utils/engine.py
--- a/scrapy/utils/engine.py
+++ b/scrapy/utils/engine.py
@@ -5,14 +5,13 @@
def get_engine_status(engine):
"""Return a report of the current engine status"""
- global_tests = [
+ tests = [
"time()-engine.start_time",
"engine.has_capacity()",
"len(engine.downloader.active)",
"engine.scraper.is_idle()",
- ]
- spider_tests = [
- "engine.spider_is_idle(spider)",
+ "engine.spider.name",
+ "engine.spider_is_idle(engine.spider)",
"engine.slot.closing",
"len(engine.slot.inprogress)",
"len(engine.slot.scheduler.dqs or [])",
@@ -24,34 +23,23 @@
"engine.scraper.slot.needs_backout()",
]
- status = {'global': [], 'spiders': {}}
- for test in global_tests:
+ checks = []
+ for test in tests:
try:
- status['global'] += [(test, eval(test))]
+ checks += [(test, eval(test))]
except Exception as e:
- status['global'] += [(test, "%s (exception)" % type(e).__name__)]
- for spider in engine.slots.keys():
- x = []
- for test in spider_tests:
- try:
- x += [(test, eval(test))]
- except Exception as e:
- x += [(test, "%s (exception)" % type(e).__name__)]
- status['spiders'][spider] = x
- return status
+ checks += [(test, "%s (exception)" % type(e).__name__)]
+
+ return checks
def format_engine_status(engine=None):
- status = get_engine_status(engine)
+ checks = get_engine_status(engine)
s = "Execution engine status\n\n"
- for test, result in status['global']:
+ for test, result in checks:
s += "%-47s : %s\n" % (test, result)
s += "\n"
- for spider, tests in status['spiders'].items():
- s += "Spider: %s\n" % spider
- for test, result in tests:
- s += " %-50s : %s\n" % (test, result)
+
return s
def print_engine_status(engine):
print(format_engine_status(engine))
-
|
{"golden_diff": "diff --git a/scrapy/utils/engine.py b/scrapy/utils/engine.py\n--- a/scrapy/utils/engine.py\n+++ b/scrapy/utils/engine.py\n@@ -5,14 +5,13 @@\n \n def get_engine_status(engine):\n \"\"\"Return a report of the current engine status\"\"\"\n- global_tests = [\n+ tests = [\n \"time()-engine.start_time\",\n \"engine.has_capacity()\",\n \"len(engine.downloader.active)\",\n \"engine.scraper.is_idle()\",\n- ]\n- spider_tests = [\n- \"engine.spider_is_idle(spider)\",\n+ \"engine.spider.name\",\n+ \"engine.spider_is_idle(engine.spider)\",\n \"engine.slot.closing\",\n \"len(engine.slot.inprogress)\",\n \"len(engine.slot.scheduler.dqs or [])\",\n@@ -24,34 +23,23 @@\n \"engine.scraper.slot.needs_backout()\",\n ]\n \n- status = {'global': [], 'spiders': {}}\n- for test in global_tests:\n+ checks = []\n+ for test in tests:\n try:\n- status['global'] += [(test, eval(test))]\n+ checks += [(test, eval(test))]\n except Exception as e:\n- status['global'] += [(test, \"%s (exception)\" % type(e).__name__)]\n- for spider in engine.slots.keys():\n- x = []\n- for test in spider_tests:\n- try:\n- x += [(test, eval(test))]\n- except Exception as e:\n- x += [(test, \"%s (exception)\" % type(e).__name__)]\n- status['spiders'][spider] = x\n- return status\n+ checks += [(test, \"%s (exception)\" % type(e).__name__)]\n+\n+ return checks\n \n def format_engine_status(engine=None):\n- status = get_engine_status(engine)\n+ checks = get_engine_status(engine)\n s = \"Execution engine status\\n\\n\"\n- for test, result in status['global']:\n+ for test, result in checks:\n s += \"%-47s : %s\\n\" % (test, result)\n s += \"\\n\"\n- for spider, tests in status['spiders'].items():\n- s += \"Spider: %s\\n\" % spider\n- for test, result in tests:\n- s += \" %-50s : %s\\n\" % (test, result)\n+\n return s\n \n def print_engine_status(engine):\n print(format_engine_status(engine))\n-\n", "issue": "engine status util references removed engine.slots attribute\n```\nTraceback (most recent call last): Less\n File \"/usr/lib/pymodules/python2.7/scrapy/xlib/pydispatch/robustapply.py\", line 54, in robustApply\n return receiver(*arguments, **named)\n File \"/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py\", line 63, in engine_started\n tsk.start(60.0, now=True)\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/task.py\", line 163, in start\n self()\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/task.py\", line 208, in __call__\n d = defer.maybeDeferred(self.f, *self.a, **self.kw)\n --- <exception caught here> ---\n File \"/usr/lib/python2.7/dist-packages/twisted/internet/defer.py\", line 134, in maybeDeferred\n result = f(*args, **kw)\n File \"/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py\", line 103, in _check_warning\n self._send_report(self.notify_mails, subj)\n File \"/usr/lib/pymodules/python2.7/scrapy/contrib/memusage.py\", line 116, in _send_report\n s += pformat(get_engine_status(self.crawler.engine))\n File \"/usr/lib/pymodules/python2.7/scrapy/utils/engine.py\", line 33, in get_engine_status\n for spider in engine.slots.keys():\n exceptions.AttributeError: 'ExecutionEngine' object has no attribute 'slots'\n```\n\n", "before_files": [{"content": "\"\"\"Some debugging functions for working with the Scrapy engine\"\"\"\n\nfrom __future__ import print_function\nfrom time import time # used in global tests code\n\ndef get_engine_status(engine):\n \"\"\"Return a report of the current engine status\"\"\"\n global_tests = [\n \"time()-engine.start_time\",\n \"engine.has_capacity()\",\n 
\"len(engine.downloader.active)\",\n \"engine.scraper.is_idle()\",\n ]\n spider_tests = [\n \"engine.spider_is_idle(spider)\",\n \"engine.slot.closing\",\n \"len(engine.slot.inprogress)\",\n \"len(engine.slot.scheduler.dqs or [])\",\n \"len(engine.slot.scheduler.mqs)\",\n \"len(engine.scraper.slot.queue)\",\n \"len(engine.scraper.slot.active)\",\n \"engine.scraper.slot.active_size\",\n \"engine.scraper.slot.itemproc_size\",\n \"engine.scraper.slot.needs_backout()\",\n ]\n\n status = {'global': [], 'spiders': {}}\n for test in global_tests:\n try:\n status['global'] += [(test, eval(test))]\n except Exception as e:\n status['global'] += [(test, \"%s (exception)\" % type(e).__name__)]\n for spider in engine.slots.keys():\n x = []\n for test in spider_tests:\n try:\n x += [(test, eval(test))]\n except Exception as e:\n x += [(test, \"%s (exception)\" % type(e).__name__)]\n status['spiders'][spider] = x\n return status\n\ndef format_engine_status(engine=None):\n status = get_engine_status(engine)\n s = \"Execution engine status\\n\\n\"\n for test, result in status['global']:\n s += \"%-47s : %s\\n\" % (test, result)\n s += \"\\n\"\n for spider, tests in status['spiders'].items():\n s += \"Spider: %s\\n\" % spider\n for test, result in tests:\n s += \" %-50s : %s\\n\" % (test, result)\n return s\n\ndef print_engine_status(engine):\n print(format_engine_status(engine))\n\n", "path": "scrapy/utils/engine.py"}], "after_files": [{"content": "\"\"\"Some debugging functions for working with the Scrapy engine\"\"\"\n\nfrom __future__ import print_function\nfrom time import time # used in global tests code\n\ndef get_engine_status(engine):\n \"\"\"Return a report of the current engine status\"\"\"\n tests = [\n \"time()-engine.start_time\",\n \"engine.has_capacity()\",\n \"len(engine.downloader.active)\",\n \"engine.scraper.is_idle()\",\n \"engine.spider.name\",\n \"engine.spider_is_idle(engine.spider)\",\n \"engine.slot.closing\",\n \"len(engine.slot.inprogress)\",\n \"len(engine.slot.scheduler.dqs or [])\",\n \"len(engine.slot.scheduler.mqs)\",\n \"len(engine.scraper.slot.queue)\",\n \"len(engine.scraper.slot.active)\",\n \"engine.scraper.slot.active_size\",\n \"engine.scraper.slot.itemproc_size\",\n \"engine.scraper.slot.needs_backout()\",\n ]\n\n checks = []\n for test in tests:\n try:\n checks += [(test, eval(test))]\n except Exception as e:\n checks += [(test, \"%s (exception)\" % type(e).__name__)]\n\n return checks\n\ndef format_engine_status(engine=None):\n checks = get_engine_status(engine)\n s = \"Execution engine status\\n\\n\"\n for test, result in checks:\n s += \"%-47s : %s\\n\" % (test, result)\n s += \"\\n\"\n\n return s\n\ndef print_engine_status(engine):\n print(format_engine_status(engine))\n", "path": "scrapy/utils/engine.py"}]}
| 1,208 | 551 |
| gh_patches_debug_26859 | rasdani/github-patches | git_diff | SeldonIO__MLServer-850 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MLServer to hide http health request logs to avoid polluting the logs
As part of the Seldon Core addition https://github.com/SeldonIO/seldon-core/pull/4028, which moves the TCP ready checks into proper HTTP request ready checks to `v2/health/ready`, there is now a lot of noise from the readiness checks every 5 seconds. We should explore ways in which we can avoid this noise, perhaps making it completely silent by default; eventually, once the prometheus metrics are served on a separate server, that endpoint could also be covered (and both of them could be muted).

--- END ISSUE ---
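One common pattern for muting individual endpoints without disabling access logs altogether is to attach a `logging.Filter` to uvicorn's access logger. The snippet below is only a sketch of that idea (it checks the rendered message rather than relying on uvicorn's internal record layout):

```python
import logging

class SkipHealthEndpoints(logging.Filter):
    """Drop access-log records produced by the V2 health probes."""

    def filter(self, record: logging.LogRecord) -> bool:
        message = record.getMessage()
        return "/v2/health/ready" not in message and "/v2/health/live" not in message

# Attach once at startup; ordinary requests keep being logged.
logging.getLogger("uvicorn.access").addFilter(SkipHealthEndpoints())
```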
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/rest/server.py`
Content:
```
1 import uvicorn
2
3 from ..settings import Settings
4 from ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers
5 from ..model import MLModel
6
7 from .utils import matches
8 from .app import create_app
9 from .logging import logger
10 from typing import Optional
11
12
13 class _NoSignalServer(uvicorn.Server):
14 def install_signal_handlers(self):
15 pass
16
17
18 class RESTServer:
19 def __init__(
20 self,
21 settings: Settings,
22 data_plane: DataPlane,
23 model_repository_handlers: ModelRepositoryHandlers,
24 ):
25 self._settings = settings
26 self._data_plane = data_plane
27 self._model_repository_handlers = model_repository_handlers
28 self._app = create_app(
29 self._settings,
30 data_plane=self._data_plane,
31 model_repository_handlers=self._model_repository_handlers,
32 )
33
34 async def add_custom_handlers(self, model: MLModel) -> MLModel:
35 handlers = get_custom_handlers(model)
36 for custom_handler, handler_method in handlers:
37 self._app.add_api_route(
38 custom_handler.rest_path,
39 handler_method,
40 methods=[custom_handler.rest_method],
41 )
42
43 return model
44
45 async def delete_custom_handlers(self, model: MLModel) -> MLModel:
46 handlers = get_custom_handlers(model)
47 if len(handlers) == 0:
48 return model
49
50 # NOTE: Loop in reverse, so that it's quicker to find all the recently
51 # added routes and we can remove routes on-the-fly
52 for i, route in reversed(list(enumerate(self._app.routes))):
53 for j, (custom_handler, handler_method) in enumerate(handlers):
54 if matches(route, custom_handler, handler_method): # type: ignore
55 self._app.routes.pop(i)
56 handlers.pop(j)
57
58 return model
59
60 async def start(self):
61 cfg = self._get_config()
62 self._server = _NoSignalServer(cfg)
63 await self._server.serve()
64
65 def _get_config(self):
66 kwargs = {}
67
68 if self._settings._custom_rest_server_settings:
69 logger.warning(
70 "REST custom configuration is out of support. Use as your own risk"
71 )
72 kwargs.update(self._settings._custom_rest_server_settings)
73
74 kwargs.update(
75 {
76 "host": self._settings.host,
77 "port": self._settings.http_port,
78 "root_path": self._settings.root_path,
79 "access_log": self._settings.debug,
80 }
81 )
82
83 if self._settings.logging_settings:
84 # If not None, use ours. Otherwise, let Uvicorn fall back on its
85 # own config.
86 kwargs.update({"log_config": self._settings.logging_settings})
87
88 return uvicorn.Config(self._app, **kwargs)
89
90 async def stop(self, sig: Optional[int] = None):
91 self._server.handle_exit(sig=sig, frame=None)
92
```
Path: `mlserver/rest/logging.py`
Content:
```
1 import logging
2
3 loggerName = "mlserver.rest"
4 logger = logging.getLogger(loggerName)
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlserver/rest/logging.py b/mlserver/rest/logging.py
--- a/mlserver/rest/logging.py
+++ b/mlserver/rest/logging.py
@@ -1,4 +1,35 @@
import logging
+
+class HealthEndpointFilter(logging.Filter):
+ """
+ Filter to avoid logging health endpoints.
+ From:
+ https://github.com/encode/starlette/issues/864#issuecomment-653076434
+ """
+
+ def filter(self, record: logging.LogRecord) -> bool:
+ if not isinstance(record.args, tuple):
+ return True
+
+ if len(record.args) < 3:
+ return True
+
+ request_method = record.args[1]
+ query_string = record.args[2]
+ if request_method != "GET":
+ return True
+
+ if query_string in ["/v2/health/live", "/v2/health/ready"]:
+ return False
+
+ return True
+
+
+def disable_health_access_logs() -> None:
+ uvicorn_logger = logging.getLogger("uvicorn.access")
+ uvicorn_logger.addFilter(HealthEndpointFilter())
+
+
loggerName = "mlserver.rest"
logger = logging.getLogger(loggerName)
diff --git a/mlserver/rest/server.py b/mlserver/rest/server.py
--- a/mlserver/rest/server.py
+++ b/mlserver/rest/server.py
@@ -6,7 +6,7 @@
from .utils import matches
from .app import create_app
-from .logging import logger
+from .logging import logger, disable_health_access_logs
from typing import Optional
@@ -60,6 +60,9 @@
async def start(self):
cfg = self._get_config()
self._server = _NoSignalServer(cfg)
+ if not self._settings.debug:
+ disable_health_access_logs()
+
await self._server.serve()
def _get_config(self):
|
{"golden_diff": "diff --git a/mlserver/rest/logging.py b/mlserver/rest/logging.py\n--- a/mlserver/rest/logging.py\n+++ b/mlserver/rest/logging.py\n@@ -1,4 +1,35 @@\n import logging\n \n+\n+class HealthEndpointFilter(logging.Filter):\n+ \"\"\"\n+ Filter to avoid logging health endpoints.\n+ From:\n+ https://github.com/encode/starlette/issues/864#issuecomment-653076434\n+ \"\"\"\n+\n+ def filter(self, record: logging.LogRecord) -> bool:\n+ if not isinstance(record.args, tuple):\n+ return True\n+\n+ if len(record.args) < 3:\n+ return True\n+\n+ request_method = record.args[1]\n+ query_string = record.args[2]\n+ if request_method != \"GET\":\n+ return True\n+\n+ if query_string in [\"/v2/health/live\", \"/v2/health/ready\"]:\n+ return False\n+\n+ return True\n+\n+\n+def disable_health_access_logs() -> None:\n+ uvicorn_logger = logging.getLogger(\"uvicorn.access\")\n+ uvicorn_logger.addFilter(HealthEndpointFilter())\n+\n+\n loggerName = \"mlserver.rest\"\n logger = logging.getLogger(loggerName)\ndiff --git a/mlserver/rest/server.py b/mlserver/rest/server.py\n--- a/mlserver/rest/server.py\n+++ b/mlserver/rest/server.py\n@@ -6,7 +6,7 @@\n \n from .utils import matches\n from .app import create_app\n-from .logging import logger\n+from .logging import logger, disable_health_access_logs\n from typing import Optional\n \n \n@@ -60,6 +60,9 @@\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n+ if not self._settings.debug:\n+ disable_health_access_logs()\n+\n await self._server.serve()\n \n def _get_config(self):\n", "issue": "MLServer to hide http health request logs to avoid polluting the logs\nAs part of the Seldon Core addition https://github.com/SeldonIO/seldon-core/pull/4028 which moves the TCP ready checks into proper HTTP request ready checks to `v2/health/ready` there is now a lot of noise from the readiness checks every 5 seconds. 
We should explore ways in which we avoid this noise, perhaps making it completely silent by default, or eventually once the prometheus server is created on a separate server this could also be added (And both of them could be muted)\r\n\r\n\r\n\n", "before_files": [{"content": "import uvicorn\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers\nfrom ..model import MLModel\n\nfrom .utils import matches\nfrom .app import create_app\nfrom .logging import logger\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass RESTServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n self._app = create_app(\n self._settings,\n data_plane=self._data_plane,\n model_repository_handlers=self._model_repository_handlers,\n )\n\n async def add_custom_handlers(self, model: MLModel) -> MLModel:\n handlers = get_custom_handlers(model)\n for custom_handler, handler_method in handlers:\n self._app.add_api_route(\n custom_handler.rest_path,\n handler_method,\n methods=[custom_handler.rest_method],\n )\n\n return model\n\n async def delete_custom_handlers(self, model: MLModel) -> MLModel:\n handlers = get_custom_handlers(model)\n if len(handlers) == 0:\n return model\n\n # NOTE: Loop in reverse, so that it's quicker to find all the recently\n # added routes and we can remove routes on-the-fly\n for i, route in reversed(list(enumerate(self._app.routes))):\n for j, (custom_handler, handler_method) in enumerate(handlers):\n if matches(route, custom_handler, handler_method): # type: ignore\n self._app.routes.pop(i)\n handlers.pop(j)\n\n return model\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_rest_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. Use as your own risk\"\n )\n kwargs.update(self._settings._custom_rest_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.http_port,\n \"root_path\": self._settings.root_path,\n \"access_log\": self._settings.debug,\n }\n )\n\n if self._settings.logging_settings:\n # If not None, use ours. 
Otherwise, let Uvicorn fall back on its\n # own config.\n kwargs.update({\"log_config\": self._settings.logging_settings})\n\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/rest/server.py"}, {"content": "import logging\n\nloggerName = \"mlserver.rest\"\nlogger = logging.getLogger(loggerName)\n", "path": "mlserver/rest/logging.py"}], "after_files": [{"content": "import uvicorn\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers\nfrom ..model import MLModel\n\nfrom .utils import matches\nfrom .app import create_app\nfrom .logging import logger, disable_health_access_logs\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass RESTServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n self._app = create_app(\n self._settings,\n data_plane=self._data_plane,\n model_repository_handlers=self._model_repository_handlers,\n )\n\n async def add_custom_handlers(self, model: MLModel) -> MLModel:\n handlers = get_custom_handlers(model)\n for custom_handler, handler_method in handlers:\n self._app.add_api_route(\n custom_handler.rest_path,\n handler_method,\n methods=[custom_handler.rest_method],\n )\n\n return model\n\n async def delete_custom_handlers(self, model: MLModel) -> MLModel:\n handlers = get_custom_handlers(model)\n if len(handlers) == 0:\n return model\n\n # NOTE: Loop in reverse, so that it's quicker to find all the recently\n # added routes and we can remove routes on-the-fly\n for i, route in reversed(list(enumerate(self._app.routes))):\n for j, (custom_handler, handler_method) in enumerate(handlers):\n if matches(route, custom_handler, handler_method): # type: ignore\n self._app.routes.pop(i)\n handlers.pop(j)\n\n return model\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n if not self._settings.debug:\n disable_health_access_logs()\n\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_rest_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. Use as your own risk\"\n )\n kwargs.update(self._settings._custom_rest_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.http_port,\n \"root_path\": self._settings.root_path,\n \"access_log\": self._settings.debug,\n }\n )\n\n if self._settings.logging_settings:\n # If not None, use ours. 
Otherwise, let Uvicorn fall back on its\n # own config.\n kwargs.update({\"log_config\": self._settings.logging_settings})\n\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/rest/server.py"}, {"content": "import logging\n\n\nclass HealthEndpointFilter(logging.Filter):\n \"\"\"\n Filter to avoid logging health endpoints.\n From:\n https://github.com/encode/starlette/issues/864#issuecomment-653076434\n \"\"\"\n\n def filter(self, record: logging.LogRecord) -> bool:\n if not isinstance(record.args, tuple):\n return True\n\n if len(record.args) < 3:\n return True\n\n request_method = record.args[1]\n query_string = record.args[2]\n if request_method != \"GET\":\n return True\n\n if query_string in [\"/v2/health/live\", \"/v2/health/ready\"]:\n return False\n\n return True\n\n\ndef disable_health_access_logs() -> None:\n uvicorn_logger = logging.getLogger(\"uvicorn.access\")\n uvicorn_logger.addFilter(HealthEndpointFilter())\n\n\nloggerName = \"mlserver.rest\"\nlogger = logging.getLogger(loggerName)\n", "path": "mlserver/rest/logging.py"}]}
| 1,279 | 424 |
| gh_patches_debug_23720 | rasdani/github-patches | git_diff | ivy-llc__ivy-22870 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
lstsq
--- END ISSUE ---
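The one-word issue is a request to add `lstsq` to the JAX frontend. For context on the target behaviour, `numpy.linalg.lstsq` (which `jax.numpy.linalg.lstsq` mirrors) returns a 4-tuple of solution, residuals, rank and singular values:

```python
import numpy as np

# Fit y = c0 + c1 * t through three points via least squares.
a = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
b = np.array([1.0, 2.9, 5.1])

x, residuals, rank, singular_values = np.linalg.lstsq(a, b, rcond=None)
# x: least-squares solution, residuals: sum(s) of squared residuals,
# rank: effective rank of `a`, singular_values: singular values of `a`
```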
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/numpy/linalg.py`
Content:
```
1 # local
2 import ivy
3 from ivy.functional.frontends.jax import Array
4 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
5 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
6 from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs
7
8
9 @to_ivy_arrays_and_back
10 def cholesky(a):
11 return ivy.cholesky(a)
12
13
14 @to_ivy_arrays_and_back
15 def cond(x, p=None):
16 return ivy.cond(x, p=p)
17
18
19 @to_ivy_arrays_and_back
20 def det(a):
21 return ivy.det(a)
22
23
24 @to_ivy_arrays_and_back
25 def eig(a):
26 return ivy.eig(a)
27
28
29 @to_ivy_arrays_and_back
30 def eigh(a, UPLO="L", symmetrize_input=True):
31 def symmetrize(x):
32 # TODO : Take Hermitian transpose after complex numbers added
33 return (x + ivy.swapaxes(x, -1, -2)) / 2
34
35 if symmetrize_input:
36 a = symmetrize(a)
37
38 return ivy.eigh(a, UPLO=UPLO)
39
40
41 @to_ivy_arrays_and_back
42 def eigvals(a):
43 return ivy.eigvals(a)
44
45
46 @to_ivy_arrays_and_back
47 def eigvalsh(a, UPLO="L"):
48 return ivy.eigvalsh(a, UPLO=UPLO)
49
50
51 @to_ivy_arrays_and_back
52 def inv(a):
53 return ivy.inv(a)
54
55
56 @to_ivy_arrays_and_back
57 def matrix_power(a, n):
58 return ivy.matrix_power(a, n)
59
60
61 @to_ivy_arrays_and_back
62 def matrix_rank(M, tol=None):
63 return ivy.matrix_rank(M, atol=tol)
64
65
66 @to_ivy_arrays_and_back
67 def multi_dot(arrays, *, precision=None):
68 return ivy.multi_dot(arrays)
69
70
71 @to_ivy_arrays_and_back
72 @with_supported_dtypes(
73 {"0.4.14 and below": ("float32", "float64")},
74 "jax",
75 )
76 def norm(x, ord=None, axis=None, keepdims=False):
77 if ord is None:
78 ord = 2
79 if type(axis) in [list, tuple] and len(axis) == 2:
80 return Array(ivy.matrix_norm(x, ord=ord, axis=axis, keepdims=keepdims))
81 return Array(ivy.vector_norm(x, ord=ord, axis=axis, keepdims=keepdims))
82
83
84 @to_ivy_arrays_and_back
85 def pinv(a, rcond=None):
86 return ivy.pinv(a, rtol=rcond)
87
88
89 @to_ivy_arrays_and_back
90 def qr(a, mode="reduced"):
91 return ivy.qr(a, mode=mode)
92
93
94 @to_ivy_arrays_and_back
95 def slogdet(a, method=None):
96 return ivy.slogdet(a)
97
98
99 @to_ivy_arrays_and_back
100 def solve(a, b):
101 return ivy.solve(a, b)
102
103
104 @to_ivy_arrays_and_back
105 def svd(a, /, *, full_matrices=True, compute_uv=True, hermitian=None):
106 if not compute_uv:
107 return ivy.svdvals(a)
108 return ivy.svd(a, full_matrices=full_matrices)
109
110
111 @to_ivy_arrays_and_back
112 @with_unsupported_dtypes({"0.4.14 and below": ("float16", "bfloat16")}, "jax")
113 def tensorinv(a, ind=2):
114 old_shape = ivy.shape(a)
115 prod = 1
116 if ind > 0:
117 invshape = old_shape[ind:] + old_shape[:ind]
118 for k in old_shape[ind:]:
119 prod *= k
120 else:
121 raise ValueError("Invalid ind argument.")
122 a = ivy.reshape(a, shape=(prod, -1))
123 ia = ivy.inv(a)
124 new_shape = tuple([*invshape])
125 return Array(ivy.reshape(ia, shape=new_shape))
126
127
128 @to_ivy_arrays_and_back
129 def tensorsolve(a, b, axes=None):
130 a, b = promote_types_of_jax_inputs(a, b)
131 return ivy.tensorsolve(a, b, axes=axes)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/jax/numpy/linalg.py b/ivy/functional/frontends/jax/numpy/linalg.py
--- a/ivy/functional/frontends/jax/numpy/linalg.py
+++ b/ivy/functional/frontends/jax/numpy/linalg.py
@@ -4,6 +4,7 @@
from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back
from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs
+from ivy.functional.frontends.numpy.linalg import lstsq as numpy_lstsq
@to_ivy_arrays_and_back
@@ -53,6 +54,23 @@
return ivy.inv(a)
+# TODO: replace this with function from API
+# As the composition provides numerically unstable results
+@to_ivy_arrays_and_back
+def lstsq(a, b, rcond=None, *, numpy_resid=False):
+ if numpy_resid:
+ return numpy_lstsq(a, b, rcond=rcond)
+ least_squares_solution = ivy.matmul(
+ ivy.pinv(a, rtol=1e-15).astype(ivy.float64), b.astype(ivy.float64)
+ )
+ residuals = ivy.sum((b - ivy.matmul(a, least_squares_solution)) ** 2).astype(
+ ivy.float64
+ )
+ svd_values = ivy.svd(a, compute_uv=False)
+ rank = ivy.matrix_rank(a).astype(ivy.int32)
+ return (least_squares_solution, residuals, rank, svd_values[0])
+
+
@to_ivy_arrays_and_back
def matrix_power(a, n):
return ivy.matrix_power(a, n)
|
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/numpy/linalg.py b/ivy/functional/frontends/jax/numpy/linalg.py\n--- a/ivy/functional/frontends/jax/numpy/linalg.py\n+++ b/ivy/functional/frontends/jax/numpy/linalg.py\n@@ -4,6 +4,7 @@\n from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\n from ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs\n+from ivy.functional.frontends.numpy.linalg import lstsq as numpy_lstsq\n \n \n @to_ivy_arrays_and_back\n@@ -53,6 +54,23 @@\n return ivy.inv(a)\n \n \n+# TODO: replace this with function from API\n+# As the composition provides numerically unstable results\n+@to_ivy_arrays_and_back\n+def lstsq(a, b, rcond=None, *, numpy_resid=False):\n+ if numpy_resid:\n+ return numpy_lstsq(a, b, rcond=rcond)\n+ least_squares_solution = ivy.matmul(\n+ ivy.pinv(a, rtol=1e-15).astype(ivy.float64), b.astype(ivy.float64)\n+ )\n+ residuals = ivy.sum((b - ivy.matmul(a, least_squares_solution)) ** 2).astype(\n+ ivy.float64\n+ )\n+ svd_values = ivy.svd(a, compute_uv=False)\n+ rank = ivy.matrix_rank(a).astype(ivy.int32)\n+ return (least_squares_solution, residuals, rank, svd_values[0])\n+\n+\n @to_ivy_arrays_and_back\n def matrix_power(a, n):\n return ivy.matrix_power(a, n)\n", "issue": "lstsq\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax import Array\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs\n\n\n@to_ivy_arrays_and_back\ndef cholesky(a):\n return ivy.cholesky(a)\n\n\n@to_ivy_arrays_and_back\ndef cond(x, p=None):\n return ivy.cond(x, p=p)\n\n\n@to_ivy_arrays_and_back\ndef det(a):\n return ivy.det(a)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\ndef eigh(a, UPLO=\"L\", symmetrize_input=True):\n def symmetrize(x):\n # TODO : Take Hermitian transpose after complex numbers added\n return (x + ivy.swapaxes(x, -1, -2)) / 2\n\n if symmetrize_input:\n a = symmetrize(a)\n\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eigvals(a):\n return ivy.eigvals(a)\n\n\n@to_ivy_arrays_and_back\ndef eigvalsh(a, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef inv(a):\n return ivy.inv(a)\n\n\n@to_ivy_arrays_and_back\ndef matrix_power(a, n):\n return ivy.matrix_power(a, n)\n\n\n@to_ivy_arrays_and_back\ndef matrix_rank(M, tol=None):\n return ivy.matrix_rank(M, atol=tol)\n\n\n@to_ivy_arrays_and_back\ndef multi_dot(arrays, *, precision=None):\n return ivy.multi_dot(arrays)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes(\n {\"0.4.14 and below\": (\"float32\", \"float64\")},\n \"jax\",\n)\ndef norm(x, ord=None, axis=None, keepdims=False):\n if ord is None:\n ord = 2\n if type(axis) in [list, tuple] and len(axis) == 2:\n return Array(ivy.matrix_norm(x, ord=ord, axis=axis, keepdims=keepdims))\n return Array(ivy.vector_norm(x, ord=ord, axis=axis, keepdims=keepdims))\n\n\n@to_ivy_arrays_and_back\ndef pinv(a, rcond=None):\n return ivy.pinv(a, rtol=rcond)\n\n\n@to_ivy_arrays_and_back\ndef qr(a, mode=\"reduced\"):\n return ivy.qr(a, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef slogdet(a, method=None):\n return ivy.slogdet(a)\n\n\n@to_ivy_arrays_and_back\ndef solve(a, b):\n return ivy.solve(a, b)\n\n\n@to_ivy_arrays_and_back\ndef svd(a, /, *, 
full_matrices=True, compute_uv=True, hermitian=None):\n if not compute_uv:\n return ivy.svdvals(a)\n return ivy.svd(a, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"0.4.14 and below\": (\"float16\", \"bfloat16\")}, \"jax\")\ndef tensorinv(a, ind=2):\n old_shape = ivy.shape(a)\n prod = 1\n if ind > 0:\n invshape = old_shape[ind:] + old_shape[:ind]\n for k in old_shape[ind:]:\n prod *= k\n else:\n raise ValueError(\"Invalid ind argument.\")\n a = ivy.reshape(a, shape=(prod, -1))\n ia = ivy.inv(a)\n new_shape = tuple([*invshape])\n return Array(ivy.reshape(ia, shape=new_shape))\n\n\n@to_ivy_arrays_and_back\ndef tensorsolve(a, b, axes=None):\n a, b = promote_types_of_jax_inputs(a, b)\n return ivy.tensorsolve(a, b, axes=axes)\n", "path": "ivy/functional/frontends/jax/numpy/linalg.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.jax import Array\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.jax.numpy import promote_types_of_jax_inputs\nfrom ivy.functional.frontends.numpy.linalg import lstsq as numpy_lstsq\n\n\n@to_ivy_arrays_and_back\ndef cholesky(a):\n return ivy.cholesky(a)\n\n\n@to_ivy_arrays_and_back\ndef cond(x, p=None):\n return ivy.cond(x, p=p)\n\n\n@to_ivy_arrays_and_back\ndef det(a):\n return ivy.det(a)\n\n\n@to_ivy_arrays_and_back\ndef eig(a):\n return ivy.eig(a)\n\n\n@to_ivy_arrays_and_back\ndef eigh(a, UPLO=\"L\", symmetrize_input=True):\n def symmetrize(x):\n # TODO : Take Hermitian transpose after complex numbers added\n return (x + ivy.swapaxes(x, -1, -2)) / 2\n\n if symmetrize_input:\n a = symmetrize(a)\n\n return ivy.eigh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef eigvals(a):\n return ivy.eigvals(a)\n\n\n@to_ivy_arrays_and_back\ndef eigvalsh(a, UPLO=\"L\"):\n return ivy.eigvalsh(a, UPLO=UPLO)\n\n\n@to_ivy_arrays_and_back\ndef inv(a):\n return ivy.inv(a)\n\n\n# TODO: replace this with function from API\n# As the composition provides numerically unstable results\n@to_ivy_arrays_and_back\ndef lstsq(a, b, rcond=None, *, numpy_resid=False):\n if numpy_resid:\n return numpy_lstsq(a, b, rcond=rcond)\n least_squares_solution = ivy.matmul(\n ivy.pinv(a, rtol=1e-15).astype(ivy.float64), b.astype(ivy.float64)\n )\n residuals = ivy.sum((b - ivy.matmul(a, least_squares_solution)) ** 2).astype(\n ivy.float64\n )\n svd_values = ivy.svd(a, compute_uv=False)\n rank = ivy.matrix_rank(a).astype(ivy.int32)\n return (least_squares_solution, residuals, rank, svd_values[0])\n\n\n@to_ivy_arrays_and_back\ndef matrix_power(a, n):\n return ivy.matrix_power(a, n)\n\n\n@to_ivy_arrays_and_back\ndef matrix_rank(M, tol=None):\n return ivy.matrix_rank(M, atol=tol)\n\n\n@to_ivy_arrays_and_back\ndef multi_dot(arrays, *, precision=None):\n return ivy.multi_dot(arrays)\n\n\n@to_ivy_arrays_and_back\n@with_supported_dtypes(\n {\"0.4.14 and below\": (\"float32\", \"float64\")},\n \"jax\",\n)\ndef norm(x, ord=None, axis=None, keepdims=False):\n if ord is None:\n ord = 2\n if type(axis) in [list, tuple] and len(axis) == 2:\n return Array(ivy.matrix_norm(x, ord=ord, axis=axis, keepdims=keepdims))\n return Array(ivy.vector_norm(x, ord=ord, axis=axis, keepdims=keepdims))\n\n\n@to_ivy_arrays_and_back\ndef pinv(a, rcond=None):\n return ivy.pinv(a, rtol=rcond)\n\n\n@to_ivy_arrays_and_back\ndef qr(a, mode=\"reduced\"):\n return ivy.qr(a, mode=mode)\n\n\n@to_ivy_arrays_and_back\ndef slogdet(a, 
method=None):\n return ivy.slogdet(a)\n\n\n@to_ivy_arrays_and_back\ndef solve(a, b):\n return ivy.solve(a, b)\n\n\n@to_ivy_arrays_and_back\ndef svd(a, /, *, full_matrices=True, compute_uv=True, hermitian=None):\n if not compute_uv:\n return ivy.svdvals(a)\n return ivy.svd(a, full_matrices=full_matrices)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"0.4.14 and below\": (\"float16\", \"bfloat16\")}, \"jax\")\ndef tensorinv(a, ind=2):\n old_shape = ivy.shape(a)\n prod = 1\n if ind > 0:\n invshape = old_shape[ind:] + old_shape[:ind]\n for k in old_shape[ind:]:\n prod *= k\n else:\n raise ValueError(\"Invalid ind argument.\")\n a = ivy.reshape(a, shape=(prod, -1))\n ia = ivy.inv(a)\n new_shape = tuple([*invshape])\n return Array(ivy.reshape(ia, shape=new_shape))\n\n\n@to_ivy_arrays_and_back\ndef tensorsolve(a, b, axes=None):\n a, b = promote_types_of_jax_inputs(a, b)\n return ivy.tensorsolve(a, b, axes=axes)\n", "path": "ivy/functional/frontends/jax/numpy/linalg.py"}]}
| 1,508 | 404 |
| gh_patches_debug_3684 | rasdani/github-patches | git_diff | napari__napari-5474 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Visual Bug: Labels Layer Controls get squished when toggling 3D
## 🐛 Bug
When toggling to 3D, the labels layer Layer Controls widget gains an extra line `rendering`.
However, the widget doesn't resize for this, so it results in a visual bug of everything squished and partially cut off:
<img width="267" alt="image" src="https://user-images.githubusercontent.com/76622105/212083289-a7333963-f66a-4875-bd11-e49965ef7a77.png">
If you manually expand the widget, it will look fine. However, in contrast to the 2D version of the widget, it will let you resize it vertically to be too small, squishing the contents again.
## To Reproduce
Steps to reproduce the behavior:
1. open napari
2. make a labels layer (can be empty)
3. toggle 3D
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
## Expected behavior
The widget should either resize to permit the extra line item or start out sufficiently large that when the line item is added the visual isn't squished.
## Environment
macOS 13.1, pyqt5, 0.4.17
## Additional context
<!-- Add any other context about the problem here. -->
--- END ISSUE ---
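For triage purposes, the reproduction steps above roughly correspond to this short script (a sketch; the layer shape is arbitrary):

```python
import numpy as np
import napari

viewer = napari.Viewer()
viewer.add_labels(np.zeros((16, 16, 16), dtype=int))  # an (empty) labels layer
viewer.dims.ndisplay = 3  # toggle to 3D: the extra "rendering" row appears
napari.run()
```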
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_qt/layer_controls/qt_layer_controls_container.py`
Content:
```
1 from qtpy.QtWidgets import QFrame, QStackedWidget
2
3 from napari._qt.layer_controls.qt_image_controls import QtImageControls
4 from napari._qt.layer_controls.qt_labels_controls import QtLabelsControls
5 from napari._qt.layer_controls.qt_points_controls import QtPointsControls
6 from napari._qt.layer_controls.qt_shapes_controls import QtShapesControls
7 from napari._qt.layer_controls.qt_surface_controls import QtSurfaceControls
8 from napari._qt.layer_controls.qt_tracks_controls import QtTracksControls
9 from napari._qt.layer_controls.qt_vectors_controls import QtVectorsControls
10 from napari.layers import (
11 Image,
12 Labels,
13 Points,
14 Shapes,
15 Surface,
16 Tracks,
17 Vectors,
18 )
19 from napari.utils import config
20 from napari.utils.translations import trans
21
22 layer_to_controls = {
23 Labels: QtLabelsControls,
24 Image: QtImageControls,
25 Points: QtPointsControls,
26 Shapes: QtShapesControls,
27 Surface: QtSurfaceControls,
28 Vectors: QtVectorsControls,
29 Tracks: QtTracksControls,
30 }
31
32 if config.async_loading:
33 from napari.layers.image.experimental.octree_image import _OctreeImageBase
34
35 # The user visible layer controls for OctreeImage layers are identical
36 # to the regular image layer controls, for now.
37 layer_to_controls[_OctreeImageBase] = QtImageControls
38
39
40 def create_qt_layer_controls(layer):
41 """
42 Create a qt controls widget for a layer based on its layer type.
43
44 In case of a subclass, the type higher in the layer's method resolution
45 order will be used.
46
47 Parameters
48 ----------
49 layer : napari.layers._base_layer.Layer
50 Layer that needs its controls widget created.
51
52 Returns
53 -------
54 controls : napari.layers.base.QtLayerControls
55 Qt controls widget
56 """
57 candidates = []
58 for layer_type in layer_to_controls:
59 if isinstance(layer, layer_type):
60 candidates.append(layer_type)
61
62 if not candidates:
63 raise TypeError(
64 trans._(
65 'Could not find QtControls for layer of type {type_}',
66 deferred=True,
67 type_=type(layer),
68 )
69 )
70
71 layer_cls = layer.__class__
72 # Sort the list of candidates by 'lineage'
73 candidates.sort(key=lambda layer_type: layer_cls.mro().index(layer_type))
74 controls = layer_to_controls[candidates[0]]
75 return controls(layer)
76
77
78 class QtLayerControlsContainer(QStackedWidget):
79 """Container widget for QtLayerControl widgets.
80
81 Parameters
82 ----------
83 viewer : napari.components.ViewerModel
84 Napari viewer containing the rendered scene, layers, and controls.
85
86 Attributes
87 ----------
88 empty_widget : qtpy.QtWidgets.QFrame
89 Empty placeholder frame for when no layer is selected.
90 viewer : napari.components.ViewerModel
91 Napari viewer containing the rendered scene, layers, and controls.
92 widgets : dict
93 Dictionary of key value pairs matching layer with its widget controls.
94 widgets[layer] = controls
95 """
96
97 def __init__(self, viewer):
98 super().__init__()
99 self.setProperty("emphasized", True)
100 self.viewer = viewer
101
102 self.setMouseTracking(True)
103 self.empty_widget = QFrame()
104 self.widgets = {}
105 self.addWidget(self.empty_widget)
106 self.setCurrentWidget(self.empty_widget)
107
108 self.viewer.layers.events.inserted.connect(self._add)
109 self.viewer.layers.events.removed.connect(self._remove)
110 viewer.layers.selection.events.active.connect(self._display)
111
112 def _display(self, event):
113 """Change the displayed controls to be those of the target layer.
114
115 Parameters
116 ----------
117 event : Event
118 Event with the target layer at `event.item`.
119 """
120 layer = event.value
121 if layer is None:
122 self.setCurrentWidget(self.empty_widget)
123 else:
124 controls = self.widgets[layer]
125 self.setCurrentWidget(controls)
126
127 def _add(self, event):
128 """Add the controls target layer to the list of control widgets.
129
130 Parameters
131 ----------
132 event : Event
133 Event with the target layer at `event.value`.
134 """
135 layer = event.value
136 controls = create_qt_layer_controls(layer)
137 self.addWidget(controls)
138 self.widgets[layer] = controls
139
140 def _remove(self, event):
141 """Remove the controls target layer from the list of control widgets.
142
143 Parameters
144 ----------
145 event : Event
146 Event with the target layer at `event.value`.
147 """
148 layer = event.value
149 controls = self.widgets[layer]
150 self.removeWidget(controls)
151 # controls.close()
152 controls.hide()
153 controls.deleteLater()
154 controls = None
155 del self.widgets[layer]
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/napari/_qt/layer_controls/qt_layer_controls_container.py b/napari/_qt/layer_controls/qt_layer_controls_container.py
--- a/napari/_qt/layer_controls/qt_layer_controls_container.py
+++ b/napari/_qt/layer_controls/qt_layer_controls_container.py
@@ -101,6 +101,7 @@
self.setMouseTracking(True)
self.empty_widget = QFrame()
+ self.empty_widget.setObjectName("empty_controls_widget")
self.widgets = {}
self.addWidget(self.empty_widget)
self.setCurrentWidget(self.empty_widget)
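The golden diff only tags the placeholder frame with an object name. The point of an object name is that a Qt stylesheet can single out that one widget, which is presumably where napari's theme then reserves enough vertical space for the controls stack. A minimal sketch of that mechanism; the selector name matches the patch, but the `min-height` value is an invented placeholder rather than napari's real rule:

```python
# Sketch: an objectName lets a QSS rule target one specific QFrame.
from qtpy.QtWidgets import QApplication, QFrame

app = QApplication([])
frame = QFrame()
frame.setObjectName("empty_controls_widget")  # same name as in the patch
app.setStyleSheet("QFrame#empty_controls_widget { min-height: 200px; }")
frame.show()
app.exec_()
```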
|
{"golden_diff": "diff --git a/napari/_qt/layer_controls/qt_layer_controls_container.py b/napari/_qt/layer_controls/qt_layer_controls_container.py\n--- a/napari/_qt/layer_controls/qt_layer_controls_container.py\n+++ b/napari/_qt/layer_controls/qt_layer_controls_container.py\n@@ -101,6 +101,7 @@\n \n self.setMouseTracking(True)\n self.empty_widget = QFrame()\n+ self.empty_widget.setObjectName(\"empty_controls_widget\")\n self.widgets = {}\n self.addWidget(self.empty_widget)\n self.setCurrentWidget(self.empty_widget)\n", "issue": "Visual Bug: Labels Layer Controls get squished when toggling 3D\n## \ud83d\udc1b Bug\r\n\r\nWhen toggling to 3D, the labels layer Layer Controls widget gains an extra line `rendering`.\r\nHowever the widget doesn't resize for this, so it results in a visual bug of everything squished and partially cut off:\r\n<img width=\"267\" alt=\"image\" src=\"https://user-images.githubusercontent.com/76622105/212083289-a7333963-f66a-4875-bd11-e49965ef7a77.png\">\r\n\r\nIf you manually expand the widget, it will look fine. However, in contrast to the 2D version of the widget, it will let you resize it vertically to be too small, squishing the contents again.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. open napari\r\n2. make a labels layer (can be empty)\r\n3. toggle 3D\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n## Expected behavior\r\n\r\nThe widget should either resize to permit the extra line item or start out sufficiently large that when the line item is added the visual isn't squished.\r\n\r\n## Environment\r\n\r\nmacOS 13.1, pyqt5, 0.4.17\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "from qtpy.QtWidgets import QFrame, QStackedWidget\n\nfrom napari._qt.layer_controls.qt_image_controls import QtImageControls\nfrom napari._qt.layer_controls.qt_labels_controls import QtLabelsControls\nfrom napari._qt.layer_controls.qt_points_controls import QtPointsControls\nfrom napari._qt.layer_controls.qt_shapes_controls import QtShapesControls\nfrom napari._qt.layer_controls.qt_surface_controls import QtSurfaceControls\nfrom napari._qt.layer_controls.qt_tracks_controls import QtTracksControls\nfrom napari._qt.layer_controls.qt_vectors_controls import QtVectorsControls\nfrom napari.layers import (\n Image,\n Labels,\n Points,\n Shapes,\n Surface,\n Tracks,\n Vectors,\n)\nfrom napari.utils import config\nfrom napari.utils.translations import trans\n\nlayer_to_controls = {\n Labels: QtLabelsControls,\n Image: QtImageControls,\n Points: QtPointsControls,\n Shapes: QtShapesControls,\n Surface: QtSurfaceControls,\n Vectors: QtVectorsControls,\n Tracks: QtTracksControls,\n}\n\nif config.async_loading:\n from napari.layers.image.experimental.octree_image import _OctreeImageBase\n\n # The user visible layer controls for OctreeImage layers are identical\n # to the regular image layer controls, for now.\n layer_to_controls[_OctreeImageBase] = QtImageControls\n\n\ndef create_qt_layer_controls(layer):\n \"\"\"\n Create a qt controls widget for a layer based on its layer type.\n\n In case of a subclass, the type higher in the layer's method resolution\n order will be used.\n\n Parameters\n ----------\n layer : napari.layers._base_layer.Layer\n Layer that needs its controls widget created.\n\n Returns\n -------\n controls : napari.layers.base.QtLayerControls\n Qt controls widget\n \"\"\"\n candidates = []\n for layer_type in 
layer_to_controls:\n if isinstance(layer, layer_type):\n candidates.append(layer_type)\n\n if not candidates:\n raise TypeError(\n trans._(\n 'Could not find QtControls for layer of type {type_}',\n deferred=True,\n type_=type(layer),\n )\n )\n\n layer_cls = layer.__class__\n # Sort the list of candidates by 'lineage'\n candidates.sort(key=lambda layer_type: layer_cls.mro().index(layer_type))\n controls = layer_to_controls[candidates[0]]\n return controls(layer)\n\n\nclass QtLayerControlsContainer(QStackedWidget):\n \"\"\"Container widget for QtLayerControl widgets.\n\n Parameters\n ----------\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n\n Attributes\n ----------\n empty_widget : qtpy.QtWidgets.QFrame\n Empty placeholder frame for when no layer is selected.\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n widgets : dict\n Dictionary of key value pairs matching layer with its widget controls.\n widgets[layer] = controls\n \"\"\"\n\n def __init__(self, viewer):\n super().__init__()\n self.setProperty(\"emphasized\", True)\n self.viewer = viewer\n\n self.setMouseTracking(True)\n self.empty_widget = QFrame()\n self.widgets = {}\n self.addWidget(self.empty_widget)\n self.setCurrentWidget(self.empty_widget)\n\n self.viewer.layers.events.inserted.connect(self._add)\n self.viewer.layers.events.removed.connect(self._remove)\n viewer.layers.selection.events.active.connect(self._display)\n\n def _display(self, event):\n \"\"\"Change the displayed controls to be those of the target layer.\n\n Parameters\n ----------\n event : Event\n Event with the target layer at `event.item`.\n \"\"\"\n layer = event.value\n if layer is None:\n self.setCurrentWidget(self.empty_widget)\n else:\n controls = self.widgets[layer]\n self.setCurrentWidget(controls)\n\n def _add(self, event):\n \"\"\"Add the controls target layer to the list of control widgets.\n\n Parameters\n ----------\n event : Event\n Event with the target layer at `event.value`.\n \"\"\"\n layer = event.value\n controls = create_qt_layer_controls(layer)\n self.addWidget(controls)\n self.widgets[layer] = controls\n\n def _remove(self, event):\n \"\"\"Remove the controls target layer from the list of control widgets.\n\n Parameters\n ----------\n event : Event\n Event with the target layer at `event.value`.\n \"\"\"\n layer = event.value\n controls = self.widgets[layer]\n self.removeWidget(controls)\n # controls.close()\n controls.hide()\n controls.deleteLater()\n controls = None\n del self.widgets[layer]\n", "path": "napari/_qt/layer_controls/qt_layer_controls_container.py"}], "after_files": [{"content": "from qtpy.QtWidgets import QFrame, QStackedWidget\n\nfrom napari._qt.layer_controls.qt_image_controls import QtImageControls\nfrom napari._qt.layer_controls.qt_labels_controls import QtLabelsControls\nfrom napari._qt.layer_controls.qt_points_controls import QtPointsControls\nfrom napari._qt.layer_controls.qt_shapes_controls import QtShapesControls\nfrom napari._qt.layer_controls.qt_surface_controls import QtSurfaceControls\nfrom napari._qt.layer_controls.qt_tracks_controls import QtTracksControls\nfrom napari._qt.layer_controls.qt_vectors_controls import QtVectorsControls\nfrom napari.layers import (\n Image,\n Labels,\n Points,\n Shapes,\n Surface,\n Tracks,\n Vectors,\n)\nfrom napari.utils import config\nfrom napari.utils.translations import trans\n\nlayer_to_controls = {\n Labels: QtLabelsControls,\n Image: 
QtImageControls,\n Points: QtPointsControls,\n Shapes: QtShapesControls,\n Surface: QtSurfaceControls,\n Vectors: QtVectorsControls,\n Tracks: QtTracksControls,\n}\n\nif config.async_loading:\n from napari.layers.image.experimental.octree_image import _OctreeImageBase\n\n # The user visible layer controls for OctreeImage layers are identical\n # to the regular image layer controls, for now.\n layer_to_controls[_OctreeImageBase] = QtImageControls\n\n\ndef create_qt_layer_controls(layer):\n \"\"\"\n Create a qt controls widget for a layer based on its layer type.\n\n In case of a subclass, the type higher in the layer's method resolution\n order will be used.\n\n Parameters\n ----------\n layer : napari.layers._base_layer.Layer\n Layer that needs its controls widget created.\n\n Returns\n -------\n controls : napari.layers.base.QtLayerControls\n Qt controls widget\n \"\"\"\n candidates = []\n for layer_type in layer_to_controls:\n if isinstance(layer, layer_type):\n candidates.append(layer_type)\n\n if not candidates:\n raise TypeError(\n trans._(\n 'Could not find QtControls for layer of type {type_}',\n deferred=True,\n type_=type(layer),\n )\n )\n\n layer_cls = layer.__class__\n # Sort the list of candidates by 'lineage'\n candidates.sort(key=lambda layer_type: layer_cls.mro().index(layer_type))\n controls = layer_to_controls[candidates[0]]\n return controls(layer)\n\n\nclass QtLayerControlsContainer(QStackedWidget):\n \"\"\"Container widget for QtLayerControl widgets.\n\n Parameters\n ----------\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n\n Attributes\n ----------\n empty_widget : qtpy.QtWidgets.QFrame\n Empty placeholder frame for when no layer is selected.\n viewer : napari.components.ViewerModel\n Napari viewer containing the rendered scene, layers, and controls.\n widgets : dict\n Dictionary of key value pairs matching layer with its widget controls.\n widgets[layer] = controls\n \"\"\"\n\n def __init__(self, viewer):\n super().__init__()\n self.setProperty(\"emphasized\", True)\n self.viewer = viewer\n\n self.setMouseTracking(True)\n self.empty_widget = QFrame()\n self.empty_widget.setObjectName(\"empty_controls_widget\")\n self.widgets = {}\n self.addWidget(self.empty_widget)\n self.setCurrentWidget(self.empty_widget)\n\n self.viewer.layers.events.inserted.connect(self._add)\n self.viewer.layers.events.removed.connect(self._remove)\n viewer.layers.selection.events.active.connect(self._display)\n\n def _display(self, event):\n \"\"\"Change the displayed controls to be those of the target layer.\n\n Parameters\n ----------\n event : Event\n Event with the target layer at `event.item`.\n \"\"\"\n layer = event.value\n if layer is None:\n self.setCurrentWidget(self.empty_widget)\n else:\n controls = self.widgets[layer]\n self.setCurrentWidget(controls)\n\n def _add(self, event):\n \"\"\"Add the controls target layer to the list of control widgets.\n\n Parameters\n ----------\n event : Event\n Event with the target layer at `event.value`.\n \"\"\"\n layer = event.value\n controls = create_qt_layer_controls(layer)\n self.addWidget(controls)\n self.widgets[layer] = controls\n\n def _remove(self, event):\n \"\"\"Remove the controls target layer from the list of control widgets.\n\n Parameters\n ----------\n event : Event\n Event with the target layer at `event.value`.\n \"\"\"\n layer = event.value\n controls = self.widgets[layer]\n self.removeWidget(controls)\n # controls.close()\n controls.hide()\n 
controls.deleteLater()\n controls = None\n del self.widgets[layer]\n", "path": "napari/_qt/layer_controls/qt_layer_controls_container.py"}]}
| 1,945 | 125 |
gh_patches_debug_10802
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-2147
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Audiências Públicas (Public Hearings) cannot be edited
When creating an Audiência Pública and saving it, the legislative matter metadata entered in the form does not show up.
When clicking Edit, only the title of the created hearing appears.
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/audiencia/views.py`
Content:
```
1 from django.http import HttpResponse
2 from django.views.decorators.clickjacking import xframe_options_exempt
3 from django.views.generic import UpdateView
4 from sapl.crud.base import RP_DETAIL, RP_LIST, Crud
5
6 from .forms import AudienciaForm
7 from .models import AudienciaPublica
8
9
10 def index(request):
11 return HttpResponse("Audiência Pública")
12
13
14 class AudienciaCrud(Crud):
15 model = AudienciaPublica
16 public = [RP_LIST, RP_DETAIL, ]
17
18 class BaseMixin(Crud.BaseMixin):
19 list_field_names = ['numero', 'nome', 'tipo', 'materia',
20 'data']
21 ordering = 'nome', 'numero', 'tipo', 'data'
22
23 class ListView(Crud.ListView):
24 paginate_by = 10
25
26 class CreateView(Crud.CreateView):
27 form_class = AudienciaForm
28
29 def form_valid(self, form):
30 return super(Crud.CreateView, self).form_valid(form)
31
32 class UpdateView(Crud.UpdateView):
33 form_class = AudienciaForm
34
35 def get_initial(self):
36 initial = super(UpdateView, self).get_initial()
37 initial['tipo_materia'] = self.object.materia.tipo.id
38 initial['numero_materia'] = self.object.materia.numero
39 initial['ano_materia'] = self.object.materia.ano
40 return initial
41
42 class DeleteView(Crud.DeleteView):
43 pass
44
45 class DetailView(Crud.DetailView):
46
47 layout_key = 'AudienciaPublicaDetail'
48
49 @xframe_options_exempt
50 def get(self, request, *args, **kwargs):
51 return super().get(request, *args, **kwargs)
52
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py
--- a/sapl/audiencia/views.py
+++ b/sapl/audiencia/views.py
@@ -34,9 +34,10 @@
def get_initial(self):
initial = super(UpdateView, self).get_initial()
- initial['tipo_materia'] = self.object.materia.tipo.id
- initial['numero_materia'] = self.object.materia.numero
- initial['ano_materia'] = self.object.materia.ano
+ if self.object.materia:
+ initial['tipo_materia'] = self.object.materia.tipo.id
+ initial['numero_materia'] = self.object.materia.numero
+ initial['ano_materia'] = self.object.materia.ano
return initial
class DeleteView(Crud.DeleteView):
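The fix is a plain None-guard: `materia` is an optional relation, so the related fields are only copied into the form's initial data when it is actually set. A small self-contained sketch of the same guard, using hypothetical stand-ins for the Django models:

```python
from collections import namedtuple

# Hypothetical stand-ins for the real models, only to exercise the guard.
Tipo = namedtuple('Tipo', 'id')
Materia = namedtuple('Materia', 'tipo numero ano')

def build_initial(materia):
    initial = {}
    if materia:  # same condition the patch adds
        initial['tipo_materia'] = materia.tipo.id
        initial['numero_materia'] = materia.numero
        initial['ano_materia'] = materia.ano
    return initial

print(build_initial(None))                                   # {}
print(build_initial(Materia(Tipo(1), numero=10, ano=2018)))  # all three keys
```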
|
{"golden_diff": "diff --git a/sapl/audiencia/views.py b/sapl/audiencia/views.py\n--- a/sapl/audiencia/views.py\n+++ b/sapl/audiencia/views.py\n@@ -34,9 +34,10 @@\n \n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n- initial['tipo_materia'] = self.object.materia.tipo.id\n- initial['numero_materia'] = self.object.materia.numero\n- initial['ano_materia'] = self.object.materia.ano\n+ if self.object.materia:\n+ initial['tipo_materia'] = self.object.materia.tipo.id\n+ initial['numero_materia'] = self.object.materia.numero\n+ initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n", "issue": "Audi\u00eancias P\u00fablicas sem possibilidade de Edi\u00e7\u00e3o\nAo criar uma Audi\u00eancia P\u00fablica e salva-la, n\u00e3o aparecem os metadados da mat\u00e9ria legislativa inseridas no preenchimento. \r\nAo clicar em Editar, s\u00f3 aparece o t\u00edtulo da audi\u00eancia criada.\r\ngrato\n", "before_files": [{"content": "from django.http import HttpResponse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud\n\nfrom .forms import AudienciaForm\nfrom .models import AudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data']\n ordering = 'nome', 'numero', 'tipo', 'data'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = self.object.materia.numero\n initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n ", "path": "sapl/audiencia/views.py"}], "after_files": [{"content": "from django.http import HttpResponse\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.generic import UpdateView\nfrom sapl.crud.base import RP_DETAIL, RP_LIST, Crud\n\nfrom .forms import AudienciaForm\nfrom .models import AudienciaPublica\n\n\ndef index(request):\n return HttpResponse(\"Audi\u00eancia P\u00fablica\")\n\n\nclass AudienciaCrud(Crud):\n model = AudienciaPublica\n public = [RP_LIST, RP_DETAIL, ]\n\n class BaseMixin(Crud.BaseMixin):\n list_field_names = ['numero', 'nome', 'tipo', 'materia',\n 'data']\n ordering = 'nome', 'numero', 'tipo', 'data'\n\n class ListView(Crud.ListView):\n paginate_by = 10\n\n class CreateView(Crud.CreateView):\n form_class = AudienciaForm\n\n def form_valid(self, form):\n return super(Crud.CreateView, self).form_valid(form)\n\n class UpdateView(Crud.UpdateView):\n form_class = AudienciaForm\n\n def get_initial(self):\n initial = super(UpdateView, self).get_initial()\n if self.object.materia:\n initial['tipo_materia'] = self.object.materia.tipo.id\n initial['numero_materia'] = 
self.object.materia.numero\n initial['ano_materia'] = self.object.materia.ano\n return initial\n \n class DeleteView(Crud.DeleteView):\n pass\n\n class DetailView(Crud.DetailView):\n\n layout_key = 'AudienciaPublicaDetail'\n\n @xframe_options_exempt\n def get(self, request, *args, **kwargs):\n return super().get(request, *args, **kwargs)\n\n ", "path": "sapl/audiencia/views.py"}]}
| 788 | 188 |
gh_patches_debug_5571
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-6099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
certbot-nginx requires acme >= 0.25
Because of the import of symbols `from acme.magic_typing`, the nginx plugin released in 0.25 depends on acme 0.25 or better. However, setup.py only lists `acme>0.21.1`, leading to a failure to build from source (and potential run-time failures).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `certbot-nginx/setup.py`
Content:
```
1 from setuptools import setup
2 from setuptools import find_packages
3
4
5 version = '0.26.0.dev0'
6
7 # Remember to update local-oldest-requirements.txt when changing the minimum
8 # acme/certbot version.
9 install_requires = [
10 # This plugin works with an older version of acme, but Certbot does not.
11 # 0.22.0 is specified here to work around
12 # https://github.com/pypa/pip/issues/988.
13 'acme>0.21.1',
14 'certbot>0.21.1',
15 'mock',
16 'PyOpenSSL',
17 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?
18 'setuptools',
19 'zope.interface',
20 ]
21
22 docs_extras = [
23 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
24 'sphinx_rtd_theme',
25 ]
26
27 setup(
28 name='certbot-nginx',
29 version=version,
30 description="Nginx plugin for Certbot",
31 url='https://github.com/letsencrypt/letsencrypt',
32 author="Certbot Project",
33 author_email='[email protected]',
34 license='Apache License 2.0',
35 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
36 classifiers=[
37 'Development Status :: 3 - Alpha',
38 'Environment :: Plugins',
39 'Intended Audience :: System Administrators',
40 'License :: OSI Approved :: Apache Software License',
41 'Operating System :: POSIX :: Linux',
42 'Programming Language :: Python',
43 'Programming Language :: Python :: 2',
44 'Programming Language :: Python :: 2.7',
45 'Programming Language :: Python :: 3',
46 'Programming Language :: Python :: 3.4',
47 'Programming Language :: Python :: 3.5',
48 'Programming Language :: Python :: 3.6',
49 'Topic :: Internet :: WWW/HTTP',
50 'Topic :: Security',
51 'Topic :: System :: Installation/Setup',
52 'Topic :: System :: Networking',
53 'Topic :: System :: Systems Administration',
54 'Topic :: Utilities',
55 ],
56
57 packages=find_packages(),
58 include_package_data=True,
59 install_requires=install_requires,
60 extras_require={
61 'docs': docs_extras,
62 },
63 entry_points={
64 'certbot.plugins': [
65 'nginx = certbot_nginx.configurator:NginxConfigurator',
66 ],
67 },
68 test_suite='certbot_nginx',
69 )
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py
--- a/certbot-nginx/setup.py
+++ b/certbot-nginx/setup.py
@@ -7,10 +7,7 @@
# Remember to update local-oldest-requirements.txt when changing the minimum
# acme/certbot version.
install_requires = [
- # This plugin works with an older version of acme, but Certbot does not.
- # 0.22.0 is specified here to work around
- # https://github.com/pypa/pip/issues/988.
- 'acme>0.21.1',
+ 'acme>=0.25.0',
'certbot>0.21.1',
'mock',
'PyOpenSSL',
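The floor exists because the plugin imports `acme.magic_typing`, a module that only ships with acme 0.25 and later; with an older acme the package installs but fails at import time. A quick probe of that precondition (a sketch, not part of the package):

```python
# Sketch: check that the installed acme is new enough for certbot-nginx.
try:
    import acme.magic_typing  # noqa: F401  (introduced in acme 0.25)
except ImportError:
    raise SystemExit("acme >= 0.25.0 is required by certbot-nginx")
print("acme.magic_typing is importable; the pin is satisfied")
```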
|
{"golden_diff": "diff --git a/certbot-nginx/setup.py b/certbot-nginx/setup.py\n--- a/certbot-nginx/setup.py\n+++ b/certbot-nginx/setup.py\n@@ -7,10 +7,7 @@\n # Remember to update local-oldest-requirements.txt when changing the minimum\n # acme/certbot version.\n install_requires = [\n- # This plugin works with an older version of acme, but Certbot does not.\n- # 0.22.0 is specified here to work around\n- # https://github.com/pypa/pip/issues/988.\n- 'acme>0.21.1',\n+ 'acme>=0.25.0',\n 'certbot>0.21.1',\n 'mock',\n 'PyOpenSSL',\n", "issue": "certbot-nginx requires acme >= 0.25\nBecause of the import of symbols `from acme.magic_typing`, the nginx plugin released in 0.25 depends on acme 0.25 or better. However, setup.py only lists `acme>0.21.1`, leading to a failure to build from source (and potential run-time failures).\n", "before_files": [{"content": "from setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '0.26.0.dev0'\n\n# Remember to update local-oldest-requirements.txt when changing the minimum\n# acme/certbot version.\ninstall_requires = [\n # This plugin works with an older version of acme, but Certbot does not.\n # 0.22.0 is specified here to work around\n # https://github.com/pypa/pip/issues/988.\n 'acme>0.21.1',\n 'certbot>0.21.1',\n 'mock',\n 'PyOpenSSL',\n 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n 'setuptools',\n 'zope.interface',\n]\n\ndocs_extras = [\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot-nginx',\n version=version,\n description=\"Nginx plugin for Certbot\",\n url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'docs': docs_extras,\n },\n entry_points={\n 'certbot.plugins': [\n 'nginx = certbot_nginx.configurator:NginxConfigurator',\n ],\n },\n test_suite='certbot_nginx',\n)\n", "path": "certbot-nginx/setup.py"}], "after_files": [{"content": "from setuptools import setup\nfrom setuptools import find_packages\n\n\nversion = '0.26.0.dev0'\n\n# Remember to update local-oldest-requirements.txt when changing the minimum\n# acme/certbot version.\ninstall_requires = [\n 'acme>=0.25.0',\n 'certbot>0.21.1',\n 'mock',\n 'PyOpenSSL',\n 'pyparsing>=1.5.5', # Python3 support; perhaps unnecessary?\n 'setuptools',\n 'zope.interface',\n]\n\ndocs_extras = [\n 'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='certbot-nginx',\n version=version,\n description=\"Nginx plugin for Certbot\",\n 
url='https://github.com/letsencrypt/letsencrypt',\n author=\"Certbot Project\",\n author_email='[email protected]',\n license='Apache License 2.0',\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Plugins',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Security',\n 'Topic :: System :: Installation/Setup',\n 'Topic :: System :: Networking',\n 'Topic :: System :: Systems Administration',\n 'Topic :: Utilities',\n ],\n\n packages=find_packages(),\n include_package_data=True,\n install_requires=install_requires,\n extras_require={\n 'docs': docs_extras,\n },\n entry_points={\n 'certbot.plugins': [\n 'nginx = certbot_nginx.configurator:NginxConfigurator',\n ],\n },\n test_suite='certbot_nginx',\n)\n", "path": "certbot-nginx/setup.py"}]}
| 1,038 | 182 |
gh_patches_debug_20003
|
rasdani/github-patches
|
git_diff
|
searx__searx-925
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a software category and add the Free Software Directory search engine
Shame on me, I forgot to ask this.
I am a volunteer on the [FSD](https://directory.fsf.org/wiki/Main_Page) (Free Software Directory).
It would be nice if people could look for free/libre software in the searx engine.
When possible, could someone please add the Free Software Directory so that people can easily find free software.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/mediawiki.py`
Content:
```
1 """
2 general mediawiki-engine (Web)
3
4 @website websites built on mediawiki (https://www.mediawiki.org)
5 @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
6
7 @using-api yes
8 @results JSON
9 @stable yes
10 @parse url, title
11
12 @todo content
13 """
14
15 from json import loads
16 from string import Formatter
17 from searx.url_utils import urlencode, quote
18
19 # engine dependent config
20 categories = ['general']
21 language_support = True
22 paging = True
23 number_of_results = 1
24
25 # search-url
26 base_url = 'https://{language}.wikipedia.org/'
27 search_postfix = 'w/api.php?action=query'\
28 '&list=search'\
29 '&{query}'\
30 '&format=json'\
31 '&sroffset={offset}'\
32 '&srlimit={limit}'\
33 '&srwhat=nearmatch' # search for a near match in the title
34
35
36 # do search-request
37 def request(query, params):
38 offset = (params['pageno'] - 1) * number_of_results
39
40 string_args = dict(query=urlencode({'srsearch': query}),
41 offset=offset,
42 limit=number_of_results)
43
44 format_strings = list(Formatter().parse(base_url))
45
46 if params['language'] == 'all':
47 language = 'en'
48 else:
49 language = params['language'].split('-')[0]
50
51 # format_string [('https://', 'language', '', None), ('.wikipedia.org/', None, None, None)]
52 if any(x[1] == 'language' for x in format_strings):
53 string_args['language'] = language
54
55 # write search-language back to params, required in response
56 params['language'] = language
57
58 search_url = base_url + search_postfix
59
60 params['url'] = search_url.format(**string_args)
61
62 return params
63
64
65 # get response from search-request
66 def response(resp):
67 results = []
68
69 search_results = loads(resp.text)
70
71 # return empty array if there are no results
72 if not search_results.get('query', {}).get('search'):
73 return []
74
75 # parse results
76 for result in search_results['query']['search']:
77 if result.get('snippet', '').startswith('#REDIRECT'):
78 continue
79 url = base_url.format(language=resp.search_params['language']) +\
80 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
81
82 # append result
83 results.append({'url': url,
84 'title': result['title'],
85 'content': ''})
86
87 # return results
88 return results
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py
--- a/searx/engines/mediawiki.py
+++ b/searx/engines/mediawiki.py
@@ -21,6 +21,7 @@
language_support = True
paging = True
number_of_results = 1
+search_type = 'nearmatch' # possible values: title, text, nearmatch
# search-url
base_url = 'https://{language}.wikipedia.org/'
@@ -30,7 +31,7 @@
'&format=json'\
'&sroffset={offset}'\
'&srlimit={limit}'\
- '&srwhat=nearmatch' # search for a near match in the title
+ '&srwhat={searchtype}'
# do search-request
@@ -39,7 +40,8 @@
string_args = dict(query=urlencode({'srsearch': query}),
offset=offset,
- limit=number_of_results)
+ limit=number_of_results,
+ searchtype=search_type)
format_strings = list(Formatter().parse(base_url))
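With `srwhat` parametrised, a settings entry can point this module at any MediaWiki-backed site (the Free Software Directory being the one asked for) and choose a `title`/`text`/`nearmatch` search. A sketch of how the pieces combine into the API URL; the FSD base URL and the `text` search type are assumptions, only the query-string mechanics mirror the patched module:

```python
# Sketch: building the MediaWiki search URL with a configurable srwhat.
from urllib.parse import urlencode

search_type = 'text'  # was hard-coded to 'nearmatch' before the patch
search_postfix = ('w/api.php?action=query&list=search&{query}&format=json'
                  '&sroffset={offset}&srlimit={limit}&srwhat={searchtype}')

url = 'https://directory.fsf.org/' + search_postfix.format(
    query=urlencode({'srsearch': 'gimp'}),
    offset=0,
    limit=1,
    searchtype=search_type,
)
print(url)
```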
|
{"golden_diff": "diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py\n--- a/searx/engines/mediawiki.py\n+++ b/searx/engines/mediawiki.py\n@@ -21,6 +21,7 @@\n language_support = True\n paging = True\n number_of_results = 1\n+search_type = 'nearmatch' # possible values: title, text, nearmatch\n \n # search-url\n base_url = 'https://{language}.wikipedia.org/'\n@@ -30,7 +31,7 @@\n '&format=json'\\\n '&sroffset={offset}'\\\n '&srlimit={limit}'\\\n- '&srwhat=nearmatch' # search for a near match in the title\n+ '&srwhat={searchtype}'\n \n \n # do search-request\n@@ -39,7 +40,8 @@\n \n string_args = dict(query=urlencode({'srsearch': query}),\n offset=offset,\n- limit=number_of_results)\n+ limit=number_of_results,\n+ searchtype=search_type)\n \n format_strings = list(Formatter().parse(base_url))\n", "issue": "Add a software categorie and add the Free software directory search engine\nShame on me I forgot to ask this.\r\nI am a volunteer on the [FSD](https://directory.fsf.org/wiki/Main_Page) (Free software directory)\r\nIt would be nice if people could look for free/libre software in the searx engine.\r\nWhen possible could someone please add the free software directory so that people can easily find free software.\n", "before_files": [{"content": "\"\"\"\n general mediawiki-engine (Web)\n\n @website websites built on mediawiki (https://www.mediawiki.org)\n @provide-api yes (http://www.mediawiki.org/wiki/API:Search)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title\n\n @todo content\n\"\"\"\n\nfrom json import loads\nfrom string import Formatter\nfrom searx.url_utils import urlencode, quote\n\n# engine dependent config\ncategories = ['general']\nlanguage_support = True\npaging = True\nnumber_of_results = 1\n\n# search-url\nbase_url = 'https://{language}.wikipedia.org/'\nsearch_postfix = 'w/api.php?action=query'\\\n '&list=search'\\\n '&{query}'\\\n '&format=json'\\\n '&sroffset={offset}'\\\n '&srlimit={limit}'\\\n '&srwhat=nearmatch' # search for a near match in the title\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n string_args = dict(query=urlencode({'srsearch': query}),\n offset=offset,\n limit=number_of_results)\n\n format_strings = list(Formatter().parse(base_url))\n\n if params['language'] == 'all':\n language = 'en'\n else:\n language = params['language'].split('-')[0]\n\n # format_string [('https://', 'language', '', None), ('.wikipedia.org/', None, None, None)]\n if any(x[1] == 'language' for x in format_strings):\n string_args['language'] = language\n\n # write search-language back to params, required in response\n params['language'] = language\n\n search_url = base_url + search_postfix\n\n params['url'] = search_url.format(**string_args)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = loads(resp.text)\n\n # return empty array if there are no results\n if not search_results.get('query', {}).get('search'):\n return []\n\n # parse results\n for result in search_results['query']['search']:\n if result.get('snippet', '').startswith('#REDIRECT'):\n continue\n url = base_url.format(language=resp.search_params['language']) +\\\n 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))\n\n # append result\n results.append({'url': url,\n 'title': result['title'],\n 'content': ''})\n\n # return results\n return results\n", "path": "searx/engines/mediawiki.py"}], "after_files": [{"content": "\"\"\"\n general mediawiki-engine 
(Web)\n\n @website websites built on mediawiki (https://www.mediawiki.org)\n @provide-api yes (http://www.mediawiki.org/wiki/API:Search)\n\n @using-api yes\n @results JSON\n @stable yes\n @parse url, title\n\n @todo content\n\"\"\"\n\nfrom json import loads\nfrom string import Formatter\nfrom searx.url_utils import urlencode, quote\n\n# engine dependent config\ncategories = ['general']\nlanguage_support = True\npaging = True\nnumber_of_results = 1\nsearch_type = 'nearmatch' # possible values: title, text, nearmatch\n\n# search-url\nbase_url = 'https://{language}.wikipedia.org/'\nsearch_postfix = 'w/api.php?action=query'\\\n '&list=search'\\\n '&{query}'\\\n '&format=json'\\\n '&sroffset={offset}'\\\n '&srlimit={limit}'\\\n '&srwhat={searchtype}'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n string_args = dict(query=urlencode({'srsearch': query}),\n offset=offset,\n limit=number_of_results,\n searchtype=search_type)\n\n format_strings = list(Formatter().parse(base_url))\n\n if params['language'] == 'all':\n language = 'en'\n else:\n language = params['language'].split('-')[0]\n\n # format_string [('https://', 'language', '', None), ('.wikipedia.org/', None, None, None)]\n if any(x[1] == 'language' for x in format_strings):\n string_args['language'] = language\n\n # write search-language back to params, required in response\n params['language'] = language\n\n search_url = base_url + search_postfix\n\n params['url'] = search_url.format(**string_args)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = loads(resp.text)\n\n # return empty array if there are no results\n if not search_results.get('query', {}).get('search'):\n return []\n\n # parse results\n for result in search_results['query']['search']:\n if result.get('snippet', '').startswith('#REDIRECT'):\n continue\n url = base_url.format(language=resp.search_params['language']) +\\\n 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))\n\n # append result\n results.append({'url': url,\n 'title': result['title'],\n 'content': ''})\n\n # return results\n return results\n", "path": "searx/engines/mediawiki.py"}]}
| 1,112 | 258 |
gh_patches_debug_6063
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1627
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MI scraper failing since at least 2017-04-01
MI has been failing since 2017-04-01
Based on automated runs it appears that MI has not run successfully in 5 days (2017-04-01).
```
06:00:31 INFO billy: billy-update abbr=mi
        actions=scrape,import,report
        types=bills,legislators,votes,committees,alldata,events
        sessions=2017-2018
        terms=2017-2018
06:00:31 INFO scrapelib: GET - http://www.senate.michigan.gov/senatorinfo.html
Traceback (most recent call last):
  File "/usr/local/bin/billy-update", line 9, in <module>
    load_entry_point('billy==1.9.0', 'console_scripts', 'billy-update')()
  File "/opt/openstates/billy/billy/bin/update.py", line 368, in main
    run_record += _run_scraper(stype, args, metadata)
  File "/opt/openstates/billy/billy/bin/update.py", line 102, in _run_scraper
    scraper.scrape(chamber, time)
  File "/srv/openstates-web/openstates/mi/legislators.py", line 16, in scrape
    return self.scrape_upper(chamber, term)
  File "/srv/openstates-web/openstates/mi/legislators.py", line 77, in scrape_upper
    doc = self.lxmlize(url)
  File "/srv/openstates-web/openstates/utils/lxmlize.py", line 19, in lxmlize
    response = self.get(url)
  File "/usr/local/lib/python2.7/dist-packages/requests/sessions.py", line 501, in get
    return self.request('GET', url, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/scrapelib/__init__.py", line 272, in request
    raise HTTPError(resp)
scrapelib.HTTPError: 500 while retrieving http://www.senate.michigan.gov/senatorinfo.html
```
Visit http://bobsled.openstates.org/ for more info.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openstates/mi/legislators.py`
Content:
```
1 import re
2
3 from billy.scrape.legislators import LegislatorScraper, Legislator
4 from openstates.utils import LXMLMixin
5
6 abbr = {'D': 'Democratic', 'R': 'Republican'}
7
8
9 class MILegislatorScraper(LegislatorScraper, LXMLMixin):
10 jurisdiction = 'mi'
11
12 def scrape(self, chamber, term):
13 self.validate_term(term, latest_only=True)
14 if chamber == 'lower':
15 return self.scrape_lower(chamber, term)
16 return self.scrape_upper(chamber, term)
17
18 def scrape_lower(self, chamber, term):
19 url = 'http://www.house.mi.gov/mhrpublic/frmRepList.aspx'
20 table = [
21 "website",
22 "district",
23 "name",
24 "party",
25 "location",
26 "phone",
27 "email"
28 ]
29 doc = self.lxmlize(url)
30 # skip two rows at top
31 for row in doc.xpath('//table[@id="grvRepInfo"]/*'):
32 tds = row.xpath('.//td')
33 if len(tds) == 0:
34 continue
35 metainf = {}
36 for i in range(0, len(table)):
37 metainf[table[i]] = tds[i]
38 district = str(int(metainf['district'].text_content().strip()))
39 party = metainf['party'].text_content().strip()
40 phone = metainf['phone'].text_content().strip()
41 email = metainf['email'].text_content().strip()
42 leg_url = metainf['website'].xpath("./a")[0].attrib['href']
43 name = metainf['name'].text_content().strip()
44 if name == 'Vacant' or re.match(r'^District \d{1,3}$', name):
45 self.warning('District {} appears vacant, and will be skipped'.format(district))
46 continue
47
48 office = metainf['location'].text_content().strip()
49 office = re.sub(
50 ' HOB',
51 ' Anderson House Office Building\n124 North Capitol Avenue\nLansing, MI 48933',
52 office
53 )
54 office = re.sub(
55 ' CB',
56 ' State Capitol Building\nLansing, MI 48909',
57 office
58 )
59
60 leg = Legislator(term=term,
61 chamber=chamber,
62 full_name=name,
63 district=district,
64 party=abbr[party],
65 url=leg_url)
66
67 leg.add_office('capitol', 'Capitol Office',
68 address=office,
69 phone=phone,
70 email=email)
71
72 leg.add_source(url)
73 self.save_legislator(leg)
74
75 def scrape_upper(self, chamber, term):
76 url = 'http://www.senate.michigan.gov/senatorinfo.html'
77 doc = self.lxmlize(url)
78 for row in doc.xpath('//table[not(@class="calendar")]//tr')[3:]:
79 if len(row) != 7:
80 continue
81
82 # party, dist, member, office_phone, office_fax, office_loc
83 party, dist, member, contact, phone, fax, loc = row.getchildren()
84 if (party.text_content().strip() == "" or
85 'Lieutenant Governor' in member.text_content()):
86 continue
87
88 party = abbr[party.text]
89 district = dist.text_content().strip()
90 name = member.text_content().strip()
91 name = re.sub(r'\s+', " ", name)
92
93 if name == 'Vacant':
94 self.info('district %s is vacant', district)
95 continue
96
97 leg_url = member.xpath('a/@href')[0]
98 office_phone = phone.text
99 office_fax = fax.text
100
101 office_loc = loc.text
102 office_loc = re.sub(
103 ' Farnum Bldg',
104 ' Farnum Office Building\n125 West Allegan Street\nLansing, MI 48933',
105 office_loc
106 )
107 office_loc = re.sub(
108 ' Capitol Bldg',
109 ' State Capitol Building\nLansing, MI 48909',
110 office_loc
111 )
112
113 leg = Legislator(term=term, chamber=chamber,
114 district=district,
115 full_name=name,
116 party=party,
117 url=leg_url)
118
119 leg.add_office('capitol', 'Capitol Office',
120 address=office_loc,
121 fax=office_fax,
122 phone=office_phone)
123
124 leg.add_source(url)
125 self.save_legislator(leg)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openstates/mi/legislators.py b/openstates/mi/legislators.py
--- a/openstates/mi/legislators.py
+++ b/openstates/mi/legislators.py
@@ -73,7 +73,7 @@
self.save_legislator(leg)
def scrape_upper(self, chamber, term):
- url = 'http://www.senate.michigan.gov/senatorinfo.html'
+ url = 'http://www.senate.michigan.gov/senatorinfo_list.html'
doc = self.lxmlize(url)
for row in doc.xpath('//table[not(@class="calendar")]//tr')[3:]:
if len(row) != 7:
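The patch is a one-line URL swap: the senate site stopped serving `senatorinfo.html` (hence the 500) and the listing moved to `senatorinfo_list.html`. A throwaway check of both endpoints (needs network access, and the page may of course have moved again since 2017):

```python
# Sketch: compare the old and new Michigan senate listing URLs.
import requests

OLD = 'http://www.senate.michigan.gov/senatorinfo.html'
NEW = 'http://www.senate.michigan.gov/senatorinfo_list.html'

for url in (OLD, NEW):
    try:
        status = requests.get(url, timeout=10).status_code
    except requests.RequestException as exc:
        status = exc
    print(url, '->', status)
```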
|
{"golden_diff": "diff --git a/openstates/mi/legislators.py b/openstates/mi/legislators.py\n--- a/openstates/mi/legislators.py\n+++ b/openstates/mi/legislators.py\n@@ -73,7 +73,7 @@\n self.save_legislator(leg)\n \n def scrape_upper(self, chamber, term):\n- url = 'http://www.senate.michigan.gov/senatorinfo.html'\n+ url = 'http://www.senate.michigan.gov/senatorinfo_list.html'\n doc = self.lxmlize(url)\n for row in doc.xpath('//table[not(@class=\"calendar\")]//tr')[3:]:\n if len(row) != 7:\n", "issue": "MI scraper failing since at least 2017-04-01\nMI has been failing since 2017-04-01\n\nBased on automated runs it appears that MI has not run successfully in 5 days (2017-04-01).\n\n\n```\n 06:00:31 INFO billy: billy-update abbr=mi\n actions=scrape,import,report\n types=bills,legislators,votes,committees,alldata,events\n sessions=2017-2018\n terms=2017-2018\n06:00:31 INFO scrapelib: GET - http://www.senate.michigan.gov/senatorinfo.html\n File \"/usr/local/bin/billy-update\", line 9, in <module>\n load_entry_point('billy==1.9.0', 'console_scripts', 'billy-update')()\n File \"/opt/openstates/billy/billy/bin/update.py\", line 368, in main\nTraceback (most recent call last):\n run_record += _run_scraper(stype, args, metadata)\n File \"/opt/openstates/billy/billy/bin/update.py\", line 102, in _run_scraper\n response = self.get(url)\n File \"/usr/local/lib/python2.7/dist-packages/requests/sessions.py\", line 501, in get\n return self.request('GET', url, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/scrapelib/__init__.py\", line 272, in request\n raise HTTPError(resp)\nscrapelib.HTTPError: 500 while retrieving http://www.senate.michigan.gov/senatorinfo.html\n File \"/srv/openstates-web/openstates/mi/legislators.py\", line 77, in scrape_upper\n scraper.scrape(chamber, time)\n File \"/srv/openstates-web/openstates/mi/legislators.py\", line 16, in scrape\n return self.scrape_upper(chamber, term)\n doc = self.lxmlize(url)\n File \"/srv/openstates-web/openstates/utils/lxmlize.py\", line 19, in lxmlize\n```\n\nVisit http://bobsled.openstates.org/ for more info.\n\n", "before_files": [{"content": "import re\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nabbr = {'D': 'Democratic', 'R': 'Republican'}\n\n\nclass MILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'mi'\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=True)\n if chamber == 'lower':\n return self.scrape_lower(chamber, term)\n return self.scrape_upper(chamber, term)\n\n def scrape_lower(self, chamber, term):\n url = 'http://www.house.mi.gov/mhrpublic/frmRepList.aspx'\n table = [\n \"website\",\n \"district\",\n \"name\",\n \"party\",\n \"location\",\n \"phone\",\n \"email\"\n ]\n doc = self.lxmlize(url)\n # skip two rows at top\n for row in doc.xpath('//table[@id=\"grvRepInfo\"]/*'):\n tds = row.xpath('.//td')\n if len(tds) == 0:\n continue\n metainf = {}\n for i in range(0, len(table)):\n metainf[table[i]] = tds[i]\n district = str(int(metainf['district'].text_content().strip()))\n party = metainf['party'].text_content().strip()\n phone = metainf['phone'].text_content().strip()\n email = metainf['email'].text_content().strip()\n leg_url = metainf['website'].xpath(\"./a\")[0].attrib['href']\n name = metainf['name'].text_content().strip()\n if name == 'Vacant' or re.match(r'^District \\d{1,3}$', name):\n self.warning('District {} appears vacant, and will be skipped'.format(district))\n continue\n\n office = 
metainf['location'].text_content().strip()\n office = re.sub(\n ' HOB',\n ' Anderson House Office Building\\n124 North Capitol Avenue\\nLansing, MI 48933',\n office\n )\n office = re.sub(\n ' CB',\n ' State Capitol Building\\nLansing, MI 48909',\n office\n )\n\n leg = Legislator(term=term,\n chamber=chamber,\n full_name=name,\n district=district,\n party=abbr[party],\n url=leg_url)\n\n leg.add_office('capitol', 'Capitol Office',\n address=office,\n phone=phone,\n email=email)\n\n leg.add_source(url)\n self.save_legislator(leg)\n\n def scrape_upper(self, chamber, term):\n url = 'http://www.senate.michigan.gov/senatorinfo.html'\n doc = self.lxmlize(url)\n for row in doc.xpath('//table[not(@class=\"calendar\")]//tr')[3:]:\n if len(row) != 7:\n continue\n\n # party, dist, member, office_phone, office_fax, office_loc\n party, dist, member, contact, phone, fax, loc = row.getchildren()\n if (party.text_content().strip() == \"\" or\n 'Lieutenant Governor' in member.text_content()):\n continue\n\n party = abbr[party.text]\n district = dist.text_content().strip()\n name = member.text_content().strip()\n name = re.sub(r'\\s+', \" \", name)\n\n if name == 'Vacant':\n self.info('district %s is vacant', district)\n continue\n\n leg_url = member.xpath('a/@href')[0]\n office_phone = phone.text\n office_fax = fax.text\n\n office_loc = loc.text\n office_loc = re.sub(\n ' Farnum Bldg',\n ' Farnum Office Building\\n125 West Allegan Street\\nLansing, MI 48933',\n office_loc\n )\n office_loc = re.sub(\n ' Capitol Bldg',\n ' State Capitol Building\\nLansing, MI 48909',\n office_loc\n )\n\n leg = Legislator(term=term, chamber=chamber,\n district=district,\n full_name=name,\n party=party,\n url=leg_url)\n\n leg.add_office('capitol', 'Capitol Office',\n address=office_loc,\n fax=office_fax,\n phone=office_phone)\n\n leg.add_source(url)\n self.save_legislator(leg)\n", "path": "openstates/mi/legislators.py"}], "after_files": [{"content": "import re\n\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nabbr = {'D': 'Democratic', 'R': 'Republican'}\n\n\nclass MILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'mi'\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=True)\n if chamber == 'lower':\n return self.scrape_lower(chamber, term)\n return self.scrape_upper(chamber, term)\n\n def scrape_lower(self, chamber, term):\n url = 'http://www.house.mi.gov/mhrpublic/frmRepList.aspx'\n table = [\n \"website\",\n \"district\",\n \"name\",\n \"party\",\n \"location\",\n \"phone\",\n \"email\"\n ]\n doc = self.lxmlize(url)\n # skip two rows at top\n for row in doc.xpath('//table[@id=\"grvRepInfo\"]/*'):\n tds = row.xpath('.//td')\n if len(tds) == 0:\n continue\n metainf = {}\n for i in range(0, len(table)):\n metainf[table[i]] = tds[i]\n district = str(int(metainf['district'].text_content().strip()))\n party = metainf['party'].text_content().strip()\n phone = metainf['phone'].text_content().strip()\n email = metainf['email'].text_content().strip()\n leg_url = metainf['website'].xpath(\"./a\")[0].attrib['href']\n name = metainf['name'].text_content().strip()\n if name == 'Vacant' or re.match(r'^District \\d{1,3}$', name):\n self.warning('District {} appears vacant, and will be skipped'.format(district))\n continue\n\n office = metainf['location'].text_content().strip()\n office = re.sub(\n ' HOB',\n ' Anderson House Office Building\\n124 North Capitol Avenue\\nLansing, MI 48933',\n office\n )\n office = re.sub(\n ' CB',\n ' State 
Capitol Building\\nLansing, MI 48909',\n office\n )\n\n leg = Legislator(term=term,\n chamber=chamber,\n full_name=name,\n district=district,\n party=abbr[party],\n url=leg_url)\n\n leg.add_office('capitol', 'Capitol Office',\n address=office,\n phone=phone,\n email=email)\n\n leg.add_source(url)\n self.save_legislator(leg)\n\n def scrape_upper(self, chamber, term):\n url = 'http://www.senate.michigan.gov/senatorinfo_list.html'\n doc = self.lxmlize(url)\n for row in doc.xpath('//table[not(@class=\"calendar\")]//tr')[3:]:\n if len(row) != 7:\n continue\n\n # party, dist, member, office_phone, office_fax, office_loc\n party, dist, member, contact, phone, fax, loc = row.getchildren()\n if (party.text_content().strip() == \"\" or\n 'Lieutenant Governor' in member.text_content()):\n continue\n\n party = abbr[party.text]\n district = dist.text_content().strip()\n name = member.text_content().strip()\n name = re.sub(r'\\s+', \" \", name)\n\n if name == 'Vacant':\n self.info('district %s is vacant', district)\n continue\n\n leg_url = member.xpath('a/@href')[0]\n office_phone = phone.text\n office_fax = fax.text\n\n office_loc = loc.text\n office_loc = re.sub(\n ' Farnum Bldg',\n ' Farnum Office Building\\n125 West Allegan Street\\nLansing, MI 48933',\n office_loc\n )\n office_loc = re.sub(\n ' Capitol Bldg',\n ' State Capitol Building\\nLansing, MI 48909',\n office_loc\n )\n\n leg = Legislator(term=term, chamber=chamber,\n district=district,\n full_name=name,\n party=party,\n url=leg_url)\n\n leg.add_office('capitol', 'Capitol Office',\n address=office_loc,\n fax=office_fax,\n phone=office_phone)\n\n leg.add_source(url)\n self.save_legislator(leg)\n", "path": "openstates/mi/legislators.py"}]}
| 2,042 | 159 |
gh_patches_debug_25714 | rasdani/github-patches | git_diff | mkdocs__mkdocs-443 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`mkdocs new` broken under python2
current master, python 2.7.9 virtualenv
only top directory and mkdocs.yml created, no docs dir or index.md
```
(karasu)[lashni@orphan src]$ mkdocs new karasu
Creating project directory: karasu
Writing config file: karasu/mkdocs.yml
Traceback (most recent call last):
File "/home/lashni/dev/karasu/bin/mkdocs", line 9, in <module>
load_entry_point('mkdocs==0.11.1', 'console_scripts', 'mkdocs')()
File "/home/lashni/dev/karasu/src/mkdocs/mkdocs/main.py", line 74, in run_main
main(cmd, args=sys.argv[2:], options=dict(opts))
File "/home/lashni/dev/karasu/src/mkdocs/mkdocs/main.py", line 58, in main
new(args, options)
File "/home/lashni/dev/karasu/src/mkdocs/mkdocs/new.py", line 47, in new
open(config_path, 'w', encoding='utf-8').write(config_text)
TypeError: must be unicode, not str
```
current master, python 3.4.3 virtualenv, files/dirs created successfully
```
(test)[lashni@orphan src]$ mkdocs new karasu
Creating project directory: karasu
Writing config file: karasu/mkdocs.yml
Writing initial docs: karasu/docs/index.md
```
--- END ISSUE ---
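The traceback points at `io.open`: under Python 2 its text-mode streams only accept `unicode`, while the module-level `config_text`/`index_text` literals in `new.py` are plain `str`. A minimal standalone sketch of the failure (illustrative, not taken from the repository):

```python
# Python 2 semantics: io.open() in text mode only accepts unicode on write().
from io import open

config_text = 'site_name: My Docs\n'        # a plain str (bytes) under Python 2

with open('mkdocs.yml', 'w', encoding='utf-8') as fh:
    fh.write(config_text)                   # Python 2: TypeError: must be unicode, not str
    # fh.write(u'site_name: My Docs\n')     # a unicode literal works on both 2 and 3
```

Converting the module-level strings to `unicode`, as the patch further down does, keeps a single code path for both interpreters.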
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/new.py`
Content:
```
1 # coding: utf-8
2 from __future__ import print_function
3 import os
4 from io import open
5
6 config_text = 'site_name: My Docs\n'
7 index_text = """# Welcome to MkDocs
8
9 For full documentation visit [mkdocs.org](http://mkdocs.org).
10
11 ## Commands
12
13 * `mkdocs new [dir-name]` - Create a new project.
14 * `mkdocs serve` - Start the live-reloading docs server.
15 * `mkdocs build` - Build the documentation site.
16 * `mkdocs help` - Print this help message.
17
18 ## Project layout
19
20 mkdocs.yml # The configuration file.
21 docs/
22 index.md # The documentation homepage.
23 ... # Other markdown pages, images and other files.
24 """
25
26
27 def new(args, options):
28 if len(args) != 1:
29 print("Usage 'mkdocs new [directory-name]'")
30 return
31
32 output_dir = args[0]
33
34 docs_dir = os.path.join(output_dir, 'docs')
35 config_path = os.path.join(output_dir, 'mkdocs.yml')
36 index_path = os.path.join(docs_dir, 'index.md')
37
38 if os.path.exists(config_path):
39 print('Project already exists.')
40 return
41
42 if not os.path.exists(output_dir):
43 print('Creating project directory: %s' % output_dir)
44 os.mkdir(output_dir)
45
46 print('Writing config file: %s' % config_path)
47 open(config_path, 'w', encoding='utf-8').write(config_text)
48
49 if os.path.exists(index_path):
50 return
51
52 print('Writing initial docs: %s' % index_path)
53 if not os.path.exists(docs_dir):
54 os.mkdir(docs_dir)
55 open(index_path, 'w', encoding='utf-8').write(index_text)
56
```
Path: `mkdocs/main.py`
Content:
```
1 #!/usr/bin/env python
2 # coding: utf-8
3 from __future__ import print_function
4
5 import logging
6 import sys
7
8 from mkdocs import __version__
9 from mkdocs.build import build
10 from mkdocs.config import load_config
11 from mkdocs.exceptions import MkDocsException
12 from mkdocs.gh_deploy import gh_deploy
13 from mkdocs.new import new
14 from mkdocs.serve import serve
15
16
17 def configure_logging(options):
18 '''When a --verbose flag is passed, increase the verbosity of mkdocs'''
19 logger = logging.getLogger('mkdocs')
20 logger.addHandler(logging.StreamHandler())
21 if 'verbose' in options:
22 logger.setLevel(logging.DEBUG)
23 else:
24 logger.setLevel(logging.WARNING)
25
26
27 def arg_to_option(arg):
28 """
29 Convert command line arguments into two-tuples of config key/value pairs.
30 """
31 arg = arg.lstrip('--')
32 option = True
33 if '=' in arg:
34 arg, option = arg.split('=', 1)
35 return (arg.replace('-', '_'), option)
36
37
38 def main(cmd, args, options=None):
39 """
40 Build the documentation, and optionally start the devserver.
41 """
42 configure_logging(options)
43 clean_site_dir = 'clean' in options
44 if cmd == 'serve':
45 config = load_config(options=options)
46 serve(config, options=options)
47 elif cmd == 'build':
48 config = load_config(options=options)
49 build(config, clean_site_dir=clean_site_dir)
50 elif cmd == 'json':
51 config = load_config(options=options)
52 build(config, dump_json=True, clean_site_dir=clean_site_dir)
53 elif cmd == 'gh-deploy':
54 config = load_config(options=options)
55 build(config, clean_site_dir=clean_site_dir)
56 gh_deploy(config)
57 elif cmd == 'new':
58 new(args, options)
59 else:
60 print('MkDocs (version {0})'.format(__version__))
61 print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')
62
63
64 def run_main():
65 """
66 Invokes main() with the contents of sys.argv
67
68 This is a separate function so it can be invoked
69 by a setuptools console_script.
70 """
71 cmd = sys.argv[1] if len(sys.argv) >= 2 else None
72 opts = [arg_to_option(arg) for arg in sys.argv[2:] if arg.startswith('--')]
73 try:
74 main(cmd, args=sys.argv[2:], options=dict(opts))
75 except MkDocsException as e:
76 print(e.args[0], file=sys.stderr)
77
78
79 if __name__ == '__main__':
80 run_main()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/main.py b/mkdocs/main.py
--- a/mkdocs/main.py
+++ b/mkdocs/main.py
@@ -55,7 +55,7 @@
build(config, clean_site_dir=clean_site_dir)
gh_deploy(config)
elif cmd == 'new':
- new(args, options)
+ new(args)
else:
print('MkDocs (version {0})'.format(__version__))
print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')
diff --git a/mkdocs/new.py b/mkdocs/new.py
--- a/mkdocs/new.py
+++ b/mkdocs/new.py
@@ -1,10 +1,13 @@
# coding: utf-8
from __future__ import print_function
+
import os
from io import open
-config_text = 'site_name: My Docs\n'
-index_text = """# Welcome to MkDocs
+from mkdocs import compat
+
+config_text = compat.unicode('site_name: My Docs\n')
+index_text = compat.unicode("""# Welcome to MkDocs
For full documentation visit [mkdocs.org](http://mkdocs.org).
@@ -21,10 +24,11 @@
docs/
index.md # The documentation homepage.
... # Other markdown pages, images and other files.
-"""
+""")
+
+def new(args):
-def new(args, options):
if len(args) != 1:
print("Usage 'mkdocs new [directory-name]'")
return
|
{"golden_diff": "diff --git a/mkdocs/main.py b/mkdocs/main.py\n--- a/mkdocs/main.py\n+++ b/mkdocs/main.py\n@@ -55,7 +55,7 @@\n build(config, clean_site_dir=clean_site_dir)\n gh_deploy(config)\n elif cmd == 'new':\n- new(args, options)\n+ new(args)\n else:\n print('MkDocs (version {0})'.format(__version__))\n print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')\ndiff --git a/mkdocs/new.py b/mkdocs/new.py\n--- a/mkdocs/new.py\n+++ b/mkdocs/new.py\n@@ -1,10 +1,13 @@\n # coding: utf-8\n from __future__ import print_function\n+\n import os\n from io import open\n \n-config_text = 'site_name: My Docs\\n'\n-index_text = \"\"\"# Welcome to MkDocs\n+from mkdocs import compat\n+\n+config_text = compat.unicode('site_name: My Docs\\n')\n+index_text = compat.unicode(\"\"\"# Welcome to MkDocs\n \n For full documentation visit [mkdocs.org](http://mkdocs.org).\n \n@@ -21,10 +24,11 @@\n docs/\n index.md # The documentation homepage.\n ... # Other markdown pages, images and other files.\n-\"\"\"\n+\"\"\")\n+\n \n+def new(args):\n \n-def new(args, options):\n if len(args) != 1:\n print(\"Usage 'mkdocs new [directory-name]'\")\n return\n", "issue": "`mkdocs new` broken under python2\ncurrent master, python 2.7.9 virtualenv\nonly top directory and mkdocs.yml created, no docs dir or index.md\n\n```\n(karasu)[lashni@orphan src]$ mkdocs new karasu\nCreating project directory: karasu\nWriting config file: karasu/mkdocs.yml\nTraceback (most recent call last):\n File \"/home/lashni/dev/karasu/bin/mkdocs\", line 9, in <module>\n load_entry_point('mkdocs==0.11.1', 'console_scripts', 'mkdocs')()\n File \"/home/lashni/dev/karasu/src/mkdocs/mkdocs/main.py\", line 74, in run_main\n main(cmd, args=sys.argv[2:], options=dict(opts))\n File \"/home/lashni/dev/karasu/src/mkdocs/mkdocs/main.py\", line 58, in main\n new(args, options)\n File \"/home/lashni/dev/karasu/src/mkdocs/mkdocs/new.py\", line 47, in new\n open(config_path, 'w', encoding='utf-8').write(config_text)\nTypeError: must be unicode, not str\n```\n\ncurrent master, python 3.4.3 virtualenv, files/dirs created successfully\n\n```\n(test)[lashni@orphan src]$ mkdocs new karasu\nCreating project directory: karasu\nWriting config file: karasu/mkdocs.yml\nWriting initial docs: karasu/docs/index.md\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\nimport os\nfrom io import open\n\nconfig_text = 'site_name: My Docs\\n'\nindex_text = \"\"\"# Welcome to MkDocs\n\nFor full documentation visit [mkdocs.org](http://mkdocs.org).\n\n## Commands\n\n* `mkdocs new [dir-name]` - Create a new project.\n* `mkdocs serve` - Start the live-reloading docs server.\n* `mkdocs build` - Build the documentation site.\n* `mkdocs help` - Print this help message.\n\n## Project layout\n\n mkdocs.yml # The configuration file.\n docs/\n index.md # The documentation homepage.\n ... 
# Other markdown pages, images and other files.\n\"\"\"\n\n\ndef new(args, options):\n if len(args) != 1:\n print(\"Usage 'mkdocs new [directory-name]'\")\n return\n\n output_dir = args[0]\n\n docs_dir = os.path.join(output_dir, 'docs')\n config_path = os.path.join(output_dir, 'mkdocs.yml')\n index_path = os.path.join(docs_dir, 'index.md')\n\n if os.path.exists(config_path):\n print('Project already exists.')\n return\n\n if not os.path.exists(output_dir):\n print('Creating project directory: %s' % output_dir)\n os.mkdir(output_dir)\n\n print('Writing config file: %s' % config_path)\n open(config_path, 'w', encoding='utf-8').write(config_text)\n\n if os.path.exists(index_path):\n return\n\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n open(index_path, 'w', encoding='utf-8').write(index_text)\n", "path": "mkdocs/new.py"}, {"content": "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\n\nimport logging\nimport sys\n\nfrom mkdocs import __version__\nfrom mkdocs.build import build\nfrom mkdocs.config import load_config\nfrom mkdocs.exceptions import MkDocsException\nfrom mkdocs.gh_deploy import gh_deploy\nfrom mkdocs.new import new\nfrom mkdocs.serve import serve\n\n\ndef configure_logging(options):\n '''When a --verbose flag is passed, increase the verbosity of mkdocs'''\n logger = logging.getLogger('mkdocs')\n logger.addHandler(logging.StreamHandler())\n if 'verbose' in options:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n\n\ndef arg_to_option(arg):\n \"\"\"\n Convert command line arguments into two-tuples of config key/value pairs.\n \"\"\"\n arg = arg.lstrip('--')\n option = True\n if '=' in arg:\n arg, option = arg.split('=', 1)\n return (arg.replace('-', '_'), option)\n\n\ndef main(cmd, args, options=None):\n \"\"\"\n Build the documentation, and optionally start the devserver.\n \"\"\"\n configure_logging(options)\n clean_site_dir = 'clean' in options\n if cmd == 'serve':\n config = load_config(options=options)\n serve(config, options=options)\n elif cmd == 'build':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n elif cmd == 'json':\n config = load_config(options=options)\n build(config, dump_json=True, clean_site_dir=clean_site_dir)\n elif cmd == 'gh-deploy':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n gh_deploy(config)\n elif cmd == 'new':\n new(args, options)\n else:\n print('MkDocs (version {0})'.format(__version__))\n print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')\n\n\ndef run_main():\n \"\"\"\n Invokes main() with the contents of sys.argv\n\n This is a separate function so it can be invoked\n by a setuptools console_script.\n \"\"\"\n cmd = sys.argv[1] if len(sys.argv) >= 2 else None\n opts = [arg_to_option(arg) for arg in sys.argv[2:] if arg.startswith('--')]\n try:\n main(cmd, args=sys.argv[2:], options=dict(opts))\n except MkDocsException as e:\n print(e.args[0], file=sys.stderr)\n\n\nif __name__ == '__main__':\n run_main()\n", "path": "mkdocs/main.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import print_function\n\nimport os\nfrom io import open\n\nfrom mkdocs import compat\n\nconfig_text = compat.unicode('site_name: My Docs\\n')\nindex_text = compat.unicode(\"\"\"# Welcome to MkDocs\n\nFor full documentation visit [mkdocs.org](http://mkdocs.org).\n\n## Commands\n\n* `mkdocs new [dir-name]` - Create a new project.\n* `mkdocs serve` 
- Start the live-reloading docs server.\n* `mkdocs build` - Build the documentation site.\n* `mkdocs help` - Print this help message.\n\n## Project layout\n\n mkdocs.yml # The configuration file.\n docs/\n index.md # The documentation homepage.\n ... # Other markdown pages, images and other files.\n\"\"\")\n\n\ndef new(args):\n\n if len(args) != 1:\n print(\"Usage 'mkdocs new [directory-name]'\")\n return\n\n output_dir = args[0]\n\n docs_dir = os.path.join(output_dir, 'docs')\n config_path = os.path.join(output_dir, 'mkdocs.yml')\n index_path = os.path.join(docs_dir, 'index.md')\n\n if os.path.exists(config_path):\n print('Project already exists.')\n return\n\n if not os.path.exists(output_dir):\n print('Creating project directory: %s' % output_dir)\n os.mkdir(output_dir)\n\n print('Writing config file: %s' % config_path)\n open(config_path, 'w', encoding='utf-8').write(config_text)\n\n if os.path.exists(index_path):\n return\n\n print('Writing initial docs: %s' % index_path)\n if not os.path.exists(docs_dir):\n os.mkdir(docs_dir)\n open(index_path, 'w', encoding='utf-8').write(index_text)\n", "path": "mkdocs/new.py"}, {"content": "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\n\nimport logging\nimport sys\n\nfrom mkdocs import __version__\nfrom mkdocs.build import build\nfrom mkdocs.config import load_config\nfrom mkdocs.exceptions import MkDocsException\nfrom mkdocs.gh_deploy import gh_deploy\nfrom mkdocs.new import new\nfrom mkdocs.serve import serve\n\n\ndef configure_logging(options):\n '''When a --verbose flag is passed, increase the verbosity of mkdocs'''\n logger = logging.getLogger('mkdocs')\n logger.addHandler(logging.StreamHandler())\n if 'verbose' in options:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n\n\ndef arg_to_option(arg):\n \"\"\"\n Convert command line arguments into two-tuples of config key/value pairs.\n \"\"\"\n arg = arg.lstrip('--')\n option = True\n if '=' in arg:\n arg, option = arg.split('=', 1)\n return (arg.replace('-', '_'), option)\n\n\ndef main(cmd, args, options=None):\n \"\"\"\n Build the documentation, and optionally start the devserver.\n \"\"\"\n configure_logging(options)\n clean_site_dir = 'clean' in options\n if cmd == 'serve':\n config = load_config(options=options)\n serve(config, options=options)\n elif cmd == 'build':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n elif cmd == 'json':\n config = load_config(options=options)\n build(config, dump_json=True, clean_site_dir=clean_site_dir)\n elif cmd == 'gh-deploy':\n config = load_config(options=options)\n build(config, clean_site_dir=clean_site_dir)\n gh_deploy(config)\n elif cmd == 'new':\n new(args)\n else:\n print('MkDocs (version {0})'.format(__version__))\n print('mkdocs [help|new|build|serve|gh-deploy|json] {options}')\n\n\ndef run_main():\n \"\"\"\n Invokes main() with the contents of sys.argv\n\n This is a separate function so it can be invoked\n by a setuptools console_script.\n \"\"\"\n cmd = sys.argv[1] if len(sys.argv) >= 2 else None\n opts = [arg_to_option(arg) for arg in sys.argv[2:] if arg.startswith('--')]\n try:\n main(cmd, args=sys.argv[2:], options=dict(opts))\n except MkDocsException as e:\n print(e.args[0], file=sys.stderr)\n\n\nif __name__ == '__main__':\n run_main()\n", "path": "mkdocs/main.py"}]}
| 1,816 | 346 |
gh_patches_debug_63962 | rasdani/github-patches | git_diff | redis__redis-py-1678 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CI run to install the built package
In light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package.
CI run to install the built package
In light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package.
--- END ISSUE ---
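A rough sketch of what the proposed packaging smoke test could look like as an `invoke` task alongside the existing ones in `tasks.py`; the task name, virtualenv path, and commands below are illustrative guesses rather than code from the repository, and the final step assumes a local `redis-server` is listening on the default port:

```python
from invoke import task, run


@task
def smoke_test(c):
    """Build the package, install it into a clean virtualenv, and ping Redis."""
    run("python setup.py sdist bdist_wheel")
    run("python -m venv /tmp/redis-py-smoke")
    run("/tmp/redis-py-smoke/bin/pip install dist/*.whl")
    # The simple end-to-end check suggested in the issue:
    run('/tmp/redis-py-smoke/bin/python -c "import redis; print(redis.Redis().ping())"')
```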
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tasks.py`
Content:
```
1 import os
2 import shutil
3 from invoke import task, run
4
5 with open('tox.ini') as fp:
6 lines = fp.read().split("\n")
7 dockers = [line.split("=")[1].strip() for line in lines
8 if line.find("name") != -1]
9
10
11 @task
12 def devenv(c):
13 """Builds a development environment: downloads, and starts all dockers
14 specified in the tox.ini file.
15 """
16 clean(c)
17 cmd = 'tox -e devenv'
18 for d in dockers:
19 cmd += " --docker-dont-stop={}".format(d)
20 run(cmd)
21
22
23 @task
24 def linters(c):
25 """Run code linters"""
26 run("tox -e linters")
27
28
29 @task
30 def all_tests(c):
31 """Run all linters, and tests in redis-py. This assumes you have all
32 the python versions specified in the tox.ini file.
33 """
34 linters(c)
35 tests(c)
36
37
38 @task
39 def tests(c):
40 """Run the redis-py test suite against the current python,
41 with and without hiredis.
42 """
43 run("tox -e plain -e hiredis")
44
45
46 @task
47 def clean(c):
48 """Stop all dockers, and clean up the built binaries, if generated."""
49 if os.path.isdir("build"):
50 shutil.rmtree("build")
51 if os.path.isdir("dist"):
52 shutil.rmtree("dist")
53 run("docker rm -f {}".format(' '.join(dockers)))
54
55
56 @task
57 def package(c):
58 """Create the python packages"""
59 run("python setup.py build install")
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tasks.py b/tasks.py
--- a/tasks.py
+++ b/tasks.py
@@ -56,4 +56,4 @@
@task
def package(c):
"""Create the python packages"""
- run("python setup.py build install")
+ run("python setup.py sdist bdist_wheel")
|
{"golden_diff": "diff --git a/tasks.py b/tasks.py\n--- a/tasks.py\n+++ b/tasks.py\n@@ -56,4 +56,4 @@\n @task\n def package(c):\n \"\"\"Create the python packages\"\"\"\n- run(\"python setup.py build install\")\n+ run(\"python setup.py sdist bdist_wheel\")\n", "issue": "CI run to install the built package\nIn light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package.\nCI run to install the built package\nIn light of bug #1645 we should amend our CI run to install the built package, in a new virtual env and run something simple like a redis.Redis().ping(). Eventually we could build up to running the full integration test against the package.\n", "before_files": [{"content": "import os\nimport shutil\nfrom invoke import task, run\n\nwith open('tox.ini') as fp:\n lines = fp.read().split(\"\\n\")\n dockers = [line.split(\"=\")[1].strip() for line in lines\n if line.find(\"name\") != -1]\n\n\n@task\ndef devenv(c):\n \"\"\"Builds a development environment: downloads, and starts all dockers\n specified in the tox.ini file.\n \"\"\"\n clean(c)\n cmd = 'tox -e devenv'\n for d in dockers:\n cmd += \" --docker-dont-stop={}\".format(d)\n run(cmd)\n\n\n@task\ndef linters(c):\n \"\"\"Run code linters\"\"\"\n run(\"tox -e linters\")\n\n\n@task\ndef all_tests(c):\n \"\"\"Run all linters, and tests in redis-py. This assumes you have all\n the python versions specified in the tox.ini file.\n \"\"\"\n linters(c)\n tests(c)\n\n\n@task\ndef tests(c):\n \"\"\"Run the redis-py test suite against the current python,\n with and without hiredis.\n \"\"\"\n run(\"tox -e plain -e hiredis\")\n\n\n@task\ndef clean(c):\n \"\"\"Stop all dockers, and clean up the built binaries, if generated.\"\"\"\n if os.path.isdir(\"build\"):\n shutil.rmtree(\"build\")\n if os.path.isdir(\"dist\"):\n shutil.rmtree(\"dist\")\n run(\"docker rm -f {}\".format(' '.join(dockers)))\n\n\n@task\ndef package(c):\n \"\"\"Create the python packages\"\"\"\n run(\"python setup.py build install\")\n", "path": "tasks.py"}], "after_files": [{"content": "import os\nimport shutil\nfrom invoke import task, run\n\nwith open('tox.ini') as fp:\n lines = fp.read().split(\"\\n\")\n dockers = [line.split(\"=\")[1].strip() for line in lines\n if line.find(\"name\") != -1]\n\n\n@task\ndef devenv(c):\n \"\"\"Builds a development environment: downloads, and starts all dockers\n specified in the tox.ini file.\n \"\"\"\n clean(c)\n cmd = 'tox -e devenv'\n for d in dockers:\n cmd += \" --docker-dont-stop={}\".format(d)\n run(cmd)\n\n\n@task\ndef linters(c):\n \"\"\"Run code linters\"\"\"\n run(\"tox -e linters\")\n\n\n@task\ndef all_tests(c):\n \"\"\"Run all linters, and tests in redis-py. This assumes you have all\n the python versions specified in the tox.ini file.\n \"\"\"\n linters(c)\n tests(c)\n\n\n@task\ndef tests(c):\n \"\"\"Run the redis-py test suite against the current python,\n with and without hiredis.\n \"\"\"\n run(\"tox -e plain -e hiredis\")\n\n\n@task\ndef clean(c):\n \"\"\"Stop all dockers, and clean up the built binaries, if generated.\"\"\"\n if os.path.isdir(\"build\"):\n shutil.rmtree(\"build\")\n if os.path.isdir(\"dist\"):\n shutil.rmtree(\"dist\")\n run(\"docker rm -f {}\".format(' '.join(dockers)))\n\n\n@task\ndef package(c):\n \"\"\"Create the python packages\"\"\"\n run(\"python setup.py sdist bdist_wheel\")\n", "path": "tasks.py"}]}
| 849 | 69 |
gh_patches_debug_32160 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3322 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider holiday_stationstores is broken
During the global build at 2021-08-18-14-42-26, spider **holiday_stationstores** failed with **552 features** and **10 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/holiday_stationstores.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson))
--- END ISSUE ---
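The log and the patch further down suggest a likely failure mode (inferred, not confirmed here): detail pages whose hours read "Closed" do not match the spider's time-range regex, so `re.search` returns `None` and the subsequent `.groups()` call raises. A standalone illustration:

```python
import re

hours = "Closed"   # what some store detail pages show instead of a time range
match = re.search(
    r'^(\d{1,2}):(\d{2})\s*(a|p)m - (\d{1,2}):(\d{2})\s*(a|p)m?$', hours.lower())
print(match)       # None -> calling match.groups() on it raises AttributeError
```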
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/holiday_stationstores.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8
9 class HolidayStationstoreSpider(scrapy.Spider):
10 name = "holiday_stationstores"
11 item_attributes = {'brand': 'Holiday Stationstores',
12 'brand_wikidata': 'Q5880490'}
13 allowed_domains = ["www.holidaystationstores.com"]
14 download_delay = 0.2
15
16 def start_requests(self):
17 yield scrapy.Request('https://www.holidaystationstores.com/Locations/GetAllStores',
18 method='POST',
19 callback=self.parse_all_stores)
20
21 def parse_all_stores(self, response):
22 all_stores = json.loads(response.text)
23
24 for store_id, store in all_stores.items():
25 # GET requests get blocked by their Incapsula bot protection, but POST works fine
26 yield scrapy.Request(f"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}",
27 method='POST',
28 meta={'store': store})
29
30 def parse(self, response):
31 store = response.meta['store']
32
33 address = response.xpath('//div[@class="col-lg-4 col-sm-12"]/text()')[1].extract().strip()
34 phone = response.xpath('//div[@class="HolidayFontColorRed"]/text()').extract_first().strip()
35 services = '|'.join(response.xpath('//ul[@style="list-style-type: none; padding-left: 1.0em; font-size: 12px;"]/li/text()').extract()).lower()
36 open_24_hours = '24 hours' in response.css(
37 '.body-content .col-lg-4').get().lower()
38
39 properties = {
40 'name': f"Holiday #{store['Name']}",
41 'lon': store['Lng'],
42 'lat': store['Lat'],
43 'addr_full': address,
44 'phone': phone,
45 'ref': store['ID'],
46 'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),
47 'extras': {
48 'amenity:fuel': True,
49 'fuel:diesel': 'diesel' in services or None,
50 'atm': 'atm' in services or None,
51 'fuel:e85': 'e85' in services or None,
52 'hgv': 'truck' in services or None,
53 'fuel:propane': 'propane' in services or None,
54 'car_wash': 'car wash' in services or None,
55 'fuel:cng': 'cng' in services or None
56 }
57 }
58
59 yield GeojsonPointItem(**properties)
60
61 def opening_hours(self, response):
62 hour_part_elems = response.xpath('//div[@class="row"][@style="font-size: 12px;"]')
63 day_groups = []
64 this_day_group = None
65
66 if hour_part_elems:
67 for hour_part_elem in hour_part_elems:
68 day = hour_part_elem.xpath('.//div[@class="col-3"]/text()').extract_first()
69 hours = hour_part_elem.xpath('.//div[@class="col-9"]/text()').extract_first()
70
71 if not hours:
72 continue
73
74 day = day[:2]
75 match = re.search(
76 r'^(\d{1,2}):(\d{2})\s*(a|p)m - (\d{1,2}):(\d{2})\s*(a|p)m?$', hours.lower())
77 (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()
78
79 f_hr = int(f_hr)
80 if f_ampm == 'p':
81 f_hr += 12
82 elif f_ampm == 'a' and f_hr == 12:
83 f_hr = 0
84 t_hr = int(t_hr)
85 if t_ampm == 'p':
86 t_hr += 12
87 elif t_ampm == 'a' and t_hr == 12:
88 t_hr = 0
89
90 hours = '{:02d}:{}-{:02d}:{}'.format(
91 f_hr,
92 f_min,
93 t_hr,
94 t_min,
95 )
96
97 if not this_day_group:
98 this_day_group = {
99 'from_day': day,
100 'to_day': day,
101 'hours': hours
102 }
103 elif this_day_group['hours'] != hours:
104 day_groups.append(this_day_group)
105 this_day_group = {
106 'from_day': day,
107 'to_day': day,
108 'hours': hours
109 }
110 elif this_day_group['hours'] == hours:
111 this_day_group['to_day'] = day
112
113 if this_day_group:
114 day_groups.append(this_day_group)
115
116 hour_part_elems = response.xpath('//span[@style="font-size:90%"]/text()').extract()
117 if hour_part_elems:
118 day_groups.append({'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})
119
120 opening_hours = ""
121 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
122 opening_hours = '24/7'
123 else:
124 for day_group in day_groups:
125 if day_group['from_day'] == day_group['to_day']:
126 opening_hours += '{from_day} {hours}; '.format(**day_group)
127 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
128 opening_hours += '{hours}; '.format(**day_group)
129 else:
130 opening_hours += '{from_day}-{to_day} {hours}; '.format(
131 **day_group)
132 opening_hours = opening_hours[:-2]
133
134 return opening_hours
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/holiday_stationstores.py b/locations/spiders/holiday_stationstores.py
--- a/locations/spiders/holiday_stationstores.py
+++ b/locations/spiders/holiday_stationstores.py
@@ -31,6 +31,8 @@
store = response.meta['store']
address = response.xpath('//div[@class="col-lg-4 col-sm-12"]/text()')[1].extract().strip()
+ city_state = response.xpath('//div[@class="col-lg-4 col-sm-12"]/text()')[2].extract().strip()
+ city, state = city_state.split(", ")
phone = response.xpath('//div[@class="HolidayFontColorRed"]/text()').extract_first().strip()
services = '|'.join(response.xpath('//ul[@style="list-style-type: none; padding-left: 1.0em; font-size: 12px;"]/li/text()').extract()).lower()
open_24_hours = '24 hours' in response.css(
@@ -43,6 +45,9 @@
'addr_full': address,
'phone': phone,
'ref': store['ID'],
+ 'city': city.strip(),
+ 'state': state.strip(),
+ 'website': response.url,
'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),
'extras': {
'amenity:fuel': True,
@@ -68,7 +73,7 @@
day = hour_part_elem.xpath('.//div[@class="col-3"]/text()').extract_first()
hours = hour_part_elem.xpath('.//div[@class="col-9"]/text()').extract_first()
- if not hours:
+ if not hours or hours.lower() == 'closed':
continue
day = day[:2]
|
{"golden_diff": "diff --git a/locations/spiders/holiday_stationstores.py b/locations/spiders/holiday_stationstores.py\n--- a/locations/spiders/holiday_stationstores.py\n+++ b/locations/spiders/holiday_stationstores.py\n@@ -31,6 +31,8 @@\n store = response.meta['store']\n \n address = response.xpath('//div[@class=\"col-lg-4 col-sm-12\"]/text()')[1].extract().strip()\n+ city_state = response.xpath('//div[@class=\"col-lg-4 col-sm-12\"]/text()')[2].extract().strip()\n+ city, state = city_state.split(\", \")\n phone = response.xpath('//div[@class=\"HolidayFontColorRed\"]/text()').extract_first().strip()\n services = '|'.join(response.xpath('//ul[@style=\"list-style-type: none; padding-left: 1.0em; font-size: 12px;\"]/li/text()').extract()).lower()\n open_24_hours = '24 hours' in response.css(\n@@ -43,6 +45,9 @@\n 'addr_full': address,\n 'phone': phone,\n 'ref': store['ID'],\n+ 'city': city.strip(),\n+ 'state': state.strip(),\n+ 'website': response.url,\n 'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),\n 'extras': {\n 'amenity:fuel': True,\n@@ -68,7 +73,7 @@\n day = hour_part_elem.xpath('.//div[@class=\"col-3\"]/text()').extract_first()\n hours = hour_part_elem.xpath('.//div[@class=\"col-9\"]/text()').extract_first()\n \n- if not hours:\n+ if not hours or hours.lower() == 'closed':\n continue\n \n day = day[:2]\n", "issue": "Spider holiday_stationstores is broken\nDuring the global build at 2021-08-18-14-42-26, spider **holiday_stationstores** failed with **552 features** and **10 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/holiday_stationstores.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/holiday_stationstores.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass HolidayStationstoreSpider(scrapy.Spider):\n name = \"holiday_stationstores\"\n item_attributes = {'brand': 'Holiday Stationstores',\n 'brand_wikidata': 'Q5880490'}\n allowed_domains = [\"www.holidaystationstores.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n yield scrapy.Request('https://www.holidaystationstores.com/Locations/GetAllStores',\n method='POST',\n callback=self.parse_all_stores)\n\n def parse_all_stores(self, response):\n all_stores = json.loads(response.text)\n\n for store_id, store in all_stores.items():\n # GET requests get blocked by their Incapsula bot protection, but POST works fine\n yield scrapy.Request(f\"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}\",\n method='POST',\n meta={'store': store})\n\n def parse(self, response):\n store = response.meta['store']\n\n address = response.xpath('//div[@class=\"col-lg-4 col-sm-12\"]/text()')[1].extract().strip()\n phone = response.xpath('//div[@class=\"HolidayFontColorRed\"]/text()').extract_first().strip()\n services = '|'.join(response.xpath('//ul[@style=\"list-style-type: none; padding-left: 1.0em; font-size: 12px;\"]/li/text()').extract()).lower()\n open_24_hours = '24 hours' in response.css(\n '.body-content .col-lg-4').get().lower()\n\n properties = {\n 'name': f\"Holiday #{store['Name']}\",\n 'lon': store['Lng'],\n 'lat': store['Lat'],\n 'addr_full': address,\n 'phone': phone,\n 'ref': store['ID'],\n 'opening_hours': '24/7' if open_24_hours else 
self.opening_hours(response),\n 'extras': {\n 'amenity:fuel': True,\n 'fuel:diesel': 'diesel' in services or None,\n 'atm': 'atm' in services or None,\n 'fuel:e85': 'e85' in services or None,\n 'hgv': 'truck' in services or None,\n 'fuel:propane': 'propane' in services or None,\n 'car_wash': 'car wash' in services or None,\n 'fuel:cng': 'cng' in services or None\n }\n }\n\n yield GeojsonPointItem(**properties)\n\n def opening_hours(self, response):\n hour_part_elems = response.xpath('//div[@class=\"row\"][@style=\"font-size: 12px;\"]')\n day_groups = []\n this_day_group = None\n\n if hour_part_elems:\n for hour_part_elem in hour_part_elems:\n day = hour_part_elem.xpath('.//div[@class=\"col-3\"]/text()').extract_first()\n hours = hour_part_elem.xpath('.//div[@class=\"col-9\"]/text()').extract_first()\n\n if not hours:\n continue\n\n day = day[:2]\n match = re.search(\n r'^(\\d{1,2}):(\\d{2})\\s*(a|p)m - (\\d{1,2}):(\\d{2})\\s*(a|p)m?$', hours.lower())\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n\n f_hr = int(f_hr)\n if f_ampm == 'p':\n f_hr += 12\n elif f_ampm == 'a' and f_hr == 12:\n f_hr = 0\n t_hr = int(t_hr)\n if t_ampm == 'p':\n t_hr += 12\n elif t_ampm == 'a' and t_hr == 12:\n t_hr = 0\n\n hours = '{:02d}:{}-{:02d}:{}'.format(\n f_hr,\n f_min,\n t_hr,\n t_min,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n if this_day_group:\n day_groups.append(this_day_group)\n\n hour_part_elems = response.xpath('//span[@style=\"font-size:90%\"]/text()').extract()\n if hour_part_elems:\n day_groups.append({'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(\n **day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n", "path": "locations/spiders/holiday_stationstores.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass HolidayStationstoreSpider(scrapy.Spider):\n name = \"holiday_stationstores\"\n item_attributes = {'brand': 'Holiday Stationstores',\n 'brand_wikidata': 'Q5880490'}\n allowed_domains = [\"www.holidaystationstores.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n yield scrapy.Request('https://www.holidaystationstores.com/Locations/GetAllStores',\n method='POST',\n callback=self.parse_all_stores)\n\n def parse_all_stores(self, response):\n all_stores = json.loads(response.text)\n\n for store_id, store in all_stores.items():\n # GET requests get blocked by their Incapsula bot protection, but POST works fine\n yield scrapy.Request(f\"https://www.holidaystationstores.com/Locations/Detail?storeNumber={store_id}\",\n method='POST',\n meta={'store': store})\n\n def parse(self, response):\n store = response.meta['store']\n\n address = response.xpath('//div[@class=\"col-lg-4 
col-sm-12\"]/text()')[1].extract().strip()\n city_state = response.xpath('//div[@class=\"col-lg-4 col-sm-12\"]/text()')[2].extract().strip()\n city, state = city_state.split(\", \")\n phone = response.xpath('//div[@class=\"HolidayFontColorRed\"]/text()').extract_first().strip()\n services = '|'.join(response.xpath('//ul[@style=\"list-style-type: none; padding-left: 1.0em; font-size: 12px;\"]/li/text()').extract()).lower()\n open_24_hours = '24 hours' in response.css(\n '.body-content .col-lg-4').get().lower()\n\n properties = {\n 'name': f\"Holiday #{store['Name']}\",\n 'lon': store['Lng'],\n 'lat': store['Lat'],\n 'addr_full': address,\n 'phone': phone,\n 'ref': store['ID'],\n 'city': city.strip(),\n 'state': state.strip(),\n 'website': response.url,\n 'opening_hours': '24/7' if open_24_hours else self.opening_hours(response),\n 'extras': {\n 'amenity:fuel': True,\n 'fuel:diesel': 'diesel' in services or None,\n 'atm': 'atm' in services or None,\n 'fuel:e85': 'e85' in services or None,\n 'hgv': 'truck' in services or None,\n 'fuel:propane': 'propane' in services or None,\n 'car_wash': 'car wash' in services or None,\n 'fuel:cng': 'cng' in services or None\n }\n }\n\n yield GeojsonPointItem(**properties)\n\n def opening_hours(self, response):\n hour_part_elems = response.xpath('//div[@class=\"row\"][@style=\"font-size: 12px;\"]')\n day_groups = []\n this_day_group = None\n\n if hour_part_elems:\n for hour_part_elem in hour_part_elems:\n day = hour_part_elem.xpath('.//div[@class=\"col-3\"]/text()').extract_first()\n hours = hour_part_elem.xpath('.//div[@class=\"col-9\"]/text()').extract_first()\n\n if not hours or hours.lower() == 'closed':\n continue\n\n day = day[:2]\n match = re.search(\n r'^(\\d{1,2}):(\\d{2})\\s*(a|p)m - (\\d{1,2}):(\\d{2})\\s*(a|p)m?$', hours.lower())\n (f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()\n\n f_hr = int(f_hr)\n if f_ampm == 'p':\n f_hr += 12\n elif f_ampm == 'a' and f_hr == 12:\n f_hr = 0\n t_hr = int(t_hr)\n if t_ampm == 'p':\n t_hr += 12\n elif t_ampm == 'a' and t_hr == 12:\n t_hr = 0\n\n hours = '{:02d}:{}-{:02d}:{}'.format(\n f_hr,\n f_min,\n t_hr,\n t_min,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day,\n 'to_day': day,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day\n\n if this_day_group:\n day_groups.append(this_day_group)\n\n hour_part_elems = response.xpath('//span[@style=\"font-size:90%\"]/text()').extract()\n if hour_part_elems:\n day_groups.append({'from_day': 'Mo', 'to_day': 'Su', 'hours': '00:00-23:59'})\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(\n **day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n", "path": "locations/spiders/holiday_stationstores.py"}]}
| 2,040 | 407 |
gh_patches_debug_1769 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-697 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing pyOpenSSL Dependency
Thanks for stopping by to let us know something could be better!
**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.
Please run down the following list and make sure you've tried the usual "quick fixes":
- Search the issues already opened: https://github.com/googleapis/google-auth-library-python/issues
If you are still having issues, please be sure to include as much information as possible:
#### Environment details
- OS:
- Python version:
- pip version:
- `google-auth` version:
#### Steps to reproduce
1. Missing pyOpenSSL dependency in setup.py
For the tests there is a requirement in https://github.com/googleapis/google-auth-library-python/blob/master/noxfile.py against pyOpenSSL. But there are imports for pyOpenSSL in multiple modules in the code. Should pyOpenSSL be added to the requirements in setup.py?
I created https://github.com/googleapis/google-auth-library-python/pull/550 with the proposal but wanted to get feedback from an issue first as I don't see this showing up in previous issues or pull requests.
Making sure to follow these steps will guarantee the quickest resolution possible.
Thanks!
--- END ISSUE ---
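Since pyOpenSSL is only needed by some modules, one option (the one the patch further down takes) is to expose it as an optional setuptools extra rather than a hard dependency; the version pin here simply mirrors that patch and is not independently verified:

```python
# Sketch only: declaring pyOpenSSL as an optional "extra".
extras = {
    "aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'",
    "pyopenssl": "pyopenssl>=20.0.0",
}

# passed to setuptools.setup(..., extras_require=extras, ...);
# consumers opt in with:  pip install google-auth[pyopenssl]
```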
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2014 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16
17 from setuptools import find_packages
18 from setuptools import setup
19
20
21 DEPENDENCIES = (
22 "cachetools>=2.0.0,<5.0",
23 "pyasn1-modules>=0.2.1",
24 # rsa==4.5 is the last version to support 2.7
25 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233
26 'rsa<4.6; python_version < "3.6"',
27 'rsa>=3.1.4,<5; python_version >= "3.6"',
28 "setuptools>=40.3.0",
29 "six>=1.9.0",
30 )
31
32 extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"}
33
34 with io.open("README.rst", "r") as fh:
35 long_description = fh.read()
36
37 version = "1.26.1"
38
39 setup(
40 name="google-auth",
41 version=version,
42 author="Google Cloud Platform",
43 author_email="[email protected]",
44 description="Google Authentication Library",
45 long_description=long_description,
46 url="https://github.com/googleapis/google-auth-library-python",
47 packages=find_packages(exclude=("tests*", "system_tests*")),
48 namespace_packages=("google",),
49 install_requires=DEPENDENCIES,
50 extras_require=extras,
51 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*",
52 license="Apache 2.0",
53 keywords="google auth oauth client",
54 classifiers=[
55 "Programming Language :: Python :: 2",
56 "Programming Language :: Python :: 2.7",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.6",
59 "Programming Language :: Python :: 3.7",
60 "Programming Language :: Python :: 3.8",
61 "Programming Language :: Python :: 3.9",
62 "Development Status :: 5 - Production/Stable",
63 "Intended Audience :: Developers",
64 "License :: OSI Approved :: Apache Software License",
65 "Operating System :: POSIX",
66 "Operating System :: Microsoft :: Windows",
67 "Operating System :: MacOS :: MacOS X",
68 "Operating System :: OS Independent",
69 "Topic :: Internet :: WWW/HTTP",
70 ],
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,10 @@
"six>=1.9.0",
)
-extras = {"aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'"}
+extras = {
+ "aiohttp": "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'",
+ "pyopenssl": "pyopenssl>=20.0.0",
+}
with io.open("README.rst", "r") as fh:
long_description = fh.read()
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,10 @@\n \"six>=1.9.0\",\n )\n \n-extras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n+extras = {\n+ \"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n+ \"pyopenssl\": \"pyopenssl>=20.0.0\",\n+}\n \n with io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n", "issue": "Missing pyOpenSSL Dependency\nThanks for stopping by to let us know something could be better!\r\n\r\n**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response.\r\n\r\nPlease run down the following list and make sure you've tried the usual \"quick fixes\":\r\n\r\n - Search the issues already opened: https://github.com/googleapis/google-auth-library-python/issues\r\n\r\nIf you are still having issues, please be sure to include as much information as possible:\r\n\r\n#### Environment details\r\n\r\n - OS:\r\n - Python version:\r\n - pip version:\r\n - `google-auth` version:\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Missing pyOpenSSL dependency in setup.py\r\n\r\nFor the tests there is a requirement in https://github.com/googleapis/google-auth-library-python/blob/master/noxfile.py against pyOpenSSL. But there are imports for pyOpenSSL in multiple modules in the code. Should pyOpenSSL be added to the requirements in setup.py?\r\n\r\nI created https://github.com/googleapis/google-auth-library-python/pull/550 with the proposal but wanted to get feedback from an issue first as I don't see this showing up in previous issues or pull requests.\r\n\r\nMaking sure to follow these steps will guarantee the quickest resolution possible.\r\n\r\nThanks!\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\"}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nversion = \"1.26.1\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n 
python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nversion = \"1.26.1\"\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
| 1,351 | 164 |
gh_patches_debug_12271 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-3464 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tagging CloudFormation - Error: Parameters must have value
Initially the stack is created with an input parameter.
**c7n policy**
```
policies:
- name: add-cfn-tag
resource: cfn
filters:
- "tag:testcfn": present
actions:
- type: tag
value: abc
key: BusinessUnit
```
**Error**
An error occurred (ValidationError) when calling the UpdateStack operation: Parameters: [input_param] must have values
--- END ISSUE ---
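The error comes from `UpdateStack` being called with `UsePreviousTemplate=True` but no `Parameters`: for a template parameter with no default, CloudFormation then has no value to use. A boto3 sketch of the failing call and of the usual workaround of carrying the existing values forward (the stack name and tag below are illustrative):

```python
import boto3

cfn = boto3.client("cloudformation")
stack = cfn.describe_stacks(StackName="my-stack")["Stacks"][0]

# Fails for templates whose parameters have no defaults:
#   ValidationError: Parameters: [input_param] must have values
# cfn.update_stack(StackName="my-stack", UsePreviousTemplate=True,
#                  Tags=[{"Key": "BusinessUnit", "Value": "abc"}])

# Re-using the stack's current parameter values avoids the error:
params = [{"ParameterKey": p["ParameterKey"], "UsePreviousValue": True}
          for p in stack.get("Parameters", [])]
cfn.update_stack(
    StackName=stack["StackName"],
    UsePreviousTemplate=True,
    Parameters=params,
    Tags=[{"Key": "BusinessUnit", "Value": "abc"}],
)
```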
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/cfn.py`
Content:
```
1 # Copyright 2015-2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import logging
17
18 from concurrent.futures import as_completed
19
20 from c7n.actions import BaseAction
21 from c7n.manager import resources
22 from c7n.query import QueryResourceManager
23 from c7n.utils import local_session, type_schema
24 from c7n.tags import RemoveTag, Tag
25
26 log = logging.getLogger('custodian.cfn')
27
28
29 @resources.register('cfn')
30 class CloudFormation(QueryResourceManager):
31
32 class resource_type(object):
33 service = 'cloudformation'
34 type = 'stack'
35 enum_spec = ('describe_stacks', 'Stacks[]', None)
36 id = 'StackName'
37 filter_name = 'StackName'
38 filter_type = 'scalar'
39 name = 'StackName'
40 date = 'CreationTime'
41 dimension = None
42 config_type = 'AWS::CloudFormation::Stack'
43
44
45 @CloudFormation.action_registry.register('delete')
46 class Delete(BaseAction):
47 """Action to delete cloudformation stacks
48
49 It is recommended to use a filter to avoid unwanted deletion of stacks
50
51 :example:
52
53 .. code-block:: yaml
54
55 policies:
56 - name: cloudformation-delete-failed-stacks
57 resource: cfn
58 filters:
59 - StackStatus: ROLLBACK_COMPLETE
60 actions:
61 - delete
62 """
63
64 schema = type_schema('delete')
65 permissions = ("cloudformation:DeleteStack",)
66
67 def process(self, stacks):
68 with self.executor_factory(max_workers=10) as w:
69 list(w.map(self.process_stacks, stacks))
70
71 def process_stacks(self, stack):
72 client = local_session(
73 self.manager.session_factory).client('cloudformation')
74 client.delete_stack(StackName=stack['StackName'])
75
76
77 @CloudFormation.action_registry.register('set-protection')
78 class SetProtection(BaseAction):
79 """Action to disable termination protection
80
81 It is recommended to use a filter to avoid unwanted deletion of stacks
82
83 :example:
84
85 .. code-block:: yaml
86
87 policies:
88 - name: cloudformation-disable-protection
89 resource: cfn
90 filters:
91 - StackStatus: CREATE_COMPLETE
92 actions:
93 - type: set-protection
94 state: False
95 """
96
97 schema = type_schema(
98 'set-protection', state={'type': 'boolean', 'default': False})
99
100 permissions = ('cloudformation:UpdateStack',)
101
102 def process(self, stacks):
103 client = local_session(
104 self.manager.session_factory).client('cloudformation')
105
106 with self.executor_factory(max_workers=3) as w:
107 futures = {}
108 for s in stacks:
109 futures[w.submit(self.process_stacks, client, s)] = s
110 for f in as_completed(futures):
111 s = futures[f]
112 if f.exception():
113 self.log.error(
114 "Error updating protection stack:%s error:%s",
115 s['StackName'], f.exception())
116
117 def process_stacks(self, client, stack):
118 client.update_termination_protection(
119 EnableTerminationProtection=self.data.get('state', False),
120 StackName=stack['StackName'])
121
122
123 @CloudFormation.action_registry.register('tag')
124 class CloudFormationAddTag(Tag):
125 """Action to tag a cloudformation stack
126
127 :example:
128
129 .. code-block: yaml
130
131 policies:
132 - name: add-cfn-tag
133 resource: cfn
134 filters:
135 - 'tag:DesiredTag': absent
136 actions:
137 - type: tag
138 key: DesiredTag
139 value: DesiredValue
140 """
141 permissions = ('cloudformation:UpdateStack',)
142
143 def process_resource_set(self, stacks, tags):
144 client = local_session(
145 self.manager.session_factory).client('cloudformation')
146
147 def _tag_stacks(s):
148 client.update_stack(
149 StackName=s['StackName'],
150 UsePreviousTemplate=True,
151 Tags=tags)
152
153 with self.executor_factory(max_workers=2) as w:
154 list(w.map(_tag_stacks, stacks))
155
156
157 @CloudFormation.action_registry.register('remove-tag')
158 class CloudFormationRemoveTag(RemoveTag):
159 """Action to remove tags from a cloudformation stack
160
161 :example:
162
163 .. code-block: yaml
164
165 policies:
166 - name: add-cfn-tag
167 resource: cfn
168 filters:
169 - 'tag:DesiredTag': present
170 actions:
171 - type: remove-tag
172 tags: ['DesiredTag']
173 """
174
175 def process_resource_set(self, stacks, keys):
176 client = local_session(
177 self.manager.session_factory).client('cloudformation')
178
179 def _remove_tag(s):
180 tags = [t for t in s['Tags'] if t['Key'] not in keys]
181 client.update_stack(
182 StackName=s['StackName'],
183 UsePreviousTemplate=True,
184 Tags=tags)
185
186 with self.executor_factory(max_workers=2) as w:
187 list(w.map(_remove_tag, stacks))
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/c7n/resources/cfn.py b/c7n/resources/cfn.py
--- a/c7n/resources/cfn.py
+++ b/c7n/resources/cfn.py
@@ -145,9 +145,14 @@
self.manager.session_factory).client('cloudformation')
def _tag_stacks(s):
+ params = []
+ for p in s.get('Parameters', []):
+ params.append({'ParameterKey': p['ParameterKey'],
+ 'UsePreviousValue': True})
client.update_stack(
StackName=s['StackName'],
UsePreviousTemplate=True,
+ Parameters=params,
Tags=tags)
with self.executor_factory(max_workers=2) as w:
|
{"golden_diff": "diff --git a/c7n/resources/cfn.py b/c7n/resources/cfn.py\n--- a/c7n/resources/cfn.py\n+++ b/c7n/resources/cfn.py\n@@ -145,9 +145,14 @@\n self.manager.session_factory).client('cloudformation')\n \n def _tag_stacks(s):\n+ params = []\n+ for p in s.get('Parameters', []):\n+ params.append({'ParameterKey': p['ParameterKey'],\n+ 'UsePreviousValue': True})\n client.update_stack(\n StackName=s['StackName'],\n UsePreviousTemplate=True,\n+ Parameters=params,\n Tags=tags)\n \n with self.executor_factory(max_workers=2) as w:\n", "issue": "Tagging CloudFormation - Error: Parameters must have value\nInitially the stack is created with an input parameter.\r\n\r\n**c7n policy**\r\n```\r\npolicies:\r\n - name: add-cfn-tag\r\n resource: cfn\r\n filters:\r\n - \"tag:testcfn\": present\r\n actions:\r\n - type: tag\r\n value: abc\r\n key: BusinessUnit\r\n```\r\n**Error**\r\nAn error occurred (ValidationError) when calling the UpdateStack operation: Parameters: [input_param] must have values\n", "before_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom concurrent.futures import as_completed\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema\nfrom c7n.tags import RemoveTag, Tag\n\nlog = logging.getLogger('custodian.cfn')\n\n\[email protected]('cfn')\nclass CloudFormation(QueryResourceManager):\n\n class resource_type(object):\n service = 'cloudformation'\n type = 'stack'\n enum_spec = ('describe_stacks', 'Stacks[]', None)\n id = 'StackName'\n filter_name = 'StackName'\n filter_type = 'scalar'\n name = 'StackName'\n date = 'CreationTime'\n dimension = None\n config_type = 'AWS::CloudFormation::Stack'\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"Action to delete cloudformation stacks\n\n It is recommended to use a filter to avoid unwanted deletion of stacks\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudformation-delete-failed-stacks\n resource: cfn\n filters:\n - StackStatus: ROLLBACK_COMPLETE\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cloudformation:DeleteStack\",)\n\n def process(self, stacks):\n with self.executor_factory(max_workers=10) as w:\n list(w.map(self.process_stacks, stacks))\n\n def process_stacks(self, stack):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n client.delete_stack(StackName=stack['StackName'])\n\n\[email protected]_registry.register('set-protection')\nclass SetProtection(BaseAction):\n \"\"\"Action to disable termination protection\n\n It is recommended to use a filter to avoid unwanted deletion of stacks\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudformation-disable-protection\n resource: cfn\n filters:\n - StackStatus: CREATE_COMPLETE\n actions:\n - type: set-protection\n state: False\n \"\"\"\n\n schema = type_schema(\n 'set-protection', state={'type': 'boolean', 'default': False})\n\n permissions = ('cloudformation:UpdateStack',)\n\n def process(self, stacks):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n\n with self.executor_factory(max_workers=3) as w:\n futures = {}\n for s in stacks:\n futures[w.submit(self.process_stacks, client, s)] = s\n for f in as_completed(futures):\n s = futures[f]\n if f.exception():\n self.log.error(\n \"Error updating protection stack:%s error:%s\",\n s['StackName'], f.exception())\n\n def process_stacks(self, client, stack):\n client.update_termination_protection(\n EnableTerminationProtection=self.data.get('state', False),\n StackName=stack['StackName'])\n\n\[email protected]_registry.register('tag')\nclass CloudFormationAddTag(Tag):\n \"\"\"Action to tag a cloudformation stack\n\n :example:\n\n .. code-block: yaml\n\n policies:\n - name: add-cfn-tag\n resource: cfn\n filters:\n - 'tag:DesiredTag': absent\n actions:\n - type: tag\n key: DesiredTag\n value: DesiredValue\n \"\"\"\n permissions = ('cloudformation:UpdateStack',)\n\n def process_resource_set(self, stacks, tags):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n\n def _tag_stacks(s):\n client.update_stack(\n StackName=s['StackName'],\n UsePreviousTemplate=True,\n Tags=tags)\n\n with self.executor_factory(max_workers=2) as w:\n list(w.map(_tag_stacks, stacks))\n\n\[email protected]_registry.register('remove-tag')\nclass CloudFormationRemoveTag(RemoveTag):\n \"\"\"Action to remove tags from a cloudformation stack\n\n :example:\n\n .. 
code-block: yaml\n\n policies:\n - name: add-cfn-tag\n resource: cfn\n filters:\n - 'tag:DesiredTag': present\n actions:\n - type: remove-tag\n tags: ['DesiredTag']\n \"\"\"\n\n def process_resource_set(self, stacks, keys):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n\n def _remove_tag(s):\n tags = [t for t in s['Tags'] if t['Key'] not in keys]\n client.update_stack(\n StackName=s['StackName'],\n UsePreviousTemplate=True,\n Tags=tags)\n\n with self.executor_factory(max_workers=2) as w:\n list(w.map(_remove_tag, stacks))\n", "path": "c7n/resources/cfn.py"}], "after_files": [{"content": "# Copyright 2015-2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom concurrent.futures import as_completed\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema\nfrom c7n.tags import RemoveTag, Tag\n\nlog = logging.getLogger('custodian.cfn')\n\n\[email protected]('cfn')\nclass CloudFormation(QueryResourceManager):\n\n class resource_type(object):\n service = 'cloudformation'\n type = 'stack'\n enum_spec = ('describe_stacks', 'Stacks[]', None)\n id = 'StackName'\n filter_name = 'StackName'\n filter_type = 'scalar'\n name = 'StackName'\n date = 'CreationTime'\n dimension = None\n config_type = 'AWS::CloudFormation::Stack'\n\n\[email protected]_registry.register('delete')\nclass Delete(BaseAction):\n \"\"\"Action to delete cloudformation stacks\n\n It is recommended to use a filter to avoid unwanted deletion of stacks\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudformation-delete-failed-stacks\n resource: cfn\n filters:\n - StackStatus: ROLLBACK_COMPLETE\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cloudformation:DeleteStack\",)\n\n def process(self, stacks):\n with self.executor_factory(max_workers=10) as w:\n list(w.map(self.process_stacks, stacks))\n\n def process_stacks(self, stack):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n client.delete_stack(StackName=stack['StackName'])\n\n\[email protected]_registry.register('set-protection')\nclass SetProtection(BaseAction):\n \"\"\"Action to disable termination protection\n\n It is recommended to use a filter to avoid unwanted deletion of stacks\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudformation-disable-protection\n resource: cfn\n filters:\n - StackStatus: CREATE_COMPLETE\n actions:\n - type: set-protection\n state: False\n \"\"\"\n\n schema = type_schema(\n 'set-protection', state={'type': 'boolean', 'default': False})\n\n permissions = ('cloudformation:UpdateStack',)\n\n def process(self, stacks):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n\n with self.executor_factory(max_workers=3) as w:\n futures = {}\n for s in stacks:\n futures[w.submit(self.process_stacks, client, s)] = s\n for f in as_completed(futures):\n s = futures[f]\n if f.exception():\n self.log.error(\n \"Error updating protection stack:%s error:%s\",\n s['StackName'], f.exception())\n\n def process_stacks(self, client, stack):\n client.update_termination_protection(\n EnableTerminationProtection=self.data.get('state', False),\n StackName=stack['StackName'])\n\n\[email protected]_registry.register('tag')\nclass CloudFormationAddTag(Tag):\n \"\"\"Action to tag a cloudformation stack\n\n :example:\n\n .. code-block: yaml\n\n policies:\n - name: add-cfn-tag\n resource: cfn\n filters:\n - 'tag:DesiredTag': absent\n actions:\n - type: tag\n key: DesiredTag\n value: DesiredValue\n \"\"\"\n permissions = ('cloudformation:UpdateStack',)\n\n def process_resource_set(self, stacks, tags):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n\n def _tag_stacks(s):\n params = []\n for p in s.get('Parameters', []):\n params.append({'ParameterKey': p['ParameterKey'],\n 'UsePreviousValue': True})\n client.update_stack(\n StackName=s['StackName'],\n UsePreviousTemplate=True,\n Parameters=params,\n Tags=tags)\n\n with self.executor_factory(max_workers=2) as w:\n list(w.map(_tag_stacks, stacks))\n\n\[email protected]_registry.register('remove-tag')\nclass CloudFormationRemoveTag(RemoveTag):\n \"\"\"Action to remove tags from a cloudformation stack\n\n :example:\n\n .. code-block: yaml\n\n policies:\n - name: add-cfn-tag\n resource: cfn\n filters:\n - 'tag:DesiredTag': present\n actions:\n - type: remove-tag\n tags: ['DesiredTag']\n \"\"\"\n\n def process_resource_set(self, stacks, keys):\n client = local_session(\n self.manager.session_factory).client('cloudformation')\n\n def _remove_tag(s):\n tags = [t for t in s['Tags'] if t['Key'] not in keys]\n client.update_stack(\n StackName=s['StackName'],\n UsePreviousTemplate=True,\n Tags=tags)\n\n with self.executor_factory(max_workers=2) as w:\n list(w.map(_remove_tag, stacks))\n", "path": "c7n/resources/cfn.py"}]}
| 2,041 | 157 |
gh_patches_debug_235
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1460
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logging configuration in contrib/utils
# Question
`pyhf.contrib.utils` sets up logging:
https://github.com/scikit-hep/pyhf/blob/6b769fd6f5e1473deba2b4c55d49ebdb3db5b447/src/pyhf/contrib/utils.py#L9
This interferes with custom logging users may want to set up. To achieve this now, they would have to do so before `from pyhf.contrib.utils import download`. To avoid this issue, the logging should not be configured in this part of the code (and only for the CLI).
# Relevant Issues and Pull Requests
#865
User-defined log formatting
# Description
`pyhf` uses `logging` for outputs, and calls `logging.basicConfig()` in a few places.
This has the effect of preventing the user to set their desired logging behavior after `pyhf` import.
While calling this a bug might be a bit of a stretch, I think it might be unintentional since `pyhf` does not apply any logging formatting as far as I can tell.
# Expected Behavior
I expect no calls to `logging.basicConfig()` within `pyhf` to leave the formatting fully up to the user, no matter whether they want to set it before or after importing `pyhf`.
# Actual Behavior
User-defined `logging` formatting only works before importing `pyhf`.
# Steps to Reproduce
importing `pyhf` before formatting:
```
import logging
import pyhf
print(pyhf.__version__)
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
log.info("message")
```
output:
```
0.4.1
```
and when applying formatting before input, the expected behavior:
```
import logging
logging.basicConfig(level=logging.INFO)
import pyhf
print(pyhf.__version__)
log = logging.getLogger(__name__)
log.info("message")
```
output:
```
0.4.1
INFO:__main__:message
```
# Checklist
- [ ] Run `git fetch` to get the most up to date version of `master`
- no, but checked code on master to confirm that the relevant part is unchanged
- [X] Searched through existing Issues to confirm this is not a duplicate issue
- [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/contrib/utils.py`
Content:
```
1 """Helper utilities for common tasks."""
2
3 from urllib.parse import urlparse
4 import tarfile
5 from io import BytesIO
6 import logging
7 from .. import exceptions
8
9 logging.basicConfig()
10 log = logging.getLogger(__name__)
11
12 __all__ = ["download"]
13
14
15 def __dir__():
16 return __all__
17
18
19 try:
20 import requests
21
22 def download(archive_url, output_directory, force=False, compress=False):
23 """
24 Download the patchset archive from the remote URL and extract it in a
25 directory at the path given.
26
27 Example:
28
29 >>> from pyhf.contrib.utils import download
30 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods")
31 >>> import os
32 >>> sorted(os.listdir("1Lbb-likelihoods"))
33 ['BkgOnly.json', 'README.md', 'patchset.json']
34 >>> download("https://doi.org/10.17182/hepdata.90607.v3/r3", "1Lbb-likelihoods.tar.gz", compress=True)
35 >>> import glob
36 >>> glob.glob("1Lbb-likelihoods.tar.gz")
37 ['1Lbb-likelihoods.tar.gz']
38
39 Args:
40 archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.
41 output_directory (:obj:`str`): Name of the directory to unpack the archive into.
42 force (:obj:`bool`): Force download from non-approved host. Default is ``False``.
43 compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.
44
45 Raises:
46 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
47 """
48 if not force:
49 valid_hosts = ["www.hepdata.net", "doi.org"]
50 netloc = urlparse(archive_url).netloc
51 if netloc not in valid_hosts:
52 raise exceptions.InvalidArchiveHost(
53 f"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\n"
54 + "To download an archive from this host use the --force option."
55 )
56
57 with requests.get(archive_url) as response:
58 if compress:
59 with open(output_directory, "wb") as archive:
60 archive.write(response.content)
61 else:
62 with tarfile.open(
63 mode="r|gz", fileobj=BytesIO(response.content)
64 ) as archive:
65 archive.extractall(output_directory)
66
67
68 except ModuleNotFoundError:
69 log.error(
70 "\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download"
71 + "\nPlease install with: python -m pip install pyhf[contrib]\n",
72 exc_info=True,
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py
--- a/src/pyhf/contrib/utils.py
+++ b/src/pyhf/contrib/utils.py
@@ -6,7 +6,6 @@
import logging
from .. import exceptions
-logging.basicConfig()
log = logging.getLogger(__name__)
__all__ = ["download"]
|
{"golden_diff": "diff --git a/src/pyhf/contrib/utils.py b/src/pyhf/contrib/utils.py\n--- a/src/pyhf/contrib/utils.py\n+++ b/src/pyhf/contrib/utils.py\n@@ -6,7 +6,6 @@\n import logging\n from .. import exceptions\n \n-logging.basicConfig()\n log = logging.getLogger(__name__)\n \n __all__ = [\"download\"]\n", "issue": "Logging configuration in contrib/utils\n# Question\r\n\r\n`pyhf.contrib.utils` sets up logging:\r\nhttps://github.com/scikit-hep/pyhf/blob/6b769fd6f5e1473deba2b4c55d49ebdb3db5b447/src/pyhf/contrib/utils.py#L9 \r\n\r\nThis interferes with custom logging users may want to set up. To achieve this now, they would have to do so before `from pyhf.contrib.utils import download`. To avoid this issue, the logging should not be configured in this part of the code (and only for the CLI).\r\n\r\n# Relevant Issues and Pull Requests\r\n\r\n#865\r\n\nUser-defined log formatting\n# Description\r\n\r\n`pyhf` uses `logging` for outputs, and calls `logging.basicConfig()` in a few places.\r\nThis has the effect of preventing the user to set their desired logging behavior after `pyhf` import.\r\nWhile calling this a bug might be a bit of a stretch, I think it might be unintentional since `pyhf` does not apply any logging formatting as far as I can tell.\r\n\r\n# Expected Behavior\r\n\r\nI expect no calls to `logging.basicConfig()` within `pyhf` to leave the formatting fully up to the user, no matter whether they want to set it before or after importing `pyhf`.\r\n\r\n# Actual Behavior\r\n\r\nUser-defined `logging` formatting only works before importing `pyhf`.\r\n\r\n# Steps to Reproduce\r\n\r\nimporting `pyhf` before formatting:\r\n```\r\nimport logging\r\nimport pyhf\r\nprint(pyhf.__version__)\r\nlogging.basicConfig(level=logging.INFO)\r\nlog = logging.getLogger(__name__)\r\nlog.info(\"message\")\r\n```\r\noutput:\r\n```\r\n0.4.1\r\n```\r\nand when applying formatting before input, the expected behavior:\r\n```\r\nimport logging\r\nlogging.basicConfig(level=logging.INFO)\r\nimport pyhf\r\nprint(pyhf.__version__)\r\nlog = logging.getLogger(__name__)\r\nlog.info(\"message\")\r\n```\r\noutput:\r\n```\r\n0.4.1\r\nINFO:__main__:message\r\n``` \r\n\r\n# Checklist\r\n\r\n- [ ] Run `git fetch` to get the most up to date version of `master`\r\n - no, but checked code on master to confirm that the relevant part is unchanged\r\n- [X] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [X] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom .. 
import exceptions\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n with requests.get(archive_url) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}], "after_files": [{"content": "\"\"\"Helper utilities for common tasks.\"\"\"\n\nfrom urllib.parse import urlparse\nimport tarfile\nfrom io import BytesIO\nimport logging\nfrom .. import exceptions\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"download\"]\n\n\ndef __dir__():\n return __all__\n\n\ntry:\n import requests\n\n def download(archive_url, output_directory, force=False, compress=False):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n >>> from pyhf.contrib.utils import download\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods\")\n >>> import os\n >>> sorted(os.listdir(\"1Lbb-likelihoods\"))\n ['BkgOnly.json', 'README.md', 'patchset.json']\n >>> download(\"https://doi.org/10.17182/hepdata.90607.v3/r3\", \"1Lbb-likelihoods.tar.gz\", compress=True)\n >>> import glob\n >>> glob.glob(\"1Lbb-likelihoods.tar.gz\")\n ['1Lbb-likelihoods.tar.gz']\n\n Args:\n archive_url (:obj:`str`): The URL of the :class:`~pyhf.patchset.PatchSet` archive to download.\n output_directory (:obj:`str`): Name of the directory to unpack the archive into.\n force (:obj:`bool`): Force download from non-approved host. 
Default is ``False``.\n compress (:obj:`bool`): Keep the archive in a compressed ``tar.gz`` form. Default is ``False``.\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n if not force:\n valid_hosts = [\"www.hepdata.net\", \"doi.org\"]\n netloc = urlparse(archive_url).netloc\n if netloc not in valid_hosts:\n raise exceptions.InvalidArchiveHost(\n f\"{netloc} is not an approved archive host: {', '.join(str(host) for host in valid_hosts)}\\n\"\n + \"To download an archive from this host use the --force option.\"\n )\n\n with requests.get(archive_url) as response:\n if compress:\n with open(output_directory, \"wb\") as archive:\n archive.write(response.content)\n else:\n with tarfile.open(\n mode=\"r|gz\", fileobj=BytesIO(response.content)\n ) as archive:\n archive.extractall(output_directory)\n\n\nexcept ModuleNotFoundError:\n log.error(\n \"\\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\",\n exc_info=True,\n )\n", "path": "src/pyhf/contrib/utils.py"}]}
| 1,550 | 77 |
gh_patches_debug_8585
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4759
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a job for Python 3.9 to .travis.yml
It looks like Travis support specifying such a Python version as `3.9-dev`.
While I’m not sure we should officially support Python 3.9 it until its release, running tests on it will allow us to catch any issue early.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 install_requires = [
22 'Twisted>=17.9.0',
23 'cryptography>=2.0',
24 'cssselect>=0.9.1',
25 'itemloaders>=1.0.1',
26 'parsel>=1.5.0',
27 'pyOpenSSL>=16.2.0',
28 'queuelib>=1.4.2',
29 'service_identity>=16.0.0',
30 'w3lib>=1.17.0',
31 'zope.interface>=4.1.3',
32 'protego>=0.1.15',
33 'itemadapter>=0.1.0',
34 ]
35 extras_require = {}
36 cpython_dependencies = [
37 'lxml>=3.5.0',
38 'PyDispatcher>=2.0.5',
39 ]
40 if has_environment_marker_platform_impl_support():
41 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
42 extras_require[':platform_python_implementation == "PyPy"'] = [
43 # Earlier lxml versions are affected by
44 # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
45 # which was fixed in Cython 0.26, released on 2017-06-19, and used to
46 # generate the C headers of lxml release tarballs published since then, the
47 # first of which was:
48 'lxml>=4.0.0',
49 'PyPyDispatcher>=2.1.0',
50 ]
51 else:
52 install_requires.extend(cpython_dependencies)
53
54
55 setup(
56 name='Scrapy',
57 version=version,
58 url='https://scrapy.org',
59 project_urls={
60 'Documentation': 'https://docs.scrapy.org/',
61 'Source': 'https://github.com/scrapy/scrapy',
62 'Tracker': 'https://github.com/scrapy/scrapy/issues',
63 },
64 description='A high-level Web Crawling and Web Scraping framework',
65 long_description=open('README.rst').read(),
66 author='Scrapy developers',
67 maintainer='Pablo Hoffman',
68 maintainer_email='[email protected]',
69 license='BSD',
70 packages=find_packages(exclude=('tests', 'tests.*')),
71 include_package_data=True,
72 zip_safe=False,
73 entry_points={
74 'console_scripts': ['scrapy = scrapy.cmdline:execute']
75 },
76 classifiers=[
77 'Framework :: Scrapy',
78 'Development Status :: 5 - Production/Stable',
79 'Environment :: Console',
80 'Intended Audience :: Developers',
81 'License :: OSI Approved :: BSD License',
82 'Operating System :: OS Independent',
83 'Programming Language :: Python',
84 'Programming Language :: Python :: 3',
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Programming Language :: Python :: 3.8',
88 'Programming Language :: Python :: Implementation :: CPython',
89 'Programming Language :: Python :: Implementation :: PyPy',
90 'Topic :: Internet :: WWW/HTTP',
91 'Topic :: Software Development :: Libraries :: Application Frameworks',
92 'Topic :: Software Development :: Libraries :: Python Modules',
93 ],
94 python_requires='>=3.6',
95 install_requires=install_requires,
96 extras_require=extras_require,
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -85,6 +85,7 @@
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -85,6 +85,7 @@\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n+ 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n", "issue": "Add a job for Python 3.9 to .travis.yml\nIt looks like Travis support specifying such a Python version as `3.9-dev`.\r\n\r\nWhile I\u2019m not sure we should officially support Python 3.9 it until its release, running tests on it will allow us to catch any issue early.\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python 
:: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}], "after_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n]\nextras_require = {}\ncpython_dependencies = [\n 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n 
python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
| 1,398 | 115 |
gh_patches_debug_1345
|
rasdani/github-patches
|
git_diff
|
castorini__pyserini-667
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch to jnius_config.add_classpath
Currently, pyserini replaces any previously registered jars on the classpath in its setup code. Is there any reason to not use add_classpath() instead of set_classpath()?
Here is the pyjnius relevant code:
```python
def set_classpath(*path):
"""
Sets the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable.
"""
check_vm_running()
global classpath
classpath = list(path)
def add_classpath(*path):
"""
Appends items to the classpath for the JVM to use.
Replaces any existing classpath, overriding the CLASSPATH environment variable.
"""
check_vm_running()
global classpath
if classpath is None:
classpath = list(path)
else:
classpath.extend(path)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyserini/setup.py`
Content:
```
1 #
2 # Pyserini: Reproducible IR research with sparse and dense representations
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 """
18 Module for adding Anserini jar to classpath for pyjnius usage
19 """
20
21 import glob
22 import os
23
24 import jnius_config
25
26
27 def configure_classpath(anserini_root="."):
28 """
29 Parameters
30 ----------
31 anserini_root : str
32 (Optional) path to root anserini directory.
33
34 """
35 paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))
36 if not paths:
37 raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))
38
39 latest = max(paths, key=os.path.getctime)
40 jnius_config.set_classpath(latest)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyserini/setup.py b/pyserini/setup.py
--- a/pyserini/setup.py
+++ b/pyserini/setup.py
@@ -37,4 +37,4 @@
raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))
latest = max(paths, key=os.path.getctime)
- jnius_config.set_classpath(latest)
+ jnius_config.add_classpath(latest)
|
{"golden_diff": "diff --git a/pyserini/setup.py b/pyserini/setup.py\n--- a/pyserini/setup.py\n+++ b/pyserini/setup.py\n@@ -37,4 +37,4 @@\n raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n \n latest = max(paths, key=os.path.getctime)\n- jnius_config.set_classpath(latest)\n+ jnius_config.add_classpath(latest)\n", "issue": "Switch to jnius_config.add_classpath\nCurrently, pyserini replaces any previously registered jars on the classpath in its setup code. Is there any reason to not use add_classpath() instead of set_classpath()?\r\n\r\nHere is the pyjnius relevant code:\r\n```python\r\ndef set_classpath(*path):\r\n \"\"\"\r\n Sets the classpath for the JVM to use. Replaces any existing classpath, overriding the CLASSPATH environment variable.\r\n \"\"\"\r\n check_vm_running()\r\n global classpath\r\n classpath = list(path)\r\n\r\n\r\ndef add_classpath(*path):\r\n \"\"\"\r\n Appends items to the classpath for the JVM to use.\r\n Replaces any existing classpath, overriding the CLASSPATH environment variable.\r\n \"\"\"\r\n check_vm_running()\r\n global classpath\r\n if classpath is None:\r\n classpath = list(path)\r\n else:\r\n classpath.extend(path)\r\n```\n", "before_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nModule for adding Anserini jar to classpath for pyjnius usage\n\"\"\"\n\nimport glob\nimport os\n\nimport jnius_config\n\n\ndef configure_classpath(anserini_root=\".\"):\n \"\"\"\n Parameters\n ----------\n anserini_root : str\n (Optional) path to root anserini directory.\n\n \"\"\"\n paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))\n if not paths:\n raise Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n\n latest = max(paths, key=os.path.getctime)\n jnius_config.set_classpath(latest)\n", "path": "pyserini/setup.py"}], "after_files": [{"content": "#\n# Pyserini: Reproducible IR research with sparse and dense representations\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nModule for adding Anserini jar to classpath for pyjnius usage\n\"\"\"\n\nimport glob\nimport os\n\nimport jnius_config\n\n\ndef configure_classpath(anserini_root=\".\"):\n \"\"\"\n Parameters\n ----------\n anserini_root : str\n (Optional) path to root anserini directory.\n\n \"\"\"\n paths = glob.glob(os.path.join(anserini_root, 'anserini-*-fatjar.jar'))\n if not paths:\n raise 
Exception('No matching jar file found in {}'.format(os.path.abspath(anserini_root)))\n\n latest = max(paths, key=os.path.getctime)\n jnius_config.add_classpath(latest)\n", "path": "pyserini/setup.py"}]}
| 803 | 101 |
gh_patches_debug_21488
|
rasdani/github-patches
|
git_diff
|
WeblateOrg__weblate-10189
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Add missing languages" add-on not working
### Describe the issue
I have enabled the "Add missing languages" add-on on https://hosted.weblate.org/projects/catima/. However, despite waiting over 24 hours as the documentation on https://docs.weblate.org/en/latest/admin/addons.html#addon-weblate-consistency-languages states, it has not put the different components of the same project in sync.
This is most noticeable when comparing https://hosted.weblate.org/projects/catima/catima/ with https://hosted.weblate.org/projects/catima/android-debug/
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
1. Enable the "Add missing languages" add-on in a project with multiple components where one component has less languages than the other
2. Wait at least 24 hours as the add-on states
### Expected behavior
All components have the same languages, missing languages on components get created
### Screenshots
Android component:

Android (Debug) component:

### Exception traceback
_No response_
### How do you run Weblate?
weblate.org service
### Weblate versions
_No response_
### Weblate deploy checks
_No response_
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/addons/consistency.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from django.db.models import Q
6 from django.utils.translation import gettext_lazy
7
8 from weblate.addons.base import BaseAddon
9 from weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD
10 from weblate.addons.tasks import language_consistency
11 from weblate.lang.models import Language
12
13
14 class LangaugeConsistencyAddon(BaseAddon):
15 events = (EVENT_DAILY, EVENT_POST_ADD)
16 name = "weblate.consistency.languages"
17 verbose = gettext_lazy("Add missing languages")
18 description = gettext_lazy(
19 "Ensures a consistent set of languages is used for all components "
20 "within a project."
21 )
22 icon = "language.svg"
23 project_scope = True
24
25 def daily(self, component):
26 language_consistency.delay(
27 component.project_id,
28 list(
29 Language.objects.filter(
30 Q(translation__component=component) | Q(component=component)
31 ).values_list("pk", flat=True)
32 ),
33 )
34
35 def post_add(self, translation):
36 language_consistency.delay(
37 translation.component.project_id,
38 [translation.language_id],
39 )
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/weblate/addons/consistency.py b/weblate/addons/consistency.py
--- a/weblate/addons/consistency.py
+++ b/weblate/addons/consistency.py
@@ -2,13 +2,11 @@
#
# SPDX-License-Identifier: GPL-3.0-or-later
-from django.db.models import Q
from django.utils.translation import gettext_lazy
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD
from weblate.addons.tasks import language_consistency
-from weblate.lang.models import Language
class LangaugeConsistencyAddon(BaseAddon):
@@ -25,11 +23,7 @@
def daily(self, component):
language_consistency.delay(
component.project_id,
- list(
- Language.objects.filter(
- Q(translation__component=component) | Q(component=component)
- ).values_list("pk", flat=True)
- ),
+ [language.id for language in component.project.languages],
)
def post_add(self, translation):
|
{"golden_diff": "diff --git a/weblate/addons/consistency.py b/weblate/addons/consistency.py\n--- a/weblate/addons/consistency.py\n+++ b/weblate/addons/consistency.py\n@@ -2,13 +2,11 @@\n #\n # SPDX-License-Identifier: GPL-3.0-or-later\n \n-from django.db.models import Q\n from django.utils.translation import gettext_lazy\n \n from weblate.addons.base import BaseAddon\n from weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD\n from weblate.addons.tasks import language_consistency\n-from weblate.lang.models import Language\n \n \n class LangaugeConsistencyAddon(BaseAddon):\n@@ -25,11 +23,7 @@\n def daily(self, component):\n language_consistency.delay(\n component.project_id,\n- list(\n- Language.objects.filter(\n- Q(translation__component=component) | Q(component=component)\n- ).values_list(\"pk\", flat=True)\n- ),\n+ [language.id for language in component.project.languages],\n )\n \n def post_add(self, translation):\n", "issue": "\"Add missing languages\" add-on not working\n### Describe the issue\n\nI have enabled the \"Add missing languages\" add-on on https://hosted.weblate.org/projects/catima/. However, despite waiting over 24 hours as the documentation on https://docs.weblate.org/en/latest/admin/addons.html#addon-weblate-consistency-languages states, it has not put the different components of the same project in sync.\r\n\r\nThis is most noticeable when comparing https://hosted.weblate.org/projects/catima/catima/ with https://hosted.weblate.org/projects/catima/android-debug/\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Enable the \"Add missing languages\" add-on in a project with multiple components where one component has less languages than the other\r\n2. 
Wait at least 24 hours as the add-on states\n\n### Expected behavior\n\nAll components have the same languages, missing languages on components get created\n\n### Screenshots\n\nAndroid component:\r\n\r\n\r\nAndroid (Debug) component:\r\n\r\n\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.db.models import Q\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import BaseAddon\nfrom weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD\nfrom weblate.addons.tasks import language_consistency\nfrom weblate.lang.models import Language\n\n\nclass LangaugeConsistencyAddon(BaseAddon):\n events = (EVENT_DAILY, EVENT_POST_ADD)\n name = \"weblate.consistency.languages\"\n verbose = gettext_lazy(\"Add missing languages\")\n description = gettext_lazy(\n \"Ensures a consistent set of languages is used for all components \"\n \"within a project.\"\n )\n icon = \"language.svg\"\n project_scope = True\n\n def daily(self, component):\n language_consistency.delay(\n component.project_id,\n list(\n Language.objects.filter(\n Q(translation__component=component) | Q(component=component)\n ).values_list(\"pk\", flat=True)\n ),\n )\n\n def post_add(self, translation):\n language_consistency.delay(\n translation.component.project_id,\n [translation.language_id],\n )\n", "path": "weblate/addons/consistency.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import BaseAddon\nfrom weblate.addons.events import EVENT_DAILY, EVENT_POST_ADD\nfrom weblate.addons.tasks import language_consistency\n\n\nclass LangaugeConsistencyAddon(BaseAddon):\n events = (EVENT_DAILY, EVENT_POST_ADD)\n name = \"weblate.consistency.languages\"\n verbose = gettext_lazy(\"Add missing languages\")\n description = gettext_lazy(\n \"Ensures a consistent set of languages is used for all components \"\n \"within a project.\"\n )\n icon = \"language.svg\"\n project_scope = True\n\n def daily(self, component):\n language_consistency.delay(\n component.project_id,\n [language.id for language in component.project.languages],\n )\n\n def post_add(self, translation):\n language_consistency.delay(\n translation.component.project_id,\n [translation.language_id],\n )\n", "path": "weblate/addons/consistency.py"}]}
| 1,015 | 242 |
gh_patches_debug_3297
|
rasdani/github-patches
|
git_diff
|
liberapay__liberapay.com-1484
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Requests access to GitHub private repos?
Hi. I'm a brand-new user. I have a question I didn't see in the FAQ or when I searched issues here.
I was going to connect my GitHub account and saw this:
> Liberapay by liberapay
> wants to access your greghendershott account
>
> Organizations and teams
> Read-only access
>
> This application will be able to read your organization and team membership and private Projects.
I almost clicked OK, but noticed "**private** Projects". I stopped. I don't want to do that.
Is this as-intended?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/elsewhere/github.py`
Content:
```
1 from liberapay.elsewhere._base import PlatformOAuth2
2 from liberapay.elsewhere._exceptions import CantReadMembership
3 from liberapay.elsewhere._extractors import key, drop_keys
4 from liberapay.elsewhere._paginators import header_links_paginator
5
6
7 class GitHub(PlatformOAuth2):
8
9 # Platform attributes
10 name = 'github'
11 display_name = 'GitHub'
12 fontawesome_name = name
13 account_url = 'https://github.com/{user_name}'
14 repo_url = 'https://github.com/{slug}'
15 has_teams = True
16
17 # Auth attributes
18 auth_url = 'https://github.com/login/oauth/authorize'
19 access_token_url = 'https://github.com/login/oauth/access_token'
20 oauth_email_scope = 'user:email'
21 oauth_default_scope = ['read:org']
22
23 # API attributes
24 api_format = 'json'
25 api_paginator = header_links_paginator()
26 api_url = 'https://api.github.com'
27 api_app_auth_params = 'client_id={api_key}&client_secret={api_secret}'
28 api_user_info_path = '/user/{user_id}'
29 api_user_name_info_path = '/users/{user_name}'
30 api_user_self_info_path = '/user'
31 api_team_members_path = '/orgs/{user_name}/public_members'
32 api_friends_path = '/users/{user_name}/following'
33 api_repos_path = '/users/{user_name}/repos?type=owner&sort=updated&per_page=100'
34 api_starred_path = '/users/{user_name}/starred'
35 ratelimit_headers_prefix = 'x-ratelimit-'
36
37 # User info extractors
38 x_user_id = key('id')
39 x_user_name = key('login')
40 x_display_name = key('name')
41 x_email = key('email')
42 x_gravatar_id = key('gravatar_id')
43 x_avatar_url = key('avatar_url')
44 x_is_team = key('type', clean=lambda t: t.lower() == 'organization')
45 x_description = key('bio')
46 x_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))
47
48 # Repo info extractors
49 x_repo_id = key('id')
50 x_repo_name = key('name')
51 x_repo_slug = key('full_name')
52 x_repo_description = key('description')
53 x_repo_last_update = key('pushed_at')
54 x_repo_is_fork = key('fork')
55 x_repo_stars_count = key('stargazers_count')
56 x_repo_owner_id = key('owner', clean=lambda d: d['id'])
57 x_repo_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))
58
59 def get_CantReadMembership_url(self, **kw):
60 return 'https://github.com/settings/connections/applications/'+self.api_key
61
62 def is_team_member(self, org_name, sess, account):
63 org_name = org_name.lower()
64
65 # Check public membership first
66 response = self.api_get(
67 '', '/orgs/'+org_name+'/public_members/'+account.user_name,
68 sess=sess, error_handler=None
69 )
70 if response.status_code == 204:
71 return True
72 elif response.status_code != 404:
73 self.api_error_handler(response, True, self.domain)
74
75 # Check private membership
76 response = self.api_get(
77 '', '/user/memberships/orgs/'+org_name, sess=sess, error_handler=None
78 )
79 if response.status_code == 403:
80 raise CantReadMembership
81 elif response.status_code >= 400:
82 self.api_error_handler(response, True, self.domain)
83 membership = self.api_parser(response)
84 if membership['state'] == 'active':
85 return True
86
87 # Try the endpoint we were using before
88 user_orgs = self.api_parser(self.api_get('', '/user/orgs', sess=sess))
89 return any(org.get('login') == org_name for org in user_orgs)
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py
--- a/liberapay/elsewhere/github.py
+++ b/liberapay/elsewhere/github.py
@@ -18,7 +18,6 @@
auth_url = 'https://github.com/login/oauth/authorize'
access_token_url = 'https://github.com/login/oauth/access_token'
oauth_email_scope = 'user:email'
- oauth_default_scope = ['read:org']
# API attributes
api_format = 'json'
|
{"golden_diff": "diff --git a/liberapay/elsewhere/github.py b/liberapay/elsewhere/github.py\n--- a/liberapay/elsewhere/github.py\n+++ b/liberapay/elsewhere/github.py\n@@ -18,7 +18,6 @@\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n- oauth_default_scope = ['read:org']\n \n # API attributes\n api_format = 'json'\n", "issue": "Requests access to GitHub private repos?\nHi. I'm a brand-new user. I have a question I didn't see in the FAQ or when I searched issues here.\r\n\r\nI was going to connect my GitHub account and saw this:\r\n\r\n> Liberapay by liberapay\r\n> wants to access your greghendershott account\r\n> \r\n> Organizations and teams\r\n> Read-only access\r\n>\r\n> This application will be able to read your organization and team membership and private Projects.\r\n\r\nI almost clicked OK, but noticed \"**private** Projects\". I stopped. I don't want to do that.\r\n\r\nIs this as-intended?\n", "before_files": [{"content": "from liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._exceptions import CantReadMembership\nfrom liberapay.elsewhere._extractors import key, drop_keys\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitHub(PlatformOAuth2):\n\n # Platform attributes\n name = 'github'\n display_name = 'GitHub'\n fontawesome_name = name\n account_url = 'https://github.com/{user_name}'\n repo_url = 'https://github.com/{slug}'\n has_teams = True\n\n # Auth attributes\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n oauth_default_scope = ['read:org']\n\n # API attributes\n api_format = 'json'\n api_paginator = header_links_paginator()\n api_url = 'https://api.github.com'\n api_app_auth_params = 'client_id={api_key}&client_secret={api_secret}'\n api_user_info_path = '/user/{user_id}'\n api_user_name_info_path = '/users/{user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/orgs/{user_name}/public_members'\n api_friends_path = '/users/{user_name}/following'\n api_repos_path = '/users/{user_name}/repos?type=owner&sort=updated&per_page=100'\n api_starred_path = '/users/{user_name}/starred'\n ratelimit_headers_prefix = 'x-ratelimit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('login')\n x_display_name = key('name')\n x_email = key('email')\n x_gravatar_id = key('gravatar_id')\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n x_description = key('bio')\n x_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('full_name')\n x_repo_description = key('description')\n x_repo_last_update = key('pushed_at')\n x_repo_is_fork = key('fork')\n x_repo_stars_count = key('stargazers_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n x_repo_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))\n\n def get_CantReadMembership_url(self, **kw):\n return 'https://github.com/settings/connections/applications/'+self.api_key\n\n def is_team_member(self, org_name, sess, account):\n org_name = org_name.lower()\n\n # Check public membership first\n response = self.api_get(\n '', '/orgs/'+org_name+'/public_members/'+account.user_name,\n sess=sess, error_handler=None\n )\n if response.status_code == 204:\n return 
True\n elif response.status_code != 404:\n self.api_error_handler(response, True, self.domain)\n\n # Check private membership\n response = self.api_get(\n '', '/user/memberships/orgs/'+org_name, sess=sess, error_handler=None\n )\n if response.status_code == 403:\n raise CantReadMembership\n elif response.status_code >= 400:\n self.api_error_handler(response, True, self.domain)\n membership = self.api_parser(response)\n if membership['state'] == 'active':\n return True\n\n # Try the endpoint we were using before\n user_orgs = self.api_parser(self.api_get('', '/user/orgs', sess=sess))\n return any(org.get('login') == org_name for org in user_orgs)\n", "path": "liberapay/elsewhere/github.py"}], "after_files": [{"content": "from liberapay.elsewhere._base import PlatformOAuth2\nfrom liberapay.elsewhere._exceptions import CantReadMembership\nfrom liberapay.elsewhere._extractors import key, drop_keys\nfrom liberapay.elsewhere._paginators import header_links_paginator\n\n\nclass GitHub(PlatformOAuth2):\n\n # Platform attributes\n name = 'github'\n display_name = 'GitHub'\n fontawesome_name = name\n account_url = 'https://github.com/{user_name}'\n repo_url = 'https://github.com/{slug}'\n has_teams = True\n\n # Auth attributes\n auth_url = 'https://github.com/login/oauth/authorize'\n access_token_url = 'https://github.com/login/oauth/access_token'\n oauth_email_scope = 'user:email'\n\n # API attributes\n api_format = 'json'\n api_paginator = header_links_paginator()\n api_url = 'https://api.github.com'\n api_app_auth_params = 'client_id={api_key}&client_secret={api_secret}'\n api_user_info_path = '/user/{user_id}'\n api_user_name_info_path = '/users/{user_name}'\n api_user_self_info_path = '/user'\n api_team_members_path = '/orgs/{user_name}/public_members'\n api_friends_path = '/users/{user_name}/following'\n api_repos_path = '/users/{user_name}/repos?type=owner&sort=updated&per_page=100'\n api_starred_path = '/users/{user_name}/starred'\n ratelimit_headers_prefix = 'x-ratelimit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('login')\n x_display_name = key('name')\n x_email = key('email')\n x_gravatar_id = key('gravatar_id')\n x_avatar_url = key('avatar_url')\n x_is_team = key('type', clean=lambda t: t.lower() == 'organization')\n x_description = key('bio')\n x_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))\n\n # Repo info extractors\n x_repo_id = key('id')\n x_repo_name = key('name')\n x_repo_slug = key('full_name')\n x_repo_description = key('description')\n x_repo_last_update = key('pushed_at')\n x_repo_is_fork = key('fork')\n x_repo_stars_count = key('stargazers_count')\n x_repo_owner_id = key('owner', clean=lambda d: d['id'])\n x_repo_extra_info_drop = drop_keys(lambda k: k.endswith('_url'))\n\n def get_CantReadMembership_url(self, **kw):\n return 'https://github.com/settings/connections/applications/'+self.api_key\n\n def is_team_member(self, org_name, sess, account):\n org_name = org_name.lower()\n\n # Check public membership first\n response = self.api_get(\n '', '/orgs/'+org_name+'/public_members/'+account.user_name,\n sess=sess, error_handler=None\n )\n if response.status_code == 204:\n return True\n elif response.status_code != 404:\n self.api_error_handler(response, True, self.domain)\n\n # Check private membership\n response = self.api_get(\n '', '/user/memberships/orgs/'+org_name, sess=sess, error_handler=None\n )\n if response.status_code == 403:\n raise CantReadMembership\n elif response.status_code >= 400:\n self.api_error_handler(response, 
True, self.domain)\n membership = self.api_parser(response)\n if membership['state'] == 'active':\n return True\n\n # Try the endpoint we were using before\n user_orgs = self.api_parser(self.api_get('', '/user/orgs', sess=sess))\n return any(org.get('login') == org_name for org in user_orgs)\n", "path": "liberapay/elsewhere/github.py"}]}
| 1,425 | 123 |
gh_patches_debug_7347
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-10645
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🎛️ Migrate header search Stimulus Controller
> ℹ️ **Part of the [Stimulus 🎛️ RFC 78](https://github.com/wagtail/rfcs/pull/78)**
### Is your proposal related to a problem?
We have a core.js implementation of JavaScript code that, when a matching search input receives changes, will trigger an async request to the relevant search results listing. Once the endpoint responds with HTML, that HTML is patched into the results container element.
### Describe the solution you'd like
* Create a stimulus controller `w-search` that will replace the existing ad-hoc JS implementation
* The behaviour should be exactly the same as the current state, but using Stimulus data attributes for the behaviour & classes declaration (note: we will likely drop the `autofocus` and may not re-introduce the `slide` jQuery animation)
* Controller should be written in TypeScript
* Ensure that unit tests are created or updated to reflect this new behaviour
* We will need to document an upgrade consideration that the previous `window.headerSearch` approach will not work in a future release.
* We may want to introduce a console warning once all the Wagtail usage of `window.headerSearch` has been removed
* Nice to have - a Storybook story for this component
### Additional context
* Implementation https://github.com/wagtail/wagtail/blob/main/client/src/entrypoints/admin/core.js#L251-L306
* There is a very similar (almost cut-and-paste) piece of logic used for searching in the chooser modals here https://github.com/wagtail/wagtail/blob/main/client/src/includes/chooserModal.js#L109-L176 (the Stimulus controller will likely replace this, but that may be out of scope for this issue)
### Potential approach
#### Support `input` only usage (using the `window.headerSearch` config)
```JS
window.headerSearch = {
url: "{% url 'wagtailimages:listing_results' %}",
targetOutput: "#image-results"
}
```
```html
<div class="w-field__input" data-field-input="">
<svg class="icon icon-search w-field__icon" aria-hidden="true">
<use href="#icon-search"></use>
</svg>
<input
type="text"
name="q"
placeholder="Search images"
data-controller="w-search"
data-action="change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search"
id="id_q"
/>
</div>
```
#### Support `input` only usage
```html
<div class="w-field__input" data-field-input="">
<svg class="icon icon-search w-field__icon" aria-hidden="true">
<use href="#icon-search"></use>
</svg>
<input
type="text"
name="q"
placeholder="Search images"
data-controller="w-search"
data-action="change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search"
id="id_q"
data-w-search-results-value="#image-results"
data-w-search-url-value="/admin/images/results/"
/>
</div>
```
#### Support controlled form with search input as a target
```html
<form
class="col search-form"
action="/admin/images/"
method="get"
novalidate=""
role="search"
data-controller="w-search"
data-w-search-url-value="/admin/images/results/"
>
<div class="w-field__wrapper w-mb-0" data-field-wrapper="">
<label class="w-field__label w-sr-only" for="id_q" id="id_q-label">Search term</label>
<div class="w-field w-field--char_field w-field--text_input">
<div class="w-field__input" data-field-input="">
<svg class="icon icon-search w-field__icon" aria-hidden="true"><use href="#icon-search"></use></svg>
<input
type="text"
name="q"
placeholder="Search images"
data-w-search-target="input"
data-action="change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search"
id="id_q"
/>
</div>
</div>
</div>
<div class="visuallyhidden"><input disabled="" type="submit" aria-hidden="true" /></div>
</form>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/forms/search.py`
Content:
```
1 from django import forms
2 from django.utils.translation import gettext as _
3 from django.utils.translation import gettext_lazy
4
5
6 class SearchForm(forms.Form):
7 def __init__(self, *args, **kwargs):
8 placeholder = kwargs.pop("placeholder", _("Search"))
9 super().__init__(*args, **kwargs)
10 self.fields["q"].widget.attrs = {"placeholder": placeholder}
11
12 q = forms.CharField(
13 label=gettext_lazy("Search term"),
14 widget=forms.TextInput(),
15 required=False,
16 )
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py
--- a/wagtail/admin/forms/search.py
+++ b/wagtail/admin/forms/search.py
@@ -7,7 +7,10 @@
def __init__(self, *args, **kwargs):
placeholder = kwargs.pop("placeholder", _("Search"))
super().__init__(*args, **kwargs)
- self.fields["q"].widget.attrs = {"placeholder": placeholder}
+ self.fields["q"].widget.attrs = {
+ "placeholder": placeholder,
+ "data-w-swap-target": "input",
+ }
q = forms.CharField(
label=gettext_lazy("Search term"),
|
{"golden_diff": "diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py\n--- a/wagtail/admin/forms/search.py\n+++ b/wagtail/admin/forms/search.py\n@@ -7,7 +7,10 @@\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n- self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n+ self.fields[\"q\"].widget.attrs = {\n+ \"placeholder\": placeholder,\n+ \"data-w-swap-target\": \"input\",\n+ }\n \n q = forms.CharField(\n label=gettext_lazy(\"Search term\"),\n", "issue": "\ud83c\udf9b\ufe0f Migrate header search Stimulus Controller\n> \u2139\ufe0f **Part of the [Stimulus \ud83c\udf9b\ufe0f RFC 78](https://github.com/wagtail/rfcs/pull/78)**\r\n\r\n### Is your proposal related to a problem?\r\n\r\nWe have a core.js implementations of JavaScript code that, when a matching search input receives changes, will trigger an async request to the relevant search results listing. Once the endpoint returns with HTML, it will be patched into the results container HTML element.\r\n\r\n### Describe the solution you'd like\r\n\r\n* Create a stimulus controller `w-search` that will replace the existing ad-hoc JS implementation\r\n* The behaviour should be exactly the same as current state but using Stimulus data attributes for the behaviour & classes declaration (note: likely we will drop the `autofocus` and may not re-introduce the `slide` jQuery animation)\r\n* Controller should be written in TypeScript\r\n* Ensure that the existing unit tests are created to reflect this new behaviour\r\n* We will need to document an upgrade consideration that the previous `window.headerSearch` approach will not work in a future release.\r\n* We may want to introduce a console warning once all the Wagtail usage of `window.headerSearch` has been removed\r\n* Nice to have - a Storybook story for this component\r\n\r\n### Additional context\r\n\r\n* Implementation https://github.com/wagtail/wagtail/blob/main/client/src/entrypoints/admin/core.js#L251-L306\r\n* There is a very similar (almost cut & paste) of logic that is used in the chooser modals for searching here https://github.com/wagtail/wagtail/blob/main/client/src/includes/chooserModal.js#L109-L176 (the Stimulus will likely replace this but may be out of scope for this issue\r\n\r\n### Potential approach\r\n\r\n#### Support `input` only usage (with using `window.headerSearch` config)\r\n\r\n```JS\r\nwindow.headerSearch = {\r\n url: \"{% url 'wagtailimages:listing_results' %}\",\r\n targetOutput: \"#image-results\"\r\n}\r\n```\r\n\r\n```html\r\n<div class=\"w-field__input\" data-field-input=\"\">\r\n <svg class=\"icon icon-search w-field__icon\" aria-hidden=\"true\">\r\n <use href=\"#icon-search\"></use>\r\n </svg>\r\n <input\r\n type=\"text\"\r\n name=\"q\"\r\n placeholder=\"Search images\"\r\n data-controller=\"w-search\"\r\n data-action=\"change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search\"\r\n id=\"id_q\"\r\n />\r\n</div>\r\n```\r\n\r\n#### Support `input` only usage\r\n\r\n```html\r\n<div class=\"w-field__input\" data-field-input=\"\">\r\n <svg class=\"icon icon-search w-field__icon\" aria-hidden=\"true\">\r\n <use href=\"#icon-search\"></use>\r\n </svg>\r\n <input\r\n type=\"text\"\r\n name=\"q\"\r\n placeholder=\"Search images\"\r\n data-controller=\"w-search\"\r\n data-action=\"change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search\"\r\n id=\"id_q\"\r\n 
data-w-search-results-value=\"#image-results\"\r\n data-w-search-url-value=\"/admin/images/results/\"\r\n />\r\n</div>\r\n```\r\n\r\n#### Support controlled form with search input as a target\r\n\r\n```html\r\n<form\r\n class=\"col search-form\"\r\n action=\"/admin/images/\"\r\n method=\"get\"\r\n novalidate=\"\"\r\n role=\"search\"\r\n data-controller=\"w-search\"\r\n data-w-search-url-value=\"/admin/images/results/\"\r\n>\r\n <div class=\"w-field__wrapper w-mb-0\" data-field-wrapper=\"\">\r\n <label class=\"w-field__label w-sr-only\" for=\"id_q\" id=\"id_q-label\">Search term</label>\r\n <div class=\"w-field w-field--char_field w-field--text_input\">\r\n <div class=\"w-field__input\" data-field-input=\"\">\r\n <svg class=\"icon icon-search w-field__icon\" aria-hidden=\"true\"><use href=\"#icon-search\"></use></svg>\r\n <input\r\n type=\"text\"\r\n name=\"q\"\r\n placeholder=\"Search images\"\r\n data-w-search-target=\"input\"\r\n data-action=\"change->w-search#search cut->w-search#search keyup->w-search#search paste->w-search#search\"\r\n id=\"id_q\"\r\n />\r\n </div>\r\n </div>\r\n </div>\r\n <div class=\"visuallyhidden\"><input disabled=\"\" type=\"submit\" aria-hidden=\"true\" /></div>\r\n</form>\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\nclass SearchForm(forms.Form):\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n\n q = forms.CharField(\n label=gettext_lazy(\"Search term\"),\n widget=forms.TextInput(),\n required=False,\n )\n", "path": "wagtail/admin/forms/search.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\nclass SearchForm(forms.Form):\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\n \"placeholder\": placeholder,\n \"data-w-swap-target\": \"input\",\n }\n\n q = forms.CharField(\n label=gettext_lazy(\"Search term\"),\n widget=forms.TextInput(),\n required=False,\n )\n", "path": "wagtail/admin/forms/search.py"}]}
| 1,372 | 149 |
gh_patches_debug_23092
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-6430
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tor2web warning page still using outdated pre-SI-redesign resources
## Description
In the SI redesign, we overlooked the Tor2web page, which tries to render the icon from an old location and does not show an icon for the flash warning message.
## Steps to Reproduce
Visit https://demo-source.securedrop.org/tor2web-warning
## Expected Behavior

## Actual Behavior

"Tor Browser" link in tor2web warning is broken
## Description
The "Tor Browser" link in the tor2web warning is broken because it does not specify a protocol, so the browser treats it as a relative link.
## Steps to Reproduce
* Visit `/tor2web-warning` in the SI
* Hover over or click on the "Tor Browser" link, it should send you to a non-existent `/www.torproject.org/projects/torbrowser.html` on the SI's domain.
## Expected Behavior
* Link takes you to Tor Project website.
## Comments
Fix should be as simple as adding "https://" in front.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/source_app/info.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import flask
3 from flask import Blueprint, render_template, send_file, redirect, url_for, flash
4 from flask_babel import gettext
5 import werkzeug
6
7 from io import BytesIO # noqa
8
9 from encryption import EncryptionManager
10 from sdconfig import SDConfig
11 from source_app.utils import get_sourcev3_url
12
13
14 def make_blueprint(config: SDConfig) -> Blueprint:
15 view = Blueprint('info', __name__)
16
17 @view.route('/tor2web-warning')
18 def tor2web_warning() -> flask.Response:
19 flash(gettext("Your connection is not anonymous right now!"), "error")
20 return flask.Response(
21 render_template("tor2web-warning.html", source_url=get_sourcev3_url()),
22 403)
23
24 @view.route('/use-tor')
25 def recommend_tor_browser() -> str:
26 return render_template("use-tor-browser.html")
27
28 @view.route('/public-key')
29 def download_public_key() -> flask.Response:
30 journalist_pubkey = EncryptionManager.get_default().get_journalist_public_key()
31 data = BytesIO(journalist_pubkey.encode('utf-8'))
32 return send_file(data,
33 mimetype="application/pgp-keys",
34 attachment_filename=config.JOURNALIST_KEY + ".asc",
35 as_attachment=True)
36
37 @view.route('/journalist-key')
38 def download_journalist_key() -> werkzeug.wrappers.Response:
39 return redirect(url_for('.download_public_key'), code=301)
40
41 @view.route('/why-public-key')
42 def why_download_public_key() -> str:
43 return render_template("why-public-key.html")
44
45 return view
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/source_app/info.py b/securedrop/source_app/info.py
--- a/securedrop/source_app/info.py
+++ b/securedrop/source_app/info.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
import flask
-from flask import Blueprint, render_template, send_file, redirect, url_for, flash
+from flask import Blueprint, render_template, send_file, redirect, url_for
from flask_babel import gettext
import werkzeug
@@ -8,7 +8,7 @@
from encryption import EncryptionManager
from sdconfig import SDConfig
-from source_app.utils import get_sourcev3_url
+from source_app.utils import get_sourcev3_url, flash_msg
def make_blueprint(config: SDConfig) -> Blueprint:
@@ -16,7 +16,7 @@
@view.route('/tor2web-warning')
def tor2web_warning() -> flask.Response:
- flash(gettext("Your connection is not anonymous right now!"), "error")
+ flash_msg("error", None, gettext("Your connection is not anonymous right now!"))
return flask.Response(
render_template("tor2web-warning.html", source_url=get_sourcev3_url()),
403)
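A hedged sketch of the pattern the patch moves to: a thin wrapper around Flask's `flash()` that always records a category the redesigned templates can map to an icon. Only the call shape visible in the diff (`flash_msg("error", None, message)`) is assumed; this is not SecureDrop's actual helper.

```python
from flask import Flask, flash, render_template_string

app = Flask(__name__)
app.secret_key = "example-only"


def flash_msg(category, declarative, *msg_contents):
    """Flash a message with an explicit category and optional heading."""
    flash({"declarative": declarative, "msg_contents": " ".join(msg_contents)}, category)


@app.route("/tor2web-warning")
def tor2web_warning():
    flash_msg("error", None, "Your connection is not anonymous right now!")
    return render_template_string(
        "{% for cat, m in get_flashed_messages(with_categories=true) %}"
        "<div class='flash flash-{{ cat }}'>{{ m.msg_contents }}</div>"
        "{% endfor %}"
    ), 403


print(app.test_client().get("/tor2web-warning").data.decode())
```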
|
{"golden_diff": "diff --git a/securedrop/source_app/info.py b/securedrop/source_app/info.py\n--- a/securedrop/source_app/info.py\n+++ b/securedrop/source_app/info.py\n@@ -1,6 +1,6 @@\n # -*- coding: utf-8 -*-\n import flask\n-from flask import Blueprint, render_template, send_file, redirect, url_for, flash\n+from flask import Blueprint, render_template, send_file, redirect, url_for\n from flask_babel import gettext\n import werkzeug\n \n@@ -8,7 +8,7 @@\n \n from encryption import EncryptionManager\n from sdconfig import SDConfig\n-from source_app.utils import get_sourcev3_url\n+from source_app.utils import get_sourcev3_url, flash_msg\n \n \n def make_blueprint(config: SDConfig) -> Blueprint:\n@@ -16,7 +16,7 @@\n \n @view.route('/tor2web-warning')\n def tor2web_warning() -> flask.Response:\n- flash(gettext(\"Your connection is not anonymous right now!\"), \"error\")\n+ flash_msg(\"error\", None, gettext(\"Your connection is not anonymous right now!\"))\n return flask.Response(\n render_template(\"tor2web-warning.html\", source_url=get_sourcev3_url()),\n 403)\n", "issue": "Tor2web warning page still using outdated pre-SI-redesign resources\n## Description\r\n\r\nIn the SI redesign, we overlooked the Tor2web page which tries to render an old location for the icon and does not show an icon for the flash warning message.\r\n\r\n## Steps to Reproduce\r\n\r\nVisit https://demo-source.securedrop.org/tor2web-warning\r\n\r\n## Expected Behavior\r\n\r\n\r\n\r\n## Actual Behavior\r\n\r\n\r\n\n\"Tor Browser\" link in tor2web warning is broken\n## Description\r\n\r\nThe \"Tor Browser\" link in the tor2web warning is broken because it does not specify a protocol, so the browser treats it as a relative link.\r\n\r\n## Steps to Reproduce\r\n\r\n* Visit `/tor2web-warning` in the SI\r\n* Hover over or click on the \"Tor Browser\" link, it should send you to a non-existent `/www.torproject.org/projects/torbrowser.html` on the SI's domain.\r\n\r\n## Expected Behavior\r\n\r\n* Link takes you to Tor Project website.\r\n\r\n## Comments\r\n\r\nFix should be as simple as adding \"https://\" in front.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport flask\nfrom flask import Blueprint, render_template, send_file, redirect, url_for, flash\nfrom flask_babel import gettext\nimport werkzeug\n\nfrom io import BytesIO # noqa\n\nfrom encryption import EncryptionManager\nfrom sdconfig import SDConfig\nfrom source_app.utils import get_sourcev3_url\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('info', __name__)\n\n @view.route('/tor2web-warning')\n def tor2web_warning() -> flask.Response:\n flash(gettext(\"Your connection is not anonymous right now!\"), \"error\")\n return flask.Response(\n render_template(\"tor2web-warning.html\", source_url=get_sourcev3_url()),\n 403)\n\n @view.route('/use-tor')\n def recommend_tor_browser() -> str:\n return render_template(\"use-tor-browser.html\")\n\n @view.route('/public-key')\n def download_public_key() -> flask.Response:\n journalist_pubkey = EncryptionManager.get_default().get_journalist_public_key()\n data = BytesIO(journalist_pubkey.encode('utf-8'))\n return send_file(data,\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n @view.route('/journalist-key')\n def download_journalist_key() -> werkzeug.wrappers.Response:\n return redirect(url_for('.download_public_key'), code=301)\n\n @view.route('/why-public-key')\n def why_download_public_key() -> str:\n return 
render_template(\"why-public-key.html\")\n\n return view\n", "path": "securedrop/source_app/info.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport flask\nfrom flask import Blueprint, render_template, send_file, redirect, url_for\nfrom flask_babel import gettext\nimport werkzeug\n\nfrom io import BytesIO # noqa\n\nfrom encryption import EncryptionManager\nfrom sdconfig import SDConfig\nfrom source_app.utils import get_sourcev3_url, flash_msg\n\n\ndef make_blueprint(config: SDConfig) -> Blueprint:\n view = Blueprint('info', __name__)\n\n @view.route('/tor2web-warning')\n def tor2web_warning() -> flask.Response:\n flash_msg(\"error\", None, gettext(\"Your connection is not anonymous right now!\"))\n return flask.Response(\n render_template(\"tor2web-warning.html\", source_url=get_sourcev3_url()),\n 403)\n\n @view.route('/use-tor')\n def recommend_tor_browser() -> str:\n return render_template(\"use-tor-browser.html\")\n\n @view.route('/public-key')\n def download_public_key() -> flask.Response:\n journalist_pubkey = EncryptionManager.get_default().get_journalist_public_key()\n data = BytesIO(journalist_pubkey.encode('utf-8'))\n return send_file(data,\n mimetype=\"application/pgp-keys\",\n attachment_filename=config.JOURNALIST_KEY + \".asc\",\n as_attachment=True)\n\n @view.route('/journalist-key')\n def download_journalist_key() -> werkzeug.wrappers.Response:\n return redirect(url_for('.download_public_key'), code=301)\n\n @view.route('/why-public-key')\n def why_download_public_key() -> str:\n return render_template(\"why-public-key.html\")\n\n return view\n", "path": "securedrop/source_app/info.py"}]}
| 1,037 | 270 |
gh_patches_debug_10569
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2047
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Username change fails silently
When you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.
[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849
Thanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:
The Aspen ticket for this is: gittip/aspen-python#279
Username change fails silently
When you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.
[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849
Thanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:
The Aspen ticket for this is: gittip/aspen-python#279
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/exceptions.py`
Content:
```
1 """
2 This module contains exceptions shared across application code.
3 """
4
5 from __future__ import print_function, unicode_literals
6
7
8
9 class UnknownPlatform(Exception): pass
10
11 class ProblemChangingUsername(Exception):
12 def __str__(self):
13 return self.msg.format(self.args[0])
14
15 class UsernameIsEmpty(ProblemChangingUsername):
16 msg = "You need to provide a username!"
17
18 class UsernameTooLong(ProblemChangingUsername):
19 msg = "The username '{}' is too long."
20
21 # Not passing the potentially unicode characters back because of:
22 # https://github.com/gittip/aspen-python/issues/177
23 class UsernameContainsInvalidCharacters(ProblemChangingUsername):
24 msg = "That username contains invalid characters."
25
26 class UsernameIsRestricted(ProblemChangingUsername):
27 msg = "The username '{}' is restricted."
28
29 class UsernameAlreadyTaken(ProblemChangingUsername):
30 msg = "The username '{}' is already taken."
31
32 class TooGreedy(Exception): pass
33 class NoSelfTipping(Exception): pass
34 class BadAmount(Exception): pass
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gittip/exceptions.py b/gittip/exceptions.py
--- a/gittip/exceptions.py
+++ b/gittip/exceptions.py
@@ -18,10 +18,8 @@
class UsernameTooLong(ProblemChangingUsername):
msg = "The username '{}' is too long."
-# Not passing the potentially unicode characters back because of:
-# https://github.com/gittip/aspen-python/issues/177
class UsernameContainsInvalidCharacters(ProblemChangingUsername):
- msg = "That username contains invalid characters."
+ msg = "The username '{}' contains invalid characters."
class UsernameIsRestricted(ProblemChangingUsername):
msg = "The username '{}' is restricted."
|
{"golden_diff": "diff --git a/gittip/exceptions.py b/gittip/exceptions.py\n--- a/gittip/exceptions.py\n+++ b/gittip/exceptions.py\n@@ -18,10 +18,8 @@\n class UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n \n-# Not passing the potentially unicode characters back because of:\n-# https://github.com/gittip/aspen-python/issues/177\n class UsernameContainsInvalidCharacters(ProblemChangingUsername):\n- msg = \"That username contains invalid characters.\"\n+ msg = \"The username '{}' contains invalid characters.\"\n \n class UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n", "issue": "Username change fails silently\nWhen you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.\n\n[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849\n\nThanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:\n\nThe Aspen ticket for this is: gittip/aspen-python#279\n\nUsername change fails silently\nWhen you change your username, you're querying [`username.json.spt`](https://github.com/gittip/www.gittip.com/blob/0464c57465aed49a95a2c546f0d9987ad5b9b3fa/www/%25username/username.json.spt). If the desired username is invalid, we respond back with a user-friendly message (though the UI is ugly). Unfortunately, this behavior is currently broken because it returns HTML instead of the expected JSON.\n\n[IRC](https://botbot.me/freenode/gittip/msg/9518377/), from working on #1849\n\nThanks to @thomasboyt for [reporting the problem](https://botbot.me/freenode/gittip/msg/9517625/) :heart:\n\nThe Aspen ticket for this is: gittip/aspen-python#279\n\n", "before_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\n\nclass UnknownPlatform(Exception): pass\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\n# Not passing the potentially unicode characters back because of:\n# https://github.com/gittip/aspen-python/issues/177\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"That username contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass BadAmount(Exception): pass\n", "path": "gittip/exceptions.py"}], "after_files": [{"content": "\"\"\"\nThis module contains exceptions shared across application code.\n\"\"\"\n\nfrom __future__ import print_function, unicode_literals\n\n\n\nclass UnknownPlatform(Exception): pass\n\nclass ProblemChangingUsername(Exception):\n def __str__(self):\n return self.msg.format(self.args[0])\n\nclass UsernameIsEmpty(ProblemChangingUsername):\n msg = \"You need to provide a 
username!\"\n\nclass UsernameTooLong(ProblemChangingUsername):\n msg = \"The username '{}' is too long.\"\n\nclass UsernameContainsInvalidCharacters(ProblemChangingUsername):\n msg = \"The username '{}' contains invalid characters.\"\n\nclass UsernameIsRestricted(ProblemChangingUsername):\n msg = \"The username '{}' is restricted.\"\n\nclass UsernameAlreadyTaken(ProblemChangingUsername):\n msg = \"The username '{}' is already taken.\"\n\nclass TooGreedy(Exception): pass\nclass NoSelfTipping(Exception): pass\nclass BadAmount(Exception): pass\n", "path": "gittip/exceptions.py"}]}
| 954 | 153 |
gh_patches_debug_60356
|
rasdani/github-patches
|
git_diff
|
blaze__blaze-1037
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cytoolz is required to import blaze, but it's not listed in requirements_strict.txt
In a fresh virtualenv, `pip install blaze && python -c "import blaze"` fails with:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ssanderson/.virtualenvs/blaze/local/lib/python2.7/site-packages/blaze/__init__.py", line 18, in <module>
from .utils import ignoring
File "/home/ssanderson/.virtualenvs/blaze/local/lib/python2.7/site-packages/blaze/utils.py", line 7, in <module>
from cytoolz import nth
ImportError: No module named cytoolz
```
Is there a reason cytoolz isn't in the strict requirements if it's necessary to even import the top-level module?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `blaze/utils.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import datetime
5 from functools import wraps
6
7 from cytoolz import nth
8 from itertools import islice
9 from collections import Iterator
10 from multiprocessing.pool import ThreadPool
11
12 # these are used throughout blaze, don't remove them
13 from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
14
15 import psutil
16 import numpy as np
17
18 # Imports that replace older utils.
19 from .compatibility import map, zip
20
21 from .dispatch import dispatch
22
23 thread_pool = ThreadPool(psutil.NUM_CPUS)
24
25
26 def nth_list(n, seq):
27 """
28
29 >>> tuple(nth_list([0, 1, 4], 'Hello'))
30 ('H', 'e', 'o')
31 >>> tuple(nth_list([4, 1, 0], 'Hello'))
32 ('o', 'e', 'H')
33 >>> tuple(nth_list([0, 0, 0], 'Hello'))
34 ('H', 'H', 'H')
35 """
36 seq = iter(seq)
37
38 result = []
39 old = 0
40 item = next(seq)
41 for index in sorted(n):
42 for i in range(index - old):
43 item = next(seq)
44 result.append(item)
45 old = index
46
47 order = [x[1] for x in sorted(zip(n, range(len(n))))]
48 return (result[i] for i in order)
49
50
51 def get(ind, coll, lazy=False):
52 """
53
54 >>> get(0, 'Hello')
55 'H'
56
57 >>> get([1, 0], 'Hello')
58 ('e', 'H')
59
60 >>> get(slice(1, 4), 'Hello')
61 ('e', 'l', 'l')
62
63 >>> get(slice(1, 4), 'Hello', lazy=True)
64 <itertools.islice object at ...>
65 """
66 if isinstance(ind, list):
67 result = nth_list(ind, coll)
68 elif isinstance(ind, slice):
69 result = islice(coll, ind.start, ind.stop, ind.step)
70 else:
71 if isinstance(coll, Iterator):
72 result = nth(ind, coll)
73 else:
74 result = coll[ind]
75 if not lazy and isinstance(result, Iterator):
76 result = tuple(result)
77 return result
78
79
80 def ndget(ind, data):
81 """
82 Get from N-Dimensional getable
83
84 Can index with elements, lists, or slices. Mimic's numpy fancy indexing on
85 generic indexibles.
86
87 >>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
88 >>> ndget(0, data)
89 [[1, 2], [3, 4]]
90 >>> ndget((0, 1), data)
91 [3, 4]
92 >>> ndget((0, 0, 0), data)
93 1
94 >>> ndget((slice(0, 2), [0, 1], 0), data)
95 ((1, 3), (5, 7))
96 """
97 if isinstance(ind, tuple) and len(ind) == 1:
98 ind = ind[0]
99 if not isinstance(ind, tuple):
100 return get(ind, data)
101 result = get(ind[0], data)
102 if isinstance(ind[0], (list, slice)):
103 return type(result)(ndget(ind[1:], row) for row in result)
104 else:
105 return ndget(ind[1:], result)
106
107
108 def normalize_to_date(dt):
109 if isinstance(dt, datetime.datetime) and not dt.time():
110 return dt.date()
111 else:
112 return dt
113
114
115 def assert_allclose(lhs, rhs):
116 for tb in map(zip, lhs, rhs):
117 for left, right in tb:
118 if isinstance(left, (np.floating, float)):
119 # account for nans
120 assert np.all(np.isclose(left, right, equal_nan=True))
121 continue
122 if isinstance(left, datetime.datetime):
123 left = normalize_to_date(left)
124 if isinstance(right, datetime.datetime):
125 right = normalize_to_date(right)
126 assert left == right
127
128
129 def example(filename, datapath=os.path.join('examples', 'data')):
130 import blaze
131 return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
132
133
134 def available_memory():
135 return psutil.virtual_memory().available
136
137
138 def listpack(x):
139 """
140 >>> listpack(1)
141 [1]
142 >>> listpack((1, 2))
143 [1, 2]
144 >>> listpack([1, 2])
145 [1, 2]
146 """
147 if isinstance(x, tuple):
148 return list(x)
149 elif isinstance(x, list):
150 return x
151 else:
152 return [x]
153
154
155 @dispatch(datetime.datetime)
156 def json_dumps(dt):
157 s = dt.isoformat()
158 if not dt.tzname():
159 s = s + 'Z'
160 return s
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/blaze/utils.py b/blaze/utils.py
--- a/blaze/utils.py
+++ b/blaze/utils.py
@@ -4,7 +4,11 @@
import datetime
from functools import wraps
-from cytoolz import nth
+try:
+ from cytoolz import nth
+except ImportError:
+ from toolz import nth
+
from itertools import islice
from collections import Iterator
from multiprocessing.pool import ThreadPool
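The fix follows the usual optional-speedup import pattern: prefer the Cython-accelerated `cytoolz` when it happens to be installed, and fall back to the pure-Python `toolz` otherwise. A minimal sketch, assuming at least `toolz` is available:

```python
try:
    from cytoolz import nth  # fast Cython implementation, optional extra
except ImportError:
    from toolz import nth    # pure-Python fallback

print(nth(2, iter("Hello")))  # 'l'
```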
|
{"golden_diff": "diff --git a/blaze/utils.py b/blaze/utils.py\n--- a/blaze/utils.py\n+++ b/blaze/utils.py\n@@ -4,7 +4,11 @@\n import datetime\n from functools import wraps\n \n-from cytoolz import nth\n+try:\n+ from cytoolz import nth\n+except ImportError:\n+ from toolz import nth\n+\n from itertools import islice\n from collections import Iterator\n from multiprocessing.pool import ThreadPool\n", "issue": "cytoolz is required to import blaze, but it's not listed in requirements_strict.txt\nIn a fresh virtualenv, `pip install blaze && python -c \"import blaze\"` fails with:\n\n```\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/home/ssanderson/.virtualenvs/blaze/local/lib/python2.7/site-packages/blaze/__init__.py\", line 18, in <module>\n from .utils import ignoring\n File \"/home/ssanderson/.virtualenvs/blaze/local/lib/python2.7/site-packages/blaze/utils.py\", line 7, in <module>\n from cytoolz import nth\nImportError: No module named cytoolz\n```\n\nIs there a reason cytoolz isn't in the strict requirements if it's necessary to even import the top-level module?\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport datetime\nfrom functools import wraps\n\nfrom cytoolz import nth\nfrom itertools import islice\nfrom collections import Iterator\nfrom multiprocessing.pool import ThreadPool\n\n# these are used throughout blaze, don't remove them\nfrom odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring\n\nimport psutil\nimport numpy as np\n\n# Imports that replace older utils.\nfrom .compatibility import map, zip\n\nfrom .dispatch import dispatch\n\nthread_pool = ThreadPool(psutil.NUM_CPUS)\n\n\ndef nth_list(n, seq):\n \"\"\"\n\n >>> tuple(nth_list([0, 1, 4], 'Hello'))\n ('H', 'e', 'o')\n >>> tuple(nth_list([4, 1, 0], 'Hello'))\n ('o', 'e', 'H')\n >>> tuple(nth_list([0, 0, 0], 'Hello'))\n ('H', 'H', 'H')\n \"\"\"\n seq = iter(seq)\n\n result = []\n old = 0\n item = next(seq)\n for index in sorted(n):\n for i in range(index - old):\n item = next(seq)\n result.append(item)\n old = index\n\n order = [x[1] for x in sorted(zip(n, range(len(n))))]\n return (result[i] for i in order)\n\n\ndef get(ind, coll, lazy=False):\n \"\"\"\n\n >>> get(0, 'Hello')\n 'H'\n\n >>> get([1, 0], 'Hello')\n ('e', 'H')\n\n >>> get(slice(1, 4), 'Hello')\n ('e', 'l', 'l')\n\n >>> get(slice(1, 4), 'Hello', lazy=True)\n <itertools.islice object at ...>\n \"\"\"\n if isinstance(ind, list):\n result = nth_list(ind, coll)\n elif isinstance(ind, slice):\n result = islice(coll, ind.start, ind.stop, ind.step)\n else:\n if isinstance(coll, Iterator):\n result = nth(ind, coll)\n else:\n result = coll[ind]\n if not lazy and isinstance(result, Iterator):\n result = tuple(result)\n return result\n\n\ndef ndget(ind, data):\n \"\"\"\n Get from N-Dimensional getable\n\n Can index with elements, lists, or slices. 
Mimic's numpy fancy indexing on\n generic indexibles.\n\n >>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]\n >>> ndget(0, data)\n [[1, 2], [3, 4]]\n >>> ndget((0, 1), data)\n [3, 4]\n >>> ndget((0, 0, 0), data)\n 1\n >>> ndget((slice(0, 2), [0, 1], 0), data)\n ((1, 3), (5, 7))\n \"\"\"\n if isinstance(ind, tuple) and len(ind) == 1:\n ind = ind[0]\n if not isinstance(ind, tuple):\n return get(ind, data)\n result = get(ind[0], data)\n if isinstance(ind[0], (list, slice)):\n return type(result)(ndget(ind[1:], row) for row in result)\n else:\n return ndget(ind[1:], result)\n\n\ndef normalize_to_date(dt):\n if isinstance(dt, datetime.datetime) and not dt.time():\n return dt.date()\n else:\n return dt\n\n\ndef assert_allclose(lhs, rhs):\n for tb in map(zip, lhs, rhs):\n for left, right in tb:\n if isinstance(left, (np.floating, float)):\n # account for nans\n assert np.all(np.isclose(left, right, equal_nan=True))\n continue\n if isinstance(left, datetime.datetime):\n left = normalize_to_date(left)\n if isinstance(right, datetime.datetime):\n right = normalize_to_date(right)\n assert left == right\n\n\ndef example(filename, datapath=os.path.join('examples', 'data')):\n import blaze\n return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)\n\n\ndef available_memory():\n return psutil.virtual_memory().available\n\n\ndef listpack(x):\n \"\"\"\n >>> listpack(1)\n [1]\n >>> listpack((1, 2))\n [1, 2]\n >>> listpack([1, 2])\n [1, 2]\n \"\"\"\n if isinstance(x, tuple):\n return list(x)\n elif isinstance(x, list):\n return x\n else:\n return [x]\n\n\n@dispatch(datetime.datetime)\ndef json_dumps(dt):\n s = dt.isoformat()\n if not dt.tzname():\n s = s + 'Z'\n return s\n", "path": "blaze/utils.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport datetime\nfrom functools import wraps\n\ntry:\n from cytoolz import nth\nexcept ImportError:\n from toolz import nth\n\nfrom itertools import islice\nfrom collections import Iterator\nfrom multiprocessing.pool import ThreadPool\n\n# these are used throughout blaze, don't remove them\nfrom odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring\n\nimport psutil\nimport numpy as np\n\n# Imports that replace older utils.\nfrom .compatibility import map, zip\n\nfrom .dispatch import dispatch\n\nthread_pool = ThreadPool(psutil.NUM_CPUS)\n\n\ndef nth_list(n, seq):\n \"\"\"\n\n >>> tuple(nth_list([0, 1, 4], 'Hello'))\n ('H', 'e', 'o')\n >>> tuple(nth_list([4, 1, 0], 'Hello'))\n ('o', 'e', 'H')\n >>> tuple(nth_list([0, 0, 0], 'Hello'))\n ('H', 'H', 'H')\n \"\"\"\n seq = iter(seq)\n\n result = []\n old = 0\n item = next(seq)\n for index in sorted(n):\n for i in range(index - old):\n item = next(seq)\n result.append(item)\n old = index\n\n order = [x[1] for x in sorted(zip(n, range(len(n))))]\n return (result[i] for i in order)\n\n\ndef get(ind, coll, lazy=False):\n \"\"\"\n\n >>> get(0, 'Hello')\n 'H'\n\n >>> get([1, 0], 'Hello')\n ('e', 'H')\n\n >>> get(slice(1, 4), 'Hello')\n ('e', 'l', 'l')\n\n >>> get(slice(1, 4), 'Hello', lazy=True)\n <itertools.islice object at ...>\n \"\"\"\n if isinstance(ind, list):\n result = nth_list(ind, coll)\n elif isinstance(ind, slice):\n result = islice(coll, ind.start, ind.stop, ind.step)\n else:\n if isinstance(coll, Iterator):\n result = nth(ind, coll)\n else:\n result = coll[ind]\n if not lazy and isinstance(result, Iterator):\n result = tuple(result)\n return result\n\n\ndef ndget(ind, data):\n \"\"\"\n Get from N-Dimensional 
getable\n\n Can index with elements, lists, or slices. Mimic's numpy fancy indexing on\n generic indexibles.\n\n >>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]\n >>> ndget(0, data)\n [[1, 2], [3, 4]]\n >>> ndget((0, 1), data)\n [3, 4]\n >>> ndget((0, 0, 0), data)\n 1\n >>> ndget((slice(0, 2), [0, 1], 0), data)\n ((1, 3), (5, 7))\n \"\"\"\n if isinstance(ind, tuple) and len(ind) == 1:\n ind = ind[0]\n if not isinstance(ind, tuple):\n return get(ind, data)\n result = get(ind[0], data)\n if isinstance(ind[0], (list, slice)):\n return type(result)(ndget(ind[1:], row) for row in result)\n else:\n return ndget(ind[1:], result)\n\n\ndef normalize_to_date(dt):\n if isinstance(dt, datetime.datetime) and not dt.time():\n return dt.date()\n else:\n return dt\n\n\ndef assert_allclose(lhs, rhs):\n for tb in map(zip, lhs, rhs):\n for left, right in tb:\n if isinstance(left, (np.floating, float)):\n # account for nans\n assert np.all(np.isclose(left, right, equal_nan=True))\n continue\n if isinstance(left, datetime.datetime):\n left = normalize_to_date(left)\n if isinstance(right, datetime.datetime):\n right = normalize_to_date(right)\n assert left == right\n\n\ndef example(filename, datapath=os.path.join('examples', 'data')):\n import blaze\n return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)\n\n\ndef available_memory():\n return psutil.virtual_memory().available\n\n\ndef listpack(x):\n \"\"\"\n >>> listpack(1)\n [1]\n >>> listpack((1, 2))\n [1, 2]\n >>> listpack([1, 2])\n [1, 2]\n \"\"\"\n if isinstance(x, tuple):\n return list(x)\n elif isinstance(x, list):\n return x\n else:\n return [x]\n\n\n@dispatch(datetime.datetime)\ndef json_dumps(dt):\n s = dt.isoformat()\n if not dt.tzname():\n s = s + 'Z'\n return s\n", "path": "blaze/utils.py"}]}
| 1,901 | 96 |
gh_patches_debug_47848
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-404
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rate stars don't work
You should be able to click to give a star rating to a book on the book page, but it doesn't do anything.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/activitypub/note.py`
Content:
```
1 ''' note serializer and children thereof '''
2 from dataclasses import dataclass, field
3 from typing import Dict, List
4
5 from .base_activity import ActivityObject, Link
6 from .image import Image
7
8 @dataclass(init=False)
9 class Tombstone(ActivityObject):
10 ''' the placeholder for a deleted status '''
11 published: str
12 deleted: str
13 type: str = 'Tombstone'
14
15
16 @dataclass(init=False)
17 class Note(ActivityObject):
18 ''' Note activity '''
19 published: str
20 attributedTo: str
21 content: str
22 to: List[str] = field(default_factory=lambda: [])
23 cc: List[str] = field(default_factory=lambda: [])
24 replies: Dict = field(default_factory=lambda: {})
25 inReplyTo: str = ''
26 summary: str = ''
27 tag: List[Link] = field(default_factory=lambda: [])
28 attachment: List[Image] = field(default_factory=lambda: [])
29 sensitive: bool = False
30 type: str = 'Note'
31
32
33 @dataclass(init=False)
34 class Article(Note):
35 ''' what's an article except a note with more fields '''
36 name: str
37 type: str = 'Article'
38
39
40 @dataclass(init=False)
41 class GeneratedNote(Note):
42 ''' just a re-typed note '''
43 type: str = 'GeneratedNote'
44
45
46 @dataclass(init=False)
47 class Comment(Note):
48 ''' like a note but with a book '''
49 inReplyToBook: str
50 type: str = 'Comment'
51
52
53 @dataclass(init=False)
54 class Review(Comment):
55 ''' a full book review '''
56 name: str
57 rating: int = None
58 type: str = 'Review'
59
60
61 @dataclass(init=False)
62 class Quotation(Comment):
63 ''' a quote and commentary on a book '''
64 quote: str
65 type: str = 'Quotation'
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/activitypub/note.py b/bookwyrm/activitypub/note.py
--- a/bookwyrm/activitypub/note.py
+++ b/bookwyrm/activitypub/note.py
@@ -53,7 +53,7 @@
@dataclass(init=False)
class Review(Comment):
''' a full book review '''
- name: str
+ name: str = None
rating: int = None
type: str = 'Review'
|
{"golden_diff": "diff --git a/bookwyrm/activitypub/note.py b/bookwyrm/activitypub/note.py\n--- a/bookwyrm/activitypub/note.py\n+++ b/bookwyrm/activitypub/note.py\n@@ -53,7 +53,7 @@\n @dataclass(init=False)\n class Review(Comment):\n ''' a full book review '''\n- name: str\n+ name: str = None\n rating: int = None\n type: str = 'Review'\n", "issue": "Rate stars don't work\nYou should be able to click to give a star rating to a book on the book page, it doesn't do anything.\n", "before_files": [{"content": "''' note serializer and children thereof '''\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List\n\nfrom .base_activity import ActivityObject, Link\nfrom .image import Image\n\n@dataclass(init=False)\nclass Tombstone(ActivityObject):\n ''' the placeholder for a deleted status '''\n published: str\n deleted: str\n type: str = 'Tombstone'\n\n\n@dataclass(init=False)\nclass Note(ActivityObject):\n ''' Note activity '''\n published: str\n attributedTo: str\n content: str\n to: List[str] = field(default_factory=lambda: [])\n cc: List[str] = field(default_factory=lambda: [])\n replies: Dict = field(default_factory=lambda: {})\n inReplyTo: str = ''\n summary: str = ''\n tag: List[Link] = field(default_factory=lambda: [])\n attachment: List[Image] = field(default_factory=lambda: [])\n sensitive: bool = False\n type: str = 'Note'\n\n\n@dataclass(init=False)\nclass Article(Note):\n ''' what's an article except a note with more fields '''\n name: str\n type: str = 'Article'\n\n\n@dataclass(init=False)\nclass GeneratedNote(Note):\n ''' just a re-typed note '''\n type: str = 'GeneratedNote'\n\n\n@dataclass(init=False)\nclass Comment(Note):\n ''' like a note but with a book '''\n inReplyToBook: str\n type: str = 'Comment'\n\n\n@dataclass(init=False)\nclass Review(Comment):\n ''' a full book review '''\n name: str\n rating: int = None\n type: str = 'Review'\n\n\n@dataclass(init=False)\nclass Quotation(Comment):\n ''' a quote and commentary on a book '''\n quote: str\n type: str = 'Quotation'\n", "path": "bookwyrm/activitypub/note.py"}], "after_files": [{"content": "''' note serializer and children thereof '''\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List\n\nfrom .base_activity import ActivityObject, Link\nfrom .image import Image\n\n@dataclass(init=False)\nclass Tombstone(ActivityObject):\n ''' the placeholder for a deleted status '''\n published: str\n deleted: str\n type: str = 'Tombstone'\n\n\n@dataclass(init=False)\nclass Note(ActivityObject):\n ''' Note activity '''\n published: str\n attributedTo: str\n content: str\n to: List[str] = field(default_factory=lambda: [])\n cc: List[str] = field(default_factory=lambda: [])\n replies: Dict = field(default_factory=lambda: {})\n inReplyTo: str = ''\n summary: str = ''\n tag: List[Link] = field(default_factory=lambda: [])\n attachment: List[Image] = field(default_factory=lambda: [])\n sensitive: bool = False\n type: str = 'Note'\n\n\n@dataclass(init=False)\nclass Article(Note):\n ''' what's an article except a note with more fields '''\n name: str\n type: str = 'Article'\n\n\n@dataclass(init=False)\nclass GeneratedNote(Note):\n ''' just a re-typed note '''\n type: str = 'GeneratedNote'\n\n\n@dataclass(init=False)\nclass Comment(Note):\n ''' like a note but with a book '''\n inReplyToBook: str\n type: str = 'Comment'\n\n\n@dataclass(init=False)\nclass Review(Comment):\n ''' a full book review '''\n name: str = None\n rating: int = None\n type: str = 'Review'\n\n\n@dataclass(init=False)\nclass 
Quotation(Comment):\n ''' a quote and commentary on a book '''\n quote: str\n type: str = 'Quotation'\n", "path": "bookwyrm/activitypub/note.py"}]}
| 811 | 103 |
gh_patches_debug_18295
|
rasdani/github-patches
|
git_diff
|
avocado-framework__avocado-5562
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle "could not import module" errors gracefully
**Describe the bug**
Avocado does not handle "could not import module" errors very gracefully, with error messages that are quite cryptic.
**Steps to reproduce**
Write a valid `avocado-instrumented` test, but with an invalid import. Example:
```python
from avocado import Test
import foo
class PassTest(Test):
"""
Example test that passes.
:avocado: tags=fast
"""
def test(self):
"""
A test simply doesn't have to fail in order to pass
"""
```
And run it:
```
$ avocado run examples/tests/passtest.py
JOB ID : 3fee9803715e414a16c3dcf1ddb9ff2f6dc6c0bd
JOB LOG : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/job.log
(1/1) examples/tests/passtest.py:PassTest.test: STARTED
(1/1) examples/tests/passtest.py:PassTest.test: ERROR: Test.__init__() got an unexpected keyword argument 'run.results_dir' (0.01 s)
RESULTS : PASS 0 | ERROR 1 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0
JOB HTML : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/results.html
JOB TIME : 1.47 s
```
**Expected behavior**
Instead of "unexpected argument..." a more clear error message such as: "failed to import the file containing the test" or something similar.
**Current behavior**
From original reporter @jnsnow:
```
(08/27) tests/protocol.py:Connect.testBadUNIX: ERROR:
Test.__init__() got an unexpected keyword argument 'run.results_dir'
(0.01 s)
```
**System information (please complete the following information):**
- OS: ```LSB Version: :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: Fedora
Description: Fedora release 36 (Thirty Six)
Release: 36
Codename: ThirtySix```
- Avocado version: 5a0c5b2348da450397287a0954e4c335c0d590a9
- Avocado installation method: git
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `avocado/core/utils/loader.py`
Content:
```
1 import importlib
2 import inspect
3 import os
4 import sys
5
6 from avocado.core import test
7 from avocado.utils import stacktrace
8
9
10 class TestError(test.Test):
11 """
12 Generic test error.
13 """
14
15 def __init__(self, *args, **kwargs):
16 exception = kwargs.pop("exception")
17 test.Test.__init__(self, *args, **kwargs)
18 self.exception = exception
19
20 def test(self):
21 self.error(self.exception)
22
23
24 def load_test(test_factory):
25 """
26 Load test from the test factory.
27
28 :param test_factory: a pair of test class and parameters.
29 :type test_factory: tuple
30 :return: an instance of :class:`avocado.core.test.Test`.
31 """
32 test_class, test_parameters = test_factory
33 if "modulePath" in test_parameters:
34 test_path = test_parameters.pop("modulePath")
35 else:
36 test_path = None
37 if isinstance(test_class, str):
38 module_name = os.path.basename(test_path).split(".")[0]
39 test_module_dir = os.path.abspath(os.path.dirname(test_path))
40 # Tests with local dir imports need this
41 try:
42 sys.path.insert(0, test_module_dir)
43 test_module = importlib.import_module(module_name)
44 except: # pylint: disable=W0702
45 # On load_module exception we fake the test class and pass
46 # the exc_info as parameter to be logged.
47 test_parameters["methodName"] = "test"
48 exception = stacktrace.prepare_exc_info(sys.exc_info())
49 test_parameters["exception"] = exception
50 return TestError(**test_parameters)
51 finally:
52 if test_module_dir in sys.path:
53 sys.path.remove(test_module_dir)
54 for _, obj in inspect.getmembers(test_module):
55 if (
56 inspect.isclass(obj)
57 and obj.__name__ == test_class
58 and inspect.getmodule(obj) == test_module
59 ):
60 if issubclass(obj, test.Test):
61 test_class = obj
62 break
63 if "run.results_dir" in test_parameters:
64 test_parameters["base_logdir"] = test_parameters.pop("run.results_dir")
65 test_instance = test_class(**test_parameters)
66
67 return test_instance
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/avocado/core/utils/loader.py b/avocado/core/utils/loader.py
--- a/avocado/core/utils/loader.py
+++ b/avocado/core/utils/loader.py
@@ -30,6 +30,8 @@
:return: an instance of :class:`avocado.core.test.Test`.
"""
test_class, test_parameters = test_factory
+ if "run.results_dir" in test_parameters:
+ test_parameters["base_logdir"] = test_parameters.pop("run.results_dir")
if "modulePath" in test_parameters:
test_path = test_parameters.pop("modulePath")
else:
@@ -60,8 +62,6 @@
if issubclass(obj, test.Test):
test_class = obj
break
- if "run.results_dir" in test_parameters:
- test_parameters["base_logdir"] = test_parameters.pop("run.results_dir")
test_instance = test_class(**test_parameters)
return test_instance
|
{"golden_diff": "diff --git a/avocado/core/utils/loader.py b/avocado/core/utils/loader.py\n--- a/avocado/core/utils/loader.py\n+++ b/avocado/core/utils/loader.py\n@@ -30,6 +30,8 @@\n :return: an instance of :class:`avocado.core.test.Test`.\n \"\"\"\n test_class, test_parameters = test_factory\n+ if \"run.results_dir\" in test_parameters:\n+ test_parameters[\"base_logdir\"] = test_parameters.pop(\"run.results_dir\")\n if \"modulePath\" in test_parameters:\n test_path = test_parameters.pop(\"modulePath\")\n else:\n@@ -60,8 +62,6 @@\n if issubclass(obj, test.Test):\n test_class = obj\n break\n- if \"run.results_dir\" in test_parameters:\n- test_parameters[\"base_logdir\"] = test_parameters.pop(\"run.results_dir\")\n test_instance = test_class(**test_parameters)\n \n return test_instance\n", "issue": "Handle \"could not import module\" errors gracefully\n**Describe the bug**\r\nAvocado does not handle \"could not import module\" errors very gracefully, with error messages that are quite cryptic.\r\n\r\n**Steps to reproduce**\r\nWrite a valid `avocado-instrumented` test, but with an invalid import. Example:\r\n\r\n```python\r\nfrom avocado import Test\r\n\r\nimport foo\r\n\r\n\r\nclass PassTest(Test):\r\n\r\n \"\"\"\r\n Example test that passes.\r\n\r\n :avocado: tags=fast\r\n \"\"\"\r\n\r\n def test(self):\r\n \"\"\"\r\n A test simply doesn't have to fail in order to pass\r\n \"\"\"\r\n```\r\n\r\nAnd run it:\r\n\r\n```\r\n$ avocado run examples/tests/passtest.py \r\nJOB ID : 3fee9803715e414a16c3dcf1ddb9ff2f6dc6c0bd\r\nJOB LOG : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/job.log\r\n (1/1) examples/tests/passtest.py:PassTest.test: STARTED\r\n (1/1) examples/tests/passtest.py:PassTest.test: ERROR: Test.__init__() got an unexpected keyword argument 'run.results_dir' (0.01 s)\r\nRESULTS : PASS 0 | ERROR 1 | FAIL 0 | SKIP 0 | WARN 0 | INTERRUPT 0 | CANCEL 0\r\nJOB HTML : /home/cleber/avocado/job-results/job-2022-08-11T10.24-3fee980/results.html\r\nJOB TIME : 1.47 s\r\n```\r\n\r\n**Expected behavior**\r\nInstead of \"unexpected argument...\" a more clear error message such as: \"failed to import the file containing the test\" or something similar. 
\r\n\r\n**Current behavior**\r\n\r\nFrom original reporter @jnsnow:\r\n\r\n```\r\n(08/27) tests/protocol.py:Connect.testBadUNIX: ERROR:\r\n Test.__init__() got an unexpected keyword argument 'run.results_dir'\r\n (0.01 s)\r\n```\r\n\r\n**System information (please complete the following information):**\r\n - OS: ```LSB Version:\t:core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch\r\nDistributor ID:\tFedora\r\nDescription:\tFedora release 36 (Thirty Six)\r\nRelease:\t36\r\nCodename:\tThirtySix```\r\n - Avocado version: 5a0c5b2348da450397287a0954e4c335c0d590a9\r\n - Avocado installation method: git\r\n\n", "before_files": [{"content": "import importlib\nimport inspect\nimport os\nimport sys\n\nfrom avocado.core import test\nfrom avocado.utils import stacktrace\n\n\nclass TestError(test.Test):\n \"\"\"\n Generic test error.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n exception = kwargs.pop(\"exception\")\n test.Test.__init__(self, *args, **kwargs)\n self.exception = exception\n\n def test(self):\n self.error(self.exception)\n\n\ndef load_test(test_factory):\n \"\"\"\n Load test from the test factory.\n\n :param test_factory: a pair of test class and parameters.\n :type test_factory: tuple\n :return: an instance of :class:`avocado.core.test.Test`.\n \"\"\"\n test_class, test_parameters = test_factory\n if \"modulePath\" in test_parameters:\n test_path = test_parameters.pop(\"modulePath\")\n else:\n test_path = None\n if isinstance(test_class, str):\n module_name = os.path.basename(test_path).split(\".\")[0]\n test_module_dir = os.path.abspath(os.path.dirname(test_path))\n # Tests with local dir imports need this\n try:\n sys.path.insert(0, test_module_dir)\n test_module = importlib.import_module(module_name)\n except: # pylint: disable=W0702\n # On load_module exception we fake the test class and pass\n # the exc_info as parameter to be logged.\n test_parameters[\"methodName\"] = \"test\"\n exception = stacktrace.prepare_exc_info(sys.exc_info())\n test_parameters[\"exception\"] = exception\n return TestError(**test_parameters)\n finally:\n if test_module_dir in sys.path:\n sys.path.remove(test_module_dir)\n for _, obj in inspect.getmembers(test_module):\n if (\n inspect.isclass(obj)\n and obj.__name__ == test_class\n and inspect.getmodule(obj) == test_module\n ):\n if issubclass(obj, test.Test):\n test_class = obj\n break\n if \"run.results_dir\" in test_parameters:\n test_parameters[\"base_logdir\"] = test_parameters.pop(\"run.results_dir\")\n test_instance = test_class(**test_parameters)\n\n return test_instance\n", "path": "avocado/core/utils/loader.py"}], "after_files": [{"content": "import importlib\nimport inspect\nimport os\nimport sys\n\nfrom avocado.core import test\nfrom avocado.utils import stacktrace\n\n\nclass TestError(test.Test):\n \"\"\"\n Generic test error.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n exception = kwargs.pop(\"exception\")\n test.Test.__init__(self, *args, **kwargs)\n self.exception = exception\n\n def test(self):\n self.error(self.exception)\n\n\ndef load_test(test_factory):\n \"\"\"\n Load test from the test factory.\n\n :param test_factory: a pair of test class and parameters.\n :type test_factory: tuple\n :return: an instance of :class:`avocado.core.test.Test`.\n \"\"\"\n test_class, test_parameters = test_factory\n if \"run.results_dir\" in test_parameters:\n test_parameters[\"base_logdir\"] = 
test_parameters.pop(\"run.results_dir\")\n if \"modulePath\" in test_parameters:\n test_path = test_parameters.pop(\"modulePath\")\n else:\n test_path = None\n if isinstance(test_class, str):\n module_name = os.path.basename(test_path).split(\".\")[0]\n test_module_dir = os.path.abspath(os.path.dirname(test_path))\n # Tests with local dir imports need this\n try:\n sys.path.insert(0, test_module_dir)\n test_module = importlib.import_module(module_name)\n except: # pylint: disable=W0702\n # On load_module exception we fake the test class and pass\n # the exc_info as parameter to be logged.\n test_parameters[\"methodName\"] = \"test\"\n exception = stacktrace.prepare_exc_info(sys.exc_info())\n test_parameters[\"exception\"] = exception\n return TestError(**test_parameters)\n finally:\n if test_module_dir in sys.path:\n sys.path.remove(test_module_dir)\n for _, obj in inspect.getmembers(test_module):\n if (\n inspect.isclass(obj)\n and obj.__name__ == test_class\n and inspect.getmodule(obj) == test_module\n ):\n if issubclass(obj, test.Test):\n test_class = obj\n break\n test_instance = test_class(**test_parameters)\n\n return test_instance\n", "path": "avocado/core/utils/loader.py"}]}
| 1,522 | 210 |
gh_patches_debug_56084
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5611
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/inference/benchmark_ops/benchmark_rmsnorm.py`
Content:
```
1 import torch
2
3 from colossalai.kernel.kernel_loader import InferenceOpsLoader
4 from colossalai.kernel.triton import rms_layernorm
5
6 try:
7 import triton # noqa
8 except ImportError:
9 print("please install triton from https://github.com/openai/triton")
10
11 inference_ops = InferenceOpsLoader().load()
12
13 # Triton benchmark plot attributions
14 configs = [
15 triton.testing.Benchmark(
16 x_names=["SEQUENCE_TOTAL"],
17 x_vals=[i for i in range(128, 1025, 128)],
18 line_arg="provider",
19 line_vals=[
20 "vllm_rms_layernorm",
21 "triton_rms_layernorm",
22 "cuda_rms_layernorm",
23 "vllm_rms_layernorm_with_residual",
24 "triton_rms_layernorm_with_residual",
25 "cuda_rms_layernorm_with_residual",
26 ],
27 line_names=[
28 "vllm_rms_layernorm",
29 "triton_rms_layernorm",
30 "cuda_rms_layernorm",
31 "vllm_rms_layernorm_with_residual",
32 "triton_rms_layernorm_with_residual",
33 "cuda_rms_layernorm_with_residual",
34 ],
35 styles=[("red", "-"), ("blue", "-"), ("yellow", "-"), ("red", "--"), ("blue", "--"), ("yellow", "--")],
36 ylabel="ms",
37 plot_name=f"RMSNorm benchmarking results",
38 args={"HIDDEN_SIZE": 1024},
39 )
40 ]
41
42
43 @triton.testing.perf_report(configs)
44 def benchmark_rms_layernorm(
45 provider: str,
46 SEQUENCE_TOTAL: int,
47 HIDDEN_SIZE: int,
48 ):
49 try:
50 from vllm.model_executor.layers.layernorm import RMSNorm
51 except ImportError:
52 raise ImportError("Please install vllm from https://github.com/vllm-project/vllm")
53
54 warmup = 10
55 rep = 1000
56
57 dtype = torch.float16
58 eps = 1e-5
59 x_shape = (SEQUENCE_TOTAL, HIDDEN_SIZE)
60 w_shape = (x_shape[-1],)
61 residual = torch.rand(x_shape, dtype=dtype, device="cuda")
62 weight = torch.ones(w_shape, dtype=dtype, device="cuda")
63 vllm_norm = RMSNorm(hidden_size=HIDDEN_SIZE, eps=eps).to(dtype=dtype, device="cuda")
64 x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device="cuda")
65 if provider == "vllm_rms_layernorm":
66 fn = lambda: vllm_norm(x)
67 elif provider == "triton_rms_layernorm":
68 fn = lambda: rms_layernorm(x, weight, eps=eps)
69 elif provider == "cuda_rms_layernorm":
70 out = torch.empty_like(x)
71 fn = lambda: inference_ops.rms_layernorm(out, x, weight, eps)
72 elif provider == "vllm_rms_layernorm_with_residual":
73 fn = lambda: vllm_norm(x, residual=residual)
74 elif provider == "triton_rms_layernorm_with_residual":
75 fn = lambda: rms_layernorm(x, weight, eps=eps, residual=residual)
76 elif provider == "cuda_rms_layernorm_with_residual":
77 fn = lambda: inference_ops.fused_add_rms_layernorm(x, residual, weight, eps)
78 else:
79 raise ValueError("Undefined provider.")
80
81 ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
82
83 return ms
84
85
86 if __name__ == "__main__":
87 benchmark_rms_layernorm.run(save_path=".", print_data=True)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/inference/benchmark_ops/benchmark_rmsnorm.py b/examples/inference/benchmark_ops/benchmark_rmsnorm.py
--- a/examples/inference/benchmark_ops/benchmark_rmsnorm.py
+++ b/examples/inference/benchmark_ops/benchmark_rmsnorm.py
@@ -35,7 +35,7 @@
styles=[("red", "-"), ("blue", "-"), ("yellow", "-"), ("red", "--"), ("blue", "--"), ("yellow", "--")],
ylabel="ms",
plot_name=f"RMSNorm benchmarking results",
- args={"HIDDEN_SIZE": 1024},
+ args={"HIDDEN_SIZE": 5120},
)
]
|
{"golden_diff": "diff --git a/examples/inference/benchmark_ops/benchmark_rmsnorm.py b/examples/inference/benchmark_ops/benchmark_rmsnorm.py\n--- a/examples/inference/benchmark_ops/benchmark_rmsnorm.py\n+++ b/examples/inference/benchmark_ops/benchmark_rmsnorm.py\n@@ -35,7 +35,7 @@\n styles=[(\"red\", \"-\"), (\"blue\", \"-\"), (\"yellow\", \"-\"), (\"red\", \"--\"), (\"blue\", \"--\"), (\"yellow\", \"--\")],\n ylabel=\"ms\",\n plot_name=f\"RMSNorm benchmarking results\",\n- args={\"HIDDEN_SIZE\": 1024},\n+ args={\"HIDDEN_SIZE\": 5120},\n )\n ]\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import torch\n\nfrom colossalai.kernel.kernel_loader import InferenceOpsLoader\nfrom colossalai.kernel.triton import rms_layernorm\n\ntry:\n import triton # noqa\nexcept ImportError:\n print(\"please install triton from https://github.com/openai/triton\")\n\ninference_ops = InferenceOpsLoader().load()\n\n# Triton benchmark plot attributions\nconfigs = [\n triton.testing.Benchmark(\n x_names=[\"SEQUENCE_TOTAL\"],\n x_vals=[i for i in range(128, 1025, 128)],\n line_arg=\"provider\",\n line_vals=[\n \"vllm_rms_layernorm\",\n \"triton_rms_layernorm\",\n \"cuda_rms_layernorm\",\n \"vllm_rms_layernorm_with_residual\",\n \"triton_rms_layernorm_with_residual\",\n \"cuda_rms_layernorm_with_residual\",\n ],\n line_names=[\n \"vllm_rms_layernorm\",\n \"triton_rms_layernorm\",\n \"cuda_rms_layernorm\",\n \"vllm_rms_layernorm_with_residual\",\n \"triton_rms_layernorm_with_residual\",\n \"cuda_rms_layernorm_with_residual\",\n ],\n styles=[(\"red\", \"-\"), (\"blue\", \"-\"), (\"yellow\", \"-\"), (\"red\", \"--\"), (\"blue\", \"--\"), (\"yellow\", \"--\")],\n ylabel=\"ms\",\n plot_name=f\"RMSNorm benchmarking results\",\n args={\"HIDDEN_SIZE\": 1024},\n )\n]\n\n\[email protected]_report(configs)\ndef benchmark_rms_layernorm(\n provider: str,\n SEQUENCE_TOTAL: int,\n HIDDEN_SIZE: int,\n):\n try:\n from vllm.model_executor.layers.layernorm import RMSNorm\n except ImportError:\n raise ImportError(\"Please install vllm from https://github.com/vllm-project/vllm\")\n\n warmup = 10\n rep = 1000\n\n dtype = torch.float16\n eps = 1e-5\n x_shape = (SEQUENCE_TOTAL, HIDDEN_SIZE)\n w_shape = (x_shape[-1],)\n residual = torch.rand(x_shape, dtype=dtype, device=\"cuda\")\n weight = torch.ones(w_shape, dtype=dtype, device=\"cuda\")\n vllm_norm = RMSNorm(hidden_size=HIDDEN_SIZE, eps=eps).to(dtype=dtype, device=\"cuda\")\n x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=\"cuda\")\n if provider == \"vllm_rms_layernorm\":\n fn = lambda: vllm_norm(x)\n elif provider == \"triton_rms_layernorm\":\n fn = lambda: rms_layernorm(x, weight, eps=eps)\n elif provider == \"cuda_rms_layernorm\":\n out = torch.empty_like(x)\n fn = lambda: inference_ops.rms_layernorm(out, x, weight, eps)\n elif provider == \"vllm_rms_layernorm_with_residual\":\n fn = lambda: vllm_norm(x, residual=residual)\n elif provider == \"triton_rms_layernorm_with_residual\":\n fn = lambda: rms_layernorm(x, weight, eps=eps, residual=residual)\n elif provider == \"cuda_rms_layernorm_with_residual\":\n fn = lambda: inference_ops.fused_add_rms_layernorm(x, residual, weight, eps)\n else:\n raise ValueError(\"Undefined provider.\")\n\n ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)\n\n return ms\n\n\nif __name__ == \"__main__\":\n benchmark_rms_layernorm.run(save_path=\".\", print_data=True)\n", "path": "examples/inference/benchmark_ops/benchmark_rmsnorm.py"}], "after_files": 
[{"content": "import torch\n\nfrom colossalai.kernel.kernel_loader import InferenceOpsLoader\nfrom colossalai.kernel.triton import rms_layernorm\n\ntry:\n import triton # noqa\nexcept ImportError:\n print(\"please install triton from https://github.com/openai/triton\")\n\ninference_ops = InferenceOpsLoader().load()\n\n# Triton benchmark plot attributions\nconfigs = [\n triton.testing.Benchmark(\n x_names=[\"SEQUENCE_TOTAL\"],\n x_vals=[i for i in range(128, 1025, 128)],\n line_arg=\"provider\",\n line_vals=[\n \"vllm_rms_layernorm\",\n \"triton_rms_layernorm\",\n \"cuda_rms_layernorm\",\n \"vllm_rms_layernorm_with_residual\",\n \"triton_rms_layernorm_with_residual\",\n \"cuda_rms_layernorm_with_residual\",\n ],\n line_names=[\n \"vllm_rms_layernorm\",\n \"triton_rms_layernorm\",\n \"cuda_rms_layernorm\",\n \"vllm_rms_layernorm_with_residual\",\n \"triton_rms_layernorm_with_residual\",\n \"cuda_rms_layernorm_with_residual\",\n ],\n styles=[(\"red\", \"-\"), (\"blue\", \"-\"), (\"yellow\", \"-\"), (\"red\", \"--\"), (\"blue\", \"--\"), (\"yellow\", \"--\")],\n ylabel=\"ms\",\n plot_name=f\"RMSNorm benchmarking results\",\n args={\"HIDDEN_SIZE\": 5120},\n )\n]\n\n\[email protected]_report(configs)\ndef benchmark_rms_layernorm(\n provider: str,\n SEQUENCE_TOTAL: int,\n HIDDEN_SIZE: int,\n):\n try:\n from vllm.model_executor.layers.layernorm import RMSNorm\n except ImportError:\n raise ImportError(\"Please install vllm from https://github.com/vllm-project/vllm\")\n\n warmup = 10\n rep = 1000\n\n dtype = torch.float16\n eps = 1e-5\n x_shape = (SEQUENCE_TOTAL, HIDDEN_SIZE)\n w_shape = (x_shape[-1],)\n residual = torch.rand(x_shape, dtype=dtype, device=\"cuda\")\n weight = torch.ones(w_shape, dtype=dtype, device=\"cuda\")\n vllm_norm = RMSNorm(hidden_size=HIDDEN_SIZE, eps=eps).to(dtype=dtype, device=\"cuda\")\n x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=\"cuda\")\n if provider == \"vllm_rms_layernorm\":\n fn = lambda: vllm_norm(x)\n elif provider == \"triton_rms_layernorm\":\n fn = lambda: rms_layernorm(x, weight, eps=eps)\n elif provider == \"cuda_rms_layernorm\":\n out = torch.empty_like(x)\n fn = lambda: inference_ops.rms_layernorm(out, x, weight, eps)\n elif provider == \"vllm_rms_layernorm_with_residual\":\n fn = lambda: vllm_norm(x, residual=residual)\n elif provider == \"triton_rms_layernorm_with_residual\":\n fn = lambda: rms_layernorm(x, weight, eps=eps, residual=residual)\n elif provider == \"cuda_rms_layernorm_with_residual\":\n fn = lambda: inference_ops.fused_add_rms_layernorm(x, residual, weight, eps)\n else:\n raise ValueError(\"Undefined provider.\")\n\n ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)\n\n return ms\n\n\nif __name__ == \"__main__\":\n benchmark_rms_layernorm.run(save_path=\".\", print_data=True)\n", "path": "examples/inference/benchmark_ops/benchmark_rmsnorm.py"}]}
| 1,314 | 154 |
gh_patches_debug_33315
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-666
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Idea: have a way to specify context via command line
Something like repeat arguments:
```
cookiecutter mytemplate -Cname=my-project -Cgithub-user=ionelmc
```
Or maybe the whole json?
```
cookiecutter mytemplate --context='{"name": "my-project", "github-user": "ionelmc"}'
```
Or variable arguments?
```
cookiecutter mytemplate --context-name=my-project --context-github-user=ionelmc
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/cli.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.cli
6 -----------------
7
8 Main `cookiecutter` CLI.
9 """
10
11 import os
12 import sys
13 import logging
14 import json
15
16 import click
17
18 from cookiecutter import __version__
19 from cookiecutter.config import USER_CONFIG_PATH
20 from cookiecutter.main import cookiecutter
21 from cookiecutter.exceptions import (
22 OutputDirExistsException,
23 InvalidModeException,
24 FailedHookException,
25 UndefinedVariableInTemplate,
26 UnknownExtension,
27 RepositoryNotFound
28 )
29
30 logger = logging.getLogger(__name__)
31
32
33 def version_msg():
34 """Returns the Cookiecutter version, location and Python powering it."""
35 python_version = sys.version[:3]
36 location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
37 message = u'Cookiecutter %(version)s from {} (Python {})'
38 return message.format(location, python_version)
39
40
41 @click.command(context_settings=dict(help_option_names=[u'-h', u'--help']))
42 @click.version_option(__version__, u'-V', u'--version', message=version_msg())
43 @click.argument(u'template')
44 @click.option(
45 u'--no-input', is_flag=True,
46 help=u'Do not prompt for parameters and only use cookiecutter.json '
47 u'file content',
48 )
49 @click.option(
50 u'-c', u'--checkout',
51 help=u'branch, tag or commit to checkout after git clone',
52 )
53 @click.option(
54 '-v', '--verbose',
55 is_flag=True, help='Print debug information', default=False
56 )
57 @click.option(
58 u'--replay', is_flag=True,
59 help=u'Do not prompt for parameters and only use information entered '
60 u'previously',
61 )
62 @click.option(
63 u'-f', u'--overwrite-if-exists', is_flag=True,
64 help=u'Overwrite the contents of the output directory if it already exists'
65 )
66 @click.option(
67 u'-o', u'--output-dir', default='.', type=click.Path(),
68 help=u'Where to output the generated project dir into'
69 )
70 @click.option(
71 u'--config-file', type=click.Path(), default=USER_CONFIG_PATH,
72 help=u'User configuration file'
73 )
74 @click.option(
75 u'--default-config', is_flag=True,
76 help=u'Do not load a config file. Use the defaults instead'
77 )
78 def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,
79 output_dir, config_file, default_config):
80 """Create a project from a Cookiecutter project template (TEMPLATE)."""
81 if verbose:
82 logging.basicConfig(
83 format=u'%(levelname)s %(filename)s: %(message)s',
84 level=logging.DEBUG
85 )
86 else:
87 # Log info and above to console
88 logging.basicConfig(
89 format=u'%(levelname)s: %(message)s',
90 level=logging.INFO
91 )
92
93 try:
94 # If you _need_ to support a local template in a directory
95 # called 'help', use a qualified path to the directory.
96 if template == u'help':
97 click.echo(click.get_current_context().get_help())
98 sys.exit(0)
99
100 user_config = None if default_config else config_file
101
102 cookiecutter(
103 template, checkout, no_input,
104 replay=replay,
105 overwrite_if_exists=overwrite_if_exists,
106 output_dir=output_dir,
107 config_file=user_config
108 )
109 except (OutputDirExistsException,
110 InvalidModeException,
111 FailedHookException,
112 UnknownExtension,
113 RepositoryNotFound) as e:
114 click.echo(e)
115 sys.exit(1)
116 except UndefinedVariableInTemplate as undefined_err:
117 click.echo('{}'.format(undefined_err.message))
118 click.echo('Error message: {}'.format(undefined_err.error.message))
119
120 context_str = json.dumps(
121 undefined_err.context,
122 indent=4,
123 sort_keys=True
124 )
125 click.echo('Context: {}'.format(context_str))
126 sys.exit(1)
127
128
129 if __name__ == "__main__":
130 main()
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py
--- a/cookiecutter/cli.py
+++ b/cookiecutter/cli.py
@@ -38,9 +38,23 @@
return message.format(location, python_version)
+def validate_extra_context(ctx, param, value):
+ for s in value:
+ if '=' not in s:
+ raise click.BadParameter(
+ 'EXTRA_CONTEXT should contain items of the form key=value; '
+ "'{}' doesn't match that form".format(s)
+ )
+
+ # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')
+ # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}
+ return dict(s.split('=', 1) for s in value) or None
+
+
@click.command(context_settings=dict(help_option_names=[u'-h', u'--help']))
@click.version_option(__version__, u'-V', u'--version', message=version_msg())
@click.argument(u'template')
+@click.argument(u'extra_context', nargs=-1, callback=validate_extra_context)
@click.option(
u'--no-input', is_flag=True,
help=u'Do not prompt for parameters and only use cookiecutter.json '
@@ -75,8 +89,8 @@
u'--default-config', is_flag=True,
help=u'Do not load a config file. Use the defaults instead'
)
-def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,
- output_dir, config_file, default_config):
+def main(template, extra_context, no_input, checkout, verbose, replay,
+ overwrite_if_exists, output_dir, config_file, default_config):
"""Create a project from a Cookiecutter project template (TEMPLATE)."""
if verbose:
logging.basicConfig(
@@ -101,6 +115,7 @@
cookiecutter(
template, checkout, no_input,
+ extra_context=extra_context,
replay=replay,
overwrite_if_exists=overwrite_if_exists,
output_dir=output_dir,
|
{"golden_diff": "diff --git a/cookiecutter/cli.py b/cookiecutter/cli.py\n--- a/cookiecutter/cli.py\n+++ b/cookiecutter/cli.py\n@@ -38,9 +38,23 @@\n return message.format(location, python_version)\n \n \n+def validate_extra_context(ctx, param, value):\n+ for s in value:\n+ if '=' not in s:\n+ raise click.BadParameter(\n+ 'EXTRA_CONTEXT should contain items of the form key=value; '\n+ \"'{}' doesn't match that form\".format(s)\n+ )\n+\n+ # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')\n+ # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}\n+ return dict(s.split('=', 1) for s in value) or None\n+\n+\n @click.command(context_settings=dict(help_option_names=[u'-h', u'--help']))\n @click.version_option(__version__, u'-V', u'--version', message=version_msg())\n @click.argument(u'template')\[email protected](u'extra_context', nargs=-1, callback=validate_extra_context)\n @click.option(\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n@@ -75,8 +89,8 @@\n u'--default-config', is_flag=True,\n help=u'Do not load a config file. Use the defaults instead'\n )\n-def main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n- output_dir, config_file, default_config):\n+def main(template, extra_context, no_input, checkout, verbose, replay,\n+ overwrite_if_exists, output_dir, config_file, default_config):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n@@ -101,6 +115,7 @@\n \n cookiecutter(\n template, checkout, no_input,\n+ extra_context=extra_context,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n", "issue": "Idea: have a way to specify context via command line\nSomething like repeat arguments:\n\n```\ncookiecutter mytemplate -Cname=my-project -Cgithub-user=ionelmc\n```\n\nOr maybe the whole json?\n\n```\ncookiecutter mytemplate --context='{\"name\": \"my-project\", \"github-user\": \"ionelmc\"}'\n```\n\nOr variable arguments?\n\n```\ncookiecutter mytemplate --context-name=my-project --context-github-user=ionelmc\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport json\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.config import USER_CONFIG_PATH\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException,\n InvalidModeException,\n FailedHookException,\n UndefinedVariableInTemplate,\n UnknownExtension,\n RepositoryNotFound\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n \"\"\"Returns the Cookiecutter version, location and Python powering it.\"\"\"\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\[email protected](context_settings=dict(help_option_names=[u'-h', u'--help']))\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', 
'--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\[email protected](\n u'-o', u'--output-dir', default='.', type=click.Path(),\n help=u'Where to output the generated project dir into'\n)\[email protected](\n u'--config-file', type=click.Path(), default=USER_CONFIG_PATH,\n help=u'User configuration file'\n)\[email protected](\n u'--default-config', is_flag=True,\n help=u'Do not load a config file. Use the defaults instead'\n)\ndef main(template, no_input, checkout, verbose, replay, overwrite_if_exists,\n output_dir, config_file, default_config):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n # If you _need_ to support a local template in a directory\n # called 'help', use a qualified path to the directory.\n if template == u'help':\n click.echo(click.get_current_context().get_help())\n sys.exit(0)\n\n user_config = None if default_config else config_file\n\n cookiecutter(\n template, checkout, no_input,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n config_file=user_config\n )\n except (OutputDirExistsException,\n InvalidModeException,\n FailedHookException,\n UnknownExtension,\n RepositoryNotFound) as e:\n click.echo(e)\n sys.exit(1)\n except UndefinedVariableInTemplate as undefined_err:\n click.echo('{}'.format(undefined_err.message))\n click.echo('Error message: {}'.format(undefined_err.error.message))\n\n context_str = json.dumps(\n undefined_err.context,\n indent=4,\n sort_keys=True\n )\n click.echo('Context: {}'.format(context_str))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.cli\n-----------------\n\nMain `cookiecutter` CLI.\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport json\n\nimport click\n\nfrom cookiecutter import __version__\nfrom cookiecutter.config import USER_CONFIG_PATH\nfrom cookiecutter.main import cookiecutter\nfrom cookiecutter.exceptions import (\n OutputDirExistsException,\n InvalidModeException,\n FailedHookException,\n UndefinedVariableInTemplate,\n UnknownExtension,\n RepositoryNotFound\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef version_msg():\n \"\"\"Returns the Cookiecutter version, location and Python powering it.\"\"\"\n python_version = sys.version[:3]\n location = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n message = u'Cookiecutter %(version)s from {} (Python {})'\n return message.format(location, python_version)\n\n\ndef validate_extra_context(ctx, param, value):\n for s in value:\n if '=' not in s:\n raise click.BadParameter(\n 'EXTRA_CONTEXT should contain items of the form key=value; '\n \"'{}' doesn't match that form\".format(s)\n )\n\n # Convert tuple -- e.g.: (u'program_name=foobar', u'startsecs=66')\n # to dict -- e.g.: {'program_name': 'foobar', 'startsecs': '66'}\n return dict(s.split('=', 1) for s in value) or None\n\n\[email 
protected](context_settings=dict(help_option_names=[u'-h', u'--help']))\[email protected]_option(__version__, u'-V', u'--version', message=version_msg())\[email protected](u'template')\[email protected](u'extra_context', nargs=-1, callback=validate_extra_context)\[email protected](\n u'--no-input', is_flag=True,\n help=u'Do not prompt for parameters and only use cookiecutter.json '\n u'file content',\n)\[email protected](\n u'-c', u'--checkout',\n help=u'branch, tag or commit to checkout after git clone',\n)\[email protected](\n '-v', '--verbose',\n is_flag=True, help='Print debug information', default=False\n)\[email protected](\n u'--replay', is_flag=True,\n help=u'Do not prompt for parameters and only use information entered '\n u'previously',\n)\[email protected](\n u'-f', u'--overwrite-if-exists', is_flag=True,\n help=u'Overwrite the contents of the output directory if it already exists'\n)\[email protected](\n u'-o', u'--output-dir', default='.', type=click.Path(),\n help=u'Where to output the generated project dir into'\n)\[email protected](\n u'--config-file', type=click.Path(), default=USER_CONFIG_PATH,\n help=u'User configuration file'\n)\[email protected](\n u'--default-config', is_flag=True,\n help=u'Do not load a config file. Use the defaults instead'\n)\ndef main(template, extra_context, no_input, checkout, verbose, replay,\n overwrite_if_exists, output_dir, config_file, default_config):\n \"\"\"Create a project from a Cookiecutter project template (TEMPLATE).\"\"\"\n if verbose:\n logging.basicConfig(\n format=u'%(levelname)s %(filename)s: %(message)s',\n level=logging.DEBUG\n )\n else:\n # Log info and above to console\n logging.basicConfig(\n format=u'%(levelname)s: %(message)s',\n level=logging.INFO\n )\n\n try:\n # If you _need_ to support a local template in a directory\n # called 'help', use a qualified path to the directory.\n if template == u'help':\n click.echo(click.get_current_context().get_help())\n sys.exit(0)\n\n user_config = None if default_config else config_file\n\n cookiecutter(\n template, checkout, no_input,\n extra_context=extra_context,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n config_file=user_config\n )\n except (OutputDirExistsException,\n InvalidModeException,\n FailedHookException,\n UnknownExtension,\n RepositoryNotFound) as e:\n click.echo(e)\n sys.exit(1)\n except UndefinedVariableInTemplate as undefined_err:\n click.echo('{}'.format(undefined_err.message))\n click.echo('Error message: {}'.format(undefined_err.error.message))\n\n context_str = json.dumps(\n undefined_err.context,\n indent=4,\n sort_keys=True\n )\n click.echo('Context: {}'.format(context_str))\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "cookiecutter/cli.py"}]}
| 1,515 | 475 |
gh_patches_debug_4402
|
rasdani/github-patches
|
git_diff
|
aws__aws-cli-1769
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
JSON Cache for AssumeRoleProvider not truncating files
When we open a file for writing, if we're reusing the same file (same cache key) we don't truncate the file before writing. If the second JSON response is smaller it will result in extra data at the end of the JSON document.
This will trigger a json parsing error, which raises a KeyError, which causes the cred provider to retrieve a new set of temporary credentials because it thinks the file is not in the cache.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/assumerole.py`
Content:
```
1 import os
2 import json
3 import logging
4
5 from botocore.exceptions import ProfileNotFound
6
7 LOG = logging.getLogger(__name__)
8
9
10 def register_assume_role_provider(event_handlers):
11 event_handlers.register('session-initialized',
12 inject_assume_role_provider_cache,
13 unique_id='inject_assume_role_cred_provider_cache')
14
15
16 def inject_assume_role_provider_cache(session, **kwargs):
17 try:
18 cred_chain = session.get_component('credential_provider')
19 except ProfileNotFound:
20 # If a user has provided a profile that does not exist,
21 # trying to retrieve components/config on the session
22 # will raise ProfileNotFound. Sometimes this is invalid:
23 #
24 # "ec2 describe-instances --profile unknown"
25 #
26 # and sometimes this is perfectly valid:
27 #
28 # "configure set region us-west-2 --profile brand-new-profile"
29 #
30 # Because we can't know (and don't want to know) whether
31 # the customer is trying to do something valid, we just
32 # immediately return. If it's invalid something else
33 # up the stack will raise ProfileNotFound, otherwise
34 # the configure (and other) commands will work as expected.
35 LOG.debug("ProfileNotFound caught when trying to inject "
36 "assume-role cred provider cache. Not configuring "
37 "JSONFileCache for assume-role.")
38 return
39 provider = cred_chain.get_provider('assume-role')
40 provider.cache = JSONFileCache()
41
42
43 class JSONFileCache(object):
44 """JSON file cache.
45
46 This provides a dict like interface that stores JSON serializable
47 objects.
48
49 The objects are serialized to JSON and stored in a file. These
50 values can be retrieved at a later time.
51
52 """
53
54 CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache'))
55
56 def __init__(self, working_dir=CACHE_DIR):
57 self._working_dir = working_dir
58
59 def __contains__(self, cache_key):
60 actual_key = self._convert_cache_key(cache_key)
61 return os.path.isfile(actual_key)
62
63 def __getitem__(self, cache_key):
64 """Retrieve value from a cache key."""
65 actual_key = self._convert_cache_key(cache_key)
66 try:
67 with open(actual_key) as f:
68 return json.load(f)
69 except (OSError, ValueError, IOError):
70 raise KeyError(cache_key)
71
72 def __setitem__(self, cache_key, value):
73 full_key = self._convert_cache_key(cache_key)
74 try:
75 file_content = json.dumps(value)
76 except (TypeError, ValueError):
77 raise ValueError("Value cannot be cached, must be "
78 "JSON serializable: %s" % value)
79 if not os.path.isdir(self._working_dir):
80 os.makedirs(self._working_dir)
81 with os.fdopen(os.open(full_key,
82 os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
83 f.write(file_content)
84
85 def _convert_cache_key(self, cache_key):
86 full_path = os.path.join(self._working_dir, cache_key + '.json')
87 return full_path
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awscli/customizations/assumerole.py b/awscli/customizations/assumerole.py
--- a/awscli/customizations/assumerole.py
+++ b/awscli/customizations/assumerole.py
@@ -80,6 +80,7 @@
os.makedirs(self._working_dir)
with os.fdopen(os.open(full_key,
os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:
+ f.truncate()
f.write(file_content)
def _convert_cache_key(self, cache_key):
|
{"golden_diff": "diff --git a/awscli/customizations/assumerole.py b/awscli/customizations/assumerole.py\n--- a/awscli/customizations/assumerole.py\n+++ b/awscli/customizations/assumerole.py\n@@ -80,6 +80,7 @@\n os.makedirs(self._working_dir)\n with os.fdopen(os.open(full_key,\n os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:\n+ f.truncate()\n f.write(file_content)\n \n def _convert_cache_key(self, cache_key):\n", "issue": "JSON Cache for AssumeRoleProvider not truncating files\nWhen we open a file for writing, if we're reusing the same file (same cache key) we don't truncate the file before writing. If the second JSON response is smaller it will result in extra data at the end of the JSON document.\n\nThis will trigger a json parsing error, which raises a KeyError, which causes the cred provider to retrieve a new set of temporary credentials because it thinks the file is not in the cache.\n\n", "before_files": [{"content": "import os\nimport json\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nLOG = logging.getLogger(__name__)\n\n\ndef register_assume_role_provider(event_handlers):\n event_handlers.register('session-initialized',\n inject_assume_role_provider_cache,\n unique_id='inject_assume_role_cred_provider_cache')\n\n\ndef inject_assume_role_provider_cache(session, **kwargs):\n try:\n cred_chain = session.get_component('credential_provider')\n except ProfileNotFound:\n # If a user has provided a profile that does not exist,\n # trying to retrieve components/config on the session\n # will raise ProfileNotFound. Sometimes this is invalid:\n #\n # \"ec2 describe-instances --profile unknown\"\n #\n # and sometimes this is perfectly valid:\n #\n # \"configure set region us-west-2 --profile brand-new-profile\"\n #\n # Because we can't know (and don't want to know) whether\n # the customer is trying to do something valid, we just\n # immediately return. If it's invalid something else\n # up the stack will raise ProfileNotFound, otherwise\n # the configure (and other) commands will work as expected.\n LOG.debug(\"ProfileNotFound caught when trying to inject \"\n \"assume-role cred provider cache. Not configuring \"\n \"JSONFileCache for assume-role.\")\n return\n provider = cred_chain.get_provider('assume-role')\n provider.cache = JSONFileCache()\n\n\nclass JSONFileCache(object):\n \"\"\"JSON file cache.\n\n This provides a dict like interface that stores JSON serializable\n objects.\n\n The objects are serialized to JSON and stored in a file. 
These\n values can be retrieved at a later time.\n\n \"\"\"\n\n CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache'))\n\n def __init__(self, working_dir=CACHE_DIR):\n self._working_dir = working_dir\n\n def __contains__(self, cache_key):\n actual_key = self._convert_cache_key(cache_key)\n return os.path.isfile(actual_key)\n\n def __getitem__(self, cache_key):\n \"\"\"Retrieve value from a cache key.\"\"\"\n actual_key = self._convert_cache_key(cache_key)\n try:\n with open(actual_key) as f:\n return json.load(f)\n except (OSError, ValueError, IOError):\n raise KeyError(cache_key)\n\n def __setitem__(self, cache_key, value):\n full_key = self._convert_cache_key(cache_key)\n try:\n file_content = json.dumps(value)\n except (TypeError, ValueError):\n raise ValueError(\"Value cannot be cached, must be \"\n \"JSON serializable: %s\" % value)\n if not os.path.isdir(self._working_dir):\n os.makedirs(self._working_dir)\n with os.fdopen(os.open(full_key,\n os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:\n f.write(file_content)\n\n def _convert_cache_key(self, cache_key):\n full_path = os.path.join(self._working_dir, cache_key + '.json')\n return full_path\n", "path": "awscli/customizations/assumerole.py"}], "after_files": [{"content": "import os\nimport json\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nLOG = logging.getLogger(__name__)\n\n\ndef register_assume_role_provider(event_handlers):\n event_handlers.register('session-initialized',\n inject_assume_role_provider_cache,\n unique_id='inject_assume_role_cred_provider_cache')\n\n\ndef inject_assume_role_provider_cache(session, **kwargs):\n try:\n cred_chain = session.get_component('credential_provider')\n except ProfileNotFound:\n # If a user has provided a profile that does not exist,\n # trying to retrieve components/config on the session\n # will raise ProfileNotFound. Sometimes this is invalid:\n #\n # \"ec2 describe-instances --profile unknown\"\n #\n # and sometimes this is perfectly valid:\n #\n # \"configure set region us-west-2 --profile brand-new-profile\"\n #\n # Because we can't know (and don't want to know) whether\n # the customer is trying to do something valid, we just\n # immediately return. If it's invalid something else\n # up the stack will raise ProfileNotFound, otherwise\n # the configure (and other) commands will work as expected.\n LOG.debug(\"ProfileNotFound caught when trying to inject \"\n \"assume-role cred provider cache. Not configuring \"\n \"JSONFileCache for assume-role.\")\n return\n provider = cred_chain.get_provider('assume-role')\n provider.cache = JSONFileCache()\n\n\nclass JSONFileCache(object):\n \"\"\"JSON file cache.\n\n This provides a dict like interface that stores JSON serializable\n objects.\n\n The objects are serialized to JSON and stored in a file. 
These\n values can be retrieved at a later time.\n\n \"\"\"\n\n CACHE_DIR = os.path.expanduser(os.path.join('~', '.aws', 'cli', 'cache'))\n\n def __init__(self, working_dir=CACHE_DIR):\n self._working_dir = working_dir\n\n def __contains__(self, cache_key):\n actual_key = self._convert_cache_key(cache_key)\n return os.path.isfile(actual_key)\n\n def __getitem__(self, cache_key):\n \"\"\"Retrieve value from a cache key.\"\"\"\n actual_key = self._convert_cache_key(cache_key)\n try:\n with open(actual_key) as f:\n return json.load(f)\n except (OSError, ValueError, IOError):\n raise KeyError(cache_key)\n\n def __setitem__(self, cache_key, value):\n full_key = self._convert_cache_key(cache_key)\n try:\n file_content = json.dumps(value)\n except (TypeError, ValueError):\n raise ValueError(\"Value cannot be cached, must be \"\n \"JSON serializable: %s\" % value)\n if not os.path.isdir(self._working_dir):\n os.makedirs(self._working_dir)\n with os.fdopen(os.open(full_key,\n os.O_WRONLY | os.O_CREAT, 0o600), 'w') as f:\n f.truncate()\n f.write(file_content)\n\n def _convert_cache_key(self, cache_key):\n full_path = os.path.join(self._working_dir, cache_key + '.json')\n return full_path\n", "path": "awscli/customizations/assumerole.py"}]}
| 1,201 | 124 |
gh_patches_debug_49851 | rasdani/github-patches | git_diff | netbox-community__netbox-15890 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenIDC SSO through apache stopped working after update to 3.7.6
### Deployment Type
Self-hosted
### NetBox Version
v3.7.6
### Python Version
3.9
### Steps to Reproduce
This is a longstanding NetBox instance. It runs under gunicorn, proxied through apache which is configured to use mod_auth_openid for authentication.
NetBox's configuration includes:
REMOTE_AUTH_ENABLED = True
REMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'
REMOTE_AUTH_HEADER = 'HTTP_OIDC_CLAIM_PREFERRED_USERNAME'
REMOTE_AUTH_AUTO_CREATE_USER = True
This was working fine until the update to 3.7.6 following our usual procedure:
1. Pull and checkout v3.7.6.
2. Run upgrade.sh.
3. Restart NetBox gunicorn service, netbox-rq and apache.
Since the upgrade, NetBox has presented a login box instead of logging in as the REMOTE_AUTH_HEADER user. Using tcpdump, I can see the "OIDC_CLAIM_preferred_username" header is being sent to gunicorn. Other instances using the same OpenIDC configuration are working.
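For reference, a minimal sketch of how a WSGI layer derives the `META` key that gets compared against `REMOTE_AUTH_HEADER`; the helper below is illustrative, not NetBox code, and the point is that header names containing underscores (like `OIDC_CLAIM_preferred_username`) are exactly the ones gunicorn handles specially:

```python
# Illustrative only: an incoming HTTP header name is upper-cased, dashes become
# underscores, and the result is prefixed with HTTP_ before Django sees it.
def wsgi_meta_key(header_name: str) -> str:
    return "HTTP_" + header_name.upper().replace("-", "_")

assert wsgi_meta_key("OIDC_CLAIM_preferred_username") == "HTTP_OIDC_CLAIM_PREFERRED_USERNAME"
```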
### Expected Behavior
REMOTE_AUTH login using OpenIDC credentials.
### Observed Behavior
The web frontend prompts for username and password.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib/gunicorn.py`
Content:
```
1 # The IP address (typically localhost) and port that the NetBox WSGI process should listen on
2 bind = '127.0.0.1:8001'
3
4 # Number of gunicorn workers to spawn. This should typically be 2n+1, where
5 # n is the number of CPU cores present.
6 workers = 5
7
8 # Number of threads per worker process
9 threads = 3
10
11 # Timeout (in seconds) for a request to complete
12 timeout = 120
13
14 # The maximum number of requests a worker can handle before being respawned
15 max_requests = 5000
16 max_requests_jitter = 500
17
```
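Relatedly, recent gunicorn releases drop request headers whose names contain underscores by default, which is what remote-auth setups like the one in this issue depend on. A hedged sketch of the kind of addition to `contrib/gunicorn.py` that re-enables them, assuming a gunicorn version that supports the `header_map` setting (the value names below come from gunicorn's documentation, not from this repository):

```python
# Sketch only: allow header names containing underscores (e.g. OIDC_CLAIM_*)
# to reach the application. Possible values: "drop" (default), "refuse",
# "dangerous".
header_map = "dangerous"
```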
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/contrib/gunicorn.py b/contrib/gunicorn.py
--- a/contrib/gunicorn.py
+++ b/contrib/gunicorn.py
@@ -14,3 +14,7 @@
# The maximum number of requests a worker can handle before being respawned
max_requests = 5000
max_requests_jitter = 500
+
+# Uncomment this line to accept HTTP headers containing underscores, e.g. for remote
+# authentication support. See https://docs.gunicorn.org/en/stable/settings.html#header-map
+# header-map = 'dangerous'
|
{"golden_diff": "diff --git a/contrib/gunicorn.py b/contrib/gunicorn.py\n--- a/contrib/gunicorn.py\n+++ b/contrib/gunicorn.py\n@@ -14,3 +14,7 @@\n # The maximum number of requests a worker can handle before being respawned\n max_requests = 5000\n max_requests_jitter = 500\n+\n+# Uncomment this line to accept HTTP headers containing underscores, e.g. for remote\n+# authentication support. See https://docs.gunicorn.org/en/stable/settings.html#header-map\n+# header-map = 'dangerous'\n", "issue": "OpenIDC SSO through apache stopped working after update to 3.7.6\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.6\n\n### Python Version\n\n3.9\n\n### Steps to Reproduce\n\nThis is a longstanding NetBox instance. It runs under gunicorn, proxied through apache which is configured to use mod_auth_openid for authentication. \r\n\r\nNetBox's configuration includes:\r\nREMOTE_AUTH_ENABLED = True\r\nREMOTE_AUTH_BACKEND = 'netbox.authentication.RemoteUserBackend'\r\nREMOTE_AUTH_HEADER = 'HTTP_OIDC_CLAIM_PREFERRED_USERNAME'\r\nREMOTE_AUTH_AUTO_CREATE_USER = True\r\n\r\nThis was working fine until the update to 3.7.6 following our usual procedure:\r\n\r\nPull and checkout v3.7.6.\r\n\r\nRun upgrade.sh\r\n\r\nRestart NetBox gunicorn service, netbox-rq and apache\r\n\r\nSince the upgrade, NetBox has presented a login box instead of logging in as the REMOTE_AUTH_HEADER user. Using tcpdump, I can see the \"OIDC_CLAIM_preferred_username\" header is being sent to gunicorn. Other instances using the same OpenIDC configuration are working.\r\n\n\n### Expected Behavior\n\nREMOTE_AUTH login using OpenIDC credentials.\n\n### Observed Behavior\n\nThe web frontend prompts for username and password.\n", "before_files": [{"content": "# The IP address (typically localhost) and port that the NetBox WSGI process should listen on\nbind = '127.0.0.1:8001'\n\n# Number of gunicorn workers to spawn. This should typically be 2n+1, where\n# n is the number of CPU cores present.\nworkers = 5\n\n# Number of threads per worker process\nthreads = 3\n\n# Timeout (in seconds) for a request to complete\ntimeout = 120\n\n# The maximum number of requests a worker can handle before being respawned\nmax_requests = 5000\nmax_requests_jitter = 500\n", "path": "contrib/gunicorn.py"}], "after_files": [{"content": "# The IP address (typically localhost) and port that the NetBox WSGI process should listen on\nbind = '127.0.0.1:8001'\n\n# Number of gunicorn workers to spawn. This should typically be 2n+1, where\n# n is the number of CPU cores present.\nworkers = 5\n\n# Number of threads per worker process\nthreads = 3\n\n# Timeout (in seconds) for a request to complete\ntimeout = 120\n\n# The maximum number of requests a worker can handle before being respawned\nmax_requests = 5000\nmax_requests_jitter = 500\n\n# Uncomment this line to accept HTTP headers containing underscores, e.g. for remote\n# authentication support. See https://docs.gunicorn.org/en/stable/settings.html#header-map\n# header-map = 'dangerous'\n", "path": "contrib/gunicorn.py"}]}
| 695 | 124 |
gh_patches_debug_30377 | rasdani/github-patches | git_diff | goauthentik__authentik-2845 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using 'Have-I-been-pwned' policy breaks flows in Authentik 2022.4.1
**Describe the bug**
Using a 'Have-I-been-pwned' policy on a password prompt within a flow breaks the flow.
**To Reproduce**
Steps to reproduce the behavior:
1. Use Authentik 2022.3.3
2. Use all the default settings/flows, so a clean install
3. Add a have-i-been-pwned policy to the default-password-change flow on the default-password-change-prompt stage.
4. This stage binding has the following settings:
- _Evaluate on plan: True_
- _Re-evaluate policies: False_
- _Invalid responses action: RETRY returns the error message and a similar challenge to the executor._
- _Policy engine mode: ALL, all policies must match to include this stage access._
5. Go to the Flow Overview and Execute flow with current user, see that the have-i-been pwned policy works correctly.
6. Use Authentik 2022.4.1
7. Repeat steps 2 - 5 described above
8. See that you will receive an error message 'Password not set in context'.
**Expected behavior**
The password should be checked, and the flow should not crash with the error 'Password not set in context'.
**Version and Deployment (please complete the following information):**
- authentik version: 2022.4.1
- Deployment: tested both Docker & K8S
**Additional context**
I repeated these steps multiple times and I keep getting the same issue. Therefore I think it is safe to assume that this is a bug introduced in the update from version 2022.3.3 to version 2022.4.1
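For readers unfamiliar with the check this policy performs, a minimal, self-contained sketch of the Pwned Passwords k-anonymity range lookup (it assumes the `requests` package and network access; the function name is illustrative):

```python
# Standalone sketch: only the first five hex characters of the SHA-1 digest
# are sent to the API; the response is scanned locally for the full suffix.
from hashlib import sha1

import requests


def pwned_count(password: str) -> int:
    digest = sha1(password.encode("utf-8")).hexdigest().upper()
    prefix, suffix = digest[:5], digest[5:]
    resp = requests.get(f"https://api.pwnedpasswords.com/range/{prefix}", timeout=10)
    for line in resp.text.splitlines():
        candidate, _, count = line.partition(":")
        if candidate == suffix:
            return int(count)
    return 0
```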
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/policies/hibp/models.py`
Content:
```
1 """authentik HIBP Models"""
2 from hashlib import sha1
3
4 from django.db import models
5 from django.utils.translation import gettext as _
6 from rest_framework.serializers import BaseSerializer
7 from structlog.stdlib import get_logger
8
9 from authentik.lib.utils.http import get_http_session
10 from authentik.policies.models import Policy, PolicyResult
11 from authentik.policies.types import PolicyRequest
12
13 LOGGER = get_logger()
14
15
16 class HaveIBeenPwendPolicy(Policy):
17 """Check if password is on HaveIBeenPwned's list by uploading the first
18 5 characters of the SHA1 Hash."""
19
20 password_field = models.TextField(
21 default="password",
22 help_text=_("Field key to check, field keys defined in Prompt stages are available."),
23 )
24
25 allowed_count = models.IntegerField(default=0)
26
27 @property
28 def serializer(self) -> BaseSerializer:
29 from authentik.policies.hibp.api import HaveIBeenPwendPolicySerializer
30
31 return HaveIBeenPwendPolicySerializer
32
33 @property
34 def component(self) -> str:
35 return "ak-policy-hibp-form"
36
37 def passes(self, request: PolicyRequest) -> PolicyResult:
38 """Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5
39 characters of Password in request and checks if full hash is in response. Returns 0
40 if Password is not in result otherwise the count of how many times it was used."""
41 if self.password_field not in request.context:
42 LOGGER.warning(
43 "Password field not set in Policy Request",
44 field=self.password_field,
45 fields=request.context.keys(),
46 )
47 return PolicyResult(False, _("Password not set in context"))
48 password = str(request.context[self.password_field])
49
50 pw_hash = sha1(password.encode("utf-8")).hexdigest() # nosec
51 url = f"https://api.pwnedpasswords.com/range/{pw_hash[:5]}"
52 result = get_http_session().get(url).text
53 final_count = 0
54 for line in result.split("\r\n"):
55 full_hash, count = line.split(":")
56 if pw_hash[5:] == full_hash.lower():
57 final_count = int(count)
58 LOGGER.debug("got hibp result", count=final_count, hash=pw_hash[:5])
59 if final_count > self.allowed_count:
60 message = _("Password exists on %(count)d online lists." % {"count": final_count})
61 return PolicyResult(False, message)
62 return PolicyResult(True)
63
64 class Meta:
65
66 verbose_name = _("Have I Been Pwned Policy")
67 verbose_name_plural = _("Have I Been Pwned Policies")
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/policies/hibp/models.py b/authentik/policies/hibp/models.py
--- a/authentik/policies/hibp/models.py
+++ b/authentik/policies/hibp/models.py
@@ -9,6 +9,7 @@
from authentik.lib.utils.http import get_http_session
from authentik.policies.models import Policy, PolicyResult
from authentik.policies.types import PolicyRequest
+from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT
LOGGER = get_logger()
@@ -38,14 +39,17 @@
"""Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5
characters of Password in request and checks if full hash is in response. Returns 0
if Password is not in result otherwise the count of how many times it was used."""
- if self.password_field not in request.context:
+ password = request.context.get(PLAN_CONTEXT_PROMPT, {}).get(
+ self.password_field, request.context.get(self.password_field)
+ )
+ if not password:
LOGGER.warning(
"Password field not set in Policy Request",
field=self.password_field,
fields=request.context.keys(),
)
return PolicyResult(False, _("Password not set in context"))
- password = str(request.context[self.password_field])
+ password = str(password)
pw_hash = sha1(password.encode("utf-8")).hexdigest() # nosec
url = f"https://api.pwnedpasswords.com/range/{pw_hash[:5]}"
|
{"golden_diff": "diff --git a/authentik/policies/hibp/models.py b/authentik/policies/hibp/models.py\n--- a/authentik/policies/hibp/models.py\n+++ b/authentik/policies/hibp/models.py\n@@ -9,6 +9,7 @@\n from authentik.lib.utils.http import get_http_session\n from authentik.policies.models import Policy, PolicyResult\n from authentik.policies.types import PolicyRequest\n+from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n \n LOGGER = get_logger()\n \n@@ -38,14 +39,17 @@\n \"\"\"Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5\n characters of Password in request and checks if full hash is in response. Returns 0\n if Password is not in result otherwise the count of how many times it was used.\"\"\"\n- if self.password_field not in request.context:\n+ password = request.context.get(PLAN_CONTEXT_PROMPT, {}).get(\n+ self.password_field, request.context.get(self.password_field)\n+ )\n+ if not password:\n LOGGER.warning(\n \"Password field not set in Policy Request\",\n field=self.password_field,\n fields=request.context.keys(),\n )\n return PolicyResult(False, _(\"Password not set in context\"))\n- password = str(request.context[self.password_field])\n+ password = str(password)\n \n pw_hash = sha1(password.encode(\"utf-8\")).hexdigest() # nosec\n url = f\"https://api.pwnedpasswords.com/range/{pw_hash[:5]}\"\n", "issue": "Using 'Have-I-been-pwned' policy breaks flows in Authentik 2022.4.1\n**Describe the bug**\r\nUsing a 'Have-I-been-pwned' policy on a password prompt within a flow breaks the flow.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Use Authentik 2022.3.3\r\n2. Use all the default settings/flows, so a clean install\r\n3. Add a have-i-been-pwned policy to the default-password-change flow on the default-password-change-prompt stage.\r\n4. This stage binding has the following settings:\r\n- _Evaluate on plan: True_\r\n- _Re-evaluate policies: False_\r\n- _Invalid respones action: RETRY returns the error message and a similar challenge to the executor._\r\n- _Policy engine mode: ALL, all policies must match to include this stage access._\r\n5. Go to the Flow Overview and Execute flow with current user, see that the have-i-been pwned policy works correctly.\r\n6. Use Authentik 2022.4.1\r\n7. Repeat steps 2 - 5 described above\r\n8. See that you will receive an error message 'Password not set in context'.\r\n\r\n**Expected behavior**\r\nThe password should be checked, and the flow should not crash with the error 'Password not set in context'.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 2022.4.1\r\n - Deployment: tested both Docker & K8S\r\n\r\n**Additional context**\r\nI repeated these steps multiple times and I keep getting the same issue. 
Therefore I think it is safe to assume that this is a bug introduced in the update from version 2022.3.3 to version 2022.4.1\r\n\n", "before_files": [{"content": "\"\"\"authentik HIBP Models\"\"\"\nfrom hashlib import sha1\n\nfrom django.db import models\nfrom django.utils.translation import gettext as _\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.policies.models import Policy, PolicyResult\nfrom authentik.policies.types import PolicyRequest\n\nLOGGER = get_logger()\n\n\nclass HaveIBeenPwendPolicy(Policy):\n \"\"\"Check if password is on HaveIBeenPwned's list by uploading the first\n 5 characters of the SHA1 Hash.\"\"\"\n\n password_field = models.TextField(\n default=\"password\",\n help_text=_(\"Field key to check, field keys defined in Prompt stages are available.\"),\n )\n\n allowed_count = models.IntegerField(default=0)\n\n @property\n def serializer(self) -> BaseSerializer:\n from authentik.policies.hibp.api import HaveIBeenPwendPolicySerializer\n\n return HaveIBeenPwendPolicySerializer\n\n @property\n def component(self) -> str:\n return \"ak-policy-hibp-form\"\n\n def passes(self, request: PolicyRequest) -> PolicyResult:\n \"\"\"Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5\n characters of Password in request and checks if full hash is in response. Returns 0\n if Password is not in result otherwise the count of how many times it was used.\"\"\"\n if self.password_field not in request.context:\n LOGGER.warning(\n \"Password field not set in Policy Request\",\n field=self.password_field,\n fields=request.context.keys(),\n )\n return PolicyResult(False, _(\"Password not set in context\"))\n password = str(request.context[self.password_field])\n\n pw_hash = sha1(password.encode(\"utf-8\")).hexdigest() # nosec\n url = f\"https://api.pwnedpasswords.com/range/{pw_hash[:5]}\"\n result = get_http_session().get(url).text\n final_count = 0\n for line in result.split(\"\\r\\n\"):\n full_hash, count = line.split(\":\")\n if pw_hash[5:] == full_hash.lower():\n final_count = int(count)\n LOGGER.debug(\"got hibp result\", count=final_count, hash=pw_hash[:5])\n if final_count > self.allowed_count:\n message = _(\"Password exists on %(count)d online lists.\" % {\"count\": final_count})\n return PolicyResult(False, message)\n return PolicyResult(True)\n\n class Meta:\n\n verbose_name = _(\"Have I Been Pwned Policy\")\n verbose_name_plural = _(\"Have I Been Pwned Policies\")\n", "path": "authentik/policies/hibp/models.py"}], "after_files": [{"content": "\"\"\"authentik HIBP Models\"\"\"\nfrom hashlib import sha1\n\nfrom django.db import models\nfrom django.utils.translation import gettext as _\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.policies.models import Policy, PolicyResult\nfrom authentik.policies.types import PolicyRequest\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n\nLOGGER = get_logger()\n\n\nclass HaveIBeenPwendPolicy(Policy):\n \"\"\"Check if password is on HaveIBeenPwned's list by uploading the first\n 5 characters of the SHA1 Hash.\"\"\"\n\n password_field = models.TextField(\n default=\"password\",\n help_text=_(\"Field key to check, field keys defined in Prompt stages are available.\"),\n )\n\n allowed_count = models.IntegerField(default=0)\n\n @property\n def serializer(self) -> 
BaseSerializer:\n from authentik.policies.hibp.api import HaveIBeenPwendPolicySerializer\n\n return HaveIBeenPwendPolicySerializer\n\n @property\n def component(self) -> str:\n return \"ak-policy-hibp-form\"\n\n def passes(self, request: PolicyRequest) -> PolicyResult:\n \"\"\"Check if password is in HIBP DB. Hashes given Password with SHA1, uses the first 5\n characters of Password in request and checks if full hash is in response. Returns 0\n if Password is not in result otherwise the count of how many times it was used.\"\"\"\n password = request.context.get(PLAN_CONTEXT_PROMPT, {}).get(\n self.password_field, request.context.get(self.password_field)\n )\n if not password:\n LOGGER.warning(\n \"Password field not set in Policy Request\",\n field=self.password_field,\n fields=request.context.keys(),\n )\n return PolicyResult(False, _(\"Password not set in context\"))\n password = str(password)\n\n pw_hash = sha1(password.encode(\"utf-8\")).hexdigest() # nosec\n url = f\"https://api.pwnedpasswords.com/range/{pw_hash[:5]}\"\n result = get_http_session().get(url).text\n final_count = 0\n for line in result.split(\"\\r\\n\"):\n full_hash, count = line.split(\":\")\n if pw_hash[5:] == full_hash.lower():\n final_count = int(count)\n LOGGER.debug(\"got hibp result\", count=final_count, hash=pw_hash[:5])\n if final_count > self.allowed_count:\n message = _(\"Password exists on %(count)d online lists.\" % {\"count\": final_count})\n return PolicyResult(False, message)\n return PolicyResult(True)\n\n class Meta:\n\n verbose_name = _(\"Have I Been Pwned Policy\")\n verbose_name_plural = _(\"Have I Been Pwned Policies\")\n", "path": "authentik/policies/hibp/models.py"}]}
| 1,365 | 346 |
gh_patches_debug_30742 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-265 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More filters for table list API
**Problem**
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
The table list API should allow filtering. For example, we might want to get the list of all tables in a schema to see if the table the user is trying to create already exists in that schema.
**Proposed solution**
<!-- A clear and concise description of your proposed solution or feature. -->
The table list endpoint should support filtering by:
- schema
- before/after: created, last updated
- whether the import was verified
**Additional context**
<!-- Add any other context or screenshots about the feature request here.-->
We should use `django-filter` since it integrates with DRF and makes setting up filters easy.
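As an illustration (the names below are hypothetical, not Mathesar's actual code), a plain `django-filter` FilterSet covering the requested filters might look like this; with DRF's `DjangoFilterBackend`, the range filters would surface as `created_after`/`created_before`-style query parameters:

```python
# Hypothetical sketch of the requested table-list filters using django-filter.
import django_filters


class TableFilter(django_filters.FilterSet):
    schema = django_filters.CharFilter(field_name="schema__name")
    created = django_filters.DateTimeFromToRangeFilter(field_name="created_at")
    updated = django_filters.DateTimeFromToRangeFilter(field_name="updated_at")
    import_verified = django_filters.BooleanFilter(field_name="import_verified")
```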
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/models.py`
Content:
```
1 from django.contrib.auth.models import User
2 from django.core.cache import cache
3 from django.db import models
4 from django.utils.functional import cached_property
5
6 from mathesar.database.base import create_mathesar_engine
7 from mathesar.utils import models as model_utils
8 from db import tables, records, schemas
9
10 NAME_CACHE_INTERVAL = 60 * 5
11
12
13 class BaseModel(models.Model):
14 created_at = models.DateTimeField(auto_now_add=True)
15 updated_at = models.DateTimeField(auto_now=True)
16
17 class Meta:
18 abstract = True
19
20
21 class DatabaseObject(BaseModel):
22 oid = models.IntegerField()
23
24 class Meta:
25 abstract = True
26
27 def __str__(self):
28 return f"{self.__class__.__name__}: {self.oid}"
29
30
31 class Schema(DatabaseObject):
32 database = models.CharField(max_length=128)
33
34 @cached_property
35 def _sa_engine(self):
36 # We're caching this since the engine is used frequently.
37 return create_mathesar_engine(self.database)
38
39 @cached_property
40 def name(self):
41 cache_key = f"{self.database}_schema_name_{self.oid}"
42 try:
43 schema_name = cache.get(cache_key)
44 if schema_name is None:
45 schema_name = schemas.get_schema_name_from_oid(
46 self.oid, self._sa_engine
47 )
48 cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL)
49 return schema_name
50 # We catch this error, since it lets us decouple the cadence of
51 # overall DB reflection from the cadence of cache expiration for
52 # schema names. Also, it makes it obvious when the DB layer has
53 # been altered, as opposed to other reasons for a 404 when
54 # requesting a schema.
55 except TypeError:
56 return 'MISSING'
57
58
59 class Table(DatabaseObject):
60 schema = models.ForeignKey('Schema', on_delete=models.CASCADE,
61 related_name='tables')
62 import_verified = models.BooleanField(blank=True, null=True)
63
64 @cached_property
65 def _sa_table(self):
66 try:
67 table = tables.reflect_table_from_oid(
68 self.oid, self.schema._sa_engine,
69 )
70 # We catch this error, since it lets us decouple the cadence of
71 # overall DB reflection from the cadence of cache expiration for
72 # table names. Also, it makes it obvious when the DB layer has
73 # been altered, as opposed to other reasons for a 404 when
74 # requesting a table.
75 except TypeError:
76 table = tables.create_empty_table("MISSING")
77 return table
78
79 @cached_property
80 def name(self):
81 return self._sa_table.name
82
83 @property
84 def sa_columns(self):
85 return self._sa_table.columns
86
87 @property
88 def sa_column_names(self):
89 return self.sa_columns.keys()
90
91 @property
92 def sa_num_records(self):
93 return tables.get_count(self._sa_table, self.schema._sa_engine)
94
95 @property
96 def sa_all_records(self):
97 return records.get_records(self._sa_table, self.schema._sa_engine)
98
99 def get_record(self, id_value):
100 return records.get_record(self._sa_table, self.schema._sa_engine, id_value)
101
102 def get_records(self, limit=None, offset=None):
103 return records.get_records(self._sa_table, self.schema._sa_engine, limit, offset)
104
105 def create_record_or_records(self, record_data):
106 return records.create_record_or_records(self._sa_table, self.schema._sa_engine, record_data)
107
108 def update_record(self, id_value, record_data):
109 return records.update_record(self._sa_table, self.schema._sa_engine, id_value, record_data)
110
111 def delete_record(self, id_value):
112 return records.delete_record(self._sa_table, self.schema._sa_engine, id_value)
113
114
115 class DataFile(BaseModel):
116 file = models.FileField(
117 upload_to=model_utils.user_directory_path,
118 )
119 user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)
120 table_imported_to = models.ForeignKey(Table, related_name="data_files", blank=True,
121 null=True, on_delete=models.SET_NULL)
122 delimiter = models.CharField(max_length=1, default=',', blank=True)
123 escapechar = models.CharField(max_length=1, blank=True)
124 quotechar = models.CharField(max_length=1, default='"', blank=True)
125
```
Path: `mathesar/filters.py`
Content:
```
1 from django_property_filter import (
2 PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter,
3 )
4
5 from mathesar.models import Schema, Table
6
7
8 class CharInFilter(PropertyBaseInFilter, PropertyCharFilter):
9 pass
10
11
12 class SchemaFilter(PropertyFilterSet):
13 database = CharInFilter(field_name='database', lookup_expr='in')
14 name = CharInFilter(field_name='name', lookup_expr='in')
15
16 class Meta:
17 model = Schema
18 fields = ['database', 'name']
19
20
21 class TableFilter(PropertyFilterSet):
22 name = CharInFilter(field_name='name', lookup_expr='in')
23
24 class Meta:
25 model = Table
26 fields = ['name']
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/filters.py b/mathesar/filters.py
--- a/mathesar/filters.py
+++ b/mathesar/filters.py
@@ -1,5 +1,6 @@
from django_property_filter import (
PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter,
+ PropertyDateTimeFromToRangeFilter, PropertyBooleanFilter
)
from mathesar.models import Schema, Table
@@ -20,7 +21,13 @@
class TableFilter(PropertyFilterSet):
name = CharInFilter(field_name='name', lookup_expr='in')
+ schema = CharInFilter(field_name='schema__name', lookup_expr='in')
+ created = PropertyDateTimeFromToRangeFilter(field_name='created_at')
+ updated = PropertyDateTimeFromToRangeFilter(field_name='updated_at')
+ import_verified = PropertyBooleanFilter(field_name='import_verified')
+ not_imported = PropertyBooleanFilter(lookup_expr="isnull",
+ field_name='import_verified')
class Meta:
model = Table
- fields = ['name']
+ fields = ['name', 'schema', 'created_at', 'updated_at', 'import_verified']
diff --git a/mathesar/models.py b/mathesar/models.py
--- a/mathesar/models.py
+++ b/mathesar/models.py
@@ -28,13 +28,21 @@
return f"{self.__class__.__name__}: {self.oid}"
+# TODO: Replace with a proper form of caching
+# See: https://github.com/centerofci/mathesar/issues/280
+_engine = None
+
+
class Schema(DatabaseObject):
database = models.CharField(max_length=128)
- @cached_property
+ @property
def _sa_engine(self):
+ global _engine
# We're caching this since the engine is used frequently.
- return create_mathesar_engine(self.database)
+ if _engine is None:
+ _engine = create_mathesar_engine(self.database)
+ return _engine
@cached_property
def name(self):
|
{"golden_diff": "diff --git a/mathesar/filters.py b/mathesar/filters.py\n--- a/mathesar/filters.py\n+++ b/mathesar/filters.py\n@@ -1,5 +1,6 @@\n from django_property_filter import (\n PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter,\n+ PropertyDateTimeFromToRangeFilter, PropertyBooleanFilter\n )\n \n from mathesar.models import Schema, Table\n@@ -20,7 +21,13 @@\n \n class TableFilter(PropertyFilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n+ schema = CharInFilter(field_name='schema__name', lookup_expr='in')\n+ created = PropertyDateTimeFromToRangeFilter(field_name='created_at')\n+ updated = PropertyDateTimeFromToRangeFilter(field_name='updated_at')\n+ import_verified = PropertyBooleanFilter(field_name='import_verified')\n+ not_imported = PropertyBooleanFilter(lookup_expr=\"isnull\",\n+ field_name='import_verified')\n \n class Meta:\n model = Table\n- fields = ['name']\n+ fields = ['name', 'schema', 'created_at', 'updated_at', 'import_verified']\ndiff --git a/mathesar/models.py b/mathesar/models.py\n--- a/mathesar/models.py\n+++ b/mathesar/models.py\n@@ -28,13 +28,21 @@\n return f\"{self.__class__.__name__}: {self.oid}\"\n \n \n+# TODO: Replace with a proper form of caching\n+# See: https://github.com/centerofci/mathesar/issues/280\n+_engine = None\n+\n+\n class Schema(DatabaseObject):\n database = models.CharField(max_length=128)\n \n- @cached_property\n+ @property\n def _sa_engine(self):\n+ global _engine\n # We're caching this since the engine is used frequently.\n- return create_mathesar_engine(self.database)\n+ if _engine is None:\n+ _engine = create_mathesar_engine(self.database)\n+ return _engine\n \n @cached_property\n def name(self):\n", "issue": "More filters for table list API\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nThe table list API should allow filtering. For example, we might want to get the list of all tables in a schema to see if the table the user is trying to create already exists in that schema.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. 
-->\r\nThe table list endpoint should support filtering by:\r\n- schema\r\n- before/after: created, last updated\r\n- whether the import was verified\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\nWe should use `django-filter` since it integrates with DRF and makes setting up filters easy.\n", "before_files": [{"content": "from django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.utils import models as model_utils\nfrom db import tables, records, schemas\n\nNAME_CACHE_INTERVAL = 60 * 5\n\n\nclass BaseModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DatabaseObject(BaseModel):\n oid = models.IntegerField()\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.oid}\"\n\n\nclass Schema(DatabaseObject):\n database = models.CharField(max_length=128)\n\n @cached_property\n def _sa_engine(self):\n # We're caching this since the engine is used frequently.\n return create_mathesar_engine(self.database)\n\n @cached_property\n def name(self):\n cache_key = f\"{self.database}_schema_name_{self.oid}\"\n try:\n schema_name = cache.get(cache_key)\n if schema_name is None:\n schema_name = schemas.get_schema_name_from_oid(\n self.oid, self._sa_engine\n )\n cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL)\n return schema_name\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # schema names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a schema.\n except TypeError:\n return 'MISSING'\n\n\nclass Table(DatabaseObject):\n schema = models.ForeignKey('Schema', on_delete=models.CASCADE,\n related_name='tables')\n import_verified = models.BooleanField(blank=True, null=True)\n\n @cached_property\n def _sa_table(self):\n try:\n table = tables.reflect_table_from_oid(\n self.oid, self.schema._sa_engine,\n )\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # table names. 
Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a table.\n except TypeError:\n table = tables.create_empty_table(\"MISSING\")\n return table\n\n @cached_property\n def name(self):\n return self._sa_table.name\n\n @property\n def sa_columns(self):\n return self._sa_table.columns\n\n @property\n def sa_column_names(self):\n return self.sa_columns.keys()\n\n @property\n def sa_num_records(self):\n return tables.get_count(self._sa_table, self.schema._sa_engine)\n\n @property\n def sa_all_records(self):\n return records.get_records(self._sa_table, self.schema._sa_engine)\n\n def get_record(self, id_value):\n return records.get_record(self._sa_table, self.schema._sa_engine, id_value)\n\n def get_records(self, limit=None, offset=None):\n return records.get_records(self._sa_table, self.schema._sa_engine, limit, offset)\n\n def create_record_or_records(self, record_data):\n return records.create_record_or_records(self._sa_table, self.schema._sa_engine, record_data)\n\n def update_record(self, id_value, record_data):\n return records.update_record(self._sa_table, self.schema._sa_engine, id_value, record_data)\n\n def delete_record(self, id_value):\n return records.delete_record(self._sa_table, self.schema._sa_engine, id_value)\n\n\nclass DataFile(BaseModel):\n file = models.FileField(\n upload_to=model_utils.user_directory_path,\n )\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)\n table_imported_to = models.ForeignKey(Table, related_name=\"data_files\", blank=True,\n null=True, on_delete=models.SET_NULL)\n delimiter = models.CharField(max_length=1, default=',', blank=True)\n escapechar = models.CharField(max_length=1, blank=True)\n quotechar = models.CharField(max_length=1, default='\"', blank=True)\n", "path": "mathesar/models.py"}, {"content": "from django_property_filter import (\n PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter,\n)\n\nfrom mathesar.models import Schema, Table\n\n\nclass CharInFilter(PropertyBaseInFilter, PropertyCharFilter):\n pass\n\n\nclass SchemaFilter(PropertyFilterSet):\n database = CharInFilter(field_name='database', lookup_expr='in')\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n model = Schema\n fields = ['database', 'name']\n\n\nclass TableFilter(PropertyFilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n\n class Meta:\n model = Table\n fields = ['name']\n", "path": "mathesar/filters.py"}], "after_files": [{"content": "from django.contrib.auth.models import User\nfrom django.core.cache import cache\nfrom django.db import models\nfrom django.utils.functional import cached_property\n\nfrom mathesar.database.base import create_mathesar_engine\nfrom mathesar.utils import models as model_utils\nfrom db import tables, records, schemas\n\nNAME_CACHE_INTERVAL = 60 * 5\n\n\nclass BaseModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass DatabaseObject(BaseModel):\n oid = models.IntegerField()\n\n class Meta:\n abstract = True\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.oid}\"\n\n\n# TODO: Replace with a proper form of caching\n# See: https://github.com/centerofci/mathesar/issues/280\n_engine = None\n\n\nclass Schema(DatabaseObject):\n database = models.CharField(max_length=128)\n\n @property\n def _sa_engine(self):\n global _engine\n # We're caching this since the 
engine is used frequently.\n if _engine is None:\n _engine = create_mathesar_engine(self.database)\n return _engine\n\n @cached_property\n def name(self):\n cache_key = f\"{self.database}_schema_name_{self.oid}\"\n try:\n schema_name = cache.get(cache_key)\n if schema_name is None:\n schema_name = schemas.get_schema_name_from_oid(\n self.oid, self._sa_engine\n )\n cache.set(cache_key, schema_name, NAME_CACHE_INTERVAL)\n return schema_name\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # schema names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a schema.\n except TypeError:\n return 'MISSING'\n\n\nclass Table(DatabaseObject):\n schema = models.ForeignKey('Schema', on_delete=models.CASCADE,\n related_name='tables')\n import_verified = models.BooleanField(blank=True, null=True)\n\n @cached_property\n def _sa_table(self):\n try:\n table = tables.reflect_table_from_oid(\n self.oid, self.schema._sa_engine,\n )\n # We catch this error, since it lets us decouple the cadence of\n # overall DB reflection from the cadence of cache expiration for\n # table names. Also, it makes it obvious when the DB layer has\n # been altered, as opposed to other reasons for a 404 when\n # requesting a table.\n except TypeError:\n table = tables.create_empty_table(\"MISSING\")\n return table\n\n @cached_property\n def name(self):\n return self._sa_table.name\n\n @property\n def sa_columns(self):\n return self._sa_table.columns\n\n @property\n def sa_column_names(self):\n return self.sa_columns.keys()\n\n @property\n def sa_num_records(self):\n return tables.get_count(self._sa_table, self.schema._sa_engine)\n\n @property\n def sa_all_records(self):\n return records.get_records(self._sa_table, self.schema._sa_engine)\n\n def get_record(self, id_value):\n return records.get_record(self._sa_table, self.schema._sa_engine, id_value)\n\n def get_records(self, limit=None, offset=None):\n return records.get_records(self._sa_table, self.schema._sa_engine, limit, offset)\n\n def create_record_or_records(self, record_data):\n return records.create_record_or_records(self._sa_table, self.schema._sa_engine, record_data)\n\n def update_record(self, id_value, record_data):\n return records.update_record(self._sa_table, self.schema._sa_engine, id_value, record_data)\n\n def delete_record(self, id_value):\n return records.delete_record(self._sa_table, self.schema._sa_engine, id_value)\n\n\nclass DataFile(BaseModel):\n file = models.FileField(\n upload_to=model_utils.user_directory_path,\n )\n user = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE)\n table_imported_to = models.ForeignKey(Table, related_name=\"data_files\", blank=True,\n null=True, on_delete=models.SET_NULL)\n delimiter = models.CharField(max_length=1, default=',', blank=True)\n escapechar = models.CharField(max_length=1, blank=True)\n quotechar = models.CharField(max_length=1, default='\"', blank=True)\n", "path": "mathesar/models.py"}, {"content": "from django_property_filter import (\n PropertyFilterSet, PropertyBaseInFilter, PropertyCharFilter,\n PropertyDateTimeFromToRangeFilter, PropertyBooleanFilter\n)\n\nfrom mathesar.models import Schema, Table\n\n\nclass CharInFilter(PropertyBaseInFilter, PropertyCharFilter):\n pass\n\n\nclass SchemaFilter(PropertyFilterSet):\n database = CharInFilter(field_name='database', lookup_expr='in')\n name = CharInFilter(field_name='name', 
lookup_expr='in')\n\n class Meta:\n model = Schema\n fields = ['database', 'name']\n\n\nclass TableFilter(PropertyFilterSet):\n name = CharInFilter(field_name='name', lookup_expr='in')\n schema = CharInFilter(field_name='schema__name', lookup_expr='in')\n created = PropertyDateTimeFromToRangeFilter(field_name='created_at')\n updated = PropertyDateTimeFromToRangeFilter(field_name='updated_at')\n import_verified = PropertyBooleanFilter(field_name='import_verified')\n not_imported = PropertyBooleanFilter(lookup_expr=\"isnull\",\n field_name='import_verified')\n\n class Meta:\n model = Table\n fields = ['name', 'schema', 'created_at', 'updated_at', 'import_verified']\n", "path": "mathesar/filters.py"}]}
| 1,838 | 448 |
gh_patches_debug_19797 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4220 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ddtrace.opentracer` incorrectly raises `SpanContextCorruptedException` on `extract` of missing span context
The documentation for `SpanContextCorruptedException` [says](https://opentracing-python.readthedocs.io/en/1.3.0/api.html#opentracing.SpanContextCorruptedException):
> SpanContextCorruptedException should be used when the underlying span context state is seemingly present but not well-formed.
`ddtrace.opentracer`'s `extract` raises an error whenever it fails to recover a span context, whether the context was malformed or simply missing. This completely breaks the normal pattern of "I received an HTTP request, so I'll throw the headers at `extract` and pass the result to `child_of` for my new span, expecting to get `None` and therefore make a new root span if I was called without tracing info".
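To make that pattern concrete, a small illustrative sketch (any OpenTracing-compatible `tracer`; this is not code from the issue):

```python
# Illustrative sketch: extract() returning None should simply start a new root span.
from opentracing import Format


def handle_request(tracer, headers):
    parent_ctx = tracer.extract(Format.HTTP_HEADERS, headers)  # expected: None when no context was sent
    with tracer.start_active_span("handle_request", child_of=parent_ctx) as scope:
        scope.span.set_tag("span.kind", "server")
        # ... handle the request ...
```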
### Which version of dd-trace-py are you using?
Python 3.7
ddtrace 0.46.0
### How can we reproduce your problem?
```py
In [1]: from opentracing import Format
In [2]: from ddtrace.opentracer import Tracer
In [3]: tracer = Tracer()
In [4]: tracer.extract(Format.HTTP_HEADERS, {})
---------------------------------------------------------------------------
SpanContextCorruptedException Traceback (most recent call last)
<ipython-input-4-f497fe0c23a2> in <module>
----> 1 tracer.extract(Format.HTTP_HEADERS, {})
~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/tracer.py in extract(self, format, carrier)
326 # we have to manually activate the returned context from a distributed
327 # trace
--> 328 ot_span_ctx = propagator.extract(carrier)
329 dd_span_ctx = ot_span_ctx._dd_context
330 self._dd_tracer.context_provider.activate(dd_span_ctx)
~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/propagation/http.py in extract(self, carrier)
70 # if this occurs.
71 if not ddspan_ctx.trace_id:
---> 72 raise SpanContextCorruptedException("failed to extract span context")
73
74 baggage = {}
SpanContextCorruptedException: failed to extract span context
```
### What is the result that you expected?
I expect to get a clean `None` with no error if no DataDog span context material was present. See Jaeger:
```py
In [1]: from opentracing import Format
In [2]: import jaeger_client
In [3]: tracer = jaeger_client.Config({"service_name": "foo"}).initialize_tracer()
In [4]: tracer.extract(Format.HTTP_HEADERS, {})
In [5]: print(tracer.extract(Format.HTTP_HEADERS, {}))
None
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/opentracer/propagation/http.py`
Content:
```
1 from typing import Dict
2
3 from opentracing import InvalidCarrierException
4 from opentracing import SpanContextCorruptedException
5
6 from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator
7
8 from ...internal.logger import get_logger
9 from ..span_context import SpanContext
10 from .propagator import Propagator
11
12
13 log = get_logger(__name__)
14
15 HTTP_BAGGAGE_PREFIX = "ot-baggage-"
16 HTTP_BAGGAGE_PREFIX_LEN = len(HTTP_BAGGAGE_PREFIX)
17
18
19 class HTTPPropagator(Propagator):
20 """OpenTracing compatible HTTP_HEADER and TEXT_MAP format propagator.
21
22 `HTTPPropagator` provides compatibility by using existing OpenTracing
23 compatible methods from the ddtracer along with new logic supporting the
24 outstanding OpenTracing-defined functionality.
25 """
26
27 @staticmethod
28 def inject(span_context, carrier):
29 # type: (SpanContext, Dict[str, str]) -> None
30 """Inject a span context into a carrier.
31
32 *span_context* is injected into the carrier by first using an
33 :class:`ddtrace.propagation.http.HTTPPropagator` to inject the ddtracer
34 specific fields.
35
36 Then the baggage is injected into *carrier*.
37
38 :param span_context: span context to inject.
39
40 :param carrier: carrier to inject into.
41 """
42 if not isinstance(carrier, dict):
43 raise InvalidCarrierException("propagator expects carrier to be a dict")
44
45 DDHTTPPropagator.inject(span_context._dd_context, carrier)
46
47 # Add the baggage
48 if span_context.baggage is not None:
49 for key in span_context.baggage:
50 carrier[HTTP_BAGGAGE_PREFIX + key] = span_context.baggage[key]
51
52 @staticmethod
53 def extract(carrier):
54 # type: (Dict[str, str]) -> SpanContext
55 """Extract a span context from a carrier.
56
57 :class:`ddtrace.propagation.http.HTTPPropagator` is used to extract
58 ddtracer supported fields into a `ddtrace.Context` context which is
59 combined with new logic to extract the baggage which is returned in an
60 OpenTracing compatible span context.
61
62 :param carrier: carrier to extract from.
63
64 :return: extracted span context.
65 """
66 if not isinstance(carrier, dict):
67 raise InvalidCarrierException("propagator expects carrier to be a dict")
68
69 ddspan_ctx = DDHTTPPropagator.extract(carrier)
70
71 # if the dd propagator fails then it will return a new empty span
72 # context (with trace_id=None), we however want to raise an exception
73 # if this occurs.
74 if not ddspan_ctx.trace_id:
75 raise SpanContextCorruptedException("failed to extract span context")
76
77 baggage = {}
78 for key in carrier:
79 if key.startswith(HTTP_BAGGAGE_PREFIX):
80 baggage[key[HTTP_BAGGAGE_PREFIX_LEN:]] = carrier[key]
81
82 return SpanContext(ddcontext=ddspan_ctx, baggage=baggage)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/opentracer/propagation/http.py b/ddtrace/opentracer/propagation/http.py
--- a/ddtrace/opentracer/propagation/http.py
+++ b/ddtrace/opentracer/propagation/http.py
@@ -1,7 +1,6 @@
from typing import Dict
from opentracing import InvalidCarrierException
-from opentracing import SpanContextCorruptedException
from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator
@@ -67,13 +66,6 @@
raise InvalidCarrierException("propagator expects carrier to be a dict")
ddspan_ctx = DDHTTPPropagator.extract(carrier)
-
- # if the dd propagator fails then it will return a new empty span
- # context (with trace_id=None), we however want to raise an exception
- # if this occurs.
- if not ddspan_ctx.trace_id:
- raise SpanContextCorruptedException("failed to extract span context")
-
baggage = {}
for key in carrier:
if key.startswith(HTTP_BAGGAGE_PREFIX):
|
{"golden_diff": "diff --git a/ddtrace/opentracer/propagation/http.py b/ddtrace/opentracer/propagation/http.py\n--- a/ddtrace/opentracer/propagation/http.py\n+++ b/ddtrace/opentracer/propagation/http.py\n@@ -1,7 +1,6 @@\n from typing import Dict\n \n from opentracing import InvalidCarrierException\n-from opentracing import SpanContextCorruptedException\n \n from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator\n \n@@ -67,13 +66,6 @@\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n \n ddspan_ctx = DDHTTPPropagator.extract(carrier)\n-\n- # if the dd propagator fails then it will return a new empty span\n- # context (with trace_id=None), we however want to raise an exception\n- # if this occurs.\n- if not ddspan_ctx.trace_id:\n- raise SpanContextCorruptedException(\"failed to extract span context\")\n-\n baggage = {}\n for key in carrier:\n if key.startswith(HTTP_BAGGAGE_PREFIX):\n", "issue": "`ddtrace.opentracer` incorrectly raises `SpanContextCorruptedException` on `extract` of missing span context\nThe documentation for `SpanContextCorruptedException` [says](https://opentracing-python.readthedocs.io/en/1.3.0/api.html#opentracing.SpanContextCorruptedException):\r\n\r\n> SpanContextCorruptedException should be used when the underlying span context state is seemingly present but not well-formed.\r\n\r\n`ddtrace.opentracer`'s `extract` is throwing an error whenever it fails to recover a span, whether or not it was malformed or simply missing. This completely breaks the normal pattern of \"I received an HTTP request, so I'll throw the headers at `extract` and pass the result to `child_of` for my new span, expecting to get `None` and therefore make a new root span if I was called without tracing info\".\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\nPython 3.7\r\nddtrace 0.46.0\r\n\r\n### How can we reproduce your problem?\r\n\r\n```py\r\nIn [1]: from opentracing import Format\r\n\r\nIn [2]: from ddtrace.opentracer import Tracer\r\n\r\nIn [3]: tracer = Tracer()\r\n\r\nIn [4]: tracer.extract(Format.HTTP_HEADERS, {})\r\n---------------------------------------------------------------------------\r\nSpanContextCorruptedException Traceback (most recent call last)\r\n<ipython-input-4-f497fe0c23a2> in <module>\r\n----> 1 tracer.extract(Format.HTTP_HEADERS, {})\r\n\r\n~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/tracer.py in extract(self, format, carrier)\r\n 326 # we have to manually activate the returned context from a distributed\r\n 327 # trace\r\n--> 328 ot_span_ctx = propagator.extract(carrier)\r\n 329 dd_span_ctx = ot_span_ctx._dd_context\r\n 330 self._dd_tracer.context_provider.activate(dd_span_ctx)\r\n\r\n~/projects/granular/analysis/analysis-api/.venv/lib/python3.7/site-packages/ddtrace/opentracer/propagation/http.py in extract(self, carrier)\r\n 70 # if this occurs.\r\n 71 if not ddspan_ctx.trace_id:\r\n---> 72 raise SpanContextCorruptedException(\"failed to extract span context\")\r\n 73 \r\n 74 baggage = {}\r\n\r\nSpanContextCorruptedException: failed to extract span context\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nI expect to get a clean `None` with no error if no DataDog span context material was present. 
See Jaeger:\r\n\r\n```py\r\nIn [1]: from opentracing import Format\r\n\r\nIn [2]: import jaeger_client\r\n\r\nIn [3]: tracer = jaeger_client.Config({\"service_name\": \"foo\"}).initialize_tracer()\r\n\r\nIn [4]: tracer.extract(Format.HTTP_HEADERS, {})\r\n\r\nIn [5]: print(tracer.extract(Format.HTTP_HEADERS, {}))\r\nNone\r\n```\r\n\n", "before_files": [{"content": "from typing import Dict\n\nfrom opentracing import InvalidCarrierException\nfrom opentracing import SpanContextCorruptedException\n\nfrom ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator\n\nfrom ...internal.logger import get_logger\nfrom ..span_context import SpanContext\nfrom .propagator import Propagator\n\n\nlog = get_logger(__name__)\n\nHTTP_BAGGAGE_PREFIX = \"ot-baggage-\"\nHTTP_BAGGAGE_PREFIX_LEN = len(HTTP_BAGGAGE_PREFIX)\n\n\nclass HTTPPropagator(Propagator):\n \"\"\"OpenTracing compatible HTTP_HEADER and TEXT_MAP format propagator.\n\n `HTTPPropagator` provides compatibility by using existing OpenTracing\n compatible methods from the ddtracer along with new logic supporting the\n outstanding OpenTracing-defined functionality.\n \"\"\"\n\n @staticmethod\n def inject(span_context, carrier):\n # type: (SpanContext, Dict[str, str]) -> None\n \"\"\"Inject a span context into a carrier.\n\n *span_context* is injected into the carrier by first using an\n :class:`ddtrace.propagation.http.HTTPPropagator` to inject the ddtracer\n specific fields.\n\n Then the baggage is injected into *carrier*.\n\n :param span_context: span context to inject.\n\n :param carrier: carrier to inject into.\n \"\"\"\n if not isinstance(carrier, dict):\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n\n DDHTTPPropagator.inject(span_context._dd_context, carrier)\n\n # Add the baggage\n if span_context.baggage is not None:\n for key in span_context.baggage:\n carrier[HTTP_BAGGAGE_PREFIX + key] = span_context.baggage[key]\n\n @staticmethod\n def extract(carrier):\n # type: (Dict[str, str]) -> SpanContext\n \"\"\"Extract a span context from a carrier.\n\n :class:`ddtrace.propagation.http.HTTPPropagator` is used to extract\n ddtracer supported fields into a `ddtrace.Context` context which is\n combined with new logic to extract the baggage which is returned in an\n OpenTracing compatible span context.\n\n :param carrier: carrier to extract from.\n\n :return: extracted span context.\n \"\"\"\n if not isinstance(carrier, dict):\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n\n ddspan_ctx = DDHTTPPropagator.extract(carrier)\n\n # if the dd propagator fails then it will return a new empty span\n # context (with trace_id=None), we however want to raise an exception\n # if this occurs.\n if not ddspan_ctx.trace_id:\n raise SpanContextCorruptedException(\"failed to extract span context\")\n\n baggage = {}\n for key in carrier:\n if key.startswith(HTTP_BAGGAGE_PREFIX):\n baggage[key[HTTP_BAGGAGE_PREFIX_LEN:]] = carrier[key]\n\n return SpanContext(ddcontext=ddspan_ctx, baggage=baggage)\n", "path": "ddtrace/opentracer/propagation/http.py"}], "after_files": [{"content": "from typing import Dict\n\nfrom opentracing import InvalidCarrierException\n\nfrom ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator\n\nfrom ...internal.logger import get_logger\nfrom ..span_context import SpanContext\nfrom .propagator import Propagator\n\n\nlog = get_logger(__name__)\n\nHTTP_BAGGAGE_PREFIX = \"ot-baggage-\"\nHTTP_BAGGAGE_PREFIX_LEN = len(HTTP_BAGGAGE_PREFIX)\n\n\nclass 
HTTPPropagator(Propagator):\n \"\"\"OpenTracing compatible HTTP_HEADER and TEXT_MAP format propagator.\n\n `HTTPPropagator` provides compatibility by using existing OpenTracing\n compatible methods from the ddtracer along with new logic supporting the\n outstanding OpenTracing-defined functionality.\n \"\"\"\n\n @staticmethod\n def inject(span_context, carrier):\n # type: (SpanContext, Dict[str, str]) -> None\n \"\"\"Inject a span context into a carrier.\n\n *span_context* is injected into the carrier by first using an\n :class:`ddtrace.propagation.http.HTTPPropagator` to inject the ddtracer\n specific fields.\n\n Then the baggage is injected into *carrier*.\n\n :param span_context: span context to inject.\n\n :param carrier: carrier to inject into.\n \"\"\"\n if not isinstance(carrier, dict):\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n\n DDHTTPPropagator.inject(span_context._dd_context, carrier)\n\n # Add the baggage\n if span_context.baggage is not None:\n for key in span_context.baggage:\n carrier[HTTP_BAGGAGE_PREFIX + key] = span_context.baggage[key]\n\n @staticmethod\n def extract(carrier):\n # type: (Dict[str, str]) -> SpanContext\n \"\"\"Extract a span context from a carrier.\n\n :class:`ddtrace.propagation.http.HTTPPropagator` is used to extract\n ddtracer supported fields into a `ddtrace.Context` context which is\n combined with new logic to extract the baggage which is returned in an\n OpenTracing compatible span context.\n\n :param carrier: carrier to extract from.\n\n :return: extracted span context.\n \"\"\"\n if not isinstance(carrier, dict):\n raise InvalidCarrierException(\"propagator expects carrier to be a dict\")\n\n ddspan_ctx = DDHTTPPropagator.extract(carrier)\n baggage = {}\n for key in carrier:\n if key.startswith(HTTP_BAGGAGE_PREFIX):\n baggage[key[HTTP_BAGGAGE_PREFIX_LEN:]] = carrier[key]\n\n return SpanContext(ddcontext=ddspan_ctx, baggage=baggage)\n", "path": "ddtrace/opentracer/propagation/http.py"}]}
| 1,742 | 240 |
gh_patches_debug_15487
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-3511
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Building MkDocs' documentation
When I began working on #3493 I needed to be able to run the dev server with MkDocs' own documentation, which was more difficult than it should have been.
First, let me say that I always work from a venv, and created a new one to start my work. In the past, one could simply do `pip install -r requirements/docs.txt` and then `mkdocs serve` and it worked. But I had to jump through a lot of hoops and learn new tools to get things working this time.
To be clear, I am not suggesting that the old tools should be brought back. Nor am I suggesting that my preferred tools be used. However, I could find no documentation about how to proceed. Eventually, I did find documentation that hatch is used for tests, and looking at `pyproject.toml` I could see a config for a hatch `docs` env. But not having ever used that tool before, it took me multiple tries (and searches) to work out how to even use that env. But, I'm getting ahead of myself...
After realizing that there were no requirements.txt files, I next looked for optional dependencies defined in `pyproject.toml`. I was hoping to maybe do `pip install .[docs]` (`.` rather than `markdown` because I was working from the working tree of the git repo). When I determined that that wasn't an option, I began looking into the hatch options. Finally in some random question in some forum I found an explanation of how to run the `shell` subcommand with an alternate env: `hatch -e docs shell`.
And then I could finally run `mkdocs serve`. Except that I got an error about missing `po` files, which is weird because I am using English, which should work without any translations being defined. Finally, after generating `po` and `mo` files, I could run the dev server and begin my work.
All of this led me to believe that the current maintainers are not ever running the dev server with MkDocs documentation. And I also could not find any automations for deploying the documentation, so I couldn't even use that as a reference.
Again, I am not being critical of the tool choices. I see that a switch was made from tox to hatch. That's fine if that is the tool that the maintainers want to use. But for an occasional contributor, I would prefer to not need to learn these tools. I would prefer to be able to use standard Python tools that work with any project. Or if I do need to use your tool of choice, then I would expect the specific commands I would need to use to all be documented clearly.
I could submit a PR which updated the documentation, but I'm not sure what the recommended best practices are here. I am simply bringing this to the attention of the maintainers with the hopes that more consideration will be given to this in the future.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/hooks.py`
Content:
```
1 import re
2 from pathlib import Path
3
4 from mkdocs.config.defaults import MkDocsConfig
5 from mkdocs.structure.nav import Page
6
7
8 def _get_language_of_translation_file(path: Path) -> str:
9 with path.open(encoding='utf-8') as f:
10 translation_line = f.readline()
11 m = re.search('^# (.+) translations ', translation_line)
12 assert m
13 return m[1]
14
15
16 def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):
17 if page.file.src_uri == 'user-guide/choosing-your-theme.md':
18 here = Path(config.config_file_path).parent
19
20 def replacement(m: re.Match) -> str:
21 lines = []
22 for d in sorted(here.glob(m[2])):
23 lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))
24 lines.append(f'{m[1]}`{d.name}`: {lang}')
25 return '\n'.join(lines)
26
27 return re.sub(
28 r'^( *\* )\(see the list of existing directories `(.+)`\)$',
29 replacement,
30 markdown,
31 flags=re.MULTILINE,
32 )
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/hooks.py b/docs/hooks.py
--- a/docs/hooks.py
+++ b/docs/hooks.py
@@ -1,8 +1,12 @@
+from __future__ import annotations
+
import re
from pathlib import Path
+from typing import TYPE_CHECKING
-from mkdocs.config.defaults import MkDocsConfig
-from mkdocs.structure.nav import Page
+if TYPE_CHECKING:
+ from mkdocs.config.defaults import MkDocsConfig
+ from mkdocs.structure.nav import Page
def _get_language_of_translation_file(path: Path) -> str:
@@ -13,7 +17,7 @@
return m[1]
-def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):
+def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:
if page.file.src_uri == 'user-guide/choosing-your-theme.md':
here = Path(config.config_file_path).parent
|
{"golden_diff": "diff --git a/docs/hooks.py b/docs/hooks.py\n--- a/docs/hooks.py\n+++ b/docs/hooks.py\n@@ -1,8 +1,12 @@\n+from __future__ import annotations\n+\n import re\n from pathlib import Path\n+from typing import TYPE_CHECKING\n \n-from mkdocs.config.defaults import MkDocsConfig\n-from mkdocs.structure.nav import Page\n+if TYPE_CHECKING:\n+ from mkdocs.config.defaults import MkDocsConfig\n+ from mkdocs.structure.nav import Page\n \n \n def _get_language_of_translation_file(path: Path) -> str:\n@@ -13,7 +17,7 @@\n return m[1]\n \n \n-def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):\n+def on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n", "issue": "Building MkDocs' documentation\nWhen I began working on #3493 I needed to be able to run the dev server with MkDocs' own documentation, which was more difficult than it should have been.\r\n\r\nFirst, let me say that I always work from a venv, and created a new one to start my work. In the past, one could simply do `pip install -r requirements/docs.txt` and then `mkdocs serve` and it worked. But I had to jump through a lot of hoops and learn new tools to get things working this time.\r\n\r\nTo be clear, I am not suggesting that the old tools should be brought back. Nor am I suggesting that my preferred tools be used. However, I could find no documentation about how to proceed. Eventually, I did find documentation that hatch is used for tests, and looking at `pyproject.toml` I could see a config for a hatch `docs` env. But not having ever used that tool before, it took be multiple tries (and searches) to work out how to even use that env. But, I'm getting ahead of myself...\r\n\r\nAfter realizing that there were no requirements.txt files, I next looked for optional dependencies defined in `pyproject.toml`. I was hoping to maybe do `pip install .[docs]` (`.` rather than `markdown` because I was working from the working tree of the git repo). When I determined that that wasn't an option, I began looking into the hatch options. Finally in some random question in some forum I found an explanation of how to run the `shell` subcommand with an alternate env: `hatch -e docs shell`.\r\n\r\nAnd then I could finally run `mkdocs serve`. Except that I got an error about missing `po` files, which is weird because I am using English, which should work without any translations being defined. Finally, after generating `po` and `mo` files, I could run the dev server and begin my work.\r\n\r\nAll of this led me to believe that the current maintainers are not ever running the dev server with MkDocs documentation. And I also could not find any automations for deploying the documentation, so I couldn't even use that as a reference.\r\n\r\nAgain, I am not being critical of the tool choices. I see that a switch was made from tox to hatch. That's fine if that is the tool that the maintainers what to use. But for an occasional contributor, I would prefer to not need to learn these tools. I would prefer to be able to use standard Python tools that work with any project. Or if I do need to use your tool of choice, then I would expect the specific commands I would need to use to all be documented clearly.\r\n\r\nI could submit a PR which updated the documentation, but I'm not sure what the recommended best practices are here. 
I am simply bringing this to the attention of the maintainers with the hopes that more consideration will be given to this in the future.\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom mkdocs.config.defaults import MkDocsConfig\nfrom mkdocs.structure.nav import Page\n\n\ndef _get_language_of_translation_file(path: Path) -> str:\n with path.open(encoding='utf-8') as f:\n translation_line = f.readline()\n m = re.search('^# (.+) translations ', translation_line)\n assert m\n return m[1]\n\n\ndef on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs):\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n\n def replacement(m: re.Match) -> str:\n lines = []\n for d in sorted(here.glob(m[2])):\n lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))\n lines.append(f'{m[1]}`{d.name}`: {lang}')\n return '\\n'.join(lines)\n\n return re.sub(\n r'^( *\\* )\\(see the list of existing directories `(.+)`\\)$',\n replacement,\n markdown,\n flags=re.MULTILINE,\n )\n", "path": "docs/hooks.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from mkdocs.config.defaults import MkDocsConfig\n from mkdocs.structure.nav import Page\n\n\ndef _get_language_of_translation_file(path: Path) -> str:\n with path.open(encoding='utf-8') as f:\n translation_line = f.readline()\n m = re.search('^# (.+) translations ', translation_line)\n assert m\n return m[1]\n\n\ndef on_page_markdown(markdown: str, page: Page, config: MkDocsConfig, **kwargs) -> str | None:\n if page.file.src_uri == 'user-guide/choosing-your-theme.md':\n here = Path(config.config_file_path).parent\n\n def replacement(m: re.Match) -> str:\n lines = []\n for d in sorted(here.glob(m[2])):\n lang = _get_language_of_translation_file(Path(d, 'LC_MESSAGES', 'messages.po'))\n lines.append(f'{m[1]}`{d.name}`: {lang}')\n return '\\n'.join(lines)\n\n return re.sub(\n r'^( *\\* )\\(see the list of existing directories `(.+)`\\)$',\n replacement,\n markdown,\n flags=re.MULTILINE,\n )\n", "path": "docs/hooks.py"}]}
| 1,179 | 220 |
gh_patches_debug_300
|
rasdani/github-patches
|
git_diff
|
mlcommons__GaNDLF-477
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add histology exception tests
**Is your feature request related to a problem? Please describe.**
Currently, the histology inference pipeline contains a lot of exceptions, but they aren't being tested.
**Describe the solution you'd like**
See title.
**Describe alternatives you've considered**
N.A.
**Additional context**
N.A.
--- END ISSUE ---
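Since the request is specifically about covering exception paths, a minimal pytest sketch of what such a test can look like is included here for orientation. The `load_slide` helper, its file-extension check, and the error message are all invented for illustration and do not come from the GaNDLF histology pipeline.

```python
# Hypothetical sketch of an exception test with pytest; names are illustrative.
import pytest


def load_slide(path: str) -> str:
    # Stand-in for a histology helper that validates its input before inference.
    if not path.endswith((".svs", ".tiff")):
        raise ValueError(f"unsupported slide format: {path}")
    return path


def test_load_slide_rejects_unknown_format():
    # The exception branch is exercised explicitly instead of being left untested.
    with pytest.raises(ValueError, match="unsupported slide format"):
        load_slide("slide.txt")
```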
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 """The setup script."""
4
5
6 import os
7 from setuptools import setup, find_packages
8 from setuptools.command.install import install
9 from setuptools.command.develop import develop
10 from setuptools.command.egg_info import egg_info
11
12 with open("README.md") as readme_file:
13 readme = readme_file.read()
14
15
16 def git_submodule_update():
17 ## submodule update
18 os.system("git submodule update --init --recursive")
19
20
21 class CustomInstallCommand(install):
22 def run(self):
23 install.run(self)
24 git_submodule_update()
25
26
27 class CustomDevelopCommand(develop):
28 def run(self):
29 develop.run(self)
30 git_submodule_update()
31
32
33 class CustomEggInfoCommand(egg_info):
34 def run(self):
35 egg_info.run(self)
36 git_submodule_update()
37
38
39 # read version.py
40 import sys, re
41
42 try:
43 filepath = "GANDLF/version.py"
44 version_file = open(filepath)
45 (__version__,) = re.findall('__version__ = "(.*)"', version_file.read())
46
47 except Exception as error:
48 __version__ = "0.0.1"
49 sys.stderr.write("Warning: Could not open '%s' due %s\n" % (filepath, error))
50
51 requirements = [
52 "black",
53 "numpy==1.22.0",
54 "scipy",
55 "SimpleITK!=2.0.*",
56 "torchvision",
57 "tqdm",
58 "torchio==0.18.57",
59 "pandas",
60 "pylint",
61 "scikit-learn>=0.23.2",
62 "scikit-image>=0.19.1",
63 'pickle5>=0.0.11; python_version < "3.8.0"',
64 "setuptools",
65 "seaborn",
66 "pyyaml",
67 "tiffslide",
68 "matplotlib",
69 "requests>=2.25.0",
70 "pyvips",
71 "pytest",
72 "coverage",
73 "pytest-cov",
74 "psutil",
75 "medcam",
76 "opencv-python",
77 "torchmetrics==0.5.1", # newer versions have changed api for f1 invocation
78 "OpenPatchMiner==0.1.8",
79 "zarr==2.10.3",
80 "pydicom",
81 "onnx",
82 "torchinfo==1.7.0",
83 ]
84
85 # pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389
86 if sys.platform == "darwin":
87 requirements.append("torch==1.9.0")
88 else:
89 requirements.append("torch==1.8.2")
90
91 setup(
92 name="GANDLF",
93 version=__version__,
94 author="Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun Güley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos", # alphabetical order
95 author_email="[email protected]",
96 python_requires=">=3.7",
97 packages=find_packages(),
98 cmdclass={ # this ensures git_submodule_update is called during install
99 "install": CustomInstallCommand,
100 "develop": CustomDevelopCommand,
101 "egg_info": CustomEggInfoCommand,
102 },
103 scripts=[
104 "gandlf_run",
105 "gandlf_constructCSV",
106 "gandlf_collectStats",
107 "gandlf_patchMiner",
108 "gandlf_preprocess",
109 "gandlf_anonymizer",
110 "gandlf_verifyInstall",
111 ],
112 classifiers=[
113 "Development Status :: 3 - Alpha",
114 "Intended Audience :: Science/Research",
115 "License :: OSI Approved :: BSD License",
116 "Natural Language :: English",
117 "Operating System :: OS Independent",
118 "Programming Language :: Python :: 3.7",
119 "Programming Language :: Python :: 3.8",
120 "Programming Language :: Python :: 3.9",
121 "Topic :: Scientific/Engineering :: Medical Science Apps",
122 ],
123 description=(
124 "PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging."
125 ),
126 install_requires=requirements,
127 license="BSD-3-Clause License",
128 long_description=readme,
129 long_description_content_type="text/markdown",
130 include_package_data=True,
131 keywords="semantic, segmentation, regression, classification, data-augmentation, medical-imaging",
132 zip_safe=False,
133 )
134
135 ## windows vips installation
136 if os.name == "nt": # proceed for windows
137 from pathlib import Path
138
139 # download and extract if main dll is absent
140 if not Path("./vips/vips-dev-8.10/bin/libvips-42.dll").exists():
141 print("Downloading and extracting VIPS for Windows")
142 url = "https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip"
143 zip_to_extract = "./vips.zip"
144 import urllib.request, zipfile
145
146 urllib.request.urlretrieve(url, zip_to_extract)
147 z = zipfile.ZipFile(zip_to_extract)
148 z.extractall("./vips")
149 z.close()
150 os.remove(zip_to_extract)
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -67,7 +67,7 @@
"tiffslide",
"matplotlib",
"requests>=2.25.0",
- "pyvips",
+ "pyvips==2.2.1",
"pytest",
"coverage",
"pytest-cov",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -67,7 +67,7 @@\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n- \"pyvips\",\n+ \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n", "issue": "Add histology exception tests\n**Is your feature request related to a problem? Please describe.**\r\nCurrently, the histology inference pipeline contains a lot of exceptions, but they aren't being tested.\r\n\r\n**Describe the solution you'd like**\r\nSee title.\r\n\r\n**Describe alternatives you've considered**\r\nN.A.\r\n\r\n**Additional context**\r\nN.A.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: 
Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\n\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\nfrom setuptools.command.develop import develop\nfrom setuptools.command.egg_info import egg_info\n\nwith open(\"README.md\") as readme_file:\n readme = readme_file.read()\n\n\ndef git_submodule_update():\n ## submodule update\n os.system(\"git submodule update --init --recursive\")\n\n\nclass CustomInstallCommand(install):\n def run(self):\n install.run(self)\n git_submodule_update()\n\n\nclass CustomDevelopCommand(develop):\n def run(self):\n develop.run(self)\n git_submodule_update()\n\n\nclass CustomEggInfoCommand(egg_info):\n def run(self):\n egg_info.run(self)\n git_submodule_update()\n\n\n# read version.py\nimport sys, re\n\ntry:\n filepath = \"GANDLF/version.py\"\n version_file = open(filepath)\n (__version__,) = re.findall('__version__ = \"(.*)\"', version_file.read())\n\nexcept Exception as error:\n __version__ = \"0.0.1\"\n sys.stderr.write(\"Warning: Could not open '%s' due %s\\n\" % (filepath, error))\n\nrequirements = [\n \"black\",\n \"numpy==1.22.0\",\n \"scipy\",\n \"SimpleITK!=2.0.*\",\n \"torchvision\",\n \"tqdm\",\n \"torchio==0.18.57\",\n \"pandas\",\n \"pylint\",\n \"scikit-learn>=0.23.2\",\n \"scikit-image>=0.19.1\",\n 'pickle5>=0.0.11; python_version < \"3.8.0\"',\n \"setuptools\",\n \"seaborn\",\n \"pyyaml\",\n \"tiffslide\",\n \"matplotlib\",\n \"requests>=2.25.0\",\n \"pyvips==2.2.1\",\n \"pytest\",\n \"coverage\",\n \"pytest-cov\",\n \"psutil\",\n \"medcam\",\n \"opencv-python\",\n \"torchmetrics==0.5.1\", # newer versions have changed api for f1 invocation\n \"OpenPatchMiner==0.1.8\",\n \"zarr==2.10.3\",\n \"pydicom\",\n \"onnx\",\n \"torchinfo==1.7.0\",\n]\n\n# pytorch doesn't have LTS support on OSX - https://github.com/CBICA/GaNDLF/issues/389\nif sys.platform == \"darwin\":\n requirements.append(\"torch==1.9.0\")\nelse:\n requirements.append(\"torch==1.8.2\")\n\nsetup(\n name=\"GANDLF\",\n version=__version__,\n author=\"Jose Agraz, Vinayak Ahluwalia, Bhakti Baheti, 
Spyridon Bakas, Ujjwal Baid, Megh Bhalerao, Brandon Edwards, Karol Gotkowski, Caleb Grenko, Orhun G\u00fcley, Ibrahim Ethem Hamamci, Sarthak Pati, Micah Sheller, Juliia Skobleva, Siddhesh Thakur, Spiros Thermos\", # alphabetical order\n author_email=\"[email protected]\",\n python_requires=\">=3.7\",\n packages=find_packages(),\n cmdclass={ # this ensures git_submodule_update is called during install\n \"install\": CustomInstallCommand,\n \"develop\": CustomDevelopCommand,\n \"egg_info\": CustomEggInfoCommand,\n },\n scripts=[\n \"gandlf_run\",\n \"gandlf_constructCSV\",\n \"gandlf_collectStats\",\n \"gandlf_patchMiner\",\n \"gandlf_preprocess\",\n \"gandlf_anonymizer\",\n \"gandlf_verifyInstall\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Scientific/Engineering :: Medical Science Apps\",\n ],\n description=(\n \"PyTorch-based framework that handles segmentation/regression/classification using various DL architectures for medical imaging.\"\n ),\n install_requires=requirements,\n license=\"BSD-3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n keywords=\"semantic, segmentation, regression, classification, data-augmentation, medical-imaging\",\n zip_safe=False,\n)\n\n## windows vips installation\nif os.name == \"nt\": # proceed for windows\n from pathlib import Path\n\n # download and extract if main dll is absent\n if not Path(\"./vips/vips-dev-8.10/bin/libvips-42.dll\").exists():\n print(\"Downloading and extracting VIPS for Windows\")\n url = \"https://github.com/libvips/libvips/releases/download/v8.10.2/vips-dev-w64-all-8.10.2.zip\"\n zip_to_extract = \"./vips.zip\"\n import urllib.request, zipfile\n\n urllib.request.urlretrieve(url, zip_to_extract)\n z = zipfile.ZipFile(zip_to_extract)\n z.extractall(\"./vips\")\n z.close()\n os.remove(zip_to_extract)\n", "path": "setup.py"}]}
| 1,869 | 87 |
gh_patches_debug_38652
|
rasdani/github-patches
|
git_diff
|
sagemath__sage-37422
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make sage.parallel.ncpus.ncpus() use os.cpu_count()
Currently, `sage.parallel.ncpus.ncpus()` uses platform-specific code to determine the number of available CPUs for some specific systems. This functionality is now available in the standard `os` module as `cpu_count()`.
Component: **misc**
Author: **Lorenz Panny**
Branch/Commit: **[public/change_ncpus_to_os_module](https://github.com/sagemath/sagetrac-mirror/tree/public/change_ncpus_to_os_module) @ [`a509210`](https://github.com/sagemath/sagetrac-mirror/commit/a509210125fc50baf72dcb7f2248e96cddf61c8f)**
_Issue created by migration from https://trac.sagemath.org/ticket/34328_
--- END ISSUE ---
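For context, a minimal sketch of the standard-library approach the ticket proposes is shown below. It mirrors the eventual patch (prefer `os.sched_getaffinity`, fall back to `os.cpu_count`), but it is a simplified illustration rather than the Sage implementation itself.

```python
# Minimal sketch: CPU detection with the standard library only.
import os


def available_cpus() -> int:
    # sched_getaffinity respects affinity masks (taskset, cgroups) on platforms
    # that provide it; cpu_count() may return None, hence the final fallback.
    if hasattr(os, "sched_getaffinity"):
        return len(os.sched_getaffinity(0)) or 1
    return os.cpu_count() or 1


if __name__ == "__main__":
    print(available_cpus())
```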
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sage/parallel/ncpus.py`
Content:
```
1 """
2 CPU Detection
3 """
4 # Parallel Python Software: http://www.parallelpython.com
5 # Copyright (c) 2005-2008, Vitalii Vanovschi
6 # All rights reserved.
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 # * Redistributions of source code must retain the above copyright notice,
10 # this list of conditions and the following disclaimer.
11 # * Redistributions in binary form must reproduce the above copyright
12 # notice, this list of conditions and the following disclaimer in the
13 # documentation and/or other materials provided with the distribution.
14 # * Neither the name of the author nor the names of its contributors
15 # may be used to endorse or promote products derived from this software
16 # without specific prior written permission.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 # THE POSSIBILITY OF SUCH DAMAGE.
29
30 ######
31 # This is from ParallelPython (the pp.py file).
32
33 import os
34 import subprocess
35
36
37 def ncpus():
38 """
39 Detects the number of effective CPUs in the system.
40
41 EXAMPLES::
42
43 sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.
44 2
45 """
46 # Support Sage environment variable SAGE_NUM_THREADS
47 # NOTE: while doctesting, this is forced to be 2 by the
48 # sage-runtests script
49 try:
50 n = os.environ["SAGE_NUM_THREADS"]
51 except KeyError:
52 pass
53 else:
54 return int(n)
55
56 # for Linux, Unix and MacOS
57 if hasattr(os, "sysconf"):
58 if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
59 # Linux and Unix
60 ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
61 if isinstance(ncpus, int) and ncpus > 0:
62 return ncpus
63 else:
64 # MacOS X
65 # deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read())
66 process = subprocess.Popen("sysctl -n hw.ncpu", shell=True,
67 stdin=subprocess.PIPE,
68 stdout=subprocess.PIPE,
69 stderr=subprocess.PIPE, close_fds=True)
70 return int(process.stdout.read())
71 # for Windows
72 if "NUMBER_OF_PROCESSORS" in os.environ:
73 ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
74 if ncpus > 0:
75 return ncpus
76 # return the default value
77 return 1
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sage/parallel/ncpus.py b/src/sage/parallel/ncpus.py
--- a/src/sage/parallel/ncpus.py
+++ b/src/sage/parallel/ncpus.py
@@ -1,46 +1,19 @@
"""
CPU Detection
"""
-# Parallel Python Software: http://www.parallelpython.com
-# Copyright (c) 2005-2008, Vitalii Vanovschi
-# All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-# * Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# * Neither the name of the author nor the names of its contributors
-# may be used to endorse or promote products derived from this software
-# without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
-# THE POSSIBILITY OF SUCH DAMAGE.
-
-######
-# This is from ParallelPython (the pp.py file).
import os
-import subprocess
def ncpus():
"""
- Detects the number of effective CPUs in the system.
+ Return the number of available CPUs in the system.
+
+ ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`
EXAMPLES::
- sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.
+ sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine
2
"""
# Support Sage environment variable SAGE_NUM_THREADS
@@ -53,25 +26,9 @@
else:
return int(n)
- # for Linux, Unix and MacOS
- if hasattr(os, "sysconf"):
- if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
- # Linux and Unix
- ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
- if isinstance(ncpus, int) and ncpus > 0:
- return ncpus
- else:
- # MacOS X
- # deprecated: return int(os.popen2("sysctl -n hw.ncpu")[1].read())
- process = subprocess.Popen("sysctl -n hw.ncpu", shell=True,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, close_fds=True)
- return int(process.stdout.read())
- # for Windows
- if "NUMBER_OF_PROCESSORS" in os.environ:
- ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
- if ncpus > 0:
- return ncpus
- # return the default value
- return 1
+ n = None
+
+ if hasattr(os, 'sched_getaffinity'):
+ n = len(os.sched_getaffinity(0))
+
+ return n or os.cpu_count() or 1
|
{"golden_diff": "diff --git a/src/sage/parallel/ncpus.py b/src/sage/parallel/ncpus.py\n--- a/src/sage/parallel/ncpus.py\n+++ b/src/sage/parallel/ncpus.py\n@@ -1,46 +1,19 @@\n \"\"\"\n CPU Detection\n \"\"\"\n-# Parallel Python Software: http://www.parallelpython.com\n-# Copyright (c) 2005-2008, Vitalii Vanovschi\n-# All rights reserved.\n-# Redistribution and use in source and binary forms, with or without\n-# modification, are permitted provided that the following conditions are met:\n-# * Redistributions of source code must retain the above copyright notice,\n-# this list of conditions and the following disclaimer.\n-# * Redistributions in binary form must reproduce the above copyright\n-# notice, this list of conditions and the following disclaimer in the\n-# documentation and/or other materials provided with the distribution.\n-# * Neither the name of the author nor the names of its contributors\n-# may be used to endorse or promote products derived from this software\n-# without specific prior written permission.\n-#\n-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n-# THE POSSIBILITY OF SUCH DAMAGE.\n-\n-######\n-# This is from ParallelPython (the pp.py file).\n \n import os\n-import subprocess\n \n \n def ncpus():\n \"\"\"\n- Detects the number of effective CPUs in the system.\n+ Return the number of available CPUs in the system.\n+\n+ ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`\n \n EXAMPLES::\n \n- sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.\n+ sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n@@ -53,25 +26,9 @@\n else:\n return int(n)\n \n- # for Linux, Unix and MacOS\n- if hasattr(os, \"sysconf\"):\n- if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n- # Linux and Unix\n- ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n- if isinstance(ncpus, int) and ncpus > 0:\n- return ncpus\n- else:\n- # MacOS X\n- # deprecated: return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n- process = subprocess.Popen(\"sysctl -n hw.ncpu\", shell=True,\n- stdin=subprocess.PIPE,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.PIPE, close_fds=True)\n- return int(process.stdout.read())\n- # for Windows\n- if \"NUMBER_OF_PROCESSORS\" in os.environ:\n- ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n- if ncpus > 0:\n- return ncpus\n- # return the default value\n- return 1\n+ n = None\n+\n+ if hasattr(os, 'sched_getaffinity'):\n+ n = len(os.sched_getaffinity(0))\n+\n+ return n or os.cpu_count() or 1\n", "issue": "make sage.parallel.ncpus.ncpus() use os.cpu_count()\n<div id=\"comment:0\"></div>\n\nCurrently, `sage.parallel.ncpus.ncpus()` uses platform-specific code to determine the number of available CPUs for some specific systems. 
This functionality is now available in the standard `os` module as `cpu_count()`.\n\nComponent: **misc**\n\nAuthor: **Lorenz Panny**\n\nBranch/Commit: **[public/change_ncpus_to_os_module](https://github.com/sagemath/sagetrac-mirror/tree/public/change_ncpus_to_os_module) @ [`a509210`](https://github.com/sagemath/sagetrac-mirror/commit/a509210125fc50baf72dcb7f2248e96cddf61c8f)**\n\n_Issue created by migration from https://trac.sagemath.org/ticket/34328_\n\n\n", "before_files": [{"content": "\"\"\"\nCPU Detection\n\"\"\"\n# Parallel Python Software: http://www.parallelpython.com\n# Copyright (c) 2005-2008, Vitalii Vanovschi\n# All rights reserved.\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author nor the names of its contributors\n# may be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\n# THE POSSIBILITY OF SUCH DAMAGE.\n\n######\n# This is from ParallelPython (the pp.py file).\n\nimport os\nimport subprocess\n\n\ndef ncpus():\n \"\"\"\n Detects the number of effective CPUs in the system.\n\n EXAMPLES::\n\n sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine.\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n # NOTE: while doctesting, this is forced to be 2 by the\n # sage-runtests script\n try:\n n = os.environ[\"SAGE_NUM_THREADS\"]\n except KeyError:\n pass\n else:\n return int(n)\n\n # for Linux, Unix and MacOS\n if hasattr(os, \"sysconf\"):\n if \"SC_NPROCESSORS_ONLN\" in os.sysconf_names:\n # Linux and Unix\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n return ncpus\n else:\n # MacOS X\n # deprecated: return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n process = subprocess.Popen(\"sysctl -n hw.ncpu\", shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n return int(process.stdout.read())\n # for Windows\n if \"NUMBER_OF_PROCESSORS\" in os.environ:\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"])\n if ncpus > 0:\n return ncpus\n # return the default value\n return 1\n", "path": "src/sage/parallel/ncpus.py"}], "after_files": [{"content": "\"\"\"\nCPU Detection\n\"\"\"\n\nimport os\n\n\ndef ncpus():\n \"\"\"\n Return the number of available CPUs in the system.\n\n 
ALGORITHM: :func:`os.sched_getaffinity` or :func:`os.cpu_count`\n\n EXAMPLES::\n\n sage: sage.parallel.ncpus.ncpus() # random output -- depends on machine\n 2\n \"\"\"\n # Support Sage environment variable SAGE_NUM_THREADS\n # NOTE: while doctesting, this is forced to be 2 by the\n # sage-runtests script\n try:\n n = os.environ[\"SAGE_NUM_THREADS\"]\n except KeyError:\n pass\n else:\n return int(n)\n\n n = None\n\n if hasattr(os, 'sched_getaffinity'):\n n = len(os.sched_getaffinity(0))\n\n return n or os.cpu_count() or 1\n", "path": "src/sage/parallel/ncpus.py"}]}
| 1,302 | 867 |
gh_patches_debug_9132
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-10307
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] cElementTree has been deprecated and will be removed in favor of ElementTree
Reference : https://bugs.python.org/issue36543
```
bokeh/sampledata/us_states.py
33:import xml.etree.cElementTree as et
bokeh/sampledata/us_counties.py
40:import xml.etree.cElementTree as et
```
--- END ISSUE ---
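For context: since Python 3.3, `xml.etree.ElementTree` automatically uses the C accelerator when it is available, so moving the import away from the deprecated `cElementTree` is a drop-in change. A small sketch with a made-up coordinate string (modelled on the parsing done in these sample-data modules) is shown below.

```python
# Sketch of the drop-in replacement: plain ElementTree already uses the
# C accelerator under the hood on Python 3.3+.
import xml.etree.ElementTree as et

xml_doc = et.fromstring("<coordinates>1.0,2.0 3.0,4.0</coordinates>")
pairs = [c.split(",")[:2] for c in xml_doc.text.split()]
print(pairs)  # [['1.0', '2.0'], ['3.0', '4.0']]
```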
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/sampledata/us_counties.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 ''' This modules exposes geometry data for Unites States. It exposes a
8 dictionary ``data``, which is indexed by the two-tuples:
9
10 .. code-block:: python
11
12 (state_id, county_id)
13
14 that have the following dictionaries as the associated value:
15
16 .. code-block:: python
17
18 data[(1,1)]['name']
19 data[(1,1)]['state']
20 data[(1,1)]['detailed name']
21 data[(1,1)]['lats']
22 data[(1,1)]['lons']
23
24 Entries for ``'name'`` can have duplicates for certain states (e.g. Virginia).
25 The combination of ``'detailed name'`` and ``'state'`` will always be unique.
26
27 '''
28 #-----------------------------------------------------------------------------
29 # Boilerplate
30 #-----------------------------------------------------------------------------
31 import logging # isort:skip
32 log = logging.getLogger(__name__)
33
34 #-----------------------------------------------------------------------------
35 # Imports
36 #-----------------------------------------------------------------------------
37
38 # Standard library imports
39 import csv
40 import xml.etree.cElementTree as et
41
42 # Bokeh imports
43 from ..util.sampledata import external_path, open_csv
44
45 #-----------------------------------------------------------------------------
46 # Globals and constants
47 #-----------------------------------------------------------------------------
48
49 __all__ = (
50 'data',
51 )
52
53 #-----------------------------------------------------------------------------
54 # General API
55 #-----------------------------------------------------------------------------
56
57 #-----------------------------------------------------------------------------
58 # Dev API
59 #-----------------------------------------------------------------------------
60
61 #-----------------------------------------------------------------------------
62 # Private API
63 #-----------------------------------------------------------------------------
64
65 def _read_data():
66 '''
67
68 '''
69 nan = float('NaN')
70
71 data = {}
72
73 with open_csv(external_path('US_Counties.csv')) as f:
74 next(f)
75 reader = csv.reader(f, delimiter=str(','), quotechar=str('"'))
76 for row in reader:
77 name, dummy, state, dummy, geometry, dummy, dummy, dummy, det_name, state_id, county_id, dummy, dummy = row
78 xml = et.fromstring(geometry)
79 lats = []
80 lons = []
81 for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):
82 if i > 0:
83 lats.append(nan)
84 lons.append(nan)
85 coords = (c.split(',')[:2] for c in poly.text.split())
86 lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in
87 coords]))
88 lats.extend(lat)
89 lons.extend(lon)
90 data[(int(state_id), int(county_id))] = {
91 'name' : name,
92 'detailed name' : det_name,
93 'state' : state,
94 'lats' : lats,
95 'lons' : lons,
96 }
97
98 return data
99
100 #-----------------------------------------------------------------------------
101 # Code
102 #-----------------------------------------------------------------------------
103
104 data = _read_data()
105
```
Path: `bokeh/sampledata/us_states.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
3 # All rights reserved.
4 #
5 # The full license is in the file LICENSE.txt, distributed with this software.
6 #-----------------------------------------------------------------------------
7 '''
8 This modules exposes geometry data for Unites States. It exposes a dictionary 'data' which is
9 indexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the
10 associated value:
11
12 data['CA']['name']
13 data['CA']['region']
14 data['CA']['lats']
15 data['CA']['lons']
16
17 '''
18
19 #-----------------------------------------------------------------------------
20 # Boilerplate
21 #-----------------------------------------------------------------------------
22 import logging # isort:skip
23 log = logging.getLogger(__name__)
24
25 #-----------------------------------------------------------------------------
26 # Imports
27 #-----------------------------------------------------------------------------
28
29 # Standard library imports
30 import codecs
31 import csv
32 import gzip
33 import xml.etree.cElementTree as et
34
35 # Bokeh imports
36 from ..util.sampledata import package_path
37
38 #-----------------------------------------------------------------------------
39 # Globals and constants
40 #-----------------------------------------------------------------------------
41
42 __all__ = (
43 'data',
44 )
45
46 #-----------------------------------------------------------------------------
47 # General API
48 #-----------------------------------------------------------------------------
49
50 #-----------------------------------------------------------------------------
51 # Dev API
52 #-----------------------------------------------------------------------------
53
54 #-----------------------------------------------------------------------------
55 # Private API
56 #-----------------------------------------------------------------------------
57
58 def _read_data():
59 '''
60
61 '''
62 nan = float('NaN')
63
64 data = {}
65
66 with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:
67 decoded = codecs.iterdecode(f, "utf-8")
68 next(decoded)
69 reader = csv.reader(decoded, delimiter=str(','), quotechar=str('"'))
70 for row in reader:
71 region, name, code, geometry, dummy = row
72 xml = et.fromstring(geometry)
73 lats = []
74 lons = []
75 for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):
76 if i > 0:
77 lats.append(nan)
78 lons.append(nan)
79 coords = (c.split(',')[:2] for c in poly.text.split())
80 lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in
81 coords]))
82 lats.extend(lat)
83 lons.extend(lon)
84 data[code] = {
85 'name' : name,
86 'region' : region,
87 'lats' : lats,
88 'lons' : lons,
89 }
90
91 return data
92
93 #-----------------------------------------------------------------------------
94 # Code
95 #-----------------------------------------------------------------------------
96
97 data = _read_data()
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bokeh/sampledata/us_counties.py b/bokeh/sampledata/us_counties.py
--- a/bokeh/sampledata/us_counties.py
+++ b/bokeh/sampledata/us_counties.py
@@ -37,7 +37,7 @@
# Standard library imports
import csv
-import xml.etree.cElementTree as et
+import xml.etree.ElementTree as et
# Bokeh imports
from ..util.sampledata import external_path, open_csv
diff --git a/bokeh/sampledata/us_states.py b/bokeh/sampledata/us_states.py
--- a/bokeh/sampledata/us_states.py
+++ b/bokeh/sampledata/us_states.py
@@ -30,7 +30,7 @@
import codecs
import csv
import gzip
-import xml.etree.cElementTree as et
+import xml.etree.ElementTree as et
# Bokeh imports
from ..util.sampledata import package_path
|
{"golden_diff": "diff --git a/bokeh/sampledata/us_counties.py b/bokeh/sampledata/us_counties.py\n--- a/bokeh/sampledata/us_counties.py\n+++ b/bokeh/sampledata/us_counties.py\n@@ -37,7 +37,7 @@\n \n # Standard library imports\n import csv\n-import xml.etree.cElementTree as et\n+import xml.etree.ElementTree as et\n \n # Bokeh imports\n from ..util.sampledata import external_path, open_csv\ndiff --git a/bokeh/sampledata/us_states.py b/bokeh/sampledata/us_states.py\n--- a/bokeh/sampledata/us_states.py\n+++ b/bokeh/sampledata/us_states.py\n@@ -30,7 +30,7 @@\n import codecs\n import csv\n import gzip\n-import xml.etree.cElementTree as et\n+import xml.etree.ElementTree as et\n \n # Bokeh imports\n from ..util.sampledata import package_path\n", "issue": "[BUG] cElementTree has been deprecated and will be removed in favor of ElementTree\nReference : https://bugs.python.org/issue36543\r\n\r\n```\r\nbokeh/sampledata/us_states.py\r\n33:import xml.etree.cElementTree as et\r\n\r\nbokeh/sampledata/us_counties.py\r\n40:import xml.etree.cElementTree as et\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' This modules exposes geometry data for Unites States. It exposes a\ndictionary ``data``, which is indexed by the two-tuples:\n\n.. code-block:: python\n\n (state_id, county_id)\n\nthat have the following dictionaries as the associated value:\n\n.. code-block:: python\n\n data[(1,1)]['name']\n data[(1,1)]['state']\n data[(1,1)]['detailed name']\n data[(1,1)]['lats']\n data[(1,1)]['lons']\n\nEntries for ``'name'`` can have duplicates for certain states (e.g. 
Virginia).\nThe combination of ``'detailed name'`` and ``'state'`` will always be unique.\n\n'''\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport csv\nimport xml.etree.cElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import external_path, open_csv\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with open_csv(external_path('US_Counties.csv')) as f:\n next(f)\n reader = csv.reader(f, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n name, dummy, state, dummy, geometry, dummy, dummy, dummy, det_name, state_id, county_id, dummy, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[(int(state_id), int(county_id))] = {\n 'name' : name,\n 'detailed name' : det_name,\n 'state' : state,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_counties.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\nThis modules exposes geometry data for Unites States. 
It exposes a dictionary 'data' which is\nindexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the\nassociated value:\n\n data['CA']['name']\n data['CA']['region']\n data['CA']['lats']\n data['CA']['lons']\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport codecs\nimport csv\nimport gzip\nimport xml.etree.cElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import package_path\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:\n decoded = codecs.iterdecode(f, \"utf-8\")\n next(decoded)\n reader = csv.reader(decoded, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n region, name, code, geometry, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[code] = {\n 'name' : name,\n 'region' : region,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_states.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' This modules exposes geometry data for Unites States. It exposes a\ndictionary ``data``, which is indexed by the two-tuples:\n\n.. code-block:: python\n\n (state_id, county_id)\n\nthat have the following dictionaries as the associated value:\n\n.. code-block:: python\n\n data[(1,1)]['name']\n data[(1,1)]['state']\n data[(1,1)]['detailed name']\n data[(1,1)]['lats']\n data[(1,1)]['lons']\n\nEntries for ``'name'`` can have duplicates for certain states (e.g. 
Virginia).\nThe combination of ``'detailed name'`` and ``'state'`` will always be unique.\n\n'''\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport csv\nimport xml.etree.ElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import external_path, open_csv\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with open_csv(external_path('US_Counties.csv')) as f:\n next(f)\n reader = csv.reader(f, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n name, dummy, state, dummy, geometry, dummy, dummy, dummy, det_name, state_id, county_id, dummy, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[(int(state_id), int(county_id))] = {\n 'name' : name,\n 'detailed name' : det_name,\n 'state' : state,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_counties.py"}, {"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n'''\nThis modules exposes geometry data for Unites States. 
It exposes a dictionary 'data' which is\nindexed by the two letter state code (e.g., 'CA', 'TX') and has the following dictionary as the\nassociated value:\n\n data['CA']['name']\n data['CA']['region']\n data['CA']['lats']\n data['CA']['lons']\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport codecs\nimport csv\nimport gzip\nimport xml.etree.ElementTree as et\n\n# Bokeh imports\nfrom ..util.sampledata import package_path\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'data',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\ndef _read_data():\n '''\n\n '''\n nan = float('NaN')\n\n data = {}\n\n with gzip.open(package_path('US_Regions_State_Boundaries.csv.gz')) as f:\n decoded = codecs.iterdecode(f, \"utf-8\")\n next(decoded)\n reader = csv.reader(decoded, delimiter=str(','), quotechar=str('\"'))\n for row in reader:\n region, name, code, geometry, dummy = row\n xml = et.fromstring(geometry)\n lats = []\n lons = []\n for i, poly in enumerate(xml.findall('.//outerBoundaryIs/LinearRing/coordinates')):\n if i > 0:\n lats.append(nan)\n lons.append(nan)\n coords = (c.split(',')[:2] for c in poly.text.split())\n lat, lon = list(zip(*[(float(lat), float(lon)) for lon, lat in\n coords]))\n lats.extend(lat)\n lons.extend(lon)\n data[code] = {\n 'name' : name,\n 'region' : region,\n 'lats' : lats,\n 'lons' : lons,\n }\n\n return data\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\ndata = _read_data()\n", "path": "bokeh/sampledata/us_states.py"}]}
| 1,931 | 205 |
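A brief note on the JSON block that closes above: the code in its `before_files` half imports the deprecated `xml.etree.cElementTree` module, while the `after_files` half uses `xml.etree.ElementTree` (the C accelerator has been applied automatically since Python 3.3, and the `cElementTree` alias was removed in Python 3.9). A minimal, self-contained sketch of the replacement import in use — illustrative only, not bokeh code:

```python
# Sketch only: modern replacement for the deprecated cElementTree import.
# ElementTree has used the C accelerator automatically since Python 3.3,
# and xml.etree.cElementTree was removed in Python 3.9.
import xml.etree.ElementTree as et

xml = et.fromstring("<coordinates>-80.1,40.5 -80.2,40.6</coordinates>")
coords = [pair.split(",") for pair in xml.text.split()]
print(coords)  # [['-80.1', '40.5'], ['-80.2', '40.6']]
```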
gh_patches_debug_8327
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-3652
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty completion.timestamp_format crashes
After `:set completion.timestamp_format ''`:
```
17:26:29 ERROR misc crashsignal:exception_hook:216 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 260, in _update_completion
completion.set_pattern(pattern)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completionwidget.py", line 320, in set_pattern
self.model().set_pattern(pattern)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/completionmodel.py", line 185, in set_pattern
cat.set_pattern(pattern)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 85, in set_pattern
.format(timestamp_format.replace("'", "`")))
AttributeError: 'NoneType' object has no attribute 'replace'
```
cc @rcorre and @erikdsjostrom who reported this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/models/histcategory.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017-2018 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """A completion category that queries the SQL History store."""
21
22 from PyQt5.QtSql import QSqlQueryModel
23
24 from qutebrowser.misc import sql
25 from qutebrowser.utils import debug
26 from qutebrowser.config import config
27
28
29 class HistoryCategory(QSqlQueryModel):
30
31 """A completion category that queries the SQL History store."""
32
33 def __init__(self, *, delete_func=None, parent=None):
34 """Create a new History completion category."""
35 super().__init__(parent=parent)
36 self.name = "History"
37 self._query = None
38
39 # advertise that this model filters by URL and title
40 self.columns_to_filter = [0, 1]
41 self.delete_func = delete_func
42
43 def _atime_expr(self):
44 """If max_items is set, return an expression to limit the query."""
45 max_items = config.val.completion.web_history_max_items
46 # HistoryCategory should not be added to the completion in that case.
47 assert max_items != 0
48
49 if max_items < 0:
50 return ''
51
52 min_atime = sql.Query(' '.join([
53 'SELECT min(last_atime) FROM',
54 '(SELECT last_atime FROM CompletionHistory',
55 'ORDER BY last_atime DESC LIMIT :limit)',
56 ])).run(limit=max_items).value()
57
58 if not min_atime:
59 # if there are no history items, min_atime may be '' (issue #2849)
60 return ''
61
62 return "AND last_atime >= {}".format(min_atime)
63
64 def set_pattern(self, pattern):
65 """Set the pattern used to filter results.
66
67 Args:
68 pattern: string pattern to filter by.
69 """
70 # escape to treat a user input % or _ as a literal, not a wildcard
71 pattern = pattern.replace('%', '\\%')
72 pattern = pattern.replace('_', '\\_')
73 words = ['%{}%'.format(w) for w in pattern.split(' ')]
74
75 # build a where clause to match all of the words in any order
76 # given the search term "a b", the WHERE clause would be:
77 # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')
78 where_clause = ' AND '.join(
79 "(url || title) LIKE :{} escape '\\'".format(i)
80 for i in range(len(words)))
81
82 # replace ' in timestamp-format to avoid breaking the query
83 timestamp_format = config.val.completion.timestamp_format
84 timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
85 .format(timestamp_format.replace("'", "`")))
86
87 if not self._query or len(words) != len(self._query.boundValues()):
88 # if the number of words changed, we need to generate a new query
89 # otherwise, we can reuse the prepared query for performance
90 self._query = sql.Query(' '.join([
91 "SELECT url, title, {}".format(timefmt),
92 "FROM CompletionHistory",
93 # the incoming pattern will have literal % and _ escaped
94 # we need to tell sql to treat '\' as an escape character
95 'WHERE ({})'.format(where_clause),
96 self._atime_expr(),
97 "ORDER BY last_atime DESC",
98 ]), forward_only=False)
99
100 with debug.log_time('sql', 'Running completion query'):
101 self._query.run(**{
102 str(i): w for i, w in enumerate(words)})
103 self.setQuery(self._query)
104
105 def removeRows(self, row, _count, _parent=None):
106 """Override QAbstractItemModel::removeRows to re-run sql query."""
107 # re-run query to reload updated table
108 with debug.log_time('sql', 'Re-running completion query post-delete'):
109 self._query.run()
110 self.setQuery(self._query)
111 while self.rowCount() < row:
112 self.fetchMore()
113 return True
114
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py
--- a/qutebrowser/completion/models/histcategory.py
+++ b/qutebrowser/completion/models/histcategory.py
@@ -80,7 +80,7 @@
for i in range(len(words)))
# replace ' in timestamp-format to avoid breaking the query
- timestamp_format = config.val.completion.timestamp_format
+ timestamp_format = config.val.completion.timestamp_format or ''
timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
.format(timestamp_format.replace("'", "`")))
|
{"golden_diff": "diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -80,7 +80,7 @@\n for i in range(len(words)))\n \n # replace ' in timestamp-format to avoid breaking the query\n- timestamp_format = config.val.completion.timestamp_format\n+ timestamp_format = config.val.completion.timestamp_format or ''\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n", "issue": "Empty completion.timestamp_format crashes\nAfter `:set completion.timestamp_format ''`:\r\n\r\n```\r\n17:26:29 ERROR misc crashsignal:exception_hook:216 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 260, in _update_completion\r\n completion.set_pattern(pattern)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completionwidget.py\", line 320, in set_pattern\r\n self.model().set_pattern(pattern)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/completionmodel.py\", line 185, in set_pattern\r\n cat.set_pattern(pattern)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\", line 85, in set_pattern\r\n .format(timestamp_format.replace(\"'\", \"`\")))\r\nAttributeError: 'NoneType' object has no attribute 'replace'\r\n```\r\n\r\ncc @rcorre and @erikdsjostrom who reported this\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n self._query = None\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.val.completion.web_history_max_items\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n words = ['%{}%'.format(w) for w in pattern.split(' ')]\n\n # build a where clause to match all of the words in any order\n # given the search term \"a b\", the WHERE clause would be:\n # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')\n where_clause = ' AND '.join(\n \"(url || title) LIKE :{} escape '\\\\'\".format(i)\n for i in range(len(words)))\n\n # replace ' in timestamp-format to avoid breaking the query\n timestamp_format = config.val.completion.timestamp_format\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n\n if not self._query or len(words) != len(self._query.boundValues()):\n # if the number of words changed, we need to generate a new query\n # otherwise, we can reuse the prepared query for performance\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped\n # we need to tell sql to treat '\\' as an escape character\n 'WHERE ({})'.format(where_clause),\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(**{\n str(i): w for i, w in enumerate(words)})\n self.setQuery(self._query)\n\n def removeRows(self, row, _count, _parent=None):\n \"\"\"Override QAbstractItemModel::removeRows to re-run sql query.\"\"\"\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n while self.rowCount() < row:\n self.fetchMore()\n return True\n", "path": "qutebrowser/completion/models/histcategory.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017-2018 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is 
part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n self._query = None\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.val.completion.web_history_max_items\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n words = ['%{}%'.format(w) for w in pattern.split(' ')]\n\n # build a where clause to match all of the words in any order\n # given the search term \"a b\", the WHERE clause would be:\n # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')\n where_clause = ' AND '.join(\n \"(url || title) LIKE :{} escape '\\\\'\".format(i)\n for i in range(len(words)))\n\n # replace ' in timestamp-format to avoid breaking the query\n timestamp_format = config.val.completion.timestamp_format or ''\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(timestamp_format.replace(\"'\", \"`\")))\n\n if not self._query or len(words) != len(self._query.boundValues()):\n # if the number of words changed, we need to generate a new query\n # otherwise, we can reuse the prepared query for performance\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped\n # we need to tell sql to treat '\\' as an escape character\n 'WHERE ({})'.format(where_clause),\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(**{\n str(i): w for i, w in 
enumerate(words)})\n self.setQuery(self._query)\n\n def removeRows(self, row, _count, _parent=None):\n \"\"\"Override QAbstractItemModel::removeRows to re-run sql query.\"\"\"\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n while self.rowCount() < row:\n self.fetchMore()\n return True\n", "path": "qutebrowser/completion/models/histcategory.py"}]}
| 1,777 | 140 |
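A brief note on the qutebrowser entry above: `config.val.completion.timestamp_format` becomes `None` when the option is emptied, so the unguarded call to `.replace()` on it raises the `AttributeError` shown in the traceback; the golden diff simply coerces the value with `or ''`. A minimal sketch of that guard in isolation (the function name and inputs are illustrative, not qutebrowser's real API):

```python
def build_timefmt(timestamp_format):
    # None (an emptied option) is treated like an empty format string, so the
    # .replace() call below can never run on None.
    timestamp_format = timestamp_format or ''
    # Back-tick single quotes so the format can sit inside the SQL literal.
    return ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
            .format(timestamp_format.replace("'", "`")))

assert build_timefmt(None) == build_timefmt('')
print(build_timefmt("%Y-%m-%d"))
```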
gh_patches_debug_38751
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-116
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python Model download for GCS and S3
Downloading from GCS and S3 needs to be completed.
https://github.com/kubeflow/kfserving/blob/2f8d33d1a9773c5694a22ba749192163251fe287/python/kfserving/kfserving/storage.py#L27-L33
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/kfserving/storage.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import tempfile
17 import os
18
19 _GCS_PREFIX = "gs://"
20 _S3_PREFIX = "s3://"
21 _LOCAL_PREFIX = "file://"
22
23
24 class Storage(object):
25 @staticmethod
26 def download(uri: str) -> str:
27 logging.info("Copying contents of %s to local" % uri)
28 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
29 return Storage._download_local(uri)
30
31 temp_dir = tempfile.mkdtemp()
32 if uri.startswith(_GCS_PREFIX):
33 Storage._download_gcs(uri, temp_dir)
34 elif uri.startswith(_S3_PREFIX):
35 Storage._download_s3(uri, temp_dir)
36 else:
37 raise Exception("Cannot recognize storage type for " + uri +
38 "\n'%s', '%s', and '%s' are the current available storage type." %
39 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
40
41 logging.info("Successfully copied %s to %s" % (uri, temp_dir))
42 return temp_dir
43
44 @staticmethod
45 def _download_s3(uri, temp_dir: str):
46 raise NotImplementedError
47
48 @staticmethod
49 def _download_gcs(uri, temp_dir: str):
50 raise NotImplementedError
51
52 @staticmethod
53 def _download_local(uri):
54 local_path = uri.replace(_LOCAL_PREFIX, "", 1)
55 if not os.path.exists(local_path):
56 raise Exception("Local path %s does not exist." % (uri))
57 return local_path
58
```
Path: `python/kfserving/setup.py`
Content:
```
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup, find_packages
16
17 tests_require = [
18 'pytest',
19 'pytest-tornasync',
20 'mypy'
21 ]
22
23 setup(
24 name='kfserver',
25 version='0.1.0',
26 author_email='[email protected]',
27 license='../../LICENSE.txt',
28 url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',
29 description='Model Server for arbitrary python ML frameworks.',
30 long_description=open('README.md').read(),
31 python_requires='>3.4',
32 packages=find_packages("kfserving"),
33 install_requires=[
34 "tornado >= 1.4.1",
35 "argparse >= 1.4.0",
36 "numpy"
37 ],
38 tests_require=tests_require,
39 extras_require={'test': tests_require}
40 )
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py
--- a/python/kfserving/kfserving/storage.py
+++ b/python/kfserving/kfserving/storage.py
@@ -15,6 +15,10 @@
import logging
import tempfile
import os
+import re
+from minio import Minio
+from google.cloud import storage
+from google.auth import exceptions
_GCS_PREFIX = "gs://"
_S3_PREFIX = "s3://"
@@ -43,11 +47,36 @@
@staticmethod
def _download_s3(uri, temp_dir: str):
- raise NotImplementedError
+ client = Storage._create_minio_client()
+ bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
+ bucket_name = bucket_args[0]
+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
+ objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
+ for obj in objects:
+ # Replace any prefix from the object key with temp_dir
+ subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
+ client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))
@staticmethod
def _download_gcs(uri, temp_dir: str):
- raise NotImplementedError
+ try:
+ storage_client = storage.Client()
+ except exceptions.DefaultCredentialsError as e:
+ storage_client = storage.Client.create_anonymous_client()
+ bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
+ bucket_name = bucket_args[0]
+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
+ bucket = storage_client.bucket(bucket_name)
+ blobs = bucket.list_blobs(prefix=bucket_path)
+ for blob in blobs:
+ # Replace any prefix from the object key with temp_dir
+ subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
+ # Create necessary subdirectory to store the object locally
+ if "/" in subdir_object_key:
+ local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
+ if not os.path.isdir(local_object_dir):
+ os.makedirs(local_object_dir, exist_ok=True)
+ blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))
@staticmethod
def _download_local(uri):
@@ -55,3 +84,13 @@
if not os.path.exists(local_path):
raise Exception("Local path %s does not exist." % (uri))
return local_path
+
+ @staticmethod
+ def _create_minio_client():
+ # Remove possible http scheme for Minio
+ url = re.compile(r"https?://")
+ minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")),
+ access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
+ secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
+ secure=True)
+ return minioClient
diff --git a/python/kfserving/setup.py b/python/kfserving/setup.py
--- a/python/kfserving/setup.py
+++ b/python/kfserving/setup.py
@@ -33,6 +33,8 @@
install_requires=[
"tornado >= 1.4.1",
"argparse >= 1.4.0",
+ "minio >= 4.0.9",
+ "google-cloud-storage >= 1.16.0",
"numpy"
],
tests_require=tests_require,
|
{"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -15,6 +15,10 @@\n import logging\n import tempfile\n import os\n+import re\n+from minio import Minio\n+from google.cloud import storage\n+from google.auth import exceptions\n \n _GCS_PREFIX = \"gs://\"\n _S3_PREFIX = \"s3://\"\n@@ -43,11 +47,36 @@\n \n @staticmethod\n def _download_s3(uri, temp_dir: str):\n- raise NotImplementedError\n+ client = Storage._create_minio_client()\n+ bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n+ bucket_name = bucket_args[0]\n+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n+ objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n+ for obj in objects:\n+ # Replace any prefix from the object key with temp_dir\n+ subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n+ client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))\n \n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n- raise NotImplementedError\n+ try:\n+ storage_client = storage.Client()\n+ except exceptions.DefaultCredentialsError as e:\n+ storage_client = storage.Client.create_anonymous_client()\n+ bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n+ bucket_name = bucket_args[0]\n+ bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n+ bucket = storage_client.bucket(bucket_name)\n+ blobs = bucket.list_blobs(prefix=bucket_path)\n+ for blob in blobs:\n+ # Replace any prefix from the object key with temp_dir\n+ subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n+ # Create necessary subdirectory to store the object locally\n+ if \"/\" in subdir_object_key:\n+ local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n+ if not os.path.isdir(local_object_dir):\n+ os.makedirs(local_object_dir, exist_ok=True)\n+ blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n \n @staticmethod\n def _download_local(uri):\n@@ -55,3 +84,13 @@\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n+\n+ @staticmethod\n+ def _create_minio_client():\n+ # Remove possible http scheme for Minio\n+ url = re.compile(r\"https?://\")\n+ minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n+ access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n+ secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n+ secure=True)\n+ return minioClient\ndiff --git a/python/kfserving/setup.py b/python/kfserving/setup.py\n--- a/python/kfserving/setup.py\n+++ b/python/kfserving/setup.py\n@@ -33,6 +33,8 @@\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n+ \"minio >= 4.0.9\",\n+ \"google-cloud-storage >= 1.16.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n", "issue": "Python Model download for GCS and S3\nDownloading from GCS and S3 needs to be completed.\r\n\r\nhttps://github.com/kubeflow/kfserving/blob/2f8d33d1a9773c5694a22ba749192163251fe287/python/kfserving/kfserving/storage.py#L27-L33\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in 
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object):\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\" % uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\" % (uri, temp_dir))\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n raise NotImplementedError\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n raise NotImplementedError\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n", "path": "python/kfserving/kfserving/storage.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='kfserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',\n description='Model Server for arbitrary python ML frameworks.',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"kfserving\"),\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/kfserving/setup.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport tempfile\nimport os\nimport re\nfrom minio import Minio\nfrom google.cloud import storage\nfrom 
google.auth import exceptions\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object):\n @staticmethod\n def download(uri: str) -> str:\n logging.info(\"Copying contents of %s to local\" % uri)\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n return Storage._download_local(uri)\n\n temp_dir = tempfile.mkdtemp()\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, temp_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, temp_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\" % (uri, temp_dir))\n return temp_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n client.fget_object(bucket_name, obj.object_name, os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError as e:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n blobs = bucket.list_blobs(prefix=bucket_path)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n # Create necessary subdirectory to store the object locally\n if \"/\" in subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n blob.download_to_filename(os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_local(uri):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n return local_path\n\n @staticmethod\n def _create_minio_client():\n # Remove possible http scheme for Minio\n url = re.compile(r\"https?://\")\n minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n secure=True)\n return minioClient\n", "path": "python/kfserving/kfserving/storage.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under 
the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='kfserver',\n version='0.1.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/kfserving',\n description='Model Server for arbitrary python ML frameworks.',\n long_description=open('README.md').read(),\n python_requires='>3.4',\n packages=find_packages(\"kfserving\"),\n install_requires=[\n \"tornado >= 1.4.1\",\n \"argparse >= 1.4.0\",\n \"minio >= 4.0.9\",\n \"google-cloud-storage >= 1.16.0\",\n \"numpy\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/kfserving/setup.py"}]}
| 1,318 | 816 |
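A brief note on the kfserving entry above: the golden diff fills in `_download_s3` with the MinIO client and `_download_gcs` with `google-cloud-storage`, in both cases listing objects under the bucket prefix and writing each one beneath a temporary directory. The path handling is the subtle part; below is a minimal, dependency-free sketch of that prefix-stripping logic (the helper name is illustrative, not part of kfserving):

```python
import os

def local_path_for(temp_dir, bucket_path, object_key):
    # Drop the bucket prefix from the object key, then anchor the remainder
    # under temp_dir -- mirroring the patched _download_s3/_download_gcs loops.
    subdir_object_key = object_key.replace(bucket_path, "", 1).strip("/")
    return os.path.join(temp_dir, subdir_object_key)

print(local_path_for("/tmp/model", "models/v1", "models/v1/weights/model.dat"))
# /tmp/model/weights/model.dat
```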
gh_patches_debug_17010
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-4880
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Alembic migration for metrics table uses incorrect server default
## MLflow Roadmap Item
This is an MLflow Roadmap item that has been prioritized by the MLflow maintainers. We're seeking help with the implementation of roadmap items tagged with the `help wanted` label.
For requirements clarifications and implementation questions, or to request a PR review, please tag @harupy in your communications related to this issue.
### System information
- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: miniconda container - debian buster
- **MLflow installed from (source or binary)**: mlflow from PyPI
- **MLflow version (run ``mlflow --version``)**: mlflow 1.14.1 trying to upgrade to 1.16 or 1.17
- **Python version**: Python 3.9.2
- **npm version, if running the dev UI**: NA
- **Exact command to reproduce**: mlflow db upgrade <MSSQL connection string >
- Tracking server DB: Azure Microsoft SQL DB
### Describe the problem
When I upgrade the database from 1.14.1 to a higher version I get an error. We currently use an Azure Microsoft SQL database and would like to upgrade to 1.16 or 1.17.
### Code to reproduce issue
mlflow db upgrade "mssql+pyodbc://_rest_of_conn_string"
### Other info / logs
sqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Column already has a DEFAULT bound to it. (1781) (SQLExecDirectW)')
[SQL: ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan]
(Background on this error at: http://sqlalche.me/e/14/f405)
### What component(s), interfaces, languages, and integrations does this bug affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py`
Content:
```
1 """reset_default_value_for_is_nan_in_metrics_table_for_mysql
2
3 Revision ID: c48cb773bb87
4 Revises: 39d1c3be5f05
5 Create Date: 2021-04-02 15:43:28.466043
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = "c48cb773bb87"
14 down_revision = "39d1c3be5f05"
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # This part of the migration is only relevant for MySQL.
21 # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py
22 # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`
23 # table without specifying `existing_server_default`. This alters the column default value to
24 # NULL in MySQL (see the doc below).
25 #
26 # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column
27 #
28 # To revert this change, set the default column value to "0" by specifying `server_default`
29 with op.batch_alter_table("metrics") as batch_op:
30 batch_op.alter_column(
31 "is_nan",
32 type_=sa.types.Boolean(create_constraint=True),
33 nullable=False,
34 server_default="0",
35 )
36
37
38 def downgrade():
39 pass
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py
--- a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py
+++ b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py
@@ -26,13 +26,15 @@
# https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column
#
# To revert this change, set the default column value to "0" by specifying `server_default`
- with op.batch_alter_table("metrics") as batch_op:
- batch_op.alter_column(
- "is_nan",
- type_=sa.types.Boolean(create_constraint=True),
- nullable=False,
- server_default="0",
- )
+ bind = op.get_bind()
+ if bind.engine.name == "mysql":
+ with op.batch_alter_table("metrics") as batch_op:
+ batch_op.alter_column(
+ "is_nan",
+ type_=sa.types.Boolean(create_constraint=True),
+ nullable=False,
+ server_default="0",
+ )
def downgrade():
|
{"golden_diff": "diff --git a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n--- a/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n+++ b/mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py\n@@ -26,13 +26,15 @@\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n- with op.batch_alter_table(\"metrics\") as batch_op:\n- batch_op.alter_column(\n- \"is_nan\",\n- type_=sa.types.Boolean(create_constraint=True),\n- nullable=False,\n- server_default=\"0\",\n- )\n+ bind = op.get_bind()\n+ if bind.engine.name == \"mysql\":\n+ with op.batch_alter_table(\"metrics\") as batch_op:\n+ batch_op.alter_column(\n+ \"is_nan\",\n+ type_=sa.types.Boolean(create_constraint=True),\n+ nullable=False,\n+ server_default=\"0\",\n+ )\n \n \n def downgrade():\n", "issue": "[BUG] Alembic migration for metrics table uses incorrect server default\n## MLflow Roadmap Item\r\n\r\nThis is an MLflow Roadmap item that has been prioritized by the MLflow maintainers. We're seeking help with the implementation of roadmap items tagged with the `help wanted` label.\r\n\r\nFor requirements clarifications and implementation questions, or to request a PR review, please tag @harupy in your communications related to this issue.\r\n\r\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: miniconda container - debian buster\r\n- **MLflow installed from (source or binary)**: mflow from pypi\r\n- **MLflow version (run ``mlflow --version``)**: mlflow 1.14.1 trying to upgrade to 1.16 or 1.17\r\n- **Python version**: Python 3.9.2\r\n- **npm version, if running the dev UI**: NA\r\n- **Exact command to reproduce**: mlflow db upgrade <MSSQL connection string >\r\n- Tracking server DB: Azure Microsoft SQL DB\r\n\r\n### Describe the problem\r\nWhen I upgrade the database from 1.14.1 to a higher version I get an error. Currently use an Azure MSFT DB. Would like to upgrade to 1.16 or 1.17\r\n\r\n### Code to reproduce issue\r\nmlflow db upgrade \"mssql+pyodbc://_rest_of_conn_string\"\r\n\r\n### Other info / logs\r\nsqlalchemy.exc.ProgrammingError: (pyodbc.ProgrammingError) ('42000', '[42000] [Microsoft][ODBC Driver 17 for SQL Server][SQL Server]Column already has a DEFAULT bound to it. 
(1781) (SQLExecDirectW)')\r\n[SQL: ALTER TABLE metrics ADD DEFAULT '0' FOR is_nan]\r\n(Background on this error at: http://sqlalche.me/e/14/f405)\r\n\r\n\r\n### What component(s), interfaces, languages, and integrations does this bug affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [x] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [ ] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [x] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\n", "before_files": [{"content": "\"\"\"reset_default_value_for_is_nan_in_metrics_table_for_mysql\n\nRevision ID: c48cb773bb87\nRevises: 39d1c3be5f05\nCreate Date: 2021-04-02 15:43:28.466043\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c48cb773bb87\"\ndown_revision = \"39d1c3be5f05\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # This part of the migration is only relevant for MySQL.\n # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py\n # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`\n # table without specifying `existing_server_default`. 
This alters the column default value to\n # NULL in MySQL (see the doc below).\n #\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n with op.batch_alter_table(\"metrics\") as batch_op:\n batch_op.alter_column(\n \"is_nan\",\n type_=sa.types.Boolean(create_constraint=True),\n nullable=False,\n server_default=\"0\",\n )\n\n\ndef downgrade():\n pass\n", "path": "mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py"}], "after_files": [{"content": "\"\"\"reset_default_value_for_is_nan_in_metrics_table_for_mysql\n\nRevision ID: c48cb773bb87\nRevises: 39d1c3be5f05\nCreate Date: 2021-04-02 15:43:28.466043\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c48cb773bb87\"\ndown_revision = \"39d1c3be5f05\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # This part of the migration is only relevant for MySQL.\n # In 39d1c3be5f05_add_is_nan_constraint_for_metrics_tables_if_necessary.py\n # (added in MLflow 1.15.0), `alter_column` is called on the `is_nan` column in the `metrics`\n # table without specifying `existing_server_default`. This alters the column default value to\n # NULL in MySQL (see the doc below).\n #\n # https://alembic.sqlalchemy.org/en/latest/ops.html#alembic.operations.Operations.alter_column\n #\n # To revert this change, set the default column value to \"0\" by specifying `server_default`\n bind = op.get_bind()\n if bind.engine.name == \"mysql\":\n with op.batch_alter_table(\"metrics\") as batch_op:\n batch_op.alter_column(\n \"is_nan\",\n type_=sa.types.Boolean(create_constraint=True),\n nullable=False,\n server_default=\"0\",\n )\n\n\ndef downgrade():\n pass\n", "path": "mlflow/store/db_migrations/versions/c48cb773bb87_reset_default_value_for_is_nan_in_metrics_table_for_mysql.py"}]}
| 1,513 | 328 |
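A brief note on the mlflow entry above: the earlier migration unconditionally re-applied a MySQL-specific column default, and SQL Server rejects `ADD DEFAULT` on a column that already has a default constraint bound to it, hence error 1781; the fix gates the `alter_column` call on the active dialect. A standalone sketch of that dialect check outside Alembic (the engine URL is a stand-in, not a real tracking database):

```python
import sqlalchemy as sa

engine = sa.create_engine("sqlite://")  # stand-in for the tracking DB

with engine.connect() as conn:
    if conn.engine.name == "mysql":
        # Only MySQL needs its server default restored to "0".
        conn.execute(sa.text(
            "ALTER TABLE metrics MODIFY is_nan TINYINT(1) NOT NULL DEFAULT 0"))
    else:
        print(f"dialect is {conn.engine.name!r}; skipping MySQL-only DDL")
```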
gh_patches_debug_24334
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-3688
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[reply refactor] Allow journalists to download replies from journalist interface
After #3673 is implemented, we should allow journalists to download replies from the journalist interface UI. Note that for long-running SecureDrop instances, there will be old replies encrypted only to the source key that should be unavailable for download.
Epic: #3097
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist_app/col.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from flask import (Blueprint, redirect, url_for, render_template, flash,
4 request, abort, send_file, current_app)
5 from flask_babel import gettext
6 from sqlalchemy.orm.exc import NoResultFound
7
8 from db import db
9 from models import Submission
10 from journalist_app.forms import ReplyForm
11 from journalist_app.utils import (make_star_true, make_star_false, get_source,
12 delete_collection, col_download_unread,
13 col_download_all, col_star, col_un_star,
14 col_delete)
15
16
17 def make_blueprint(config):
18 view = Blueprint('col', __name__)
19
20 @view.route('/add_star/<filesystem_id>', methods=('POST',))
21 def add_star(filesystem_id):
22 make_star_true(filesystem_id)
23 db.session.commit()
24 return redirect(url_for('main.index'))
25
26 @view.route("/remove_star/<filesystem_id>", methods=('POST',))
27 def remove_star(filesystem_id):
28 make_star_false(filesystem_id)
29 db.session.commit()
30 return redirect(url_for('main.index'))
31
32 @view.route('/<filesystem_id>')
33 def col(filesystem_id):
34 form = ReplyForm()
35 source = get_source(filesystem_id)
36 source.has_key = current_app.crypto_util.getkey(filesystem_id)
37 return render_template("col.html", filesystem_id=filesystem_id,
38 source=source, form=form)
39
40 @view.route('/delete/<filesystem_id>', methods=('POST',))
41 def delete_single(filesystem_id):
42 """deleting a single collection from its /col page"""
43 source = get_source(filesystem_id)
44 delete_collection(filesystem_id)
45 flash(gettext("{source_name}'s collection deleted")
46 .format(source_name=source.journalist_designation),
47 "notification")
48 return redirect(url_for('main.index'))
49
50 @view.route('/process', methods=('POST',))
51 def process():
52 actions = {'download-unread': col_download_unread,
53 'download-all': col_download_all, 'star': col_star,
54 'un-star': col_un_star, 'delete': col_delete}
55 if 'cols_selected' not in request.form:
56 flash(gettext('No collections selected.'), 'error')
57 return redirect(url_for('main.index'))
58
59 # getlist is cgi.FieldStorage.getlist
60 cols_selected = request.form.getlist('cols_selected')
61 action = request.form['action']
62
63 if action not in actions:
64 return abort(500)
65
66 method = actions[action]
67 return method(cols_selected)
68
69 @view.route('/<filesystem_id>/<fn>')
70 def download_single_submission(filesystem_id, fn):
71 """Sends a client the contents of a single submission."""
72 if '..' in fn or fn.startswith('/'):
73 abort(404)
74
75 try:
76 Submission.query.filter(
77 Submission.filename == fn).one().downloaded = True
78 db.session.commit()
79 except NoResultFound as e:
80 current_app.logger.error(
81 "Could not mark " + fn + " as downloaded: %s" % (e,))
82
83 return send_file(current_app.storage.path(filesystem_id, fn),
84 mimetype="application/pgp-encrypted")
85
86 return view
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py
--- a/securedrop/journalist_app/col.py
+++ b/securedrop/journalist_app/col.py
@@ -67,18 +67,21 @@
return method(cols_selected)
@view.route('/<filesystem_id>/<fn>')
- def download_single_submission(filesystem_id, fn):
- """Sends a client the contents of a single submission."""
+ def download_single_file(filesystem_id, fn):
+ """Sends a client the contents of a single file, either a submission
+ or a journalist reply"""
if '..' in fn or fn.startswith('/'):
abort(404)
- try:
- Submission.query.filter(
- Submission.filename == fn).one().downloaded = True
- db.session.commit()
- except NoResultFound as e:
- current_app.logger.error(
- "Could not mark " + fn + " as downloaded: %s" % (e,))
+ # only mark as read when it's a submission (and not a journalist reply)
+ if not fn.endswith('reply.gpg'):
+ try:
+ Submission.query.filter(
+ Submission.filename == fn).one().downloaded = True
+ db.session.commit()
+ except NoResultFound as e:
+ current_app.logger.error(
+ "Could not mark " + fn + " as downloaded: %s" % (e,))
return send_file(current_app.storage.path(filesystem_id, fn),
mimetype="application/pgp-encrypted")
|
{"golden_diff": "diff --git a/securedrop/journalist_app/col.py b/securedrop/journalist_app/col.py\n--- a/securedrop/journalist_app/col.py\n+++ b/securedrop/journalist_app/col.py\n@@ -67,18 +67,21 @@\n return method(cols_selected)\n \n @view.route('/<filesystem_id>/<fn>')\n- def download_single_submission(filesystem_id, fn):\n- \"\"\"Sends a client the contents of a single submission.\"\"\"\n+ def download_single_file(filesystem_id, fn):\n+ \"\"\"Sends a client the contents of a single file, either a submission\n+ or a journalist reply\"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n \n- try:\n- Submission.query.filter(\n- Submission.filename == fn).one().downloaded = True\n- db.session.commit()\n- except NoResultFound as e:\n- current_app.logger.error(\n- \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n+ # only mark as read when it's a submission (and not a journalist reply)\n+ if not fn.endswith('reply.gpg'):\n+ try:\n+ Submission.query.filter(\n+ Submission.filename == fn).one().downloaded = True\n+ db.session.commit()\n+ except NoResultFound as e:\n+ current_app.logger.error(\n+ \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n \n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n", "issue": "[reply refactor] Allow journalists to download replies from journalist interface\nAfter #3673 is implemented, we should allow journalists to download replies from the journalist interface UI. Note that for long-running SecureDrop instances, there will be old replies encrypted only to the source key that should be unavailable for download.\r\n\r\nEpic: #3097\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, redirect, url_for, render_template, flash,\n request, abort, send_file, current_app)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete)\n\n\ndef make_blueprint(config):\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id):\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id):\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id):\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.getkey(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n delete_collection(filesystem_id)\n flash(gettext(\"{source_name}'s collection deleted\")\n .format(source_name=source.journalist_designation),\n \"notification\")\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process():\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete}\n 
if 'cols_selected' not in request.form:\n flash(gettext('No collections selected.'), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_submission(filesystem_id, fn):\n \"\"\"Sends a client the contents of a single submission.\"\"\"\n if '..' in fn or fn.startswith('/'):\n abort(404)\n\n try:\n Submission.query.filter(\n Submission.filename == fn).one().downloaded = True\n db.session.commit()\n except NoResultFound as e:\n current_app.logger.error(\n \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, redirect, url_for, render_template, flash,\n request, abort, send_file, current_app)\nfrom flask_babel import gettext\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom db import db\nfrom models import Submission\nfrom journalist_app.forms import ReplyForm\nfrom journalist_app.utils import (make_star_true, make_star_false, get_source,\n delete_collection, col_download_unread,\n col_download_all, col_star, col_un_star,\n col_delete)\n\n\ndef make_blueprint(config):\n view = Blueprint('col', __name__)\n\n @view.route('/add_star/<filesystem_id>', methods=('POST',))\n def add_star(filesystem_id):\n make_star_true(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route(\"/remove_star/<filesystem_id>\", methods=('POST',))\n def remove_star(filesystem_id):\n make_star_false(filesystem_id)\n db.session.commit()\n return redirect(url_for('main.index'))\n\n @view.route('/<filesystem_id>')\n def col(filesystem_id):\n form = ReplyForm()\n source = get_source(filesystem_id)\n source.has_key = current_app.crypto_util.getkey(filesystem_id)\n return render_template(\"col.html\", filesystem_id=filesystem_id,\n source=source, form=form)\n\n @view.route('/delete/<filesystem_id>', methods=('POST',))\n def delete_single(filesystem_id):\n \"\"\"deleting a single collection from its /col page\"\"\"\n source = get_source(filesystem_id)\n delete_collection(filesystem_id)\n flash(gettext(\"{source_name}'s collection deleted\")\n .format(source_name=source.journalist_designation),\n \"notification\")\n return redirect(url_for('main.index'))\n\n @view.route('/process', methods=('POST',))\n def process():\n actions = {'download-unread': col_download_unread,\n 'download-all': col_download_all, 'star': col_star,\n 'un-star': col_un_star, 'delete': col_delete}\n if 'cols_selected' not in request.form:\n flash(gettext('No collections selected.'), 'error')\n return redirect(url_for('main.index'))\n\n # getlist is cgi.FieldStorage.getlist\n cols_selected = request.form.getlist('cols_selected')\n action = request.form['action']\n\n if action not in actions:\n return abort(500)\n\n method = actions[action]\n return method(cols_selected)\n\n @view.route('/<filesystem_id>/<fn>')\n def download_single_file(filesystem_id, fn):\n \"\"\"Sends a client the contents of a single file, either a submission\n or a journalist reply\"\"\"\n if '..' 
in fn or fn.startswith('/'):\n abort(404)\n\n # only mark as read when it's a submission (and not a journalist reply)\n if not fn.endswith('reply.gpg'):\n try:\n Submission.query.filter(\n Submission.filename == fn).one().downloaded = True\n db.session.commit()\n except NoResultFound as e:\n current_app.logger.error(\n \"Could not mark \" + fn + \" as downloaded: %s\" % (e,))\n\n return send_file(current_app.storage.path(filesystem_id, fn),\n mimetype=\"application/pgp-encrypted\")\n\n return view\n", "path": "securedrop/journalist_app/col.py"}]}
| 1,181 | 354 |
gh_patches_debug_42292
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-178
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add metrics for counting to eval.py
We should add a new metric to the detection evaluation script so that it computes how close the counts are compared to ground truth.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/rv/detection/commands/eval_predictions.py`
Content:
```
1 import json
2 from os.path import join
3
4 import numpy as np
5 import rasterio
6 import click
7
8 from object_detection.utils import object_detection_evaluation, label_map_util
9
10 from rv.utils import (
11 download_if_needed, make_empty_dir, get_local_path, upload_if_needed,
12 get_boxes_from_geojson, download_and_build_vrt)
13 from rv.detection.commands.settings import max_num_classes, temp_root_dir
14
15
16 def get_eval_result(ground_truth_path, predictions_path, image_dataset):
17 gt_boxes, gt_classes, _ = \
18 get_boxes_from_geojson(ground_truth_path, image_dataset)
19 # Subtract one because class id's start at 1, but evaluation api assumes
20 # the start at 0. You might think we could just write the label_map.pbtxt
21 # so the class ids start at 0, but that throws an exception.
22 gt_classes -= 1
23
24 pred_boxes, pred_classes, pred_scores = \
25 get_boxes_from_geojson(predictions_path, image_dataset)
26 pred_classes -= 1
27
28 nb_gt_classes = len(set(gt_classes))
29 od_eval = object_detection_evaluation.ObjectDetectionEvaluation(
30 nb_gt_classes, matching_iou_threshold=0.1)
31 image_key = 'image'
32 od_eval.add_single_ground_truth_image_info(
33 image_key, gt_boxes, gt_classes)
34 od_eval.add_single_detected_image_info(
35 image_key, pred_boxes, pred_scores, pred_classes)
36
37 od_eval.evaluate()
38 return od_eval.get_eval_result()
39
40
41 def write_results(output_path, label_map_path, eval_result):
42 label_map = label_map_util.load_labelmap(label_map_path)
43 categories = label_map_util.convert_label_map_to_categories(
44 label_map, max_num_classes=max_num_classes, use_display_name=True)
45 category_index = label_map_util.create_category_index(categories)
46
47 results = []
48 for class_id in range(1, len(category_index) + 1):
49 class_name = category_index[class_id]['name']
50 # Subtract one to account for fact that class id's start at 1.
51 # precisions and recalls are lists with one element for each
52 # predicted box, assuming they are sorted by score. Each element is
53 # the precision or recall assuming that all predicted boxes with that
54 # score or above are used. So, the last element is the value assuming
55 # that all predictions are used.
56
57 precisions = eval_result.precisions[class_id - 1]
58 recalls = eval_result.recalls[class_id - 1]
59 # Get precision and recall assuming all predicted boxes are used.
60 class_results = {
61 'name': class_name,
62 'precision': precisions[-1],
63 'recall': recalls[-1]
64 }
65 results.append(class_results)
66
67 with open(output_path, 'w') as output_file:
68 output_file.write(json.dumps(results, indent=4))
69
70
71 def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,
72 predictions_uri, output_uri):
73 temp_dir = join(temp_root_dir, 'eval_predictions')
74 make_empty_dir(temp_dir)
75
76 image_path = download_and_build_vrt(temp_dir, image_uris)
77 image_dataset = rasterio.open(image_path)
78
79 ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)
80 predictions_path = download_if_needed(temp_dir, predictions_uri)
81 label_map_path = download_if_needed(temp_dir, label_map_uri)
82
83 eval_result = get_eval_result(
84 ground_truth_path, predictions_path, image_dataset)
85
86 output_path = get_local_path(temp_dir, output_uri)
87 write_results(output_path, label_map_path, eval_result)
88 upload_if_needed(output_path, output_uri)
89
90
91 @click.command()
92 @click.argument('image_uris', nargs=-1)
93 @click.argument('label_map_uri')
94 @click.argument('ground_truth_uri')
95 @click.argument('predictions_uri')
96 @click.argument('output_uri')
97 def eval_predictions(image_uris, label_map_uri, ground_truth_uri,
98 predictions_uri, output_uri):
99 """Evaluate predictions against ground truth for a single predictions file.
100
101 Args:
102 ground_truth_uri: GeoJSON file with ground truth bounding boxes
103 predictions_uri: GeoJSON file with predicted bounding boxes
104 output_uri: JSON file with metrics
105 """
106 _eval_predictions(image_uris, label_map_uri, ground_truth_uri,
107 predictions_uri, output_uri)
108
109
110 if __name__ == '__main__':
111 eval_predictions()
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/rv/detection/commands/eval_predictions.py b/src/rv/detection/commands/eval_predictions.py
--- a/src/rv/detection/commands/eval_predictions.py
+++ b/src/rv/detection/commands/eval_predictions.py
@@ -1,7 +1,6 @@
import json
-from os.path import join
+from os.path import join, dirname
-import numpy as np
import rasterio
import click
@@ -13,7 +12,7 @@
from rv.detection.commands.settings import max_num_classes, temp_root_dir
-def get_eval_result(ground_truth_path, predictions_path, image_dataset):
+def get_od_eval(ground_truth_path, predictions_path, image_dataset):
gt_boxes, gt_classes, _ = \
get_boxes_from_geojson(ground_truth_path, image_dataset)
# Subtract one because class id's start at 1, but evaluation api assumes
@@ -35,10 +34,12 @@
image_key, pred_boxes, pred_scores, pred_classes)
od_eval.evaluate()
- return od_eval.get_eval_result()
+ return od_eval
-def write_results(output_path, label_map_path, eval_result):
+def write_results(output_path, label_map_path, od_eval):
+ make_empty_dir(dirname(output_path), empty_dir=False)
+
label_map = label_map_util.load_labelmap(label_map_path)
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=max_num_classes, use_display_name=True)
@@ -53,19 +54,30 @@
# the precision or recall assuming that all predicted boxes with that
# score or above are used. So, the last element is the value assuming
# that all predictions are used.
-
+ eval_result = od_eval.get_eval_result()
precisions = eval_result.precisions[class_id - 1]
recalls = eval_result.recalls[class_id - 1]
# Get precision and recall assuming all predicted boxes are used.
+ precision = precisions[-1]
+ recall = recalls[-1]
+ f1 = (2 * precision * recall) / (precision + recall)
+
+ gt_count = od_eval.num_gt_instances_per_class[class_id -1]
+ pred_count = len(recalls)
+ count_error = pred_count - gt_count
+ norm_count_error = count_error / gt_count
+
class_results = {
'name': class_name,
- 'precision': precisions[-1],
- 'recall': recalls[-1]
+ 'precision': precision,
+ 'recall': recall,
+ 'f1': f1,
+ 'norm_count_error': norm_count_error
}
results.append(class_results)
with open(output_path, 'w') as output_file:
- output_file.write(json.dumps(results, indent=4))
+ output_file.write(json.dumps(results, indent=4, sort_keys=True))
def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,
@@ -80,11 +92,11 @@
predictions_path = download_if_needed(temp_dir, predictions_uri)
label_map_path = download_if_needed(temp_dir, label_map_uri)
- eval_result = get_eval_result(
+ od_eval = get_od_eval(
ground_truth_path, predictions_path, image_dataset)
output_path = get_local_path(temp_dir, output_uri)
- write_results(output_path, label_map_path, eval_result)
+ write_results(output_path, label_map_path, od_eval)
upload_if_needed(output_path, output_uri)
|
{"golden_diff": "diff --git a/src/rv/detection/commands/eval_predictions.py b/src/rv/detection/commands/eval_predictions.py\n--- a/src/rv/detection/commands/eval_predictions.py\n+++ b/src/rv/detection/commands/eval_predictions.py\n@@ -1,7 +1,6 @@\n import json\n-from os.path import join\n+from os.path import join, dirname\n \n-import numpy as np\n import rasterio\n import click\n \n@@ -13,7 +12,7 @@\n from rv.detection.commands.settings import max_num_classes, temp_root_dir\n \n \n-def get_eval_result(ground_truth_path, predictions_path, image_dataset):\n+def get_od_eval(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n@@ -35,10 +34,12 @@\n image_key, pred_boxes, pred_scores, pred_classes)\n \n od_eval.evaluate()\n- return od_eval.get_eval_result()\n+ return od_eval\n \n \n-def write_results(output_path, label_map_path, eval_result):\n+def write_results(output_path, label_map_path, od_eval):\n+ make_empty_dir(dirname(output_path), empty_dir=False)\n+\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n@@ -53,19 +54,30 @@\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. So, the last element is the value assuming\n # that all predictions are used.\n-\n+ eval_result = od_eval.get_eval_result()\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n+ precision = precisions[-1]\n+ recall = recalls[-1]\n+ f1 = (2 * precision * recall) / (precision + recall)\n+\n+ gt_count = od_eval.num_gt_instances_per_class[class_id -1]\n+ pred_count = len(recalls)\n+ count_error = pred_count - gt_count\n+ norm_count_error = count_error / gt_count\n+\n class_results = {\n 'name': class_name,\n- 'precision': precisions[-1],\n- 'recall': recalls[-1]\n+ 'precision': precision,\n+ 'recall': recall,\n+ 'f1': f1,\n+ 'norm_count_error': norm_count_error\n }\n results.append(class_results)\n \n with open(output_path, 'w') as output_file:\n- output_file.write(json.dumps(results, indent=4))\n+ output_file.write(json.dumps(results, indent=4, sort_keys=True))\n \n \n def _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n@@ -80,11 +92,11 @@\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n \n- eval_result = get_eval_result(\n+ od_eval = get_od_eval(\n ground_truth_path, predictions_path, image_dataset)\n \n output_path = get_local_path(temp_dir, output_uri)\n- write_results(output_path, label_map_path, eval_result)\n+ write_results(output_path, label_map_path, od_eval)\n upload_if_needed(output_path, output_uri)\n", "issue": "Add metrics for counting to eval.py\nWe should add a new metric to the detection evaluation script so that it computes how close the counts are compared to ground truth.\n", "before_files": [{"content": "import json\nfrom os.path import join\n\nimport numpy as np\nimport rasterio\nimport click\n\nfrom object_detection.utils import object_detection_evaluation, label_map_util\n\nfrom rv.utils import (\n download_if_needed, make_empty_dir, get_local_path, upload_if_needed,\n get_boxes_from_geojson, download_and_build_vrt)\nfrom 
rv.detection.commands.settings import max_num_classes, temp_root_dir\n\n\ndef get_eval_result(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n # the start at 0. You might think we could just write the label_map.pbtxt\n # so the class ids start at 0, but that throws an exception.\n gt_classes -= 1\n\n pred_boxes, pred_classes, pred_scores = \\\n get_boxes_from_geojson(predictions_path, image_dataset)\n pred_classes -= 1\n\n nb_gt_classes = len(set(gt_classes))\n od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n nb_gt_classes, matching_iou_threshold=0.1)\n image_key = 'image'\n od_eval.add_single_ground_truth_image_info(\n image_key, gt_boxes, gt_classes)\n od_eval.add_single_detected_image_info(\n image_key, pred_boxes, pred_scores, pred_classes)\n\n od_eval.evaluate()\n return od_eval.get_eval_result()\n\n\ndef write_results(output_path, label_map_path, eval_result):\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n results = []\n for class_id in range(1, len(category_index) + 1):\n class_name = category_index[class_id]['name']\n # Subtract one to account for fact that class id's start at 1.\n # precisions and recalls are lists with one element for each\n # predicted box, assuming they are sorted by score. Each element is\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. So, the last element is the value assuming\n # that all predictions are used.\n\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n class_results = {\n 'name': class_name,\n 'precision': precisions[-1],\n 'recall': recalls[-1]\n }\n results.append(class_results)\n\n with open(output_path, 'w') as output_file:\n output_file.write(json.dumps(results, indent=4))\n\n\ndef _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n temp_dir = join(temp_root_dir, 'eval_predictions')\n make_empty_dir(temp_dir)\n\n image_path = download_and_build_vrt(temp_dir, image_uris)\n image_dataset = rasterio.open(image_path)\n\n ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n\n eval_result = get_eval_result(\n ground_truth_path, predictions_path, image_dataset)\n\n output_path = get_local_path(temp_dir, output_uri)\n write_results(output_path, label_map_path, eval_result)\n upload_if_needed(output_path, output_uri)\n\n\[email protected]()\[email protected]('image_uris', nargs=-1)\[email protected]('label_map_uri')\[email protected]('ground_truth_uri')\[email protected]('predictions_uri')\[email protected]('output_uri')\ndef eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n \"\"\"Evaluate predictions against ground truth for a single predictions file.\n\n Args:\n ground_truth_uri: GeoJSON file with ground truth bounding boxes\n predictions_uri: GeoJSON file with predicted bounding boxes\n output_uri: JSON file with metrics\n \"\"\"\n 
_eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri)\n\n\nif __name__ == '__main__':\n eval_predictions()\n", "path": "src/rv/detection/commands/eval_predictions.py"}], "after_files": [{"content": "import json\nfrom os.path import join, dirname\n\nimport rasterio\nimport click\n\nfrom object_detection.utils import object_detection_evaluation, label_map_util\n\nfrom rv.utils import (\n download_if_needed, make_empty_dir, get_local_path, upload_if_needed,\n get_boxes_from_geojson, download_and_build_vrt)\nfrom rv.detection.commands.settings import max_num_classes, temp_root_dir\n\n\ndef get_od_eval(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_classes, _ = \\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n # Subtract one because class id's start at 1, but evaluation api assumes\n # the start at 0. You might think we could just write the label_map.pbtxt\n # so the class ids start at 0, but that throws an exception.\n gt_classes -= 1\n\n pred_boxes, pred_classes, pred_scores = \\\n get_boxes_from_geojson(predictions_path, image_dataset)\n pred_classes -= 1\n\n nb_gt_classes = len(set(gt_classes))\n od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n nb_gt_classes, matching_iou_threshold=0.1)\n image_key = 'image'\n od_eval.add_single_ground_truth_image_info(\n image_key, gt_boxes, gt_classes)\n od_eval.add_single_detected_image_info(\n image_key, pred_boxes, pred_scores, pred_classes)\n\n od_eval.evaluate()\n return od_eval\n\n\ndef write_results(output_path, label_map_path, od_eval):\n make_empty_dir(dirname(output_path), empty_dir=False)\n\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n results = []\n for class_id in range(1, len(category_index) + 1):\n class_name = category_index[class_id]['name']\n # Subtract one to account for fact that class id's start at 1.\n # precisions and recalls are lists with one element for each\n # predicted box, assuming they are sorted by score. Each element is\n # the precision or recall assuming that all predicted boxes with that\n # score or above are used. 
So, the last element is the value assuming\n # that all predictions are used.\n eval_result = od_eval.get_eval_result()\n precisions = eval_result.precisions[class_id - 1]\n recalls = eval_result.recalls[class_id - 1]\n # Get precision and recall assuming all predicted boxes are used.\n precision = precisions[-1]\n recall = recalls[-1]\n f1 = (2 * precision * recall) / (precision + recall)\n\n gt_count = od_eval.num_gt_instances_per_class[class_id -1]\n pred_count = len(recalls)\n count_error = pred_count - gt_count\n norm_count_error = count_error / gt_count\n\n class_results = {\n 'name': class_name,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'norm_count_error': norm_count_error\n }\n results.append(class_results)\n\n with open(output_path, 'w') as output_file:\n output_file.write(json.dumps(results, indent=4, sort_keys=True))\n\n\ndef _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n temp_dir = join(temp_root_dir, 'eval_predictions')\n make_empty_dir(temp_dir)\n\n image_path = download_and_build_vrt(temp_dir, image_uris)\n image_dataset = rasterio.open(image_path)\n\n ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n\n od_eval = get_od_eval(\n ground_truth_path, predictions_path, image_dataset)\n\n output_path = get_local_path(temp_dir, output_uri)\n write_results(output_path, label_map_path, od_eval)\n upload_if_needed(output_path, output_uri)\n\n\[email protected]()\[email protected]('image_uris', nargs=-1)\[email protected]('label_map_uri')\[email protected]('ground_truth_uri')\[email protected]('predictions_uri')\[email protected]('output_uri')\ndef eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri):\n \"\"\"Evaluate predictions against ground truth for a single predictions file.\n\n Args:\n ground_truth_uri: GeoJSON file with ground truth bounding boxes\n predictions_uri: GeoJSON file with predicted bounding boxes\n output_uri: JSON file with metrics\n \"\"\"\n _eval_predictions(image_uris, label_map_uri, ground_truth_uri,\n predictions_uri, output_uri)\n\n\nif __name__ == '__main__':\n eval_predictions()\n", "path": "src/rv/detection/commands/eval_predictions.py"}]}
| 1,475 | 780 |
gh_patches_debug_524
|
rasdani/github-patches
|
git_diff
|
encode__uvicorn-660
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reload Behavior: Documentation != Code, Unnecessary Reloads
Hey! I upgraded to 0.11.4 shortly after release today, and it has a couple of quirks.
1. The documentation [here](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-1819b1daaccb3d358620ade9c67e9118R22) says "python file changes" but the code says ["all non-dotfile changes"](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-b0da863c7164698a2ef0fa805e4a9197R40).
2. That behavior, while from the test cases seems to be intended to roll up things like `.graphql` files, also unfortunately rolls up `.pyc` files, meaning every restart is a double restart:
```
WARNING: Detected file change in 'app/main.py'. Reloading...
INFO: Shutting down
INFO: Waiting for application shutdown.
INFO: Application shutdown complete.
INFO: Finished server process [87024]
INFO: Started server process [87080]
INFO: Waiting for application startup.
INFO: Application startup complete.
WARNING: Detected file change in 'app/__pycache__/main.cpython-37.pyc'. Reloading...
INFO: Shutting down
INFO: Waiting for application shutdown.
INFO: Application shutdown complete.
INFO: Finished server process [87080]
INFO: Started server process [87093]
INFO: Waiting for application startup.
INFO: Application startup complete.
```
It might be better to use [Path.glob](https://docs.python.org/3/library/pathlib.html#pathlib.Path.glob) so users can specify file extensions and paths more explicitly than with `os.walk`, but it's published already so maybe as another flag?
3. A minor point, but worth noting in the docs: `--reload_dir` on the CLI becomes `reload_dirs=['my_dir']` in code: `uvicorn.run('app.main:app', host="0.0.0.0", port=8000, reload=True, reload_dirs=['app'])`
Thanks for making this great library!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/__init__.py`
Content:
```
1 from uvicorn.config import Config
2 from uvicorn.main import Server, main, run
3
4 __version__ = "0.11.4"
5 __all__ = ["main", "run", "Config", "Server"]
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py
--- a/uvicorn/__init__.py
+++ b/uvicorn/__init__.py
@@ -1,5 +1,5 @@
from uvicorn.config import Config
from uvicorn.main import Server, main, run
-__version__ = "0.11.4"
+__version__ = "0.11.5"
__all__ = ["main", "run", "Config", "Server"]
|
{"golden_diff": "diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py\n--- a/uvicorn/__init__.py\n+++ b/uvicorn/__init__.py\n@@ -1,5 +1,5 @@\n from uvicorn.config import Config\n from uvicorn.main import Server, main, run\n \n-__version__ = \"0.11.4\"\n+__version__ = \"0.11.5\"\n __all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "issue": "Reload Behavior: Documentation != Code, Unnecessary Reloads\nHey! I upgraded to 0.11.4 shortly after release today, and it has a couple of quirks.\r\n\r\n1. The documentation [here](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-1819b1daaccb3d358620ade9c67e9118R22) says \"python file changes\" but the code says [\"all non-dotfile changes\"](https://github.com/encode/uvicorn/compare/0.11.3...0.11.4#diff-b0da863c7164698a2ef0fa805e4a9197R40).\r\n2. That behavior, while from the test cases seems to be intended to roll up things like `.graphql` files, also unfortunately rolls up `.pyc` files, meaning every restart is a double restart:\r\n\r\n```\r\nWARNING: Detected file change in 'app/main.py'. Reloading...\r\nINFO: Shutting down\r\nINFO: Waiting for application shutdown.\r\nINFO: Application shutdown complete.\r\nINFO: Finished server process [87024]\r\nINFO: Started server process [87080]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nWARNING: Detected file change in 'app/__pycache__/main.cpython-37.pyc'. Reloading...\r\nINFO: Shutting down\r\nINFO: Waiting for application shutdown.\r\nINFO: Application shutdown complete.\r\nINFO: Finished server process [87080]\r\nINFO: Started server process [87093]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\n```\r\n\r\nIt might be better to use [Path.glob](https://docs.python.org/3/library/pathlib.html#pathlib.Path.glob) so users can specify file extensions and paths more explicitly than with `os.walk`, but it's published already so maybe as another flag?\r\n\r\n3. A minor point, but worth noting in the docs: `--reload_dir` on the CLI becomes `reload_dirs=['my_dir']` in code: `uvicorn.run('app.main:app', host=\"0.0.0.0\", port=8000, reload=True, reload_dirs=['app'])`\r\n\r\nThanks for making this great library!\n", "before_files": [{"content": "from uvicorn.config import Config\nfrom uvicorn.main import Server, main, run\n\n__version__ = \"0.11.4\"\n__all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "path": "uvicorn/__init__.py"}], "after_files": [{"content": "from uvicorn.config import Config\nfrom uvicorn.main import Server, main, run\n\n__version__ = \"0.11.5\"\n__all__ = [\"main\", \"run\", \"Config\", \"Server\"]\n", "path": "uvicorn/__init__.py"}]}
| 840 | 110 |
gh_patches_debug_64390
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-2150
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider tmobile_us is broken
During the global build at 2021-06-30-14-42-26, spider **tmobile_us** failed with **7563 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/tmobile_us.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/tmobile_us.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 from urllib.parse import urlencode
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Monday': 'Mo',
11 'Tuesday': 'Tu',
12 'Wednesday': 'We',
13 'Thursday': 'Th',
14 'Friday': 'Fr',
15 'Saturday': 'Sa',
16 'Sunday': 'Su'}
17
18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
19
20
21 class TMobileUSSpider(scrapy.Spider):
22 name = "tmobile_us"
23 item_attributes = { 'brand': "T-Mobile" }
24 allowed_domains = ["www.t-mobile.com"]
25 download_delay = 0.2
26
27 def parse_hours(self, store_hours):
28 opening_hours = OpeningHours()
29 if store_hours is None:
30 return
31
32 for store_day in store_hours:
33 day = DAY_MAPPING[store_day.get("day")]
34 open_time = store_day.get("opens")
35 close_time = store_day.get("closes")
36 if open_time is None and close_time is None:
37 continue
38 opening_hours.add_range(day=day,
39 open_time=open_time,
40 close_time=close_time,
41 time_format='%H:%M'
42 )
43
44 return opening_hours.as_opening_hours()
45
46 def start_requests(self):
47 url = BASE_URL
48
49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
50
51 next(points) # Ignore the header
52 for point in points:
53 _, lat, lon = point.strip().split(',')
54
55 params = {
56 'latitude': '{}'.format(lat),
57 'longitude': '{}'.format(lon),
58 'count': '1000',
59 'radius': '25',
60 'ignoreLoadingBar': 'false'
61 }
62
63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse)
64
65 def parse(self, response):
66 data = json.loads(response.body_as_unicode())
67
68 for store in data:
69 properties = {
70 'name': store["name"],
71 'ref': store["id"],
72 'addr_full': store["location"]["address"]["streetAddress"],
73 'city': store["location"]["address"]["addressLocality"],
74 'state': store["location"]["address"]["addressRegion"],
75 'postcode': store["location"]["address"]["postalCode"],
76 'phone': store.get("telephone"),
77 'website': store.get("url") or response.url,
78 'lat': float(store["location"]["latitude"]),
79 'lon': float(store["location"]["longitude"]),
80 }
81
82 hours = self.parse_hours(store.get("hours", []))
83 if hours:
84 properties["opening_hours"] = hours
85
86 yield GeojsonPointItem(**properties)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py
--- a/locations/spiders/tmobile_us.py
+++ b/locations/spiders/tmobile_us.py
@@ -15,7 +15,7 @@
'Saturday': 'Sa',
'Sunday': 'Su'}
-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'
class TMobileUSSpider(scrapy.Spider):
|
{"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,7 +15,7 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-06-30-14-42-26, spider **tmobile_us** failed with **7563 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/logs/tmobile_us.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-30-14-42-26/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import 
GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]}
| 1,227 | 150 |
gh_patches_debug_1059
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-5645
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image uploader does not recognise uploaded file
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
As of today (8-09-2021) the image uploader does not recognise that an image has been selected and uploaded. Instead, it displays "Please provide an image URL" after hitting submit.
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...any edition
2. Do ...upload an image as a cover and submit.
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: "Please provide an image URL"
* Expected: Image should be added as cover.
### Details
- **Logged in (Y/N)?** y
- **Browser type/version?** Chrome Version 92.0.4515.159 (Official Build) (x86_64)
- **Operating system?** MacOS
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `openlibrary/plugins/upstream/covers.py`
Content:
```
1 """Handle book cover/author photo upload.
2 """
3 from logging import getLogger
4
5 import requests
6 import six
7 import web
8 from six import BytesIO
9
10 from infogami.utils import delegate
11 from infogami.utils.view import safeint
12 from openlibrary import accounts
13 from openlibrary.plugins.upstream.models import Image
14 from openlibrary.plugins.upstream.utils import get_coverstore_url, render_template
15
16 logger = getLogger("openlibrary.plugins.upstream.covers")
17 def setup():
18 pass
19
20 class add_cover(delegate.page):
21 path = "(/books/OL\d+M)/add-cover"
22 cover_category = "b"
23
24 def GET(self, key):
25 book = web.ctx.site.get(key)
26 return render_template('covers/add', book)
27
28 def POST(self, key):
29 book = web.ctx.site.get(key)
30 if not book:
31 raise web.notfound("")
32
33 i = web.input(file={}, url="")
34
35 # remove references to field storage objects
36 web.ctx.pop("_fieldstorage", None)
37
38 data = self.upload(key, i)
39 coverid = data.get('id')
40
41 if coverid:
42 self.save(book, coverid, url=i.url)
43 cover = Image(web.ctx.site, "b", coverid)
44 return render_template("covers/saved", cover)
45 else:
46 return render_template("covers/add", book, {'url': i.url}, data)
47
48 def upload(self, key, i):
49 """Uploads a cover to coverstore and returns the response."""
50 olid = key.split("/")[-1]
51
52 if i.file is not None and hasattr(i.file, 'value'):
53 data = i.file.value
54 else:
55 data = None
56
57 if i.url and i.url.strip() == "http://":
58 i.url = ""
59
60 user = accounts.get_current_user()
61 params = {
62 "author": user and user.key,
63 "source_url": i.url,
64 "olid": olid,
65 "ip": web.ctx.ip
66 }
67
68 upload_url = '%s/%s/upload2' % (
69 get_coverstore_url(), self.cover_category)
70
71 if upload_url.startswith("//"):
72 upload_url = "http:" + upload_url
73
74 try:
75 files = {'data': BytesIO(data)}
76 response = requests.post(upload_url, data=params, files=files)
77 return web.storage(response.json())
78 except requests.HTTPError as e:
79 logger.exception("Covers upload failed")
80 return web.storage({'error': str(e)})
81
82 def save(self, book, coverid, url=None):
83 book.covers = [coverid] + [cover.id for cover in book.get_covers()]
84 book._save("Added new cover", action="add-cover", data={"url": url})
85
86 class add_work_cover(add_cover):
87 path = "(/works/OL\d+W)/add-cover"
88 cover_category = "w"
89
90 def upload(self, key, i):
91 if "coverid" in i and safeint(i.coverid):
92 return web.storage(id=int(i.coverid))
93 else:
94 return add_cover.upload(self, key, i)
95
96 class add_photo(add_cover):
97 path = "(/authors/OL\d+A)/add-photo"
98 cover_category = "a"
99
100 def save(self, author, photoid, url=None):
101 author.photos = [photoid] + [photo.id for photo in author.get_photos()]
102 author._save("Added new photo", action="add-photo", data={"url": url})
103
104 class manage_covers(delegate.page):
105 path = "(/books/OL\d+M)/manage-covers"
106 def GET(self, key):
107 book = web.ctx.site.get(key)
108 if not book:
109 raise web.notfound()
110 return render_template("covers/manage", key, self.get_images(book))
111
112 def get_images(self, book):
113 return book.get_covers()
114
115 def get_image(self, book):
116 return book.get_cover()
117
118 def save_images(self, book, covers):
119 book.covers = covers
120 book._save('Update covers')
121
122 def POST(self, key):
123 book = web.ctx.site.get(key)
124 if not book:
125 raise web.notfound()
126
127 images = web.input(image=[]).image
128 if '-' in images:
129 images = [int(id) for id in images[:images.index('-')]]
130 self.save_images(book, images)
131 return render_template("covers/saved", self.get_image(book), showinfo=False)
132 else:
133 # ERROR
134 pass
135
136 class manage_work_covers(manage_covers):
137 path = "(/works/OL\d+W)/manage-covers"
138
139
140 class manage_photos(manage_covers):
141 path = "(/authors/OL\d+A)/manage-photos"
142
143 def get_images(self, author):
144 return author.get_photos()
145
146 def get_image(self, author):
147 return author.get_photo()
148
149 def save_images(self, author, photos):
150 author.photos = photos
151 author._save('Update photos')
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py
--- a/openlibrary/plugins/upstream/covers.py
+++ b/openlibrary/plugins/upstream/covers.py
@@ -54,7 +54,7 @@
else:
data = None
- if i.url and i.url.strip() == "http://":
+ if i.url and i.url.strip() == "https://":
i.url = ""
user = accounts.get_current_user()
|
{"golden_diff": "diff --git a/openlibrary/plugins/upstream/covers.py b/openlibrary/plugins/upstream/covers.py\n--- a/openlibrary/plugins/upstream/covers.py\n+++ b/openlibrary/plugins/upstream/covers.py\n@@ -54,7 +54,7 @@\n else:\n data = None\n \n- if i.url and i.url.strip() == \"http://\":\n+ if i.url and i.url.strip() == \"https://\":\n i.url = \"\"\n \n user = accounts.get_current_user()\n", "issue": "Image uploader does not recognise uploaded file\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\nAs of today (8-09-2021) the image uploader does not recognise that an image has been selected and uploaded. Instead, it displays \"Please provide an image URL\" after hitting submit.\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...any edition\r\n2. Do ...upload an image as a cover and submit.\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: \"Please provide an image URL\"\r\n* Expected: Image should be added as cover.\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** y\r\n- **Browser type/version?** Chrome Version 92.0.4515.159 (Official Build) (x86_64)\r\n- **Operating system?** MacOS\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. 
-->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nfrom logging import getLogger\n\nimport requests\nimport six\nimport web\nfrom six import BytesIO\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n\nlogger = getLogger(\"openlibrary.plugins.upstream.covers\")\ndef setup():\n pass\n\nclass add_cover(delegate.page):\n path = \"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n coverid = data.get('id')\n\n if coverid:\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"http://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip\n }\n\n upload_url = '%s/%s/upload2' % (\n get_coverstore_url(), self.cover_category)\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n files = {'data': BytesIO(data)}\n response = requests.post(upload_url, data=params, files=files)\n return web.storage(response.json())\n except requests.HTTPError as e:\n logger.exception(\"Covers upload failed\")\n return web.storage({'error': str(e)})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\"Added new cover\", action=\"add-cover\", data={\"url\": url})\n\nclass add_work_cover(add_cover):\n path = \"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\nclass add_photo(add_cover):\n path = \"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\nclass manage_covers(delegate.page):\n path = \"(/books/OL\\d+M)/manage-covers\"\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = 
[int(id) for id in images[:images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\nclass manage_work_covers(manage_covers):\n path = \"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = \"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}], "after_files": [{"content": "\"\"\"Handle book cover/author photo upload.\n\"\"\"\nfrom logging import getLogger\n\nimport requests\nimport six\nimport web\nfrom six import BytesIO\n\nfrom infogami.utils import delegate\nfrom infogami.utils.view import safeint\nfrom openlibrary import accounts\nfrom openlibrary.plugins.upstream.models import Image\nfrom openlibrary.plugins.upstream.utils import get_coverstore_url, render_template\n\nlogger = getLogger(\"openlibrary.plugins.upstream.covers\")\ndef setup():\n pass\n\nclass add_cover(delegate.page):\n path = \"(/books/OL\\d+M)/add-cover\"\n cover_category = \"b\"\n\n def GET(self, key):\n book = web.ctx.site.get(key)\n return render_template('covers/add', book)\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound(\"\")\n\n i = web.input(file={}, url=\"\")\n\n # remove references to field storage objects\n web.ctx.pop(\"_fieldstorage\", None)\n\n data = self.upload(key, i)\n coverid = data.get('id')\n\n if coverid:\n self.save(book, coverid, url=i.url)\n cover = Image(web.ctx.site, \"b\", coverid)\n return render_template(\"covers/saved\", cover)\n else:\n return render_template(\"covers/add\", book, {'url': i.url}, data)\n\n def upload(self, key, i):\n \"\"\"Uploads a cover to coverstore and returns the response.\"\"\"\n olid = key.split(\"/\")[-1]\n\n if i.file is not None and hasattr(i.file, 'value'):\n data = i.file.value\n else:\n data = None\n\n if i.url and i.url.strip() == \"https://\":\n i.url = \"\"\n\n user = accounts.get_current_user()\n params = {\n \"author\": user and user.key,\n \"source_url\": i.url,\n \"olid\": olid,\n \"ip\": web.ctx.ip\n }\n\n upload_url = '%s/%s/upload2' % (\n get_coverstore_url(), self.cover_category)\n\n if upload_url.startswith(\"//\"):\n upload_url = \"http:\" + upload_url\n\n try:\n files = {'data': BytesIO(data)}\n response = requests.post(upload_url, data=params, files=files)\n return web.storage(response.json())\n except requests.HTTPError as e:\n logger.exception(\"Covers upload failed\")\n return web.storage({'error': str(e)})\n\n def save(self, book, coverid, url=None):\n book.covers = [coverid] + [cover.id for cover in book.get_covers()]\n book._save(\"Added new cover\", action=\"add-cover\", data={\"url\": url})\n\nclass add_work_cover(add_cover):\n path = \"(/works/OL\\d+W)/add-cover\"\n cover_category = \"w\"\n\n def upload(self, key, i):\n if \"coverid\" in i and safeint(i.coverid):\n return web.storage(id=int(i.coverid))\n else:\n return add_cover.upload(self, key, i)\n\nclass add_photo(add_cover):\n path = \"(/authors/OL\\d+A)/add-photo\"\n cover_category = \"a\"\n\n def save(self, author, photoid, url=None):\n author.photos = [photoid] + [photo.id for photo in author.get_photos()]\n author._save(\"Added new photo\", action=\"add-photo\", data={\"url\": url})\n\nclass manage_covers(delegate.page):\n path = 
\"(/books/OL\\d+M)/manage-covers\"\n def GET(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n return render_template(\"covers/manage\", key, self.get_images(book))\n\n def get_images(self, book):\n return book.get_covers()\n\n def get_image(self, book):\n return book.get_cover()\n\n def save_images(self, book, covers):\n book.covers = covers\n book._save('Update covers')\n\n def POST(self, key):\n book = web.ctx.site.get(key)\n if not book:\n raise web.notfound()\n\n images = web.input(image=[]).image\n if '-' in images:\n images = [int(id) for id in images[:images.index('-')]]\n self.save_images(book, images)\n return render_template(\"covers/saved\", self.get_image(book), showinfo=False)\n else:\n # ERROR\n pass\n\nclass manage_work_covers(manage_covers):\n path = \"(/works/OL\\d+W)/manage-covers\"\n\n\nclass manage_photos(manage_covers):\n path = \"(/authors/OL\\d+A)/manage-photos\"\n\n def get_images(self, author):\n return author.get_photos()\n\n def get_image(self, author):\n return author.get_photo()\n\n def save_images(self, author, photos):\n author.photos = photos\n author._save('Update photos')\n", "path": "openlibrary/plugins/upstream/covers.py"}]}
| 2,015 | 108 |
gh_patches_debug_36368
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-4017
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes when cryptography module is missing
### Brief description
scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes in multiple environments
### Scapy version
2.5.0
### Python version
3.11.2
### Operating system
macOS Ventura 13.3.1 (with M1 chip)
### Additional environment information
_No response_
### How to reproduce
```
$ python --version
Python 3.11.2
$ pip install scapy
$ pip show scapy
Name: scapy
Version: 2.5.0
...
$ python
Python 3.11.2 (main, Feb 16 2023, 02:55:59) [Clang 14.0.0 (clang-1400.0.29.202)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF
>>> TLS13_HKDF("sha256")
```
We can also reproduce from the default python docker image:
```
$ docker run -it --entrypoint bash python:latest
# pip install scapy
# python
>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF
>>> TLS13_HKDF("sha256")
```
### Actual result
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/homebrew/lib/python3.11/site-packages/scapy/layers/tls/crypto/hkdf.py", line 23, in __init__
self.hash = _get_hash(hash_name)
^^^^^^^^^^^^^^^^^^^^
TypeError: 'NoneType' object is not callable
### Expected result
<scapy.layers.tls.crypto.hkdf.TLS13_HKDF object at 0x...>
### Related resources
_No response_
--- END ISSUE ---
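For illustration, a minimal sketch of the failure mode and of the guard-decorator pattern that the accompanying patch applies via `crypto_validator` from `scapy.config`. The decorator body and error message below are assumptions for illustration only, not scapy's actual implementation; the traceback above shows that `_get_hash` is `None` when the `cryptography` backend is missing, which is what the sketch reproduces.

```python
# Sketch (names and messages assumed): without the optional cryptography
# backend, _get_hash is left as None and calling it raises the bare TypeError
# from the report. A guard decorator turns that into an actionable error.
crypto_valid = False          # stands in for conf.crypto_valid
_get_hash = None              # what the import leaves behind without cryptography


def crypto_validator(func):
    def wrapper(*args, **kwargs):
        if not crypto_valid:
            raise ImportError("Cannot execute crypto-related call: "
                              "python-cryptography is not installed")
        return func(*args, **kwargs)
    return wrapper


class TLS13_HKDF:
    @crypto_validator
    def __init__(self, hash_name="sha256"):
        self.hash = _get_hash(hash_name)


try:
    TLS13_HKDF("sha256")
except ImportError as exc:
    print("clear failure instead of a TypeError:", exc)
```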
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scapy/layers/tls/crypto/hkdf.py`
Content:
```
1 # SPDX-License-Identifier: GPL-2.0-only
2 # This file is part of Scapy
3 # See https://scapy.net/ for more information
4 # Copyright (C) 2017 Maxence Tury
5
6 """
7 Stateless HKDF for TLS 1.3.
8 """
9
10 import struct
11
12 from scapy.config import conf
13 from scapy.layers.tls.crypto.pkcs1 import _get_hash
14
15 if conf.crypto_valid:
16 from cryptography.hazmat.backends import default_backend
17 from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand
18 from cryptography.hazmat.primitives.hashes import Hash
19 from cryptography.hazmat.primitives.hmac import HMAC
20
21
22 class TLS13_HKDF(object):
23 def __init__(self, hash_name="sha256"):
24 self.hash = _get_hash(hash_name)
25
26 def extract(self, salt, ikm):
27 h = self.hash
28 hkdf = HKDF(h, h.digest_size, salt, None, default_backend())
29 if ikm is None:
30 ikm = b"\x00" * h.digest_size
31 return hkdf._extract(ikm)
32
33 def expand(self, prk, info, L):
34 h = self.hash
35 hkdf = HKDFExpand(h, L, info, default_backend())
36 return hkdf.derive(prk)
37
38 def expand_label(self, secret, label, hash_value, length):
39 hkdf_label = struct.pack("!H", length)
40 hkdf_label += struct.pack("B", 6 + len(label))
41 hkdf_label += b"tls13 "
42 hkdf_label += label
43 hkdf_label += struct.pack("B", len(hash_value))
44 hkdf_label += hash_value
45 return self.expand(secret, hkdf_label, length)
46
47 def derive_secret(self, secret, label, messages):
48 h = Hash(self.hash, backend=default_backend())
49 h.update(messages)
50 hash_messages = h.finalize()
51 hash_len = self.hash.digest_size
52 return self.expand_label(secret, label, hash_messages, hash_len)
53
54 def compute_verify_data(self, basekey, handshake_context):
55 hash_len = self.hash.digest_size
56 finished_key = self.expand_label(basekey, b"finished", b"", hash_len)
57
58 h = Hash(self.hash, backend=default_backend())
59 h.update(handshake_context)
60 hash_value = h.finalize()
61
62 hm = HMAC(finished_key, self.hash, default_backend())
63 hm.update(hash_value)
64 return hm.finalize()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scapy/layers/tls/crypto/hkdf.py b/scapy/layers/tls/crypto/hkdf.py
--- a/scapy/layers/tls/crypto/hkdf.py
+++ b/scapy/layers/tls/crypto/hkdf.py
@@ -9,7 +9,7 @@
import struct
-from scapy.config import conf
+from scapy.config import conf, crypto_validator
from scapy.layers.tls.crypto.pkcs1 import _get_hash
if conf.crypto_valid:
@@ -20,9 +20,11 @@
class TLS13_HKDF(object):
+ @crypto_validator
def __init__(self, hash_name="sha256"):
self.hash = _get_hash(hash_name)
+ @crypto_validator
def extract(self, salt, ikm):
h = self.hash
hkdf = HKDF(h, h.digest_size, salt, None, default_backend())
@@ -30,11 +32,13 @@
ikm = b"\x00" * h.digest_size
return hkdf._extract(ikm)
+ @crypto_validator
def expand(self, prk, info, L):
h = self.hash
hkdf = HKDFExpand(h, L, info, default_backend())
return hkdf.derive(prk)
+ @crypto_validator
def expand_label(self, secret, label, hash_value, length):
hkdf_label = struct.pack("!H", length)
hkdf_label += struct.pack("B", 6 + len(label))
@@ -44,6 +48,7 @@
hkdf_label += hash_value
return self.expand(secret, hkdf_label, length)
+ @crypto_validator
def derive_secret(self, secret, label, messages):
h = Hash(self.hash, backend=default_backend())
h.update(messages)
@@ -51,6 +56,7 @@
hash_len = self.hash.digest_size
return self.expand_label(secret, label, hash_messages, hash_len)
+ @crypto_validator
def compute_verify_data(self, basekey, handshake_context):
hash_len = self.hash.digest_size
finished_key = self.expand_label(basekey, b"finished", b"", hash_len)
|
{"golden_diff": "diff --git a/scapy/layers/tls/crypto/hkdf.py b/scapy/layers/tls/crypto/hkdf.py\n--- a/scapy/layers/tls/crypto/hkdf.py\n+++ b/scapy/layers/tls/crypto/hkdf.py\n@@ -9,7 +9,7 @@\n \n import struct\n \n-from scapy.config import conf\n+from scapy.config import conf, crypto_validator\n from scapy.layers.tls.crypto.pkcs1 import _get_hash\n \n if conf.crypto_valid:\n@@ -20,9 +20,11 @@\n \n \n class TLS13_HKDF(object):\n+ @crypto_validator\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n \n+ @crypto_validator\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n@@ -30,11 +32,13 @@\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n \n+ @crypto_validator\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n \n+ @crypto_validator\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n@@ -44,6 +48,7 @@\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n \n+ @crypto_validator\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n@@ -51,6 +56,7 @@\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n \n+ @crypto_validator\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n", "issue": "scapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes when cryptography module is missing\n### Brief description\n\nscapy.layers.tls.crypto.hkdf.TLS13_HKDF crashes in multiple environments\n\n### Scapy version\n\n2.5.0\n\n### Python version\n\n3.11.2\n\n### Operating system\n\nmacOS Ventura 13.3.1 (with M1 chip)\n\n### Additional environment information\n\n_No response_\n\n### How to reproduce\n\n```\r\n$ python --version\r\nPython 3.11.2\r\n$ pip install scapy\r\n$ pip show scapy\r\nName: scapy\r\nVersion: 2.5.0\r\n...\r\n$ python\r\nPython 3.11.2 (main, Feb 16 2023, 02:55:59) [Clang 14.0.0 (clang-1400.0.29.202)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF\r\n>>> TLS13_HKDF(\"sha256\")\r\n```\r\n\r\nWe can also reproduce from the default python docker image:\r\n```\r\n$ docker run -it --entrypoint bash python:latest\r\n# pip install scapy\r\n# python\r\n>>> from scapy.layers.tls.crypto.hkdf import TLS13_HKDF\r\n>>> TLS13_HKDF(\"sha256\")\r\n```\n\n### Actual result\n\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/homebrew/lib/python3.11/site-packages/scapy/layers/tls/crypto/hkdf.py\", line 23, in __init__\r\n self.hash = _get_hash(hash_name)\r\n ^^^^^^^^^^^^^^^^^^^^\r\nTypeError: 'NoneType' object is not callable\n\n### Expected result\n\n<scapy.layers.tls.crypto.hkdf.TLS13_HKDF object at 0x...>\n\n### Related resources\n\n_No response_\n", "before_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) 2017 Maxence Tury\n\n\"\"\"\nStateless HKDF for TLS 1.3.\n\"\"\"\n\nimport struct\n\nfrom scapy.config import conf\nfrom scapy.layers.tls.crypto.pkcs1 import _get_hash\n\nif conf.crypto_valid:\n from 
cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand\n from cryptography.hazmat.primitives.hashes import Hash\n from cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass TLS13_HKDF(object):\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n if ikm is None:\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n hkdf_label += b\"tls13 \"\n hkdf_label += label\n hkdf_label += struct.pack(\"B\", len(hash_value))\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n hash_messages = h.finalize()\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n\n h = Hash(self.hash, backend=default_backend())\n h.update(handshake_context)\n hash_value = h.finalize()\n\n hm = HMAC(finished_key, self.hash, default_backend())\n hm.update(hash_value)\n return hm.finalize()\n", "path": "scapy/layers/tls/crypto/hkdf.py"}], "after_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) 2017 Maxence Tury\n\n\"\"\"\nStateless HKDF for TLS 1.3.\n\"\"\"\n\nimport struct\n\nfrom scapy.config import conf, crypto_validator\nfrom scapy.layers.tls.crypto.pkcs1 import _get_hash\n\nif conf.crypto_valid:\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives.kdf.hkdf import HKDF, HKDFExpand\n from cryptography.hazmat.primitives.hashes import Hash\n from cryptography.hazmat.primitives.hmac import HMAC\n\n\nclass TLS13_HKDF(object):\n @crypto_validator\n def __init__(self, hash_name=\"sha256\"):\n self.hash = _get_hash(hash_name)\n\n @crypto_validator\n def extract(self, salt, ikm):\n h = self.hash\n hkdf = HKDF(h, h.digest_size, salt, None, default_backend())\n if ikm is None:\n ikm = b\"\\x00\" * h.digest_size\n return hkdf._extract(ikm)\n\n @crypto_validator\n def expand(self, prk, info, L):\n h = self.hash\n hkdf = HKDFExpand(h, L, info, default_backend())\n return hkdf.derive(prk)\n\n @crypto_validator\n def expand_label(self, secret, label, hash_value, length):\n hkdf_label = struct.pack(\"!H\", length)\n hkdf_label += struct.pack(\"B\", 6 + len(label))\n hkdf_label += b\"tls13 \"\n hkdf_label += label\n hkdf_label += struct.pack(\"B\", len(hash_value))\n hkdf_label += hash_value\n return self.expand(secret, hkdf_label, length)\n\n @crypto_validator\n def derive_secret(self, secret, label, messages):\n h = Hash(self.hash, backend=default_backend())\n h.update(messages)\n hash_messages = h.finalize()\n hash_len = self.hash.digest_size\n return self.expand_label(secret, label, hash_messages, hash_len)\n\n @crypto_validator\n def compute_verify_data(self, basekey, handshake_context):\n hash_len = 
self.hash.digest_size\n finished_key = self.expand_label(basekey, b\"finished\", b\"\", hash_len)\n\n h = Hash(self.hash, backend=default_backend())\n h.update(handshake_context)\n hash_value = h.finalize()\n\n hm = HMAC(finished_key, self.hash, default_backend())\n hm.update(hash_value)\n return hm.finalize()\n", "path": "scapy/layers/tls/crypto/hkdf.py"}]}
| 1,396 | 494 |
gh_patches_debug_4597
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-16277
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ThresholdedReLU crashes when the input is a list
**System information**.
- Have I written custom code (as opposed to using a stock example script provided in Keras): Yes
- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu
- TensorFlow installed from (source or binary): binary
- TensorFlow version (use command below): 2.8.0
- Python version: 3.7.12
- Bazel version (if compiling from source): N/A
- GPU model and memory: N/A
- Exact command to reproduce: https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing
**Describe the problem**.
`keras.layers.ThresholdedReLU` fails to accept a list input by reporting the following error:
```
[/usr/local/lib/python3.7/dist-packages/keras/layers/advanced_activations.py](https://localhost:8080/#) in call(self, inputs)
262
263 def call(self, inputs):
--> 264 theta = tf.cast(self.theta, inputs.dtype)
265 return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)
266
AttributeError: Exception encountered when calling layer "thresholded_re_lu_1" (type ThresholdedReLU).
'list' object has no attribute 'dtype'
Call arguments received:
• inputs=['tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)']
```
In contrast, `keras.layers.ReLU` and `keras.layers.LeakyReLU` can accept the list input.
**Describe the current behavior**.
`keras.layers.ThresholdedReLU` crashes when the input is a list
**Describe the expected behavior**.
ThresholdedReLU can accept the list input.
**[Contributing](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md)**.
- Do you want to contribute a PR? (yes/no):
- If yes, please read [this page](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) for instructions
- Briefly describe your candidate solution(if contributing):
After comparing the code between `ThresholdedReLU` and `ReLU`, I think the reason is that `ReLU` directly uses the backend implementation: [keras/layers/activation/relu.py#L96](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/relu.py#L96) while ThresholdedReLU implements it by itself: [keras/layers/activation/thresholded_relu.py#L63](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/thresholded_relu.py#L63). Not sure why such an implementation inconsistency exists, but I think we can do something similar in thresholded_relu.py#L61-63, as [backend.relu](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/backend.py#L4963) does:
```
def call(self, inputs):
dtype = getattr(inputs, 'dtype', floatx())
theta = tf.cast(self.theta, dtype)
return inputs * tf.cast(tf.greater(inputs, theta), dtype)
```
Of course, we can also directly use the `backend.relu` for the implementation of `ThresholdedReLU` like `ReLU` and `LeakyReLU` do.
**Standalone code to reproduce the issue**.
You can access this [link](https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing) or run the following code:
```
import keras
x = keras.layers.Input(shape=(1,10))
y = keras.layers.ThresholdedReLU()([x,x,x])
model = keras.models.Model(x,y)
model.summary()
```
--- END ISSUE ---
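For illustration, a small standalone sketch of why reading the dtype from the layer rather than from `inputs` avoids the crash on list inputs. Plain eager tensors are used here instead of a functional model, and the per-element loop is only for demonstration; inside Keras the fixed layer reads `self.compute_dtype` instead.

```python
import tensorflow as tf

theta = 1.0
inputs = [tf.constant([[0.5, 1.5]]), tf.constant([[2.0, 0.1]])]  # list input, like [x, x, x]

# inputs.dtype raises AttributeError on a Python list; a layer-level dtype
# (self.compute_dtype in the fixed layer) is always available instead.
dtype = getattr(inputs, "dtype", tf.float32)

outputs = [x * tf.cast(tf.greater(x, tf.cast(theta, dtype)), dtype) for x in inputs]
print([o.numpy().tolist() for o in outputs])  # [[[0.0, 1.5]], [[2.0, 0.0]]]
```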
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/layers/activation/thresholded_relu.py`
Content:
```
1 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Thresholded Rectified Linear Unit activation layer."""
16 # pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import
17
18 from keras import backend
19 from keras.engine.base_layer import Layer
20 from keras.utils import tf_utils
21 import tensorflow.compat.v2 as tf
22
23 from tensorflow.python.util.tf_export import keras_export
24
25
26 @keras_export('keras.layers.ThresholdedReLU')
27 class ThresholdedReLU(Layer):
28 """Thresholded Rectified Linear Unit.
29
30 It follows:
31
32 ```
33 f(x) = x for x > theta
34 f(x) = 0 otherwise`
35 ```
36
37 Input shape:
38 Arbitrary. Use the keyword argument `input_shape`
39 (tuple of integers, does not include the samples axis)
40 when using this layer as the first layer in a model.
41
42 Output shape:
43 Same shape as the input.
44
45 Args:
46 theta: Float >= 0. Threshold location of activation.
47 """
48
49 def __init__(self, theta=1.0, **kwargs):
50 super(ThresholdedReLU, self).__init__(**kwargs)
51 if theta is None:
52 raise ValueError(
53 'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'
54 f' Received: {theta}')
55 if theta < 0:
56 raise ValueError('The theta value of a Thresholded ReLU layer '
57 f'should be >=0. Received: {theta}')
58 self.supports_masking = True
59 self.theta = backend.cast_to_floatx(theta)
60
61 def call(self, inputs):
62 theta = tf.cast(self.theta, inputs.dtype)
63 return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)
64
65 def get_config(self):
66 config = {'theta': float(self.theta)}
67 base_config = super(ThresholdedReLU, self).get_config()
68 return dict(list(base_config.items()) + list(config.items()))
69
70 @tf_utils.shape_type_conversion
71 def compute_output_shape(self, input_shape):
72 return input_shape
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras/layers/activation/thresholded_relu.py b/keras/layers/activation/thresholded_relu.py
--- a/keras/layers/activation/thresholded_relu.py
+++ b/keras/layers/activation/thresholded_relu.py
@@ -59,8 +59,8 @@
self.theta = backend.cast_to_floatx(theta)
def call(self, inputs):
- theta = tf.cast(self.theta, inputs.dtype)
- return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)
+ dtype = self.compute_dtype
+ return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)
def get_config(self):
config = {'theta': float(self.theta)}
|
{"golden_diff": "diff --git a/keras/layers/activation/thresholded_relu.py b/keras/layers/activation/thresholded_relu.py\n--- a/keras/layers/activation/thresholded_relu.py\n+++ b/keras/layers/activation/thresholded_relu.py\n@@ -59,8 +59,8 @@\n self.theta = backend.cast_to_floatx(theta)\n \n def call(self, inputs):\n- theta = tf.cast(self.theta, inputs.dtype)\n- return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)\n+ dtype = self.compute_dtype\n+ return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)\n \n def get_config(self):\n config = {'theta': float(self.theta)}\n", "issue": "ThresholdedReLU crashes when the input is a list\n**System information**.\r\n- Have I written custom code (as opposed to using a stock example script provided in Keras): Yes\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu\r\n- TensorFlow installed from (source or binary): binary\r\n- TensorFlow version (use command below): 2.8.0\r\n- Python version: 3.7.12\r\n- Bazel version (if compiling from source): N/A\r\n- GPU model and memory: N/A\r\n- Exact command to reproduce: https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing\r\n\r\n**Describe the problem**.\r\n`keras.layers.ThresholdedReLU` fails to accept a list input by reporting the following error:\r\n\r\n```\r\n[/usr/local/lib/python3.7/dist-packages/keras/layers/advanced_activations.py](https://localhost:8080/#) in call(self, inputs)\r\n 262 \r\n 263 def call(self, inputs):\r\n--> 264 theta = tf.cast(self.theta, inputs.dtype)\r\n 265 return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)\r\n 266 \r\n\r\nAttributeError: Exception encountered when calling layer \"thresholded_re_lu_1\" (type ThresholdedReLU).\r\n\r\n'list' object has no attribute 'dtype'\r\n\r\nCall arguments received:\r\n \u2022 inputs=['tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)', 'tf.Tensor(shape=(None, 1, 10), dtype=float32)']\r\n```\r\nIn contrast, `keras.layers.ReLU` and `keras.layers.LeakyReLU` can accept the list input.\r\n\r\n**Describe the current behavior**.\r\n`keras.layers.ThresholdedReLU` crashes when the input is a list\r\n\r\n**Describe the expected behavior**.\r\nThresholdedReLU can accept the list input.\r\n\r\n**[Contributing](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md)**.\r\n\r\n- Do you want to contribute a PR? (yes/no):\r\n- If yes, please read [this page](https://github.com/keras-team/keras/blob/master/CONTRIBUTING.md) for instructions\r\n- Briefly describe your candidate solution(if contributing):\r\n\r\nAfter comparing the code between `ThresholedReLU` and `ReLU`, I think the reason is that `ReLU` directly use the backend implementation: [keras/layers/activation/relu.py#L96](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/relu.py#L96) while ThresholdedReLU implements by itself: [keras/layers/activation/thresholded_relu.py#L63](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/layers/activation/thresholded_relu.py#L63). 
Not sure why does such an implementation inconsistency exist, but I think we can do something similar in the thresholded_relu.py#L61-63 like [backend.relu](https://github.com/keras-team/keras/blob/90aa76f6c48ec5252b2db7926ff060e262dfc1cc/keras/backend.py#L4963) does:\r\n\r\n```\r\ndef call(self, inputs):\r\n dtype = getattr(inputs, 'dtype', floatx())\r\n theta = tf.cast(self.theta, dtype)\r\n return inputs * tf.cast(tf.greater(inputs, theta), dtype)\r\n```\r\n\r\nOf course, we can also directly use the `backend.relu` for the implementation of `ThresholdedReLU` like `ReLU` and `LeakyReLU` do.\r\n\r\n**Standalone code to reproduce the issue**.\r\nYou can access this [link](https://colab.research.google.com/drive/144FOk8RO-Ew_eBtGZlCmsUL6k0cEAlUO?usp=sharing) or run the following code:\r\n\r\n```\r\nimport keras\r\nx = keras.layers.Input(shape=(1,10))\r\ny = keras.layers.ThresholdedReLU()([x,x,x])\r\nmodel = keras.models.Model(x,y)\r\nmodel.summary()\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Thresholded Rectified Linear Unit activation layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.ThresholdedReLU')\nclass ThresholdedReLU(Layer):\n \"\"\"Thresholded Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = x for x > theta\n f(x) = 0 otherwise`\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n theta: Float >= 0. Threshold location of activation.\n \"\"\"\n\n def __init__(self, theta=1.0, **kwargs):\n super(ThresholdedReLU, self).__init__(**kwargs)\n if theta is None:\n raise ValueError(\n 'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'\n f' Received: {theta}')\n if theta < 0:\n raise ValueError('The theta value of a Thresholded ReLU layer '\n f'should be >=0. Received: {theta}')\n self.supports_masking = True\n self.theta = backend.cast_to_floatx(theta)\n\n def call(self, inputs):\n theta = tf.cast(self.theta, inputs.dtype)\n return inputs * tf.cast(tf.greater(inputs, theta), inputs.dtype)\n\n def get_config(self):\n config = {'theta': float(self.theta)}\n base_config = super(ThresholdedReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "keras/layers/activation/thresholded_relu.py"}], "after_files": [{"content": "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Thresholded Rectified Linear Unit activation layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.base_layer import Layer\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.ThresholdedReLU')\nclass ThresholdedReLU(Layer):\n \"\"\"Thresholded Rectified Linear Unit.\n\n It follows:\n\n ```\n f(x) = x for x > theta\n f(x) = 0 otherwise`\n ```\n\n Input shape:\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n Output shape:\n Same shape as the input.\n\n Args:\n theta: Float >= 0. Threshold location of activation.\n \"\"\"\n\n def __init__(self, theta=1.0, **kwargs):\n super(ThresholdedReLU, self).__init__(**kwargs)\n if theta is None:\n raise ValueError(\n 'Theta of a Thresholded ReLU layer cannot be None, expecting a float.'\n f' Received: {theta}')\n if theta < 0:\n raise ValueError('The theta value of a Thresholded ReLU layer '\n f'should be >=0. Received: {theta}')\n self.supports_masking = True\n self.theta = backend.cast_to_floatx(theta)\n\n def call(self, inputs):\n dtype = self.compute_dtype\n return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)\n\n def get_config(self):\n config = {'theta': float(self.theta)}\n base_config = super(ThresholdedReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n return input_shape\n", "path": "keras/layers/activation/thresholded_relu.py"}]}
| 1,993 | 162 |
gh_patches_debug_2919
|
rasdani/github-patches
|
git_diff
|
mesonbuild__meson-1538
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VS 2017 backend emits bad WindowsTargetPlatformVersion value
When I tried generating a VS 2017 solution, the generated app.vcxproj contained this:
```
<WindowsTargetPlatformVersion>10.0.14393.0\</WindowsTargetPlatformVersion>
```
This then causes errors in other `.targets` files that attempt a numeric comparison against that value.
This value is probably taken straight from one of these environment variables:
```
WindowsSDKLibVersion=10.0.14393.0\
WindowsSDKVersion=10.0.14393.0\
```
The trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to
```
WindowsSdkDir=C:\Program Files (x86)\Windows Kits\10\
```
directly.
--- END ISSUE ---
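For illustration, a short sketch of the effect of stripping the trailing backslash from the environment value. The example value is copied from the issue; the guard for the unset case is added here for the standalone example only.

```python
import os

# Example value taken from the issue; a real VS command prompt sets this.
os.environ["WindowsSDKVersion"] = "10.0.14393.0\\"

raw = os.getenv("WindowsSDKVersion")
cleaned = raw.rstrip("\\") if raw is not None else None  # guard added for illustration

print(repr(raw))      # '10.0.14393.0\\'
print(repr(cleaned))  # '10.0.14393.0'
```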
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/backend/vs2017backend.py`
Content:
```
1 # Copyright 2014-2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 from .vs2010backend import Vs2010Backend
18
19
20 class Vs2017Backend(Vs2010Backend):
21 def __init__(self, build):
22 super().__init__(build)
23 self.name = 'vs2017'
24 self.platform_toolset = 'v141'
25 self.vs_version = '2017'
26 # WindowsSDKVersion should be set by command prompt.
27 self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py
--- a/mesonbuild/backend/vs2017backend.py
+++ b/mesonbuild/backend/vs2017backend.py
@@ -24,4 +24,4 @@
self.platform_toolset = 'v141'
self.vs_version = '2017'
# WindowsSDKVersion should be set by command prompt.
- self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)
+ self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\')
|
{"golden_diff": "diff --git a/mesonbuild/backend/vs2017backend.py b/mesonbuild/backend/vs2017backend.py\n--- a/mesonbuild/backend/vs2017backend.py\n+++ b/mesonbuild/backend/vs2017backend.py\n@@ -24,4 +24,4 @@\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n- self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n+ self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\\\')\n", "issue": "VS 2017 backend emits bad WindowsTargetPlatformVersion value\nWhen I tried generating a VS 2017 solution, the generated app.vcxproj contained this:\r\n\r\n```\r\n<WindowsTargetPlatformVersion>10.0.14393.0\\</WindowsTargetPlatformVersion>\r\n```\r\n\r\nWhich then causes errors in other `.targets` files attempting to do a numeric comparison against that.\r\nThis value is probably taken straight from one of these environment variables:\r\n\r\n```\r\nWindowsSDKLibVersion=10.0.14393.0\\\r\nWindowsSDKVersion=10.0.14393.0\\\r\n```\r\n\r\nThe trailing backslash is a bit suspect, but may be there intentionally so it can be concatenated to \r\n```\r\nWindowsSdkDir=C:\\Program Files (x86)\\Windows Kits\\10\\\r\n```\r\ndirectly.\n", "before_files": [{"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None)\n", "path": "mesonbuild/backend/vs2017backend.py"}], "after_files": [{"content": "# Copyright 2014-2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom .vs2010backend import Vs2010Backend\n\n\nclass Vs2017Backend(Vs2010Backend):\n def __init__(self, build):\n super().__init__(build)\n self.name = 'vs2017'\n self.platform_toolset = 'v141'\n self.vs_version = '2017'\n # WindowsSDKVersion should be set by command prompt.\n self.windows_target_platform_version = os.getenv('WindowsSDKVersion', None).rstrip('\\\\')\n", "path": "mesonbuild/backend/vs2017backend.py"}]}
| 750 | 144 |
gh_patches_debug_10204
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1098
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEAT][CV] Add a "per-class" option to property drift & heatmap comparison
In this per-class option, the drift would be shown per class for the top drifted classes. 
--- END ISSUE ---
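For illustration, a usage sketch of the per-class option as it is exposed in the documentation change further below; the argument names `classes_to_display` and `min_samples` are taken from that change, and the COCO sample data is assumed to be downloadable in the environment.

```python
from deepchecks.vision.datasets.detection import coco
from deepchecks.vision.checks.distribution import ImagePropertyDrift

train_ds = coco.load_dataset(train=True, object_type='VisionData')
test_ds = coco.load_dataset(train=False, object_type='VisionData')

# Compute property drift only for images/annotations of the selected classes;
# min_samples is lowered because class filtering leaves few images.
result = ImagePropertyDrift(
    classes_to_display=['person', 'cat', 'cell phone', 'car'],
    min_samples=5,
).run(train_ds, test_ds)
result.show(show_additional_outputs=False)
```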
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Image Property Drift Check
4 **************************
5 This notebooks provides an overview for using and understanding the image property drift check.
6
7 **Structure:**
8
9 * `How Does the ImagePropertyDrift Check Work? <#how-does-the-imagepropertydrift-check-work>`__
10 * `Which Image Properties Are Used? <#which-image-properties-are-used>`__
11 * `Prepare data <#prepare-data>`__
12 * `Run the check <#run-the-check>`__
13 * `Define a condition <#define-a-condition>`__
14 * `Check Parameters <#check-parameters>`__
15
16 How Does the ImagePropertyDrift Check Work?
17 =================================
18 Data drift is simply a change in the distribution of data over time. It is also one
19 of the top reasons that a machine learning model performance degrades over time.
20
21 In the context of machine learning, drift between the training set and the test set
22 will likely make the model prone to errors. In other words, if the model was trained
23 on data that is different from the current test data, it will probably make more mistakes
24 predicting the target variable.
25
26 The Image Property Drift check calculates a drift score for each image property in
27 the test dataset, by comparing its distribution to the train dataset. For this, we
28 use the Earth Movers Distance (Wasserstein distance).
29
30 Which Image Properties Are Used?
31 =================================
32 ============================== ==========
33 Property name What is it
34 ============================== ==========
35 Aspect Ratio Ratio between height and width of image (height / width)
36 Area Area of image in pixels (height * width)
37 Brightness Average intensity of image pixels. Color channels have different weights according to
38 RGB-to-Grayscale formula
39 RMS Contrast Contrast of image, calculated by standard deviation of pixels
40 Mean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in
41 comparison to the other channels [r / (r + g + b)].
42 Mean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in
43 comparison to the other channels [g / (r + g + b)].
44 Mean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in
45 comparison to the other channels [b / (r + g + b)].
46 ============================== ==========
47
48 Imports
49 -------
50 """
51
52 #%%
53
54 from deepchecks.vision.datasets.detection import coco
55 from deepchecks.vision.checks.distribution import ImagePropertyDrift
56
57 #%%
58 # Prepare data
59 # ------------
60 from deepchecks.vision.utils import image_properties
61
62 train_dataset = coco.load_dataset(train=True, object_type='VisionData')
63 test_dataset = coco.load_dataset(train=False, object_type='VisionData')
64
65 #%%
66 # Run the check
67 # -------------
68
69 check_result = ImagePropertyDrift().run(train_dataset, test_dataset)
70 check_result
71
72 #%%
73 # Observe the check’s output 
74 # --------------------------
75 # The result value is a pandas DataFrame that contains drift score for each image property.
76
77 check_result.value
78
79 #%%
80 # Define a condition
81 # ==================
82 # We can define a condition that make sure that image properties drift scores do not
83 # exceed allowed threshold.
84
85 check_result = (
86 ImagePropertyDrift()
87 .add_condition_drift_score_not_greater_than(0.001)
88 .run(train_dataset, test_dataset)
89 )
90 check_result.show(show_additional_outputs=False)
91
92 #%%
93 # Check Parameters
94 # ----------------
95 # Image Property Drift Check accepts two parameters that allows us to control the look of the output:
96 #
97 # * `image_properties` - list of image properties that we are interested in
98 # * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)
99 #
100 # Only next string values are allowed for the `image_properties` parameter:
101 #
102 # * `aspect_ratio`
103 # * `area`
104 # * `brightness`
105 # * `mean_red_relative_intensity`
106 # * `mean_green_relative_intensity`
107 # * `mean_blue_relative_intensity`
108
109 from typing import List
110 import numpy as np
111
112
113 def area(images: List[np.ndarray]) -> List[int]:
114 # Return list of integers of image areas (height multiplied by width)
115 return [img.shape[0] * img.shape[1] for img in images]
116
117
118 def aspect_ratio(images: List[np.ndarray]) -> List[float]:
119 # Return list of floats of image height to width ratio
120 return [img.shape[0] / img.shape[1] for img in images]
121
122
123 properties = [
124 {'name': 'Area', 'method': area, 'output_type': 'continuous'},
125 {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}
126 ]
127
128 check_result = ImagePropertyDrift(
129 alternative_image_properties=properties,
130 max_num_categories=20
131 ).run(train_dataset, test_dataset)
132
133 check_result
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py
--- a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py
+++ b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py
@@ -76,6 +76,17 @@
check_result.value
+#%%
+# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only
+# for images either belonging to the classes or containing annotations belonging to the classes. (We'll lower the
+# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class
+# filtration)
+
+check_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5
+ ).run(train_dataset, test_dataset)
+check_result
+
+
#%%
# Define a condition
# ==================
|
{"golden_diff": "diff --git a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n--- a/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n+++ b/docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py\n@@ -76,6 +76,17 @@\n \n check_result.value\n \n+#%%\n+# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only\n+# for images either belonging to the classes or containing annotations belonging to the classes. (We'll lower the\n+# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class\n+# filtration)\n+\n+check_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5\n+ ).run(train_dataset, test_dataset)\n+check_result\n+\n+\n #%%\n # Define a condition\n # ==================\n", "issue": "[FEAT][CV] Add a \"per-class\" option to property drift & heatmap comparison\nIn this per class option, the drift would be shown per class for the top drifted classes. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nImage Property Drift Check\n**************************\nThis notebooks provides an overview for using and understanding the image property drift check.\n\n**Structure:**\n\n* `How Does the ImagePropertyDrift Check Work? <#how-does-the-imagepropertydrift-check-work>`__\n* `Which Image Properties Are Used? <#which-image-properties-are-used>`__\n* `Prepare data <#prepare-data>`__\n* `Run the check <#run-the-check>`__\n* `Define a condition <#define-a-condition>`__\n* `Check Parameters <#check-parameters>`__\n\nHow Does the ImagePropertyDrift Check Work?\n=================================\nData drift is simply a change in the distribution of data over time. It is also one\nof the top reasons that a machine learning model performance degrades over time.\n\nIn the context of machine learning, drift between the training set and the test set\nwill likely make the model prone to errors. In other words, if the model was trained\non data that is different from the current test data, it will probably make more mistakes\npredicting the target variable.\n\nThe Image Property Drift check calculates a drift score for each image property in\nthe test dataset, by comparing its distribution to the train dataset. For this, we\nuse the Earth Movers Distance (Wasserstein distance).\n\nWhich Image Properties Are Used?\n=================================\n============================== ==========\nProperty name What is it\n============================== ==========\nAspect Ratio Ratio between height and width of image (height / width)\nArea Area of image in pixels (height * width)\nBrightness Average intensity of image pixels. 
Color channels have different weights according to\n RGB-to-Grayscale formula\nRMS Contrast Contrast of image, calculated by standard deviation of pixels\nMean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in\n comparison to the other channels [r / (r + g + b)].\nMean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in\n comparison to the other channels [g / (r + g + b)].\nMean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in\n comparison to the other channels [b / (r + g + b)].\n============================== ==========\n\nImports\n-------\n\"\"\"\n\n#%%\n\nfrom deepchecks.vision.datasets.detection import coco\nfrom deepchecks.vision.checks.distribution import ImagePropertyDrift\n\n#%%\n# Prepare data\n# ------------\nfrom deepchecks.vision.utils import image_properties\n\ntrain_dataset = coco.load_dataset(train=True, object_type='VisionData')\ntest_dataset = coco.load_dataset(train=False, object_type='VisionData')\n\n#%%\n# Run the check \n# -------------\n\ncheck_result = ImagePropertyDrift().run(train_dataset, test_dataset)\ncheck_result\n\n#%%\n# Observe the check\u2019s output \n# --------------------------\n# The result value is a pandas DataFrame that contains drift score for each image property.\n\ncheck_result.value\n\n#%%\n# Define a condition\n# ==================\n# We can define a condition that make sure that image properties drift scores do not\n# exceed allowed threshold.\n\ncheck_result = (\n ImagePropertyDrift()\n .add_condition_drift_score_not_greater_than(0.001)\n .run(train_dataset, test_dataset)\n)\ncheck_result.show(show_additional_outputs=False)\n\n#%%\n# Check Parameters\n# ----------------\n# Image Property Drift Check accepts two parameters that allows us to control the look of the output:\n#\n# * `image_properties` - list of image properties that we are interested in\n# * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)\n#\n# Only next string values are allowed for the `image_properties` parameter:\n#\n# * `aspect_ratio`\n# * `area`\n# * `brightness`\n# * `mean_red_relative_intensity`\n# * `mean_green_relative_intensity`\n# * `mean_blue_relative_intensity`\n\nfrom typing import List\nimport numpy as np\n\n\ndef area(images: List[np.ndarray]) -> List[int]:\n # Return list of integers of image areas (height multiplied by width)\n return [img.shape[0] * img.shape[1] for img in images]\n\n\ndef aspect_ratio(images: List[np.ndarray]) -> List[float]:\n # Return list of floats of image height to width ratio\n return [img.shape[0] / img.shape[1] for img in images]\n\n\nproperties = [\n {'name': 'Area', 'method': area, 'output_type': 'continuous'},\n {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}\n]\n\ncheck_result = ImagePropertyDrift(\n alternative_image_properties=properties,\n max_num_categories=20\n).run(train_dataset, test_dataset)\n\ncheck_result", "path": "docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nImage Property Drift Check\n**************************\nThis notebooks provides an overview for using and understanding the image property drift check.\n\n**Structure:**\n\n* `How Does the ImagePropertyDrift Check Work? 
<#how-does-the-imagepropertydrift-check-work>`__\n* `Which Image Properties Are Used? <#which-image-properties-are-used>`__\n* `Prepare data <#prepare-data>`__\n* `Run the check <#run-the-check>`__\n* `Define a condition <#define-a-condition>`__\n* `Check Parameters <#check-parameters>`__\n\nHow Does the ImagePropertyDrift Check Work?\n=================================\nData drift is simply a change in the distribution of data over time. It is also one\nof the top reasons that a machine learning model performance degrades over time.\n\nIn the context of machine learning, drift between the training set and the test set\nwill likely make the model prone to errors. In other words, if the model was trained\non data that is different from the current test data, it will probably make more mistakes\npredicting the target variable.\n\nThe Image Property Drift check calculates a drift score for each image property in\nthe test dataset, by comparing its distribution to the train dataset. For this, we\nuse the Earth Movers Distance (Wasserstein distance).\n\nWhich Image Properties Are Used?\n=================================\n============================== ==========\nProperty name What is it\n============================== ==========\nAspect Ratio Ratio between height and width of image (height / width)\nArea Area of image in pixels (height * width)\nBrightness Average intensity of image pixels. Color channels have different weights according to\n RGB-to-Grayscale formula\nRMS Contrast Contrast of image, calculated by standard deviation of pixels\nMean Red Relative Intensity Mean over all pixels of the red channel, scaled to their relative intensity in\n comparison to the other channels [r / (r + g + b)].\nMean Green Relative Intensity Mean over all pixels of the green channel, scaled to their relative intensity in\n comparison to the other channels [g / (r + g + b)].\nMean Blue Relative Intensity Mean over all pixels of the blue channel, scaled to their relative intensity in\n comparison to the other channels [b / (r + g + b)].\n============================== ==========\n\nImports\n-------\n\"\"\"\n\n#%%\n\nfrom deepchecks.vision.datasets.detection import coco\nfrom deepchecks.vision.checks.distribution import ImagePropertyDrift\n\n#%%\n# Prepare data\n# ------------\nfrom deepchecks.vision.utils import image_properties\n\ntrain_dataset = coco.load_dataset(train=True, object_type='VisionData')\ntest_dataset = coco.load_dataset(train=False, object_type='VisionData')\n\n#%%\n# Run the check \n# -------------\n\ncheck_result = ImagePropertyDrift().run(train_dataset, test_dataset)\ncheck_result\n\n#%%\n# Observe the check\u2019s output \n# --------------------------\n# The result value is a pandas DataFrame that contains drift score for each image property.\n\ncheck_result.value\n\n#%%\n# We can also pass the check a list of classes we wish to inspect, and the check will calculate the properties only\n# for images either belonging to the classes or containing annotations belonging to the classes. 
(We'll lower the\n# min_samples to 5 to tell the check to calculate drift despite having only a few images left after the class\n# filtration)\n\ncheck_result = ImagePropertyDrift(classes_to_display=['person', 'cat', 'cell phone', 'car'], min_samples=5\n ).run(train_dataset, test_dataset)\ncheck_result\n\n\n#%%\n# Define a condition\n# ==================\n# We can define a condition that make sure that image properties drift scores do not\n# exceed allowed threshold.\n\ncheck_result = (\n ImagePropertyDrift()\n .add_condition_drift_score_not_greater_than(0.001)\n .run(train_dataset, test_dataset)\n)\ncheck_result.show(show_additional_outputs=False)\n\n#%%\n# Check Parameters\n# ----------------\n# Image Property Drift Check accepts two parameters that allows us to control the look of the output:\n#\n# * `image_properties` - list of image properties that we are interested in\n# * `max_num_categories` - Maximal number of categories to use for the calculation of drift using PSI (Population Stability Index)\n#\n# Only next string values are allowed for the `image_properties` parameter:\n#\n# * `aspect_ratio`\n# * `area`\n# * `brightness`\n# * `mean_red_relative_intensity`\n# * `mean_green_relative_intensity`\n# * `mean_blue_relative_intensity`\n\nfrom typing import List\nimport numpy as np\n\n\ndef area(images: List[np.ndarray]) -> List[int]:\n # Return list of integers of image areas (height multiplied by width)\n return [img.shape[0] * img.shape[1] for img in images]\n\n\ndef aspect_ratio(images: List[np.ndarray]) -> List[float]:\n # Return list of floats of image height to width ratio\n return [img.shape[0] / img.shape[1] for img in images]\n\n\nproperties = [\n {'name': 'Area', 'method': area, 'output_type': 'continuous'},\n {'name': 'Aspect Ratio', 'method': aspect_ratio, 'output_type': 'continuous'}\n]\n\ncheck_result = ImagePropertyDrift(\n alternative_image_properties=properties,\n max_num_categories=20\n).run(train_dataset, test_dataset)\n\ncheck_result", "path": "docs/source/examples/vision/checks/distribution/source/plot_image_property_check.py"}]}
| 1,646 | 226 |
gh_patches_debug_1730 | rasdani/github-patches | git_diff | cal-itp__benefits-209 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Secure Django's language cookie
The following can be configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py)
* [x] [`LANGUAGE_COOKIE_HTTPONLY`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-httponly) = True (same as `SESSION_COOKIE_HTTPONLY`)
* [x] [`LANGUAGE_COOKIE_SAMESITE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-samesite) = "Strict" (same as `SESSION_COOKIE_SAMESITE`)
* [x] [`LANGUAGE_COOKIE_SECURE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-secure) = True (same as `SESSION_COOKIE_SECURE`)
--- END ISSUE ---
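A minimal sketch of the three hardened settings from the checklist above, written the way they would typically appear in a Django `settings.py`; the setting names come from the Django documentation linked in the issue, while the placement and comments are illustrative assumptions rather than an excerpt from the repository.

```python
# Illustrative sketch only -- mirrors the hardening already applied to the session cookie.
LANGUAGE_COOKIE_HTTPONLY = True      # keep the cookie out of reach of JavaScript
LANGUAGE_COOKIE_SAMESITE = "Strict"  # same policy as SESSION_COOKIE_SAMESITE
LANGUAGE_COOKIE_SECURE = True        # only transmit the cookie over HTTPS
```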
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8
9 # SECURITY WARNING: keep the secret key used in production secret!
10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
11
12 # SECURITY WARNING: don't run with debug turned on in production!
13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
14
15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
16
17 ALLOWED_HOSTS = []
18
19 if DEBUG:
20 ALLOWED_HOSTS.extend(["*"])
21 else:
22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split(",")
23 ALLOWED_HOSTS.extend(hosts)
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 ]
34
35 if ADMIN:
36 INSTALLED_APPS.extend(
37 [
38 "django.contrib.admin",
39 "django.contrib.auth",
40 "django.contrib.contenttypes",
41 "django.contrib.messages",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.middleware.locale.LocaleMiddleware",
49 "benefits.core.middleware.Healthcheck",
50 "django.middleware.common.CommonMiddleware",
51 "django.middleware.csrf.CsrfViewMiddleware",
52 "django.middleware.clickjacking.XFrameOptionsMiddleware",
53 "benefits.core.middleware.DebugSession",
54 "benefits.core.middleware.ChangedLanguageEvent",
55 ]
56
57 if ADMIN:
58 MIDDLEWARE.extend(
59 [
60 "django.contrib.auth.middleware.AuthenticationMiddleware",
61 "django.contrib.messages.middleware.MessageMiddleware",
62 ]
63 )
64
65 CSRF_COOKIE_HTTPONLY = True
66
67 SESSION_COOKIE_AGE = 3600
68 SESSION_COOKIE_SAMESITE = "Strict"
69 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
70
71 if not DEBUG:
72 CSRF_COOKIE_SECURE = True
73 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
74 SESSION_COOKIE_SECURE = True
75
76 ROOT_URLCONF = "benefits.urls"
77
78 template_ctx_processors = [
79 "django.template.context_processors.request",
80 "benefits.core.context_processors.analytics",
81 ]
82
83 if DEBUG:
84 template_ctx_processors.extend(
85 [
86 "django.template.context_processors.debug",
87 "benefits.core.context_processors.debug",
88 ]
89 )
90
91 if ADMIN:
92 template_ctx_processors.extend(
93 [
94 "django.contrib.auth.context_processors.auth",
95 "django.contrib.messages.context_processors.messages",
96 ]
97 )
98
99 TEMPLATES = [
100 {
101 "BACKEND": "django.template.backends.django.DjangoTemplates",
102 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
103 "APP_DIRS": True,
104 "OPTIONS": {
105 "context_processors": template_ctx_processors,
106 },
107 },
108 ]
109
110 WSGI_APPLICATION = "benefits.wsgi.application"
111
112 DATABASES = {
113 "default": {
114 "ENGINE": "django.db.backends.sqlite3",
115 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
116 }
117 }
118
119 # Password validation
120
121 AUTH_PASSWORD_VALIDATORS = []
122
123 if ADMIN:
124 AUTH_PASSWORD_VALIDATORS.extend(
125 [
126 {
127 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
128 },
129 {
130 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
131 },
132 {
133 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
134 },
135 {
136 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
137 },
138 ]
139 )
140
141 # Internationalization
142
143 LANGUAGE_CODE = "en"
144
145 LANGUAGES = [("en", "English"), ("es", "Español")]
146
147 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
148
149 USE_I18N = True
150 USE_L10N = True
151
152 TIME_ZONE = "UTC"
153 USE_TZ = True
154
155 # Static files (CSS, JavaScript, Images)
156
157 STATIC_URL = "/static/"
158 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
159 STATIC_ROOT = os.path.join(BASE_DIR, "static")
160
161 # Logging configuration
162
163 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
164 LOGGING = {
165 "version": 1,
166 "disable_existing_loggers": False,
167 "formatters": {
168 "default": {
169 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
170 "datefmt": "%d/%b/%Y %H:%M:%S",
171 "style": "{",
172 },
173 },
174 "handlers": {
175 "default": {"class": "logging.StreamHandler", "formatter": "default"},
176 },
177 "root": {
178 "handlers": ["default"],
179 "level": LOG_LEVEL,
180 },
181 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
182 }
183
184 # Analytics configuration
185
186 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -142,6 +142,10 @@
LANGUAGE_CODE = "en"
+LANGUAGE_COOKIE_HTTPONLY = True
+LANGUAGE_COOKIE_SAMESITE = "Strict"
+LANGUAGE_COOKIE_SECURE = True
+
LANGUAGES = [("en", "English"), ("es", "Español")]
LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -142,6 +142,10 @@\n \n LANGUAGE_CODE = \"en\"\n \n+LANGUAGE_COOKIE_HTTPONLY = True\n+LANGUAGE_COOKIE_SAMESITE = \"Strict\"\n+LANGUAGE_COOKIE_SECURE = True\n+\n LANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n \n LOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n", "issue": "Secure Django's language cookie\nThe following can be configured in [`settings.py`](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py)\r\n\r\n* [x] [`LANGUAGE_COOKIE_HTTPONLY`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-httponly) = True (same as `SESSION_COOKIE_HTTPONLY`)\r\n* [x] [`LANGUAGE_COOKIE_SAMESITE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-samesite) = \"Strict\" (same as `SESSION_COOKIE_SAMESITE`)\r\n* [x] [`LANGUAGE_COOKIE_SECURE`](https://docs.djangoproject.com/en/3.2/ref/settings/#language-cookie-secure) = True (same as `SESSION_COOKIE_SECURE`)\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n 
\"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n 
\"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}]}
| 1,977 | 119 |
gh_patches_debug_36828 | rasdani/github-patches | git_diff | crytic__slither-577 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
FileNotFoundError: [Errno 2] No such file or directory: 'solc'
I launch program like this:
slither Contract.sol
and receive an error:
"FileNotFoundError: [Errno 2] No such file or directory: 'solc'"
I have solc installed.
$ solcjs --version
0.4.25+commit.59dbf8f1.Emscripten.clang
But the executable is called **solcjs**, not **solc**. Or is it something different?
Reasoning for "Trusted" versions of Solidity
Re: https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity
I am wondering why `0.4.25 or 0.5.11` are chosen as trusted versions.
Would 0.4.26 or 0.5.17 be acceptable? (current highest patches of those minor versions)
Why are none of the 0.6.x versions included as a "trusted" version?
What are the criteria for adding / bumping the version of "trusted"? Is it based on a simple metric like time since release? Or were those two specific versions manually audited by your team members?
Sorry for all the questions. I have a project which is fairly portable between `>=0.5.0 <=0.7.0` and am wondering which version to target... I would like to use `immutable` from `>=0.6.5`, but not at the expense of some possible security issue. And after using this tool I wondered what the criteria was for "trusted versions" of the compiler.
Thanks.
--- END ISSUE ---
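As a hedged aside on the "trusted versions" question (not taken from the issue or from the detector source below), the comparison at stake can be sketched as a plain allow-list lookup; the version list here is only an example, not the project's actual policy.

```python
# Sketch of an allow-list check over semantic version strings; ALLOWED is illustrative.
ALLOWED = ["0.4.25", "0.4.26", "0.5.11"]

def is_trusted(version: str) -> bool:
    # Compare as integer tuples so "0.5.9" and "0.5.11" order correctly.
    allowed = {tuple(map(int, v.split("."))) for v in ALLOWED}
    return tuple(map(int, version.split("."))) in allowed

print(is_trusted("0.5.11"))  # True
print(is_trusted("0.6.5"))   # False -- newer than anything on this example list
```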
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/attributes/incorrect_solc.py`
Content:
```
1 """
2 Check if an incorrect version of solc is used
3 """
4
5 import re
6 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
7 from slither.formatters.attributes.incorrect_solc import format
8
9 # group:
10 # 0: ^ > >= < <= (optional)
11 # 1: ' ' (optional)
12 # 2: version number
13 # 3: version number
14 # 4: version number
15
16 PATTERN = re.compile('(\^|>|>=|<|<=)?([ ]+)?(\d+)\.(\d+)\.(\d+)')
17
18
19 class IncorrectSolc(AbstractDetector):
20 """
21 Check if an old version of solc is used
22 """
23
24 ARGUMENT = 'solc-version'
25 HELP = 'Incorrect Solidity version'
26 IMPACT = DetectorClassification.INFORMATIONAL
27 CONFIDENCE = DetectorClassification.HIGH
28
29 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity'
30
31 WIKI_TITLE = 'Incorrect versions of Solidity'
32 WIKI_DESCRIPTION = '''
33 `solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.
34 We also recommend avoiding complex `pragma` statement.'''
35 WIKI_RECOMMENDATION = '''
36 Use Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''
37
38 COMPLEX_PRAGMA_TXT = "is too complex"
39 OLD_VERSION_TXT = "allows old versions"
40 LESS_THAN_TXT = "uses lesser than"
41
42 TOO_RECENT_VERSION_TXT = "necessitates versions too recent to be trusted. Consider deploying with 0.5.11"
43 BUGGY_VERSION_TXT = "is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)"
44
45 # Indicates the allowed versions. Must be formatted in increasing order.
46 ALLOWED_VERSIONS = ["0.4.25", "0.4.26", "0.5.11"]
47
48 # Indicates the versions that should not be used.
49 BUGGY_VERSIONS = ["0.4.22", "^0.4.22",
50 "0.5.5", "^0.5.5",
51 "0.5.6", "^0.5.6",
52 "0.5.14", "^0.5.14"]
53
54 def _check_version(self, version):
55 op = version[0]
56 if op and op not in ['>', '>=', '^']:
57 return self.LESS_THAN_TXT
58 version_number = '.'.join(version[2:])
59 if version_number not in self.ALLOWED_VERSIONS:
60 if list(map(int, version[2:])) > list(map(int, self.ALLOWED_VERSIONS[-1].split('.'))):
61 return self.TOO_RECENT_VERSION_TXT
62 return self.OLD_VERSION_TXT
63 return None
64
65 def _check_pragma(self, version):
66 if version in self.BUGGY_VERSIONS:
67 return self.BUGGY_VERSION_TXT
68 versions = PATTERN.findall(version)
69 if len(versions) == 1:
70 version = versions[0]
71 return self._check_version(version)
72 elif len(versions) == 2:
73 version_left = versions[0]
74 version_right = versions[1]
75 # Only allow two elements if the second one is
76 # <0.5.0 or <0.6.0
77 if version_right not in [('<', '', '0', '5', '0'), ('<', '', '0', '6', '0'), ('<', '', '0', '7', '0')]:
78 return self.COMPLEX_PRAGMA_TXT
79 return self._check_version(version_left)
80 else:
81 return self.COMPLEX_PRAGMA_TXT
82
83 def _detect(self):
84 """
85 Detects pragma statements that allow for outdated solc versions.
86 :return: Returns the relevant JSON data for the findings.
87 """
88 # Detect all version related pragmas and check if they are disallowed.
89 results = []
90 pragma = self.slither.pragma_directives
91 disallowed_pragmas = []
92 detected_version = False
93 for p in pragma:
94 # Skip any pragma directives which do not refer to version
95 if len(p.directive) < 1 or p.directive[0] != "solidity":
96 continue
97
98 # This is version, so we test if this is disallowed.
99 detected_version = True
100 reason = self._check_pragma(p.version)
101 if reason:
102 disallowed_pragmas.append((reason, p))
103
104 # If we found any disallowed pragmas, we output our findings.
105 if disallowed_pragmas:
106 for (reason, p) in disallowed_pragmas:
107 info = ["Pragma version", p, f" {reason}\n"]
108
109 json = self.generate_result(info)
110
111 results.append(json)
112
113 return results
114
115 @staticmethod
116 def _format(slither, result):
117 format(slither, result)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/detectors/attributes/incorrect_solc.py b/slither/detectors/attributes/incorrect_solc.py
--- a/slither/detectors/attributes/incorrect_solc.py
+++ b/slither/detectors/attributes/incorrect_solc.py
@@ -33,23 +33,30 @@
`solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.
We also recommend avoiding complex `pragma` statement.'''
WIKI_RECOMMENDATION = '''
-Use Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''
+Deploy with any of the following Solidity versions:
+- 0.5.11 - 0.5.13,
+- 0.5.15 - 0.5.17,
+- 0.6.8,
+- 0.6.10 - 0.6.11.
+Use a simple pragma version that allows any of these versions.
+Consider using the latest version of Solidity for testing.'''
COMPLEX_PRAGMA_TXT = "is too complex"
OLD_VERSION_TXT = "allows old versions"
LESS_THAN_TXT = "uses lesser than"
- TOO_RECENT_VERSION_TXT = "necessitates versions too recent to be trusted. Consider deploying with 0.5.11"
- BUGGY_VERSION_TXT = "is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)"
+ TOO_RECENT_VERSION_TXT = "necessitates a version too recent to be trusted. Consider deploying with 0.6.11"
+ BUGGY_VERSION_TXT = "is known to contain severe issues (https://solidity.readthedocs.io/en/latest/bugs.html)"
# Indicates the allowed versions. Must be formatted in increasing order.
- ALLOWED_VERSIONS = ["0.4.25", "0.4.26", "0.5.11"]
+ ALLOWED_VERSIONS = ["0.5.11", "0.5.12", "0.5.13", "0.5.15", "0.5.16", "0.5.17", "0.6.8", "0.6.10", "0.6.11"]
# Indicates the versions that should not be used.
BUGGY_VERSIONS = ["0.4.22", "^0.4.22",
"0.5.5", "^0.5.5",
"0.5.6", "^0.5.6",
- "0.5.14", "^0.5.14"]
+ "0.5.14", "^0.5.14",
+ "0.6.9", "^0.6.9"]
def _check_version(self, version):
op = version[0]
@@ -110,6 +117,17 @@
results.append(json)
+ if self.slither.crytic_compile:
+ if self.slither.crytic_compile.compiler_version:
+ if self.slither.crytic_compile.compiler_version.version not in self.ALLOWED_VERSIONS:
+ info = ["solc-",
+ self.slither.crytic_compile.compiler_version.version,
+ " is not recommended for deployement\n"]
+
+ json = self.generate_result(info)
+
+ results.append(json)
+
return results
@staticmethod
|
{"golden_diff": "diff --git a/slither/detectors/attributes/incorrect_solc.py b/slither/detectors/attributes/incorrect_solc.py\n--- a/slither/detectors/attributes/incorrect_solc.py\n+++ b/slither/detectors/attributes/incorrect_solc.py\n@@ -33,23 +33,30 @@\n `solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.\n We also recommend avoiding complex `pragma` statement.'''\n WIKI_RECOMMENDATION = '''\n-Use Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''\n+Deploy with any of the following Solidity versions:\n+- 0.5.11 - 0.5.13,\n+- 0.5.15 - 0.5.17,\n+- 0.6.8,\n+- 0.6.10 - 0.6.11.\n+Use a simple pragma version that allows any of these versions.\n+Consider using the latest version of Solidity for testing.'''\n \n COMPLEX_PRAGMA_TXT = \"is too complex\"\n OLD_VERSION_TXT = \"allows old versions\"\n LESS_THAN_TXT = \"uses lesser than\"\n \n- TOO_RECENT_VERSION_TXT = \"necessitates versions too recent to be trusted. Consider deploying with 0.5.11\"\n- BUGGY_VERSION_TXT = \"is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)\"\n+ TOO_RECENT_VERSION_TXT = \"necessitates a version too recent to be trusted. Consider deploying with 0.6.11\"\n+ BUGGY_VERSION_TXT = \"is known to contain severe issues (https://solidity.readthedocs.io/en/latest/bugs.html)\"\n \n # Indicates the allowed versions. Must be formatted in increasing order.\n- ALLOWED_VERSIONS = [\"0.4.25\", \"0.4.26\", \"0.5.11\"]\n+ ALLOWED_VERSIONS = [\"0.5.11\", \"0.5.12\", \"0.5.13\", \"0.5.15\", \"0.5.16\", \"0.5.17\", \"0.6.8\", \"0.6.10\", \"0.6.11\"]\n \n # Indicates the versions that should not be used.\n BUGGY_VERSIONS = [\"0.4.22\", \"^0.4.22\",\n \"0.5.5\", \"^0.5.5\",\n \"0.5.6\", \"^0.5.6\",\n- \"0.5.14\", \"^0.5.14\"]\n+ \"0.5.14\", \"^0.5.14\",\n+ \"0.6.9\", \"^0.6.9\"]\n \n def _check_version(self, version):\n op = version[0]\n@@ -110,6 +117,17 @@\n \n results.append(json)\n \n+ if self.slither.crytic_compile:\n+ if self.slither.crytic_compile.compiler_version:\n+ if self.slither.crytic_compile.compiler_version.version not in self.ALLOWED_VERSIONS:\n+ info = [\"solc-\",\n+ self.slither.crytic_compile.compiler_version.version,\n+ \" is not recommended for deployement\\n\"]\n+\n+ json = self.generate_result(info)\n+\n+ results.append(json)\n+\n return results\n \n @staticmethod\n", "issue": "FileNotFoundError: [Errno 2] No such file or directory: 'solc'\nI launch program like this:\r\n\r\n slither Contract.sol \r\n\r\nand receive an error:\r\n\"FileNotFoundError: [Errno 2] No such file or directory: 'solc'\"\r\n\r\nI have solc installed.\r\n\r\n $ solcjs --version\r\n 0.4.25+commit.59dbf8f1.Emscripten.clang\r\n\r\nBut executable is called **solcjs**, not **solc**. Or it is something different?\r\n\nReasoning for \"Trusted\" versions of Solidity\nRe: https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity\r\n\r\nI am wondering why `0.4.25 or 0.5.11` are chosen as trusted versions.\r\n\r\nWould 0.4.26 or 0.5.17 be acceptable? (current highest patches of those minor versions)\r\n\r\nWhy are none of the 0.6.x versions included as a \"trusted\" version?\r\n\r\nWhat are the criteria for adding / bumping the version of \"trusted\"? Is it based on a simple metric like time since release? Or were those two specific versions manually audited by your team members?\r\n\r\nSorry for all the questions. 
I have a project which is fairly portable between `>=0.5.0 <=0.7.0` and am wondering which version to target... I would like to use `immutable` from `>=0.6.5`, but not at the expense of some possible security issue. And after using this tool I wondered what the criteria was for \"trusted versions\" of the compiler.\r\n\r\nThanks.\n", "before_files": [{"content": "\"\"\"\n Check if an incorrect version of solc is used\n\"\"\"\n\nimport re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.attributes.incorrect_solc import format\n\n# group:\n# 0: ^ > >= < <= (optional)\n# 1: ' ' (optional)\n# 2: version number\n# 3: version number\n# 4: version number\n\nPATTERN = re.compile('(\\^|>|>=|<|<=)?([ ]+)?(\\d+)\\.(\\d+)\\.(\\d+)')\n\n\nclass IncorrectSolc(AbstractDetector):\n \"\"\"\n Check if an old version of solc is used\n \"\"\"\n\n ARGUMENT = 'solc-version'\n HELP = 'Incorrect Solidity version'\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity'\n\n WIKI_TITLE = 'Incorrect versions of Solidity'\n WIKI_DESCRIPTION = '''\n`solc` frequently releases new compiler versions. Using an old version prevents access to new Solidity security checks.\nWe also recommend avoiding complex `pragma` statement.'''\n WIKI_RECOMMENDATION = '''\nUse Solidity 0.4.25 or 0.5.11. Consider using the latest version of Solidity for testing the compilation, and a trusted version for deploying.'''\n\n COMPLEX_PRAGMA_TXT = \"is too complex\"\n OLD_VERSION_TXT = \"allows old versions\"\n LESS_THAN_TXT = \"uses lesser than\"\n\n TOO_RECENT_VERSION_TXT = \"necessitates versions too recent to be trusted. Consider deploying with 0.5.11\"\n BUGGY_VERSION_TXT = \"is known to contain severe issue (https://solidity.readthedocs.io/en/v0.5.8/bugs.html)\"\n\n # Indicates the allowed versions. 
Must be formatted in increasing order.\n ALLOWED_VERSIONS = [\"0.4.25\", \"0.4.26\", \"0.5.11\"]\n\n # Indicates the versions that should not be used.\n BUGGY_VERSIONS = [\"0.4.22\", \"^0.4.22\",\n \"0.5.5\", \"^0.5.5\",\n \"0.5.6\", \"^0.5.6\",\n \"0.5.14\", \"^0.5.14\"]\n\n def _check_version(self, version):\n op = version[0]\n if op and op not in ['>', '>=', '^']:\n return self.LESS_THAN_TXT\n version_number = '.'.join(version[2:])\n if version_number not in self.ALLOWED_VERSIONS:\n if list(map(int, version[2:])) > list(map(int, self.ALLOWED_VERSIONS[-1].split('.'))):\n return self.TOO_RECENT_VERSION_TXT\n return self.OLD_VERSION_TXT\n return None\n\n def _check_pragma(self, version):\n if version in self.BUGGY_VERSIONS:\n return self.BUGGY_VERSION_TXT\n versions = PATTERN.findall(version)\n if len(versions) == 1:\n version = versions[0]\n return self._check_version(version)\n elif len(versions) == 2:\n version_left = versions[0]\n version_right = versions[1]\n # Only allow two elements if the second one is\n # <0.5.0 or <0.6.0\n if version_right not in [('<', '', '0', '5', '0'), ('<', '', '0', '6', '0'), ('<', '', '0', '7', '0')]:\n return self.COMPLEX_PRAGMA_TXT\n return self._check_version(version_left)\n else:\n return self.COMPLEX_PRAGMA_TXT\n\n def _detect(self):\n \"\"\"\n Detects pragma statements that allow for outdated solc versions.\n :return: Returns the relevant JSON data for the findings.\n \"\"\"\n # Detect all version related pragmas and check if they are disallowed.\n results = []\n pragma = self.slither.pragma_directives\n disallowed_pragmas = []\n detected_version = False\n for p in pragma:\n # Skip any pragma directives which do not refer to version\n if len(p.directive) < 1 or p.directive[0] != \"solidity\":\n continue\n\n # This is version, so we test if this is disallowed.\n detected_version = True\n reason = self._check_pragma(p.version)\n if reason:\n disallowed_pragmas.append((reason, p))\n\n # If we found any disallowed pragmas, we output our findings.\n if disallowed_pragmas:\n for (reason, p) in disallowed_pragmas:\n info = [\"Pragma version\", p, f\" {reason}\\n\"]\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n format(slither, result)\n", "path": "slither/detectors/attributes/incorrect_solc.py"}], "after_files": [{"content": "\"\"\"\n Check if an incorrect version of solc is used\n\"\"\"\n\nimport re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.attributes.incorrect_solc import format\n\n# group:\n# 0: ^ > >= < <= (optional)\n# 1: ' ' (optional)\n# 2: version number\n# 3: version number\n# 4: version number\n\nPATTERN = re.compile('(\\^|>|>=|<|<=)?([ ]+)?(\\d+)\\.(\\d+)\\.(\\d+)')\n\n\nclass IncorrectSolc(AbstractDetector):\n \"\"\"\n Check if an old version of solc is used\n \"\"\"\n\n ARGUMENT = 'solc-version'\n HELP = 'Incorrect Solidity version'\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#incorrect-versions-of-solidity'\n\n WIKI_TITLE = 'Incorrect versions of Solidity'\n WIKI_DESCRIPTION = '''\n`solc` frequently releases new compiler versions. 
Using an old version prevents access to new Solidity security checks.\nWe also recommend avoiding complex `pragma` statement.'''\n WIKI_RECOMMENDATION = '''\nDeploy with any of the following Solidity versions:\n- 0.5.11 - 0.5.13,\n- 0.5.15 - 0.5.17,\n- 0.6.8,\n- 0.6.10 - 0.6.11.\nUse a simple pragma version that allows any of these versions.\nConsider using the latest version of Solidity for testing.'''\n\n COMPLEX_PRAGMA_TXT = \"is too complex\"\n OLD_VERSION_TXT = \"allows old versions\"\n LESS_THAN_TXT = \"uses lesser than\"\n\n TOO_RECENT_VERSION_TXT = \"necessitates a version too recent to be trusted. Consider deploying with 0.6.11\"\n BUGGY_VERSION_TXT = \"is known to contain severe issues (https://solidity.readthedocs.io/en/latest/bugs.html)\"\n\n # Indicates the allowed versions. Must be formatted in increasing order.\n ALLOWED_VERSIONS = [\"0.5.11\", \"0.5.12\", \"0.5.13\", \"0.5.15\", \"0.5.16\", \"0.5.17\", \"0.6.8\", \"0.6.10\", \"0.6.11\"]\n\n # Indicates the versions that should not be used.\n BUGGY_VERSIONS = [\"0.4.22\", \"^0.4.22\",\n \"0.5.5\", \"^0.5.5\",\n \"0.5.6\", \"^0.5.6\",\n \"0.5.14\", \"^0.5.14\",\n \"0.6.9\", \"^0.6.9\"]\n\n def _check_version(self, version):\n op = version[0]\n if op and op not in ['>', '>=', '^']:\n return self.LESS_THAN_TXT\n version_number = '.'.join(version[2:])\n if version_number not in self.ALLOWED_VERSIONS:\n if list(map(int, version[2:])) > list(map(int, self.ALLOWED_VERSIONS[-1].split('.'))):\n return self.TOO_RECENT_VERSION_TXT\n return self.OLD_VERSION_TXT\n return None\n\n def _check_pragma(self, version):\n if version in self.BUGGY_VERSIONS:\n return self.BUGGY_VERSION_TXT\n versions = PATTERN.findall(version)\n if len(versions) == 1:\n version = versions[0]\n return self._check_version(version)\n elif len(versions) == 2:\n version_left = versions[0]\n version_right = versions[1]\n # Only allow two elements if the second one is\n # <0.5.0 or <0.6.0\n if version_right not in [('<', '', '0', '5', '0'), ('<', '', '0', '6', '0'), ('<', '', '0', '7', '0')]:\n return self.COMPLEX_PRAGMA_TXT\n return self._check_version(version_left)\n else:\n return self.COMPLEX_PRAGMA_TXT\n\n def _detect(self):\n \"\"\"\n Detects pragma statements that allow for outdated solc versions.\n :return: Returns the relevant JSON data for the findings.\n \"\"\"\n # Detect all version related pragmas and check if they are disallowed.\n results = []\n pragma = self.slither.pragma_directives\n disallowed_pragmas = []\n detected_version = False\n for p in pragma:\n # Skip any pragma directives which do not refer to version\n if len(p.directive) < 1 or p.directive[0] != \"solidity\":\n continue\n\n # This is version, so we test if this is disallowed.\n detected_version = True\n reason = self._check_pragma(p.version)\n if reason:\n disallowed_pragmas.append((reason, p))\n\n # If we found any disallowed pragmas, we output our findings.\n if disallowed_pragmas:\n for (reason, p) in disallowed_pragmas:\n info = [\"Pragma version\", p, f\" {reason}\\n\"]\n\n json = self.generate_result(info)\n\n results.append(json)\n\n if self.slither.crytic_compile:\n if self.slither.crytic_compile.compiler_version:\n if self.slither.crytic_compile.compiler_version.version not in self.ALLOWED_VERSIONS:\n info = [\"solc-\",\n self.slither.crytic_compile.compiler_version.version,\n \" is not recommended for deployement\\n\"]\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n format(slither, result)\n", 
"path": "slither/detectors/attributes/incorrect_solc.py"}]}
| 1,993 | 794 |
gh_patches_debug_2854 | rasdani/github-patches | git_diff | wger-project__wger-170 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BMI And Calorie Calculator Not Working
Using this software in Linux Mint 13.
When I enter my data into either the BMI calculator or the calorie estimator nothing happens.
I have entered my height in cm and my weight in kgs.
The BMI calculator says my BMI = 0.
I'd be happy with 10.
--- END ISSUE ---
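As a worked aside (independent of the bug report and of the form code below), this is the standard BMI formula the calculator is expected to apply, with the reporter's kg/cm units converted; the numbers are illustrative, not taken from the report.

```python
# Standard BMI: weight in kilograms divided by the square of height in metres.
def bmi(weight_kg: float, height_cm: float) -> float:
    height_m = height_cm / 100.0
    return weight_kg / (height_m ** 2)

print(round(bmi(80, 180), 1))  # 24.7 -- any valid input should give a non-zero result
```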
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wger/nutrition/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This file is part of wger Workout Manager.
4 #
5 # wger Workout Manager is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # wger Workout Manager is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16
17 import logging
18
19 from django import forms
20 from django.utils.translation import ugettext as _
21 from wger.core.models import UserProfile
22
23 from wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem
24 from wger.utils.widgets import Html5NumberInput
25
26
27 logger = logging.getLogger(__name__)
28
29
30 class UnitChooserForm(forms.Form):
31 '''
32 A small form to select an amount and a unit for an ingredient
33 '''
34 amount = forms.DecimalField(decimal_places=2,
35 max_digits=5,
36 localize=True)
37 unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),
38 empty_label="g",
39 required=False)
40
41 def __init__(self, *args, **kwargs):
42 super(UnitChooserForm, self).__init__(*args, **kwargs)
43
44 if len(args) and args[0].get('ingredient'):
45 ingredient_id = args[0]['ingredient']
46
47 elif kwargs.get('data'):
48 ingredient_id = kwargs['data']['ingredient_id']
49
50 else:
51 ingredient_id = -1
52
53 self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(
54 ingredient_id=ingredient_id).select_related()
55
56
57 class BmiForm(forms.ModelForm):
58 weight = forms.DecimalField(widget=Html5NumberInput(),
59 max_value=999)
60
61 class Meta:
62 model = UserProfile
63 fields = ('height', )
64
65
66 class BmrForm(forms.ModelForm):
67 '''
68 Form for the basal metabolic rate
69 '''
70 weight = forms.DecimalField(widget=Html5NumberInput())
71
72 class Meta:
73 model = UserProfile
74 fields = ('age', 'height', 'gender')
75
76
77 class PhysicalActivitiesForm(forms.ModelForm):
78 '''
79 Form for the additional physical activities
80 '''
81 class Meta:
82 model = UserProfile
83 fields = ('sleep_hours',
84 'work_hours',
85 'work_intensity',
86 'sport_hours',
87 'sport_intensity',
88 'freetime_hours',
89 'freetime_intensity')
90
91
92 class DailyCaloriesForm(forms.ModelForm):
93 '''
94 Form for the total daily calories needed
95 '''
96
97 base_calories = forms.IntegerField(label=_('Basic caloric intake'),
98 help_text=_('Your basic caloric intake as calculated for '
99 'your data'),
100 required=False,
101 widget=Html5NumberInput())
102 additional_calories = forms.IntegerField(label=_('Additional calories'),
103 help_text=_('Additional calories to add to the base '
104 'rate (to substract, enter a negative '
105 'number)'),
106 initial=0,
107 required=False,
108 widget=Html5NumberInput())
109
110 class Meta:
111 model = UserProfile
112 fields = ('calories',)
113
114
115 class MealItemForm(forms.ModelForm):
116 weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),
117 empty_label="g",
118 required=False)
119 ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),
120 widget=forms.HiddenInput)
121
122 class Meta:
123 model = MealItem
124 fields = '__all__'
125
126 def __init__(self, *args, **kwargs):
127 super(MealItemForm, self).__init__(*args, **kwargs)
128
129 # Get the ingredient_id
130 ingredient_id = None
131
132 if kwargs.get('instance'):
133 ingredient_id = kwargs['instance'].ingredient_id
134
135 if kwargs.get('data'):
136 ingredient_id = kwargs['data']['ingredient']
137
138 # Filter the available ingredients
139 if ingredient_id:
140 self.fields['weight_unit'].queryset = \
141 IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)
142
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py
--- a/wger/nutrition/forms.py
+++ b/wger/nutrition/forms.py
@@ -55,6 +55,9 @@
class BmiForm(forms.ModelForm):
+ height = forms.DecimalField(widget=Html5NumberInput(),
+ max_value=999,
+ label=_('Height (cm)'))
weight = forms.DecimalField(widget=Html5NumberInput(),
max_value=999)
|
{"golden_diff": "diff --git a/wger/nutrition/forms.py b/wger/nutrition/forms.py\n--- a/wger/nutrition/forms.py\n+++ b/wger/nutrition/forms.py\n@@ -55,6 +55,9 @@\n \n \n class BmiForm(forms.ModelForm):\n+ height = forms.DecimalField(widget=Html5NumberInput(),\n+ max_value=999,\n+ label=_('Height (cm)'))\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n", "issue": "BMI And Calorie Calculator Not Working\nUsing this software in Linux Mint 13.\nWhen I enter my data into either the BMI calculator or the calorie estimator nothing happens.\nI have entered my height in cm and my weight in kgs.\nThe BMI calculator says my BMI = 0.\nI'd be happy with 10.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate 
(to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\nfrom wger.core.models import UserProfile\n\nfrom wger.nutrition.models import IngredientWeightUnit, Ingredient, MealItem\nfrom wger.utils.widgets import Html5NumberInput\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnitChooserForm(forms.Form):\n '''\n A small form to select an amount and a unit for an ingredient\n '''\n amount = forms.DecimalField(decimal_places=2,\n max_digits=5,\n localize=True)\n unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n\n def __init__(self, *args, **kwargs):\n super(UnitChooserForm, self).__init__(*args, **kwargs)\n\n if len(args) and args[0].get('ingredient'):\n ingredient_id = args[0]['ingredient']\n\n elif kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient_id']\n\n else:\n ingredient_id = -1\n\n self.fields['unit'].queryset = IngredientWeightUnit.objects.filter(\n ingredient_id=ingredient_id).select_related()\n\n\nclass BmiForm(forms.ModelForm):\n height = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999,\n label=_('Height (cm)'))\n weight = forms.DecimalField(widget=Html5NumberInput(),\n max_value=999)\n\n class Meta:\n model = UserProfile\n fields = ('height', )\n\n\nclass BmrForm(forms.ModelForm):\n '''\n Form for the basal metabolic rate\n '''\n weight = forms.DecimalField(widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('age', 'height', 'gender')\n\n\nclass PhysicalActivitiesForm(forms.ModelForm):\n '''\n Form for the additional physical activities\n '''\n class Meta:\n model = UserProfile\n fields = ('sleep_hours',\n 'work_hours',\n 'work_intensity',\n 'sport_hours',\n 'sport_intensity',\n 'freetime_hours',\n 'freetime_intensity')\n\n\nclass 
DailyCaloriesForm(forms.ModelForm):\n '''\n Form for the total daily calories needed\n '''\n\n base_calories = forms.IntegerField(label=_('Basic caloric intake'),\n help_text=_('Your basic caloric intake as calculated for '\n 'your data'),\n required=False,\n widget=Html5NumberInput())\n additional_calories = forms.IntegerField(label=_('Additional calories'),\n help_text=_('Additional calories to add to the base '\n 'rate (to substract, enter a negative '\n 'number)'),\n initial=0,\n required=False,\n widget=Html5NumberInput())\n\n class Meta:\n model = UserProfile\n fields = ('calories',)\n\n\nclass MealItemForm(forms.ModelForm):\n weight_unit = forms.ModelChoiceField(queryset=IngredientWeightUnit.objects.none(),\n empty_label=\"g\",\n required=False)\n ingredient = forms.ModelChoiceField(queryset=Ingredient.objects.all(),\n widget=forms.HiddenInput)\n\n class Meta:\n model = MealItem\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MealItemForm, self).__init__(*args, **kwargs)\n\n # Get the ingredient_id\n ingredient_id = None\n\n if kwargs.get('instance'):\n ingredient_id = kwargs['instance'].ingredient_id\n\n if kwargs.get('data'):\n ingredient_id = kwargs['data']['ingredient']\n\n # Filter the available ingredients\n if ingredient_id:\n self.fields['weight_unit'].queryset = \\\n IngredientWeightUnit.objects.filter(ingredient_id=ingredient_id)\n", "path": "wger/nutrition/forms.py"}]}
| 1,550 | 111 |
gh_patches_debug_13384 | rasdani/github-patches | git_diff | great-expectations__great_expectations-5460 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
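For illustration (independent of the repository file below), a minimal sketch contrasting the two Python 2-compatible ways of forcing true division that the issue mentions.

```python
# True division that behaves the same under Python 2 and Python 3.
from __future__ import division

def ratio(x, y):
    return x / y          # true division thanks to the __future__ import

def ratio_legacy(x, y):
    return 1. * x / y     # the pattern the issue suggests replacing

print(ratio(1, 2), ratio_legacy(1, 2))  # 0.5 0.5
```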
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/expectations/metrics/query_metrics/query_table.py`
Content:
```
1 from typing import Any, Dict, List, Optional, Union
2
3 from great_expectations.core.metric_domain_types import MetricDomainTypes
4 from great_expectations.execution_engine import (
5 SparkDFExecutionEngine,
6 SqlAlchemyExecutionEngine,
7 )
8 from great_expectations.expectations.metrics.import_manager import (
9 pyspark_sql_DataFrame,
10 pyspark_sql_Row,
11 pyspark_sql_SparkSession,
12 sa,
13 sqlalchemy_engine_Engine,
14 sqlalchemy_engine_Row,
15 )
16 from great_expectations.expectations.metrics.metric_provider import metric_value
17 from great_expectations.expectations.metrics.query_metric_provider import (
18 QueryMetricProvider,
19 )
20
21
22 class QueryTable(QueryMetricProvider):
23 metric_name = "query.table"
24 value_keys = ("query",)
25
26 @metric_value(engine=SqlAlchemyExecutionEngine)
27 def _sqlalchemy(
28 cls,
29 execution_engine: SqlAlchemyExecutionEngine,
30 metric_domain_kwargs: dict,
31 metric_value_kwargs: dict,
32 metrics: Dict[str, Any],
33 runtime_configuration: dict,
34 ) -> List[sqlalchemy_engine_Row]:
35 query: Optional[str] = metric_value_kwargs.get(
36 "query"
37 ) or cls.default_kwarg_values.get("query")
38
39 selectable: Union[sa.sql.Selectable, str]
40 selectable, _, _ = execution_engine.get_compute_domain(
41 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
42 )
43
44 if isinstance(selectable, sa.Table):
45 query = query.format(active_batch=selectable)
46 elif isinstance(
47 selectable, sa.sql.Subquery
48 ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break
49 query = query.format(active_batch=f"({selectable})")
50 elif isinstance(
51 selectable, sa.sql.Select
52 ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query
53 query = query.format(
54 active_batch=f'({selectable.compile(compile_kwargs={"literal_binds": True})}) AS subselect',
55 )
56 else:
57 query = query.format(active_batch=f"({selectable})")
58
59 engine: sqlalchemy_engine_Engine = execution_engine.engine
60 result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()
61
62 return result
63
64 @metric_value(engine=SparkDFExecutionEngine)
65 def _spark(
66 cls,
67 execution_engine: SparkDFExecutionEngine,
68 metric_domain_kwargs: dict,
69 metric_value_kwargs: dict,
70 metrics: Dict[str, Any],
71 runtime_configuration: dict,
72 ) -> List[pyspark_sql_Row]:
73 query: Optional[str] = metric_value_kwargs.get(
74 "query"
75 ) or cls.default_kwarg_values.get("query")
76
77 df: pyspark_sql_DataFrame
78 df, _, _ = execution_engine.get_compute_domain(
79 metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
80 )
81
82 df.createOrReplaceTempView("tmp_view")
83 query = query.format(active_batch="tmp_view")
84
85 engine: pyspark_sql_SparkSession = execution_engine.spark
86 result: List[pyspark_sql_Row] = engine.sql(query).collect()
87
88 return result
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/expectations/metrics/query_metrics/query_table.py b/great_expectations/expectations/metrics/query_metrics/query_table.py
--- a/great_expectations/expectations/metrics/query_metrics/query_table.py
+++ b/great_expectations/expectations/metrics/query_metrics/query_table.py
@@ -23,6 +23,7 @@
metric_name = "query.table"
value_keys = ("query",)
+ # <snippet>
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
@@ -60,6 +61,7 @@
result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()
return result
+ # </snippet>
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
|
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/query_metrics/query_table.py b/great_expectations/expectations/metrics/query_metrics/query_table.py\n--- a/great_expectations/expectations/metrics/query_metrics/query_table.py\n+++ b/great_expectations/expectations/metrics/query_metrics/query_table.py\n@@ -23,6 +23,7 @@\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n \n+ # <snippet>\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n@@ -60,6 +61,7 @@\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n \n return result\n+ # </snippet>\n \n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Union\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import (\n pyspark_sql_DataFrame,\n pyspark_sql_Row,\n pyspark_sql_SparkSession,\n sa,\n sqlalchemy_engine_Engine,\n sqlalchemy_engine_Row,\n)\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.query_metric_provider import (\n QueryMetricProvider,\n)\n\n\nclass QueryTable(QueryMetricProvider):\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[sqlalchemy_engine_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n selectable: Union[sa.sql.Selectable, str]\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n if isinstance(selectable, sa.Table):\n query = query.format(active_batch=selectable)\n elif isinstance(\n selectable, sa.sql.Subquery\n ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break\n query = query.format(active_batch=f\"({selectable})\")\n elif isinstance(\n selectable, sa.sql.Select\n ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query\n query = query.format(\n active_batch=f'({selectable.compile(compile_kwargs={\"literal_binds\": True})}) AS subselect',\n )\n else:\n query = query.format(active_batch=f\"({selectable})\")\n\n engine: sqlalchemy_engine_Engine = execution_engine.engine\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n\n return result\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[pyspark_sql_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n df: pyspark_sql_DataFrame\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, 
domain_type=MetricDomainTypes.TABLE\n )\n\n df.createOrReplaceTempView(\"tmp_view\")\n query = query.format(active_batch=\"tmp_view\")\n\n engine: pyspark_sql_SparkSession = execution_engine.spark\n result: List[pyspark_sql_Row] = engine.sql(query).collect()\n\n return result\n", "path": "great_expectations/expectations/metrics/query_metrics/query_table.py"}], "after_files": [{"content": "from typing import Any, Dict, List, Optional, Union\n\nfrom great_expectations.core.metric_domain_types import MetricDomainTypes\nfrom great_expectations.execution_engine import (\n SparkDFExecutionEngine,\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.import_manager import (\n pyspark_sql_DataFrame,\n pyspark_sql_Row,\n pyspark_sql_SparkSession,\n sa,\n sqlalchemy_engine_Engine,\n sqlalchemy_engine_Row,\n)\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.expectations.metrics.query_metric_provider import (\n QueryMetricProvider,\n)\n\n\nclass QueryTable(QueryMetricProvider):\n metric_name = \"query.table\"\n value_keys = (\"query\",)\n\n # <snippet>\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(\n cls,\n execution_engine: SqlAlchemyExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[sqlalchemy_engine_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n selectable: Union[sa.sql.Selectable, str]\n selectable, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n if isinstance(selectable, sa.Table):\n query = query.format(active_batch=selectable)\n elif isinstance(\n selectable, sa.sql.Subquery\n ): # Specifying a runtime query in a RuntimeBatchRequest returns the active bacth as a Subquery; sectioning the active batch off w/ parentheses ensures flow of operations doesn't break\n query = query.format(active_batch=f\"({selectable})\")\n elif isinstance(\n selectable, sa.sql.Select\n ): # Specifying a row_condition returns the active batch as a Select object, requiring compilation & aliasing when formatting the parameterized query\n query = query.format(\n active_batch=f'({selectable.compile(compile_kwargs={\"literal_binds\": True})}) AS subselect',\n )\n else:\n query = query.format(active_batch=f\"({selectable})\")\n\n engine: sqlalchemy_engine_Engine = execution_engine.engine\n result: List[sqlalchemy_engine_Row] = engine.execute(sa.text(query)).fetchall()\n\n return result\n # </snippet>\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(\n cls,\n execution_engine: SparkDFExecutionEngine,\n metric_domain_kwargs: dict,\n metric_value_kwargs: dict,\n metrics: Dict[str, Any],\n runtime_configuration: dict,\n ) -> List[pyspark_sql_Row]:\n query: Optional[str] = metric_value_kwargs.get(\n \"query\"\n ) or cls.default_kwarg_values.get(\"query\")\n\n df: pyspark_sql_DataFrame\n df, _, _ = execution_engine.get_compute_domain(\n metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE\n )\n\n df.createOrReplaceTempView(\"tmp_view\")\n query = query.format(active_batch=\"tmp_view\")\n\n engine: pyspark_sql_SparkSession = execution_engine.spark\n result: List[pyspark_sql_Row] = engine.sql(query).collect()\n\n return result\n", "path": "great_expectations/expectations/metrics/query_metrics/query_table.py"}]}
| 1,163 | 183 |
gh_patches_debug_31612
|
rasdani/github-patches
|
git_diff
|
tough-dev-school__education-backend-855
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Слать в лототрон только один ответ от одного пользователя
Сейчас, если пользователь дал три ответа на домашку, то при кросс-проверке все три ответа уйдут разным студентам. Это — плохо, нужно либо слать только первый ответ, либо собирать все ответы в пачку и слать их одному пользователю.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/homework/services/answer_crosscheck_dispatcher.py`
Content:
```
1 from typing import Optional
2
3 from django.db import transaction
4 from django.db.models import Count, Q, QuerySet
5
6 from homework.models import Answer, AnswerCrossCheck
7 from users.models import User
8
9
10 class AnswerCrossCheckDispatcher:
11 """Given a bunch of answers and users, create a cross-check record
12 for each of them, making sure each answer has a user to check
13 and number of answers if equal for each user
14 """
15 def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):
16 self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])
17 self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')
18 self.answers_per_user = answers_per_user
19
20 @transaction.atomic
21 def __call__(self) -> list[AnswerCrossCheck]:
22 crosschecks = list()
23 for user in self.users.iterator():
24 for _ in range(self.answers_per_user):
25 answer = self.get_answer_to_check(user)
26 if answer is not None:
27 crosschecks.append(
28 self.give_answer_to_user(answer, user),
29 )
30
31 return crosschecks
32
33 def get_answer_to_check(self, user: User) -> Optional[Answer]:
34 return self.get_answers_with_crosscheck_count() \
35 .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \
36 .exclude(already_checking__gte=1) \
37 .exclude(author=user) \
38 .exclude(do_not_crosscheck=True) \
39 .order_by('crosscheck_count').first()
40
41 def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:
42 return AnswerCrossCheck.objects.create(answer=answer, checker=user)
43
44 def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:
45 return self.answers.annotate(
46 crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),
47 )
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/homework/services/answer_crosscheck_dispatcher.py b/src/homework/services/answer_crosscheck_dispatcher.py
--- a/src/homework/services/answer_crosscheck_dispatcher.py
+++ b/src/homework/services/answer_crosscheck_dispatcher.py
@@ -9,11 +9,12 @@
class AnswerCrossCheckDispatcher:
"""Given a bunch of answers and users, create a cross-check record
- for each of them, making sure each answer has a user to check
- and number of answers if equal for each user
+ for each of them, making sure the first answer of each user has a user to
+ check and number of answers if equal for each user
"""
def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):
self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])
+ self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')
self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')
self.answers_per_user = answers_per_user
@@ -27,11 +28,11 @@
crosschecks.append(
self.give_answer_to_user(answer, user),
)
-
return crosschecks
def get_answer_to_check(self, user: User) -> Optional[Answer]:
return self.get_answers_with_crosscheck_count() \
+ .filter(id__in=self.unique_author_answers) \
.annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \
.exclude(already_checking__gte=1) \
.exclude(author=user) \
|
{"golden_diff": "diff --git a/src/homework/services/answer_crosscheck_dispatcher.py b/src/homework/services/answer_crosscheck_dispatcher.py\n--- a/src/homework/services/answer_crosscheck_dispatcher.py\n+++ b/src/homework/services/answer_crosscheck_dispatcher.py\n@@ -9,11 +9,12 @@\n \n class AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n- for each of them, making sure each answer has a user to check\n- and number of answers if equal for each user\n+ for each of them, making sure the first answer of each user has a user to\n+ check and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n+ self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n \n@@ -27,11 +28,11 @@\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n-\n return crosschecks\n \n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n+ .filter(id__in=self.unique_author_answers) \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n", "issue": "\u0421\u043b\u0430\u0442\u044c \u0432 \u043b\u043e\u0442\u043e\u0442\u0440\u043e\u043d \u0442\u043e\u043b\u044c\u043a\u043e \u043e\u0434\u0438\u043d \u043e\u0442\u0432\u0435\u0442 \u043e\u0442 \u043e\u0434\u043d\u043e\u0433\u043e \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f\n\u0421\u0435\u0439\u0447\u0430\u0441, \u0435\u0441\u043b\u0438 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0434\u0430\u043b \u0442\u0440\u0438 \u043e\u0442\u0432\u0435\u0442\u0430 \u043d\u0430 \u0434\u043e\u043c\u0430\u0448\u043a\u0443, \u0442\u043e \u043f\u0440\u0438 \u043a\u0440\u043e\u0441\u0441-\u043f\u0440\u043e\u0432\u0435\u0440\u043a\u0435 \u0432\u0441\u0435 \u0442\u0440\u0438 \u043e\u0442\u0432\u0435\u0442\u0430 \u0443\u0439\u0434\u0443\u0442 \u0440\u0430\u0437\u043d\u044b\u043c \u0441\u0442\u0443\u0434\u0435\u043d\u0442\u0430\u043c. 
\u042d\u0442\u043e \u2014 \u043f\u043b\u043e\u0445\u043e, \u043d\u0443\u0436\u043d\u043e \u043b\u0438\u0431\u043e \u0441\u043b\u0430\u0442\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u043f\u0435\u0440\u0432\u044b\u0439 \u043e\u0442\u0432\u0435\u0442, \u043b\u0438\u0431\u043e \u0441\u043e\u0431\u0438\u0440\u0430\u0442\u044c \u0432\u0441\u0435 \u043e\u0442\u0432\u0435\u0442\u044b \u0432 \u043f\u0430\u0447\u043a\u0443 \u0438 \u0441\u043b\u0430\u0442\u044c \u0438\u0445 \u043e\u0434\u043d\u043e\u043c\u0443 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044e.\n", "before_files": [{"content": "from typing import Optional\n\nfrom django.db import transaction\nfrom django.db.models import Count, Q, QuerySet\n\nfrom homework.models import Answer, AnswerCrossCheck\nfrom users.models import User\n\n\nclass AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n for each of them, making sure each answer has a user to check\n and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n\n @transaction.atomic\n def __call__(self) -> list[AnswerCrossCheck]:\n crosschecks = list()\n for user in self.users.iterator():\n for _ in range(self.answers_per_user):\n answer = self.get_answer_to_check(user)\n if answer is not None:\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n\n return crosschecks\n\n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n .exclude(do_not_crosscheck=True) \\\n .order_by('crosscheck_count').first()\n\n def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:\n return AnswerCrossCheck.objects.create(answer=answer, checker=user)\n\n def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:\n return self.answers.annotate(\n crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),\n )\n", "path": "src/homework/services/answer_crosscheck_dispatcher.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom django.db import transaction\nfrom django.db.models import Count, Q, QuerySet\n\nfrom homework.models import Answer, AnswerCrossCheck\nfrom users.models import User\n\n\nclass AnswerCrossCheckDispatcher:\n \"\"\"Given a bunch of answers and users, create a cross-check record\n for each of them, making sure the first answer of each user has a user to\n check and number of answers if equal for each user\n \"\"\"\n def __init__(self, answers: QuerySet[Answer], answers_per_user: int = 3):\n self.answers = Answer.objects.filter(pk__in=[answer.pk for answer in answers])\n self.unique_author_answers = self.answers.order_by('author_id', 'created').distinct('author_id')\n self.users = User.objects.filter(pk__in=[answer.author_id for answer in answers]).order_by('?')\n self.answers_per_user = answers_per_user\n\n @transaction.atomic\n def __call__(self) -> list[AnswerCrossCheck]:\n crosschecks = list()\n for user in self.users.iterator():\n for _ in range(self.answers_per_user):\n answer = 
self.get_answer_to_check(user)\n if answer is not None:\n crosschecks.append(\n self.give_answer_to_user(answer, user),\n )\n return crosschecks\n\n def get_answer_to_check(self, user: User) -> Optional[Answer]:\n return self.get_answers_with_crosscheck_count() \\\n .filter(id__in=self.unique_author_answers) \\\n .annotate(already_checking=Count('answercrosscheck', filter=Q(answercrosscheck__checker_id=user.id))) \\\n .exclude(already_checking__gte=1) \\\n .exclude(author=user) \\\n .exclude(do_not_crosscheck=True) \\\n .order_by('crosscheck_count').first()\n\n def give_answer_to_user(self, answer: Answer, user: User) -> AnswerCrossCheck:\n return AnswerCrossCheck.objects.create(answer=answer, checker=user)\n\n def get_answers_with_crosscheck_count(self) -> QuerySet[Answer]:\n return self.answers.annotate(\n crosscheck_count=Count('answercrosscheck', filter=Q(answercrosscheck__checker__in=self.users)),\n )\n", "path": "src/homework/services/answer_crosscheck_dispatcher.py"}]}
| 880 | 382 |
gh_patches_debug_17240
|
rasdani/github-patches
|
git_diff
|
napari__napari-6139
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Removing comments from PR does not work
## 🐛 Bug
After merging it looks like the action for removing comments does not work.
I will be happy to fast merge potential bugfix without the standard 24 hours as it needs to be merged to test.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/remove_html_comments_from_pr.py`
Content:
```
1 """
2 Edit pull request description to remove HTML comments
3
4 We might want to remove section with markdown task lists that are completely empty
5 """
6
7 import re
8 import sys
9 from os import environ
10
11 import requests
12
13
14 def remove_html_comments(text):
15 # Regular expression to remove HTML comments
16 # [^\S\r\n] is whitespace but not new line
17 html_comment_pattern = r"[^\S\r\n]*<!--(.*?)-->[^\S\r\n]*\n?"
18 return re.sub(html_comment_pattern, "", text, flags=re.DOTALL)
19
20
21 def edit_pull_request_description(repo, pull_request_number, access_token):
22 # GitHub API base URL
23 base_url = "https://api.github.com"
24
25 # Prepare the headers with the access token
26 headers = {"Authorization": f"token {access_token}"}
27
28 # Get the current pull request description
29 pr_url = f"{base_url}/repos/{repo}/pulls/{pull_request_number}"
30 response = requests.get(pr_url, headers=headers)
31 response.raise_for_status()
32 response_json = response.json()
33 current_description = response_json["body"]
34
35 # Remove HTML comments from the description
36 edited_description = remove_html_comments(current_description)
37 if edited_description == current_description:
38 print("No HTML comments found in the pull request description")
39 return
40
41 # Update the pull request description
42 update_pr_url = f"{base_url}/repos/{repo}/pulls/{pull_request_number}"
43 payload = {"body": edited_description}
44 response = requests.patch(update_pr_url, json=payload, headers=headers)
45 response.raise_for_status()
46
47 if response.status_code == 200:
48 print(
49 f"Pull request #{pull_request_number} description has been updated successfully!"
50 )
51 else:
52 print(
53 f"Failed to update pull request description. Status code: {response.status_code}"
54 )
55
56
57 if __name__ == "__main__":
58 # Replace with your repository and pull request number
59 # get cuurrent repository name from github actions
60 repository_name = environ.get("GITHUB_REPOSITORY")
61 if repository_name == "napari/napari":
62 sys.exit(0)
63
64 # get current PR number from github actions
65 github_ref = environ.get("GITHUB_REF")
66 refs, pull, number, merge = github_ref.split('/')
67 assert refs == 'refs'
68 assert pull == 'pull'
69 assert merge == 'merge'
70
71 # Replace with your GitHub access token
72 access_token = environ.get("GITHUB_TOKEN")
73
74 edit_pull_request_description(repository_name, number, access_token)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tools/remove_html_comments_from_pr.py b/tools/remove_html_comments_from_pr.py
--- a/tools/remove_html_comments_from_pr.py
+++ b/tools/remove_html_comments_from_pr.py
@@ -10,6 +10,8 @@
import requests
+REPO = 'napari/napari'
+
def remove_html_comments(text):
# Regular expression to remove HTML comments
@@ -55,10 +57,12 @@
if __name__ == "__main__":
+ print('Will inspect PR description to remove html comments.')
# Replace with your repository and pull request number
# get cuurrent repository name from github actions
repository_name = environ.get("GITHUB_REPOSITORY")
- if repository_name == "napari/napari":
+ if repository_name != REPO:
+ print('Not on main repo, aborting with success')
sys.exit(0)
# get current PR number from github actions
|
{"golden_diff": "diff --git a/tools/remove_html_comments_from_pr.py b/tools/remove_html_comments_from_pr.py\n--- a/tools/remove_html_comments_from_pr.py\n+++ b/tools/remove_html_comments_from_pr.py\n@@ -10,6 +10,8 @@\n \n import requests\n \n+REPO = 'napari/napari'\n+\n \n def remove_html_comments(text):\n # Regular expression to remove HTML comments\n@@ -55,10 +57,12 @@\n \n \n if __name__ == \"__main__\":\n+ print('Will inspect PR description to remove html comments.')\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n- if repository_name == \"napari/napari\":\n+ if repository_name != REPO:\n+ print('Not on main repo, aborting with success')\n sys.exit(0)\n \n # get current PR number from github actions\n", "issue": "Removing comments from PR does not work\n## \ud83d\udc1b Bug\r\nAfter merging it looks like the action for removing comments does not work. \r\n\r\nI will be happy to fast merge potential bugfix without the standard 24 hours as it needs to be merged to test. \r\n\n", "before_files": [{"content": "\"\"\"\nEdit pull request description to remove HTML comments\n\nWe might want to remove section with markdown task lists that are completely empty\n\"\"\"\n\nimport re\nimport sys\nfrom os import environ\n\nimport requests\n\n\ndef remove_html_comments(text):\n # Regular expression to remove HTML comments\n # [^\\S\\r\\n] is whitespace but not new line\n html_comment_pattern = r\"[^\\S\\r\\n]*<!--(.*?)-->[^\\S\\r\\n]*\\n?\"\n return re.sub(html_comment_pattern, \"\", text, flags=re.DOTALL)\n\n\ndef edit_pull_request_description(repo, pull_request_number, access_token):\n # GitHub API base URL\n base_url = \"https://api.github.com\"\n\n # Prepare the headers with the access token\n headers = {\"Authorization\": f\"token {access_token}\"}\n\n # Get the current pull request description\n pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n response = requests.get(pr_url, headers=headers)\n response.raise_for_status()\n response_json = response.json()\n current_description = response_json[\"body\"]\n\n # Remove HTML comments from the description\n edited_description = remove_html_comments(current_description)\n if edited_description == current_description:\n print(\"No HTML comments found in the pull request description\")\n return\n\n # Update the pull request description\n update_pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n payload = {\"body\": edited_description}\n response = requests.patch(update_pr_url, json=payload, headers=headers)\n response.raise_for_status()\n\n if response.status_code == 200:\n print(\n f\"Pull request #{pull_request_number} description has been updated successfully!\"\n )\n else:\n print(\n f\"Failed to update pull request description. 
Status code: {response.status_code}\"\n )\n\n\nif __name__ == \"__main__\":\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n if repository_name == \"napari/napari\":\n sys.exit(0)\n\n # get current PR number from github actions\n github_ref = environ.get(\"GITHUB_REF\")\n refs, pull, number, merge = github_ref.split('/')\n assert refs == 'refs'\n assert pull == 'pull'\n assert merge == 'merge'\n\n # Replace with your GitHub access token\n access_token = environ.get(\"GITHUB_TOKEN\")\n\n edit_pull_request_description(repository_name, number, access_token)\n", "path": "tools/remove_html_comments_from_pr.py"}], "after_files": [{"content": "\"\"\"\nEdit pull request description to remove HTML comments\n\nWe might want to remove section with markdown task lists that are completely empty\n\"\"\"\n\nimport re\nimport sys\nfrom os import environ\n\nimport requests\n\nREPO = 'napari/napari'\n\n\ndef remove_html_comments(text):\n # Regular expression to remove HTML comments\n # [^\\S\\r\\n] is whitespace but not new line\n html_comment_pattern = r\"[^\\S\\r\\n]*<!--(.*?)-->[^\\S\\r\\n]*\\n?\"\n return re.sub(html_comment_pattern, \"\", text, flags=re.DOTALL)\n\n\ndef edit_pull_request_description(repo, pull_request_number, access_token):\n # GitHub API base URL\n base_url = \"https://api.github.com\"\n\n # Prepare the headers with the access token\n headers = {\"Authorization\": f\"token {access_token}\"}\n\n # Get the current pull request description\n pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n response = requests.get(pr_url, headers=headers)\n response.raise_for_status()\n response_json = response.json()\n current_description = response_json[\"body\"]\n\n # Remove HTML comments from the description\n edited_description = remove_html_comments(current_description)\n if edited_description == current_description:\n print(\"No HTML comments found in the pull request description\")\n return\n\n # Update the pull request description\n update_pr_url = f\"{base_url}/repos/{repo}/pulls/{pull_request_number}\"\n payload = {\"body\": edited_description}\n response = requests.patch(update_pr_url, json=payload, headers=headers)\n response.raise_for_status()\n\n if response.status_code == 200:\n print(\n f\"Pull request #{pull_request_number} description has been updated successfully!\"\n )\n else:\n print(\n f\"Failed to update pull request description. Status code: {response.status_code}\"\n )\n\n\nif __name__ == \"__main__\":\n print('Will inspect PR description to remove html comments.')\n # Replace with your repository and pull request number\n # get cuurrent repository name from github actions\n repository_name = environ.get(\"GITHUB_REPOSITORY\")\n if repository_name != REPO:\n print('Not on main repo, aborting with success')\n sys.exit(0)\n\n # get current PR number from github actions\n github_ref = environ.get(\"GITHUB_REF\")\n refs, pull, number, merge = github_ref.split('/')\n assert refs == 'refs'\n assert pull == 'pull'\n assert merge == 'merge'\n\n # Replace with your GitHub access token\n access_token = environ.get(\"GITHUB_TOKEN\")\n\n edit_pull_request_description(repository_name, number, access_token)\n", "path": "tools/remove_html_comments_from_pr.py"}]}
| 1,016 | 207 |
gh_patches_debug_18479
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1744
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
How could I reduce number of workers?
Could I reduce the number_of_workers?
---------
I run the doccano in my machine use this code.
```
doccano init
doccano create user ***
doccano web server --port ***
```
And then I got this log:
```
Booting worker with pid: 19
Booting worker with pid: 20
...
Booting worker with pid: 157
```
It run lots of worker and it took up a lot of memory. So, can I change the number_of_worker varlible. I saw the default number_of_worker= ``` multiprocessing.cpu_count()*2+1 ```. How could I change it?
Your Environment
---------
* Operating System: Linux
* Python Version Used: Python38
* When you install doccano: 2021-11-30
* How did you install doccano (Heroku button etc): pip install doccano
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cli.py`
Content:
```
1 import argparse
2 import multiprocessing
3 import os
4 import platform
5 import sys
6 from pathlib import Path
7
8 import django
9 from django.core import management
10
11 from .config.celery import app
12
13 DOCCANO_HOME = os.path.expanduser(os.environ.get("DOCCANO_HOME", "~/doccano"))
14 Path(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)
15 os.environ["STANDALONE"] = "True"
16 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
17 os.environ.setdefault("DATABASE_URL", os.path.join(f"sqlite:///{DOCCANO_HOME}", "db.sqlite3"))
18 os.environ.setdefault("MEDIA_ROOT", os.path.join(DOCCANO_HOME, "media"))
19 base = os.path.abspath(os.path.dirname(__file__))
20 sys.path.append(base)
21 django.setup()
22 parser = argparse.ArgumentParser(description="doccano, text annotation for machine learning practitioners.")
23
24
25 def number_of_workers():
26 return (multiprocessing.cpu_count() * 2) + 1
27
28
29 def is_windows():
30 return platform.system() == "Windows"
31
32
33 def run_on_nix(args):
34 import gunicorn.app.base
35 import gunicorn.util
36
37 class StandaloneApplication(gunicorn.app.base.BaseApplication):
38 def __init__(self, options=None):
39 self.options = options or {}
40 super().__init__()
41
42 def load_config(self):
43 config = {
44 key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None
45 }
46 for key, value in config.items():
47 self.cfg.set(key.lower(), value)
48
49 def load(self):
50 return gunicorn.util.import_app("config.wsgi")
51
52 options = {
53 "bind": "%s:%s" % ("0.0.0.0", args.port),
54 "workers": number_of_workers(),
55 "chdir": base,
56 "capture_output": True,
57 "loglevel": "debug",
58 }
59 StandaloneApplication(options).run()
60
61
62 def run_on_windows(args):
63 from waitress import serve
64
65 from config.wsgi import application
66
67 serve(application, port=args.port)
68
69
70 def command_db_init(args):
71 print("Setup Database.")
72 management.call_command("wait_for_db")
73 management.call_command("migrate")
74 management.call_command("create_roles")
75
76
77 def command_user_create(args):
78 print("Create admin user.")
79 management.call_command(
80 "create_admin", "--noinput", username=args.username, password=args.password, email=args.email
81 )
82
83
84 def command_migrate(args):
85 print("Start migration.")
86 management.call_command("migrate")
87
88
89 def command_run_webserver(args):
90 print(f"Starting server with port {args.port}.")
91 if is_windows():
92 run_on_windows(args)
93 else:
94 run_on_nix(args)
95
96
97 def command_run_task_queue(args):
98 print("Starting task queue.")
99 argv = [
100 "--app=config",
101 "--workdir={}".format(base),
102 "worker",
103 "--loglevel=info",
104 "--concurrency={}".format(args.concurrency),
105 ]
106 if is_windows():
107 argv.append("--pool=solo")
108 app.worker_main(argv=argv)
109
110
111 def command_help(args):
112 print(parser.parse_args([args.command, "--help"]))
113
114
115 def main():
116 # Create a command line parser.
117 subparsers = parser.add_subparsers()
118
119 # Create a parser for db initialization.
120 parser_init = subparsers.add_parser("init", help="see `init -h`")
121 parser_init.set_defaults(handler=command_db_init)
122
123 # Create a parser for migration.
124 parser_migration = subparsers.add_parser("migrate", help="Updates database schema.")
125 parser_migration.set_defaults(handler=command_migrate)
126
127 # Create a parser for user creation.
128 parser_create_user = subparsers.add_parser("createuser", help="see `createuser -h`")
129 parser_create_user.add_argument("--username", type=str, default="admin", help="admin username")
130 parser_create_user.add_argument("--password", type=str, default="password", help="admin password")
131 parser_create_user.add_argument("--email", type=str, default="[email protected]", help="admin email")
132 parser_create_user.set_defaults(handler=command_user_create)
133
134 # Create a parser for web server.
135 parser_server = subparsers.add_parser("webserver", help="see `webserver -h`")
136 parser_server.add_argument("--port", type=int, default=8000, help="port number")
137 parser_server.set_defaults(handler=command_run_webserver)
138
139 # Create a parser for task queue.
140 parser_queue = subparsers.add_parser("task", help="see `task -h`")
141 parser_queue.add_argument("--concurrency", type=int, default=2, help="concurrency")
142 parser_queue.set_defaults(handler=command_run_task_queue)
143
144 # Create a parser for help.
145 parser_help = subparsers.add_parser("help", help="see `help -h`")
146 parser_help.add_argument("command", help="command name which help is shown")
147 parser_help.set_defaults(handler=command_help)
148
149 # Dispatch handler.
150 args = parser.parse_args()
151 if hasattr(args, "handler"):
152 args.handler(args)
153 else:
154 # If specified unknown command, show help.
155 parser.print_help()
156
157
158 if __name__ == "__main__":
159 main()
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/cli.py b/backend/cli.py
--- a/backend/cli.py
+++ b/backend/cli.py
@@ -51,7 +51,7 @@
options = {
"bind": "%s:%s" % ("0.0.0.0", args.port),
- "workers": number_of_workers(),
+ "workers": args.workers,
"chdir": base,
"capture_output": True,
"loglevel": "debug",
@@ -134,6 +134,7 @@
# Create a parser for web server.
parser_server = subparsers.add_parser("webserver", help="see `webserver -h`")
parser_server.add_argument("--port", type=int, default=8000, help="port number")
+ parser_server.add_argument("--workers", type=int, default=number_of_workers(), help="the number of workers")
parser_server.set_defaults(handler=command_run_webserver)
# Create a parser for task queue.
|
{"golden_diff": "diff --git a/backend/cli.py b/backend/cli.py\n--- a/backend/cli.py\n+++ b/backend/cli.py\n@@ -51,7 +51,7 @@\n \n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n- \"workers\": number_of_workers(),\n+ \"workers\": args.workers,\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n@@ -134,6 +134,7 @@\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n+ parser_server.add_argument(\"--workers\", type=int, default=number_of_workers(), help=\"the number of workers\")\n parser_server.set_defaults(handler=command_run_webserver)\n \n # Create a parser for task queue.\n", "issue": "How could I reduce number of workers?\nCould I reduce the number_of_workers?\r\n---------\r\nI run the doccano in my machine use this code.\r\n```\r\ndoccano init\r\ndoccano create user ***\r\ndoccano web server --port ***\r\n```\r\nAnd then I got this log: \r\n```\r\nBooting worker with pid: 19\r\nBooting worker with pid: 20\r\n...\r\nBooting worker with pid: 157\r\n```\r\nIt run lots of worker and it took up a lot of memory. So, can I change the number_of_worker varlible. I saw the default number_of_worker= ``` multiprocessing.cpu_count()*2+1 ```. How could I change it?\r\n\r\n\r\nYour Environment\r\n---------\r\n* Operating System: Linux\r\n* Python Version Used: Python38\r\n* When you install doccano: 2021-11-30\r\n* How did you install doccano (Heroku button etc): pip install doccano\r\n\n", "before_files": [{"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport django\nfrom django.core import management\n\nfrom .config.celery import app\n\nDOCCANO_HOME = os.path.expanduser(os.environ.get(\"DOCCANO_HOME\", \"~/doccano\"))\nPath(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)\nos.environ[\"STANDALONE\"] = \"True\"\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.production\")\nos.environ.setdefault(\"DATABASE_URL\", os.path.join(f\"sqlite:///{DOCCANO_HOME}\", \"db.sqlite3\"))\nos.environ.setdefault(\"MEDIA_ROOT\", os.path.join(DOCCANO_HOME, \"media\"))\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\ndjango.setup()\nparser = argparse.ArgumentParser(description=\"doccano, text annotation for machine learning practitioners.\")\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {\n key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app(\"config.wsgi\")\n\n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n \"workers\": number_of_workers(),\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n\n from config.wsgi import application\n\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print(\"Setup Database.\")\n 
management.call_command(\"wait_for_db\")\n management.call_command(\"migrate\")\n management.call_command(\"create_roles\")\n\n\ndef command_user_create(args):\n print(\"Create admin user.\")\n management.call_command(\n \"create_admin\", \"--noinput\", username=args.username, password=args.password, email=args.email\n )\n\n\ndef command_migrate(args):\n print(\"Start migration.\")\n management.call_command(\"migrate\")\n\n\ndef command_run_webserver(args):\n print(f\"Starting server with port {args.port}.\")\n if is_windows():\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print(\"Starting task queue.\")\n argv = [\n \"--app=config\",\n \"--workdir={}\".format(base),\n \"worker\",\n \"--loglevel=info\",\n \"--concurrency={}\".format(args.concurrency),\n ]\n if is_windows():\n argv.append(\"--pool=solo\")\n app.worker_main(argv=argv)\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, \"--help\"]))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser(\"init\", help=\"see `init -h`\")\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for migration.\n parser_migration = subparsers.add_parser(\"migrate\", help=\"Updates database schema.\")\n parser_migration.set_defaults(handler=command_migrate)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser(\"createuser\", help=\"see `createuser -h`\")\n parser_create_user.add_argument(\"--username\", type=str, default=\"admin\", help=\"admin username\")\n parser_create_user.add_argument(\"--password\", type=str, default=\"password\", help=\"admin password\")\n parser_create_user.add_argument(\"--email\", type=str, default=\"[email protected]\", help=\"admin email\")\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser(\"task\", help=\"see `task -h`\")\n parser_queue.add_argument(\"--concurrency\", type=int, default=2, help=\"concurrency\")\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser(\"help\", help=\"see `help -h`\")\n parser_help.add_argument(\"command\", help=\"command name which help is shown\")\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, \"handler\"):\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "backend/cli.py"}], "after_files": [{"content": "import argparse\nimport multiprocessing\nimport os\nimport platform\nimport sys\nfrom pathlib import Path\n\nimport django\nfrom django.core import management\n\nfrom .config.celery import app\n\nDOCCANO_HOME = os.path.expanduser(os.environ.get(\"DOCCANO_HOME\", \"~/doccano\"))\nPath(DOCCANO_HOME).mkdir(parents=True, exist_ok=True)\nos.environ[\"STANDALONE\"] = \"True\"\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.production\")\nos.environ.setdefault(\"DATABASE_URL\", os.path.join(f\"sqlite:///{DOCCANO_HOME}\", 
\"db.sqlite3\"))\nos.environ.setdefault(\"MEDIA_ROOT\", os.path.join(DOCCANO_HOME, \"media\"))\nbase = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(base)\ndjango.setup()\nparser = argparse.ArgumentParser(description=\"doccano, text annotation for machine learning practitioners.\")\n\n\ndef number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1\n\n\ndef is_windows():\n return platform.system() == \"Windows\"\n\n\ndef run_on_nix(args):\n import gunicorn.app.base\n import gunicorn.util\n\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n def __init__(self, options=None):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n config = {\n key: value for key, value in self.options.items() if key in self.cfg.settings and value is not None\n }\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return gunicorn.util.import_app(\"config.wsgi\")\n\n options = {\n \"bind\": \"%s:%s\" % (\"0.0.0.0\", args.port),\n \"workers\": args.workers,\n \"chdir\": base,\n \"capture_output\": True,\n \"loglevel\": \"debug\",\n }\n StandaloneApplication(options).run()\n\n\ndef run_on_windows(args):\n from waitress import serve\n\n from config.wsgi import application\n\n serve(application, port=args.port)\n\n\ndef command_db_init(args):\n print(\"Setup Database.\")\n management.call_command(\"wait_for_db\")\n management.call_command(\"migrate\")\n management.call_command(\"create_roles\")\n\n\ndef command_user_create(args):\n print(\"Create admin user.\")\n management.call_command(\n \"create_admin\", \"--noinput\", username=args.username, password=args.password, email=args.email\n )\n\n\ndef command_migrate(args):\n print(\"Start migration.\")\n management.call_command(\"migrate\")\n\n\ndef command_run_webserver(args):\n print(f\"Starting server with port {args.port}.\")\n if is_windows():\n run_on_windows(args)\n else:\n run_on_nix(args)\n\n\ndef command_run_task_queue(args):\n print(\"Starting task queue.\")\n argv = [\n \"--app=config\",\n \"--workdir={}\".format(base),\n \"worker\",\n \"--loglevel=info\",\n \"--concurrency={}\".format(args.concurrency),\n ]\n if is_windows():\n argv.append(\"--pool=solo\")\n app.worker_main(argv=argv)\n\n\ndef command_help(args):\n print(parser.parse_args([args.command, \"--help\"]))\n\n\ndef main():\n # Create a command line parser.\n subparsers = parser.add_subparsers()\n\n # Create a parser for db initialization.\n parser_init = subparsers.add_parser(\"init\", help=\"see `init -h`\")\n parser_init.set_defaults(handler=command_db_init)\n\n # Create a parser for migration.\n parser_migration = subparsers.add_parser(\"migrate\", help=\"Updates database schema.\")\n parser_migration.set_defaults(handler=command_migrate)\n\n # Create a parser for user creation.\n parser_create_user = subparsers.add_parser(\"createuser\", help=\"see `createuser -h`\")\n parser_create_user.add_argument(\"--username\", type=str, default=\"admin\", help=\"admin username\")\n parser_create_user.add_argument(\"--password\", type=str, default=\"password\", help=\"admin password\")\n parser_create_user.add_argument(\"--email\", type=str, default=\"[email protected]\", help=\"admin email\")\n parser_create_user.set_defaults(handler=command_user_create)\n\n # Create a parser for web server.\n parser_server = subparsers.add_parser(\"webserver\", help=\"see `webserver -h`\")\n parser_server.add_argument(\"--port\", type=int, default=8000, help=\"port number\")\n 
parser_server.add_argument(\"--workers\", type=int, default=number_of_workers(), help=\"the number of workers\")\n parser_server.set_defaults(handler=command_run_webserver)\n\n # Create a parser for task queue.\n parser_queue = subparsers.add_parser(\"task\", help=\"see `task -h`\")\n parser_queue.add_argument(\"--concurrency\", type=int, default=2, help=\"concurrency\")\n parser_queue.set_defaults(handler=command_run_task_queue)\n\n # Create a parser for help.\n parser_help = subparsers.add_parser(\"help\", help=\"see `help -h`\")\n parser_help.add_argument(\"command\", help=\"command name which help is shown\")\n parser_help.set_defaults(handler=command_help)\n\n # Dispatch handler.\n args = parser.parse_args()\n if hasattr(args, \"handler\"):\n args.handler(args)\n else:\n # If specified unknown command, show help.\n parser.print_help()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "backend/cli.py"}]}
| 1,990 | 218 |
gh_patches_debug_14081
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-439
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Double check conditional HTTP implementation
The conditional HTTP implementation doesn't check the status code of the response at all. Determine if it should, and if it should update it to do the right thing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/cache/http.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import collections.abc
14 import functools
15
16 from pyramid.tweens import EXCVIEW
17
18
19 def _add_vary_callback(*varies):
20 def inner(request, response):
21 vary = set(response.vary if response.vary is not None else [])
22 vary |= set(varies)
23 response.vary = vary
24 return inner
25
26
27 def add_vary(*varies):
28 def inner(view):
29 @functools.wraps(view)
30 def wrapped(context, request):
31 request.add_response_callback(_add_vary_callback(*varies))
32 return view(context, request)
33 return wrapped
34 return inner
35
36
37 def cache_control(seconds, public=True):
38 def inner(view):
39 @functools.wraps(view)
40 def wrapped(context, request):
41 response = view(context, request)
42
43 if not request.registry.settings.get(
44 "pyramid.prevent_http_cache", False):
45 if seconds:
46 if public:
47 response.cache_control.public = True
48 else:
49 response.cache_control.private = True
50
51 response.cache_control.max_age = seconds
52 else:
53 response.cache_control.no_cache = True
54 response.cache_control.no_store = True
55 response.cache_control.must_revalidate = True
56
57 return response
58 return wrapped
59 return inner
60
61
62 def conditional_http_tween_factory(handler, registry):
63 def conditional_http_tween(request):
64 response = handler(request)
65
66 # If the Last-Modified header has been set, we want to enable the
67 # conditional response processing.
68 if response.last_modified is not None:
69 response.conditional_response = True
70
71 # We want to only enable the conditional machinery if either we
72 # were given an explicit ETag header by the view or we have a
73 # buffered response and can generate the ETag header ourself.
74 if response.etag is not None:
75 response.conditional_response = True
76 elif (isinstance(response.app_iter, collections.abc.Sequence) and
77 len(response.app_iter) == 1):
78 response.conditional_response = True
79 response.md5_etag()
80
81 return response
82 return conditional_http_tween
83
84
85 def includeme(config):
86 config.add_tween(
87 "warehouse.cache.http.conditional_http_tween_factory",
88 under=EXCVIEW,
89 )
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/warehouse/cache/http.py b/warehouse/cache/http.py
--- a/warehouse/cache/http.py
+++ b/warehouse/cache/http.py
@@ -75,8 +75,13 @@
response.conditional_response = True
elif (isinstance(response.app_iter, collections.abc.Sequence) and
len(response.app_iter) == 1):
- response.conditional_response = True
- response.md5_etag()
+ # We can only reasonably implement automatic ETags on 200 responses
+ # to GET or HEAD requests. The subtles of doing it in other cases
+ # are too hard to get right.
+ if (request.method in {"GET", "HEAD"} and
+ response.status_code == 200):
+ response.conditional_response = True
+ response.md5_etag()
return response
return conditional_http_tween
|
{"golden_diff": "diff --git a/warehouse/cache/http.py b/warehouse/cache/http.py\n--- a/warehouse/cache/http.py\n+++ b/warehouse/cache/http.py\n@@ -75,8 +75,13 @@\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n- response.conditional_response = True\n- response.md5_etag()\n+ # We can only reasonably implement automatic ETags on 200 responses\n+ # to GET or HEAD requests. The subtles of doing it in other cases\n+ # are too hard to get right.\n+ if (request.method in {\"GET\", \"HEAD\"} and\n+ response.status_code == 200):\n+ response.conditional_response = True\n+ response.md5_etag()\n \n return response\n return conditional_http_tween\n", "issue": "Double check conditional HTTP implementation\nThe conditional HTTP implementation doesn't check the status code of the response at all. Determine if it should, and if it should update it to do the right thing.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections.abc\nimport functools\n\nfrom pyramid.tweens import EXCVIEW\n\n\ndef _add_vary_callback(*varies):\n def inner(request, response):\n vary = set(response.vary if response.vary is not None else [])\n vary |= set(varies)\n response.vary = vary\n return inner\n\n\ndef add_vary(*varies):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request.add_response_callback(_add_vary_callback(*varies))\n return view(context, request)\n return wrapped\n return inner\n\n\ndef cache_control(seconds, public=True):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n response = view(context, request)\n\n if not request.registry.settings.get(\n \"pyramid.prevent_http_cache\", False):\n if seconds:\n if public:\n response.cache_control.public = True\n else:\n response.cache_control.private = True\n\n response.cache_control.max_age = seconds\n else:\n response.cache_control.no_cache = True\n response.cache_control.no_store = True\n response.cache_control.must_revalidate = True\n\n return response\n return wrapped\n return inner\n\n\ndef conditional_http_tween_factory(handler, registry):\n def conditional_http_tween(request):\n response = handler(request)\n\n # If the Last-Modified header has been set, we want to enable the\n # conditional response processing.\n if response.last_modified is not None:\n response.conditional_response = True\n\n # We want to only enable the conditional machinery if either we\n # were given an explicit ETag header by the view or we have a\n # buffered response and can generate the ETag header ourself.\n if response.etag is not None:\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n response.conditional_response = True\n response.md5_etag()\n\n return response\n return conditional_http_tween\n\n\ndef includeme(config):\n config.add_tween(\n \"warehouse.cache.http.conditional_http_tween_factory\",\n 
under=EXCVIEW,\n )\n", "path": "warehouse/cache/http.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections.abc\nimport functools\n\nfrom pyramid.tweens import EXCVIEW\n\n\ndef _add_vary_callback(*varies):\n def inner(request, response):\n vary = set(response.vary if response.vary is not None else [])\n vary |= set(varies)\n response.vary = vary\n return inner\n\n\ndef add_vary(*varies):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n request.add_response_callback(_add_vary_callback(*varies))\n return view(context, request)\n return wrapped\n return inner\n\n\ndef cache_control(seconds, public=True):\n def inner(view):\n @functools.wraps(view)\n def wrapped(context, request):\n response = view(context, request)\n\n if not request.registry.settings.get(\n \"pyramid.prevent_http_cache\", False):\n if seconds:\n if public:\n response.cache_control.public = True\n else:\n response.cache_control.private = True\n\n response.cache_control.max_age = seconds\n else:\n response.cache_control.no_cache = True\n response.cache_control.no_store = True\n response.cache_control.must_revalidate = True\n\n return response\n return wrapped\n return inner\n\n\ndef conditional_http_tween_factory(handler, registry):\n def conditional_http_tween(request):\n response = handler(request)\n\n # If the Last-Modified header has been set, we want to enable the\n # conditional response processing.\n if response.last_modified is not None:\n response.conditional_response = True\n\n # We want to only enable the conditional machinery if either we\n # were given an explicit ETag header by the view or we have a\n # buffered response and can generate the ETag header ourself.\n if response.etag is not None:\n response.conditional_response = True\n elif (isinstance(response.app_iter, collections.abc.Sequence) and\n len(response.app_iter) == 1):\n # We can only reasonably implement automatic ETags on 200 responses\n # to GET or HEAD requests. The subtles of doing it in other cases\n # are too hard to get right.\n if (request.method in {\"GET\", \"HEAD\"} and\n response.status_code == 200):\n response.conditional_response = True\n response.md5_etag()\n\n return response\n return conditional_http_tween\n\n\ndef includeme(config):\n config.add_tween(\n \"warehouse.cache.http.conditional_http_tween_factory\",\n under=EXCVIEW,\n )\n", "path": "warehouse/cache/http.py"}]}
| 1,076 | 198 |
gh_patches_debug_35947
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1099
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invert constraints
I can imagine situations where it would be useful to be able to invert constraints so that they do the opposite of what they are designed to do.
```python
from opsdroid.skill import Skill
from opsdroid.matchers import match_regex
from opsdroid.constraints import constrain_users
class MySkill(Skill):
@match_regex(r'hi')
@constrain_users(['alice', 'bob'], invert=True)
async def hello(self, message):
"""Says 'Hey' to anyone EXCEPT 'alice' and 'bob'."""
await message.respond('Hey')
```
--- END ISSUE ---
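One plausible way to support such an `invert` flag is to wrap each constraint callback in a small negating helper before it is appended to the skill's constraints. The sketch below only assumes `add_skill_attributes` from `opsdroid.helper`, which the constraints module already uses; the `invert_wrapper` name and the exact wiring are illustrative rather than a confirmed implementation.
```python
from functools import wraps

from opsdroid.helper import add_skill_attributes


def invert_wrapper(func):
    """Return a callback that yields the negation of the wrapped constraint."""
    @wraps(func)
    def inverted(*args, **kwargs):
        return not func(*args, **kwargs)
    return inverted


def constrain_users(users, invert=False):
    """User constraint decorator with an optional invert flag (sketch)."""
    def constraint_decorator(func):
        def constraint_callback(message, users=users):
            # True when the message author is in the allowed list
            return message.user in users

        func = add_skill_attributes(func)
        if invert:
            # Flip the result so the skill matches everyone EXCEPT these users
            constraint_callback = invert_wrapper(constraint_callback)
        func.constraints.append(constraint_callback)
        return func
    return constraint_decorator
```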
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/constraints.py`
Content:
```
1 """Decorator functions to use when creating skill modules.
2
3 These decorators are for specifying when a skill should not be called despite
4 having a matcher which matches the current message.
5 """
6
7 import logging
8
9 from opsdroid.helper import add_skill_attributes
10
11
12 _LOGGER = logging.getLogger(__name__)
13
14
15 def constrain_rooms(rooms):
16 """Return room constraint decorator."""
17
18 def constraint_decorator(func):
19 """Add room constraint to skill."""
20
21 def constraint_callback(message, rooms=rooms):
22 """Check if the room is correct."""
23 return message.target in rooms
24
25 func = add_skill_attributes(func)
26 func.constraints.append(constraint_callback)
27 return func
28
29 return constraint_decorator
30
31
32 def constrain_users(users):
33 """Return user constraint decorator."""
34
35 def constraint_decorator(func):
36 """Add user constraint to skill."""
37
38 def constraint_callback(message, users=users):
39 """Check if the user is correct."""
40 return message.user in users
41
42 func = add_skill_attributes(func)
43 func.constraints.append(constraint_callback)
44 return func
45
46 return constraint_decorator
47
48
49 def constrain_connectors(connectors):
50 """Return connector constraint decorator."""
51
52 def constraint_decorator(func):
53 """Add connectors constraint to skill."""
54
55 def constraint_callback(message, connectors=connectors):
56 """Check if the connectors is correct."""
57 return message.connector and (message.connector.name in connectors)
58
59 func = add_skill_attributes(func)
60 func.constraints.append(constraint_callback)
61 return func
62
63 return constraint_decorator
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/opsdroid/constraints.py b/opsdroid/constraints.py
--- a/opsdroid/constraints.py
+++ b/opsdroid/constraints.py
@@ -5,6 +5,7 @@
"""
import logging
+from functools import wraps
from opsdroid.helper import add_skill_attributes
@@ -12,7 +13,17 @@
_LOGGER = logging.getLogger(__name__)
-def constrain_rooms(rooms):
+def invert_wrapper(func):
+ """Inverts the result of a function."""
+
+ @wraps(func)
+ def inverted_func(*args, **kwargs):
+ return not func(*args, **kwargs)
+
+ return inverted_func
+
+
+def constrain_rooms(rooms, invert=False):
"""Return room constraint decorator."""
def constraint_decorator(func):
@@ -23,13 +34,15 @@
return message.target in rooms
func = add_skill_attributes(func)
+ if invert:
+ constraint_callback = invert_wrapper(constraint_callback)
func.constraints.append(constraint_callback)
return func
return constraint_decorator
-def constrain_users(users):
+def constrain_users(users, invert=False):
"""Return user constraint decorator."""
def constraint_decorator(func):
@@ -40,13 +53,15 @@
return message.user in users
func = add_skill_attributes(func)
+ if invert:
+ constraint_callback = invert_wrapper(constraint_callback)
func.constraints.append(constraint_callback)
return func
return constraint_decorator
-def constrain_connectors(connectors):
+def constrain_connectors(connectors, invert=False):
"""Return connector constraint decorator."""
def constraint_decorator(func):
@@ -57,6 +72,8 @@
return message.connector and (message.connector.name in connectors)
func = add_skill_attributes(func)
+ if invert:
+ constraint_callback = invert_wrapper(constraint_callback)
func.constraints.append(constraint_callback)
return func
|
{"golden_diff": "diff --git a/opsdroid/constraints.py b/opsdroid/constraints.py\n--- a/opsdroid/constraints.py\n+++ b/opsdroid/constraints.py\n@@ -5,6 +5,7 @@\n \"\"\"\n \n import logging\n+from functools import wraps\n \n from opsdroid.helper import add_skill_attributes\n \n@@ -12,7 +13,17 @@\n _LOGGER = logging.getLogger(__name__)\n \n \n-def constrain_rooms(rooms):\n+def invert_wrapper(func):\n+ \"\"\"Inverts the result of a function.\"\"\"\n+\n+ @wraps(func)\n+ def inverted_func(*args, **kwargs):\n+ return not func(*args, **kwargs)\n+\n+ return inverted_func\n+\n+\n+def constrain_rooms(rooms, invert=False):\n \"\"\"Return room constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -23,13 +34,15 @@\n return message.target in rooms\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n \n return constraint_decorator\n \n \n-def constrain_users(users):\n+def constrain_users(users, invert=False):\n \"\"\"Return user constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -40,13 +53,15 @@\n return message.user in users\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n \n return constraint_decorator\n \n \n-def constrain_connectors(connectors):\n+def constrain_connectors(connectors, invert=False):\n \"\"\"Return connector constraint decorator.\"\"\"\n \n def constraint_decorator(func):\n@@ -57,6 +72,8 @@\n return message.connector and (message.connector.name in connectors)\n \n func = add_skill_attributes(func)\n+ if invert:\n+ constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n", "issue": "Invert constraints\nI can imagine situations where it would be useful to be able to invert constraints so that they do the opposite of what they are designed to do.\r\n\r\n```python\r\nfrom opsdroid.skill import Skill\r\nfrom opsdroid.matchers import match_regex\r\nfrom opsdroid.constraints import constrain_users\r\n\r\nclass MySkill(Skill):\r\n\r\n @match_regex(r'hi')\r\n @constrain_users(['alice', 'bob'], invert=True)\r\n async def hello(self, message):\r\n \"\"\"Says 'Hey' to anyone EXCEPT 'alice' and 'bob'.\"\"\"\r\n await message.respond('Hey')\r\n```\n", "before_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\n\nThese decorators are for specifying when a skill should not be called despite\nhaving a matcher which matches the current message.\n\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import add_skill_attributes\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef constrain_rooms(rooms):\n \"\"\"Return room constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add room constraint to skill.\"\"\"\n\n def constraint_callback(message, rooms=rooms):\n \"\"\"Check if the room is correct.\"\"\"\n return message.target in rooms\n\n func = add_skill_attributes(func)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_users(users):\n \"\"\"Return user constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add user constraint to skill.\"\"\"\n\n def constraint_callback(message, users=users):\n \"\"\"Check if the user is correct.\"\"\"\n return message.user in users\n\n func = add_skill_attributes(func)\n 
func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_connectors(connectors):\n \"\"\"Return connector constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add connectors constraint to skill.\"\"\"\n\n def constraint_callback(message, connectors=connectors):\n \"\"\"Check if the connectors is correct.\"\"\"\n return message.connector and (message.connector.name in connectors)\n\n func = add_skill_attributes(func)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n", "path": "opsdroid/constraints.py"}], "after_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\n\nThese decorators are for specifying when a skill should not be called despite\nhaving a matcher which matches the current message.\n\"\"\"\n\nimport logging\nfrom functools import wraps\n\nfrom opsdroid.helper import add_skill_attributes\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef invert_wrapper(func):\n \"\"\"Inverts the result of a function.\"\"\"\n\n @wraps(func)\n def inverted_func(*args, **kwargs):\n return not func(*args, **kwargs)\n\n return inverted_func\n\n\ndef constrain_rooms(rooms, invert=False):\n \"\"\"Return room constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add room constraint to skill.\"\"\"\n\n def constraint_callback(message, rooms=rooms):\n \"\"\"Check if the room is correct.\"\"\"\n return message.target in rooms\n\n func = add_skill_attributes(func)\n if invert:\n constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_users(users, invert=False):\n \"\"\"Return user constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add user constraint to skill.\"\"\"\n\n def constraint_callback(message, users=users):\n \"\"\"Check if the user is correct.\"\"\"\n return message.user in users\n\n func = add_skill_attributes(func)\n if invert:\n constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n\n\ndef constrain_connectors(connectors, invert=False):\n \"\"\"Return connector constraint decorator.\"\"\"\n\n def constraint_decorator(func):\n \"\"\"Add connectors constraint to skill.\"\"\"\n\n def constraint_callback(message, connectors=connectors):\n \"\"\"Check if the connectors is correct.\"\"\"\n return message.connector and (message.connector.name in connectors)\n\n func = add_skill_attributes(func)\n if invert:\n constraint_callback = invert_wrapper(constraint_callback)\n func.constraints.append(constraint_callback)\n return func\n\n return constraint_decorator\n", "path": "opsdroid/constraints.py"}]}
| 825 | 426 |
gh_patches_debug_8106
|
rasdani/github-patches
|
git_diff
|
aws__aws-sam-cli-815
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Region from Env Vars or profile is not respected for ALL commands except package and deploy
The region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add an explicit default on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set, which prevents boto3 from resolving credentials and the region on its own; that resolution is what sets the correct values inside the Docker container.
Current workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services.
Fix is in #811
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samcli/cli/options.py`
Content:
```
1 """
2 This file contains common CLI options common to all commands. As we add more commands, this will
3 become a repository of options that other commands could use when needed.
4 """
5
6 import click
7
8 from .context import Context
9
10
11 def debug_option(f):
12 """
13 Configures --debug option for CLI
14
15 :param f: Callback Function to be passed to Click
16 """
17 def callback(ctx, param, value):
18 state = ctx.ensure_object(Context)
19 state.debug = value
20 return value
21
22 return click.option('--debug',
23 expose_value=False,
24 is_flag=True,
25 envvar="SAM_DEBUG",
26 help='Turn on debug logging to print debug message generated by SAM CLI.',
27 callback=callback)(f)
28
29
30 def region_option(f):
31 """
32 Configures --region option for CLI
33
34 :param f: Callback Function to be passed to Click
35 """
36 def callback(ctx, param, value):
37 state = ctx.ensure_object(Context)
38 state.region = value
39 return value
40
41 return click.option('--region',
42 expose_value=False,
43 help='Set the AWS Region of the service (e.g. us-east-1).',
44 default='us-east-1',
45 callback=callback)(f)
46
47
48 def profile_option(f):
49 """
50 Configures --profile option for CLI
51
52 :param f: Callback Function to be passed to Click
53 """
54 def callback(ctx, param, value):
55 state = ctx.ensure_object(Context)
56 state.profile = value
57 return value
58
59 return click.option('--profile',
60 expose_value=False,
61 help='Select a specific profile from your credential file to get AWS credentials.',
62 callback=callback)(f)
63
```
Path: `samcli/__init__.py`
Content:
```
1 """
2 SAM CLI version
3 """
4
5 __version__ = '0.8.0'
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/samcli/__init__.py b/samcli/__init__.py
--- a/samcli/__init__.py
+++ b/samcli/__init__.py
@@ -2,4 +2,4 @@
SAM CLI version
"""
-__version__ = '0.8.0'
+__version__ = '0.8.1'
diff --git a/samcli/cli/options.py b/samcli/cli/options.py
--- a/samcli/cli/options.py
+++ b/samcli/cli/options.py
@@ -41,7 +41,6 @@
return click.option('--region',
expose_value=False,
help='Set the AWS Region of the service (e.g. us-east-1).',
- default='us-east-1',
callback=callback)(f)
|
{"golden_diff": "diff --git a/samcli/__init__.py b/samcli/__init__.py\n--- a/samcli/__init__.py\n+++ b/samcli/__init__.py\n@@ -2,4 +2,4 @@\n SAM CLI version\n \"\"\"\n \n-__version__ = '0.8.0'\n+__version__ = '0.8.1'\ndiff --git a/samcli/cli/options.py b/samcli/cli/options.py\n--- a/samcli/cli/options.py\n+++ b/samcli/cli/options.py\n@@ -41,7 +41,6 @@\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n- default='us-east-1',\n callback=callback)(f)\n", "issue": "Region from Env Vars or profile are not respected for ALL commands but package and deploy\nThe region option in SAM CLI was changed between 0.7.0 and 0.8.0 to add the default explicitly on the [command line option](https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/cli/options.py#L44). This causes the region to always be set and not allow boto3 to do its resolving of credentials and regions, which is used to set the correct values into the docker container.\r\n\r\nCurrent workaround is to explicitly set the region when invoking a function or interacting with commands that interact with AWS Services.\r\n\r\nFix is in #811\n", "before_files": [{"content": "\"\"\"\nThis file contains common CLI options common to all commands. As we add more commands, this will\nbecome a repository of options that other commands could use when needed.\n\"\"\"\n\nimport click\n\nfrom .context import Context\n\n\ndef debug_option(f):\n \"\"\"\n Configures --debug option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.debug = value\n return value\n\n return click.option('--debug',\n expose_value=False,\n is_flag=True,\n envvar=\"SAM_DEBUG\",\n help='Turn on debug logging to print debug message generated by SAM CLI.',\n callback=callback)(f)\n\n\ndef region_option(f):\n \"\"\"\n Configures --region option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n default='us-east-1',\n callback=callback)(f)\n\n\ndef profile_option(f):\n \"\"\"\n Configures --profile option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.profile = value\n return value\n\n return click.option('--profile',\n expose_value=False,\n help='Select a specific profile from your credential file to get AWS credentials.',\n callback=callback)(f)\n", "path": "samcli/cli/options.py"}, {"content": "\"\"\"\nSAM CLI version\n\"\"\"\n\n__version__ = '0.8.0'\n", "path": "samcli/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis file contains common CLI options common to all commands. 
As we add more commands, this will\nbecome a repository of options that other commands could use when needed.\n\"\"\"\n\nimport click\n\nfrom .context import Context\n\n\ndef debug_option(f):\n \"\"\"\n Configures --debug option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.debug = value\n return value\n\n return click.option('--debug',\n expose_value=False,\n is_flag=True,\n envvar=\"SAM_DEBUG\",\n help='Turn on debug logging to print debug message generated by SAM CLI.',\n callback=callback)(f)\n\n\ndef region_option(f):\n \"\"\"\n Configures --region option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option('--region',\n expose_value=False,\n help='Set the AWS Region of the service (e.g. us-east-1).',\n callback=callback)(f)\n\n\ndef profile_option(f):\n \"\"\"\n Configures --profile option for CLI\n\n :param f: Callback Function to be passed to Click\n \"\"\"\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.profile = value\n return value\n\n return click.option('--profile',\n expose_value=False,\n help='Select a specific profile from your credential file to get AWS credentials.',\n callback=callback)(f)\n", "path": "samcli/cli/options.py"}, {"content": "\"\"\"\nSAM CLI version\n\"\"\"\n\n__version__ = '0.8.1'\n", "path": "samcli/__init__.py"}]}
| 921 | 176 |
gh_patches_debug_29338
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-498
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bar_plot_stacked example unfinished?
**Problem Description**
The example in https://github.com/enthought/chaco/blob/master/examples/demo/basic/bar_plot_stacked.py
doesn't do any stacking.
**Expected behavior:**
If the bars were really stacked, I would expect the sum of all bars to reach (10+5+2) * array([1,2,3,4,5]) (the sum of all values) respectively. Instead, I am getting the following:

Looking at the code, it doesn't use the bar plot's `starting_value` as expected, so the demo doesn't even seem to try to do the right thing.
--- END ISSUE ---
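For reference, stacking with Chaco bar plots is typically done by plotting cumulative sums and giving each upper series the previous cumulative series as its `starting_value`. The sketch below is illustrative (the variable names are made up) and assumes `ArrayDataSource` from `chaco.api` plus the `starting_value` keyword on `Plot.plot` for bar renderers.
```python
import numpy
from chaco.api import ArrayDataSource, ArrayPlotData, Plot

index = numpy.array([1, 2, 3, 4, 5])
series_a = index * 10
series_b_top = series_a + index * 5        # cumulative height after series_b
series_c_top = series_b_top + index * 2    # cumulative height after series_c

plot_data = ArrayPlotData(index=index, a=series_a, b=series_b_top, c=series_c_top)
plot = Plot(plot_data)
plot.plot(('index', 'a'), type='bar', bar_width=0.8, color='auto')
plot.plot(('index', 'b'), type='bar', bar_width=0.8, color='auto',
          starting_value=ArrayDataSource(series_a))       # spans a .. a+b
plot.plot(('index', 'c'), type='bar', bar_width=0.8, color='auto',
          starting_value=ArrayDataSource(series_b_top))   # spans a+b .. a+b+c
```
With those starting values the topmost bars reach (10+5+2) * index, matching the total the issue expects.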
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/demo/basic/bar_plot_stacked.py`
Content:
```
1 """
2 Simple example of a stacked bar chart
3 """
4
5 # Major library imports
6 import numpy
7
8 # Enthought library imports
9 from enable.api import ComponentEditor
10 from traits.api import HasTraits, Instance
11 from traitsui.api import UItem, View
12
13 # Chaco imports
14 from chaco.api import LabelAxis, Plot, ArrayPlotData
15
16 class PlotExample(HasTraits):
17 plot = Instance(Plot)
18 traits_view = View(UItem('plot', editor=ComponentEditor()),
19 width=400, height=400, resizable=True,
20 )
21
22 def __init__(self, index, series_a, series_b, series_c, **kw):
23 super(PlotExample, self).__init__(**kw)
24
25 plot_data = ArrayPlotData(index=index)
26 plot_data.set_data('series_a', series_a)
27 plot_data.set_data('series_b', series_b)
28 plot_data.set_data('series_c', series_c)
29 self.plot = Plot(plot_data)
30 self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')
31 self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')
32 self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')
33
34 # set the plot's value range to 0, otherwise it may pad too much
35 self.plot.value_range.low = 0
36
37 # replace the index values with some nicer labels
38 label_axis = LabelAxis(self.plot, orientation='bottom',
39 title='Months',
40 positions = list(range(1, 10)),
41 labels = ['jan', 'feb', 'march', 'april', 'may'],
42 small_haxis_style=True)
43
44 self.plot.underlays.remove(self.plot.index_axis)
45 self.plot.index_axis = label_axis
46 self.plot.underlays.append(label_axis)
47
48
49 index = numpy.array([1,2,3,4,5])
50 demo = PlotExample(index, index*10, index*5, index*2)
51
52 if __name__ == "__main__":
53 demo.configure_traits()
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/demo/basic/bar_plot_stacked.py b/examples/demo/basic/bar_plot_stacked.py
--- a/examples/demo/basic/bar_plot_stacked.py
+++ b/examples/demo/basic/bar_plot_stacked.py
@@ -11,7 +11,7 @@
from traitsui.api import UItem, View
# Chaco imports
-from chaco.api import LabelAxis, Plot, ArrayPlotData
+from chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource
class PlotExample(HasTraits):
plot = Instance(Plot)
@@ -22,14 +22,18 @@
def __init__(self, index, series_a, series_b, series_c, **kw):
super(PlotExample, self).__init__(**kw)
+ # Stack them up
+ series_c = series_c + series_b + series_a
+ series_b = series_b + series_a
+
plot_data = ArrayPlotData(index=index)
plot_data.set_data('series_a', series_a)
plot_data.set_data('series_b', series_b)
plot_data.set_data('series_c', series_c)
self.plot = Plot(plot_data)
self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')
- self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')
- self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')
+ self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))
+ self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))
# set the plot's value range to 0, otherwise it may pad too much
self.plot.value_range.low = 0
|
{"golden_diff": "diff --git a/examples/demo/basic/bar_plot_stacked.py b/examples/demo/basic/bar_plot_stacked.py\n--- a/examples/demo/basic/bar_plot_stacked.py\n+++ b/examples/demo/basic/bar_plot_stacked.py\n@@ -11,7 +11,7 @@\n from traitsui.api import UItem, View\n \n # Chaco imports\n-from chaco.api import LabelAxis, Plot, ArrayPlotData\n+from chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource\n \n class PlotExample(HasTraits):\n plot = Instance(Plot)\n@@ -22,14 +22,18 @@\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n \n+ # Stack them up\n+ series_c = series_c + series_b + series_a\n+ series_b = series_b + series_a\n+\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n- self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')\n- self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')\n+ self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))\n+ self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))\n \n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n", "issue": "bar_plot_stacked example unfinished?\n**Problem Description**\r\nThe example in https://github.com/enthought/chaco/blob/master/examples/demo/basic/bar_plot_stacked.py\r\ndoesn't do any stacking.\r\n\r\n**Expected behavior:**\r\nI the bars were really stacked, I would expect the sum of all bars to reach (10+5+2) * array([1,2,3,4,5]) (the sum of all values) respectively. 
Instead, I am getting the following:\r\n\r\n\r\nLooking at the code, it doesn't use the bar plot's `starting_value` as expected, so the demo doesn't even seem to try to do the right thing.\r\n\n", "before_files": [{"content": "\"\"\"\nSimple example of a stacked bar chart\n\"\"\"\n\n# Major library imports\nimport numpy\n\n# Enthought library imports\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import UItem, View\n\n# Chaco imports\nfrom chaco.api import LabelAxis, Plot, ArrayPlotData\n\nclass PlotExample(HasTraits):\n plot = Instance(Plot)\n traits_view = View(UItem('plot', editor=ComponentEditor()),\n width=400, height=400, resizable=True, \n )\n\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto')\n\n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n\n # replace the index values with some nicer labels\n label_axis = LabelAxis(self.plot, orientation='bottom',\n title='Months',\n positions = list(range(1, 10)),\n labels = ['jan', 'feb', 'march', 'april', 'may'],\n small_haxis_style=True)\n\n self.plot.underlays.remove(self.plot.index_axis)\n self.plot.index_axis = label_axis\n self.plot.underlays.append(label_axis)\n\n\nindex = numpy.array([1,2,3,4,5])\ndemo = PlotExample(index, index*10, index*5, index*2)\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/demo/basic/bar_plot_stacked.py"}], "after_files": [{"content": "\"\"\"\nSimple example of a stacked bar chart\n\"\"\"\n\n# Major library imports\nimport numpy\n\n# Enthought library imports\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance\nfrom traitsui.api import UItem, View\n\n# Chaco imports\nfrom chaco.api import LabelAxis, Plot, ArrayPlotData, ArrayDataSource\n\nclass PlotExample(HasTraits):\n plot = Instance(Plot)\n traits_view = View(UItem('plot', editor=ComponentEditor()),\n width=400, height=400, resizable=True, \n )\n\n def __init__(self, index, series_a, series_b, series_c, **kw):\n super(PlotExample, self).__init__(**kw)\n\n # Stack them up\n series_c = series_c + series_b + series_a\n series_b = series_b + series_a\n\n plot_data = ArrayPlotData(index=index)\n plot_data.set_data('series_a', series_a)\n plot_data.set_data('series_b', series_b)\n plot_data.set_data('series_c', series_c)\n self.plot = Plot(plot_data)\n self.plot.plot(('index', 'series_a'), type='bar', bar_width=0.8, color='auto')\n self.plot.plot(('index', 'series_b'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_a))\n self.plot.plot(('index', 'series_c'), type='bar', bar_width=0.8, color='auto', starting_value=ArrayDataSource(series_b))\n\n # set the plot's value range to 0, otherwise it may pad too much\n self.plot.value_range.low = 0\n\n # replace the index values with some nicer labels\n label_axis = LabelAxis(self.plot, orientation='bottom',\n title='Months',\n positions = list(range(1, 10)),\n labels = ['jan', 'feb', 'march', 'april', 'may'],\n 
small_haxis_style=True)\n\n self.plot.underlays.remove(self.plot.index_axis)\n self.plot.index_axis = label_axis\n self.plot.underlays.append(label_axis)\n\n\nindex = numpy.array([1,2,3,4,5])\ndemo = PlotExample(index, index*10, index*5, index*2)\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/demo/basic/bar_plot_stacked.py"}]}
| 1,055 | 433 |
gh_patches_debug_44
|
rasdani/github-patches
|
git_diff
|
zestedesavoir__zds-site-6179
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove the last remnants of Travis
**Bug description**
It looks like a few leftover bits of Travis are still around:
* https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py
* https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66
**Expected behavior**
As far as I can tell, Travis is no longer used, so all of this should go away.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/settings/travis_fixture.py`
Content:
```
1 from .ci_test import *
2
3 LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = {
4 "level": "INFO",
5 "handlers": ["console"],
6 }
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py
deleted file mode 100644
--- a/zds/settings/travis_fixture.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .ci_test import *
-
-LOGGING["loggers"]["zds.utils.templatetags.emarkdown"] = {
- "level": "INFO",
- "handlers": ["console"],
-}
|
{"golden_diff": "diff --git a/zds/settings/travis_fixture.py b/zds/settings/travis_fixture.py\ndeleted file mode 100644\n--- a/zds/settings/travis_fixture.py\n+++ /dev/null\n@@ -1,6 +0,0 @@\n-from .ci_test import *\n-\n-LOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n- \"level\": \"INFO\",\n- \"handlers\": [\"console\"],\n-}\n", "issue": "Retirer les dernier restes de Travis\n**Description du bug**\r\n\r\nJ'ai l'impression qu'il reste quelques miettes de Travis :\r\n\r\n* https://github.com/zestedesavoir/zds-site/blob/dev/zds/settings/travis_fixture.py\r\n* https://github.com/zestedesavoir/zds-site/blob/fe854d9b006e5ca500a911c48e3b25b11154d926/scripts/define_function.sh#L13-L66\r\n\r\n**Comportement attendu**\r\n\r\nA priori, on ne se sert plus de Travis, donc tout \u00e7a devrait dispara\u00eetre.\r\n\n", "before_files": [{"content": "from .ci_test import *\n\nLOGGING[\"loggers\"][\"zds.utils.templatetags.emarkdown\"] = {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n}\n", "path": "zds/settings/travis_fixture.py"}], "after_files": [{"content": null, "path": "zds/settings/travis_fixture.py"}]}
| 463 | 102 |
gh_patches_debug_10578
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-1167
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
We should disable the remap_user_ids command for the time being
We have changes on Staging that need to make their way to Production soon.
The `remap_user_ids` command is not working properly (#1165).
We should disable the command for now so we can deploy recent changes to Production.
--- END ISSUE ---
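A simple, easily reversible way to take a Django management command out of service is to raise at the top of its `handle()` method and leave the rest of the module untouched. The sketch below is generic; the exact message and placement are assumptions rather than the project's final wording.
```python
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    def handle(self, *args, **kwargs) -> None:
        # Abort immediately so the command cannot corrupt user assignments
        raise NotImplementedError(
            "remap_user_ids is temporarily disabled until its bugs are resolved."
        )
        # The original reassign_sources() / reassign_chants() calls would
        # remain below, unreachable until the raise is removed.
```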
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/management/commands/remap_user_ids.py`
Content:
```
1 from main_app.models import Source, Chant
2 from django.contrib.auth import get_user_model
3 from django.core.management.base import BaseCommand
4 from sys import stdout
5 from django.db.models.query import QuerySet
6 from typing import Optional
7
8 User = get_user_model()
9
10 USER_ID_MAPPING = {
11 # Fake user accounts with sequential numbering were created on NewCantus
12 # for OldCantus Indexers. In the time since user accounts were
13 # programmatically synced, new user accounts were created on OldCantus,
14 # which duplicated these IDs. Then, we manually created new user accounts
15 # on NewCantus for these newer users, with new IDs that don't match those
16 # in OldCantus.
17 #
18 # In this dictionary:
19 # - Keys represent the IDs of users recently created on OldCantus, which collide
20 # with those of NewCantus Indexers
21 # - Values represent the IDs of manually-created users in NewCantus.
22 251610: 251660,
23 251611: 251661,
24 251612: 251662,
25 251613: 251663,
26 251614: 251664,
27 251616: 251665,
28 251617: 251666,
29 251618: 251667,
30 251619: 251668,
31 251620: 251669,
32 251621: 251670,
33 251622: 251671,
34 251623: 251672,
35 251624: 251673,
36 251625: 251674,
37 251626: 251657,
38 251627: 251675,
39 251630: 251676,
40 251632: 251678,
41 251633: 251679,
42 251638: 251656,
43 251639: 251680,
44 251640: 251681,
45 251641: 251682,
46 251642: 251683,
47 251643: 251684,
48 251645: 251685,
49 }
50
51
52 def reassign_sources() -> None:
53 CHUNK_SIZE = 1_000
54 sources: QuerySet[Source] = Source.objects.all()
55 sources_count: int = sources.count()
56 start_index: int = 0
57 while start_index <= sources_count:
58 stdout.write(f"processing chunk with {start_index=}\n")
59 chunk: QuerySet[Source] = sources[start_index : start_index + CHUNK_SIZE]
60 for source in chunk:
61 old_creator: Optional[User] = source.created_by
62
63 updated_id: Optional[int] = None
64 try:
65 updated_id: int = USER_ID_MAPPING[old_creator.id]
66 except (
67 KeyError, # old_creator.id not in USER_ID_MAPPING
68 AttributeError, # old_creator is None
69 ):
70 pass
71
72 if updated_id is None:
73 # user ID doesn't need to be remapped
74 continue
75
76 updated_creator: Optional[User] = None
77 try:
78 updated_creator = User.objects.get(id=updated_id)
79 except (
80 User.DoesNotExist,
81 AttributeError,
82 ):
83 pass
84
85 source.created_by = updated_creator
86 source.save()
87 start_index += CHUNK_SIZE
88
89
90 def reassign_chants() -> None:
91 CHUNK_SIZE = 1_000
92 chants: QuerySet[Chant] = Chant.objects.all()
93 chants_count: int = chants.count()
94 start_index: int = 0
95 while start_index <= chants_count:
96 stdout.write(f"processing chunk with {start_index=}\n")
97 chunk: QuerySet[Chant] = chants[start_index : start_index + CHUNK_SIZE]
98 for chant in chunk:
99 old_creator: Optional[User] = chant.created_by
100
101 updated_id: Optional[int] = None
102 try:
103 updated_id: int = USER_ID_MAPPING[old_creator.id]
104 except (
105 KeyError, # old_creator.id not in USER_ID_MAPPING
106 AttributeError, # old_creator is None
107 ):
108 pass
109
110 if updated_id is None:
111 # user ID doesn't need to be remapped
112 continue
113
114 updated_creator: Optional[User] = None
115 try:
116 updated_creator = User.objects.get(id=updated_id)
117 except User.DoesNotExist:
118 pass
119
120 chant.created_by = updated_creator
121 chant.save()
122 start_index += CHUNK_SIZE
123
124
125 class Command(BaseCommand):
126 def handle(self, *args, **kwargs) -> None:
127 stdout.write("\n\n==== Reassigning Sources ====\n")
128 reassign_sources()
129 stdout.write("\n== All sources successfully remapped! ==\n")
130 stdout.write("\n\n==== Reassigning Chants ====\n")
131 reassign_chants()
132 stdout.write("\n== All chants successfully remapped! ==\n")
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py
--- a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py
+++ b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py
@@ -124,6 +124,11 @@
class Command(BaseCommand):
def handle(self, *args, **kwargs) -> None:
+ error_message = (
+ "As of late November 2023, this command is not working. "
+ "It has been temporarily disabled until the bugs have been worked out."
+ )
+ raise NotImplementedError(error_message)
stdout.write("\n\n==== Reassigning Sources ====\n")
reassign_sources()
stdout.write("\n== All sources successfully remapped! ==\n")
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py\n--- a/django/cantusdb_project/main_app/management/commands/remap_user_ids.py\n+++ b/django/cantusdb_project/main_app/management/commands/remap_user_ids.py\n@@ -124,6 +124,11 @@\n \n class Command(BaseCommand):\n def handle(self, *args, **kwargs) -> None:\n+ error_message = (\n+ \"As of late November 2023, this command is not working. \"\n+ \"It has been temporarily disabled until the bugs have been worked out.\"\n+ )\n+ raise NotImplementedError(error_message)\n stdout.write(\"\\n\\n==== Reassigning Sources ====\\n\")\n reassign_sources()\n stdout.write(\"\\n== All sources successfully remapped! ==\\n\")\n", "issue": "We should disable the remap_user_ids command for the time being\nWe have changes on Staging that need to make their way to Production soon.\r\n\r\nThe `remap_user_ids` command is not working properly (#1165).\r\n\r\nWe should disable the command for now so we can deploy recent changes to Production.\n", "before_files": [{"content": "from main_app.models import Source, Chant\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom sys import stdout\nfrom django.db.models.query import QuerySet\nfrom typing import Optional\n\nUser = get_user_model()\n\nUSER_ID_MAPPING = {\n # Fake user accounts with sequential numbering were created on NewCantus\n # for OldCantus Indexers. In the time since user accounts were\n # programmatically synced, new user accounts were created on OldCantus,\n # which duplicated these IDs. Then, we manually created new user accounts\n # on NewCantus for these newer users, with new IDs that don't match those\n # in OldCantus.\n #\n # In this dictionary:\n # - Keys represent the IDs of users recently created on OldCantus, which collide\n # with those of NewCantus Indexers\n # - Values represent the IDs of manually-created users in NewCantus.\n 251610: 251660,\n 251611: 251661,\n 251612: 251662,\n 251613: 251663,\n 251614: 251664,\n 251616: 251665,\n 251617: 251666,\n 251618: 251667,\n 251619: 251668,\n 251620: 251669,\n 251621: 251670,\n 251622: 251671,\n 251623: 251672,\n 251624: 251673,\n 251625: 251674,\n 251626: 251657,\n 251627: 251675,\n 251630: 251676,\n 251632: 251678,\n 251633: 251679,\n 251638: 251656,\n 251639: 251680,\n 251640: 251681,\n 251641: 251682,\n 251642: 251683,\n 251643: 251684,\n 251645: 251685,\n}\n\n\ndef reassign_sources() -> None:\n CHUNK_SIZE = 1_000\n sources: QuerySet[Source] = Source.objects.all()\n sources_count: int = sources.count()\n start_index: int = 0\n while start_index <= sources_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Source] = sources[start_index : start_index + CHUNK_SIZE]\n for source in chunk:\n old_creator: Optional[User] = source.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except (\n User.DoesNotExist,\n AttributeError,\n ):\n pass\n\n source.created_by = updated_creator\n source.save()\n start_index += CHUNK_SIZE\n\n\ndef reassign_chants() -> None:\n CHUNK_SIZE = 1_000\n chants: 
QuerySet[Chant] = Chant.objects.all()\n chants_count: int = chants.count()\n start_index: int = 0\n while start_index <= chants_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Chant] = chants[start_index : start_index + CHUNK_SIZE]\n for chant in chunk:\n old_creator: Optional[User] = chant.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except User.DoesNotExist:\n pass\n\n chant.created_by = updated_creator\n chant.save()\n start_index += CHUNK_SIZE\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs) -> None:\n stdout.write(\"\\n\\n==== Reassigning Sources ====\\n\")\n reassign_sources()\n stdout.write(\"\\n== All sources successfully remapped! ==\\n\")\n stdout.write(\"\\n\\n==== Reassigning Chants ====\\n\")\n reassign_chants()\n stdout.write(\"\\n== All chants successfully remapped! ==\\n\")\n", "path": "django/cantusdb_project/main_app/management/commands/remap_user_ids.py"}], "after_files": [{"content": "from main_app.models import Source, Chant\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\nfrom sys import stdout\nfrom django.db.models.query import QuerySet\nfrom typing import Optional\n\nUser = get_user_model()\n\nUSER_ID_MAPPING = {\n # Fake user accounts with sequential numbering were created on NewCantus\n # for OldCantus Indexers. In the time since user accounts were\n # programmatically synced, new user accounts were created on OldCantus,\n # which duplicated these IDs. 
Then, we manually created new user accounts\n # on NewCantus for these newer users, with new IDs that don't match those\n # in OldCantus.\n #\n # In this dictionary:\n # - Keys represent the IDs of users recently created on OldCantus, which collide\n # with those of NewCantus Indexers\n # - Values represent the IDs of manually-created users in NewCantus.\n 251610: 251660,\n 251611: 251661,\n 251612: 251662,\n 251613: 251663,\n 251614: 251664,\n 251616: 251665,\n 251617: 251666,\n 251618: 251667,\n 251619: 251668,\n 251620: 251669,\n 251621: 251670,\n 251622: 251671,\n 251623: 251672,\n 251624: 251673,\n 251625: 251674,\n 251626: 251657,\n 251627: 251675,\n 251630: 251676,\n 251632: 251678,\n 251633: 251679,\n 251638: 251656,\n 251639: 251680,\n 251640: 251681,\n 251641: 251682,\n 251642: 251683,\n 251643: 251684,\n 251645: 251685,\n}\n\n\ndef reassign_sources() -> None:\n CHUNK_SIZE = 1_000\n sources: QuerySet[Source] = Source.objects.all()\n sources_count: int = sources.count()\n start_index: int = 0\n while start_index <= sources_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Source] = sources[start_index : start_index + CHUNK_SIZE]\n for source in chunk:\n old_creator: Optional[User] = source.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except (\n User.DoesNotExist,\n AttributeError,\n ):\n pass\n\n source.created_by = updated_creator\n source.save()\n start_index += CHUNK_SIZE\n\n\ndef reassign_chants() -> None:\n CHUNK_SIZE = 1_000\n chants: QuerySet[Chant] = Chant.objects.all()\n chants_count: int = chants.count()\n start_index: int = 0\n while start_index <= chants_count:\n stdout.write(f\"processing chunk with {start_index=}\\n\")\n chunk: QuerySet[Chant] = chants[start_index : start_index + CHUNK_SIZE]\n for chant in chunk:\n old_creator: Optional[User] = chant.created_by\n\n updated_id: Optional[int] = None\n try:\n updated_id: int = USER_ID_MAPPING[old_creator.id]\n except (\n KeyError, # old_creator.id not in USER_ID_MAPPING\n AttributeError, # old_creator is None\n ):\n pass\n\n if updated_id is None:\n # user ID doesn't need to be remapped\n continue\n\n updated_creator: Optional[User] = None\n try:\n updated_creator = User.objects.get(id=updated_id)\n except User.DoesNotExist:\n pass\n\n chant.created_by = updated_creator\n chant.save()\n start_index += CHUNK_SIZE\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs) -> None:\n error_message = (\n \"As of late November 2023, this command is not working. \"\n \"It has been temporarily disabled until the bugs have been worked out.\"\n )\n raise NotImplementedError(error_message)\n stdout.write(\"\\n\\n==== Reassigning Sources ====\\n\")\n reassign_sources()\n stdout.write(\"\\n== All sources successfully remapped! ==\\n\")\n stdout.write(\"\\n\\n==== Reassigning Chants ====\\n\")\n reassign_chants()\n stdout.write(\"\\n== All chants successfully remapped! ==\\n\")\n", "path": "django/cantusdb_project/main_app/management/commands/remap_user_ids.py"}]}
| 1,909 | 207 |
gh_patches_debug_5603
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-15611
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Empty search entries are being created for device asset tags
### Deployment Type
NetBox Cloud
### NetBox Version
v3.7.4
### Python Version
3.11
### Steps to Reproduce
1. Create a new device and note its database ID
2. In the NetBox shell, inspect all search entries associated with it:
```python
ct = ContentType.objects.get_for_model(Device)
device_id = 107
entries = CachedValue.objects.filter(object_type=ct, object_id=device_id)
for entry in entries:
print(f'{entry.field}: {entry.value}')
```
### Expected Behavior
Only fields which have a meaningful value set should have search entries created.
### Observed Behavior
After creating a device with a description, I see three entries for it:
```
asset_tag: None
name: device1
description: asdasdasd
```
The value of `asset_tag` is null.
--- END ISSUE ---
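The symptom suggests the indexer is stringifying unset fields: `str(None)` is the truthy string `"None"`, so the `if type_ and value:` guard in the `to_cache()` method shown below never filters it out. A minimal sketch of a guard that avoids this (illustrative, not necessarily the final patch):
```python
def get_field_value(instance, field_name):
    """Return the field value as a string, or None when it is unset/empty."""
    value = getattr(instance, field_name)
    if value:
        return str(value)
    return None


# With this guard, a null asset_tag yields None instead of the literal string
# "None", so to_cache() skips it and no empty CachedValue row is created.
```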
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/netbox/search/__init__.py`
Content:
```
1 from collections import namedtuple
2
3 from django.db import models
4
5 from ipam.fields import IPAddressField, IPNetworkField
6 from netbox.registry import registry
7
8 ObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))
9
10
11 class FieldTypes:
12 FLOAT = 'float'
13 INTEGER = 'int'
14 STRING = 'str'
15 INET = 'inet'
16 CIDR = 'cidr'
17
18
19 class LookupTypes:
20 PARTIAL = 'icontains'
21 EXACT = 'iexact'
22 STARTSWITH = 'istartswith'
23 ENDSWITH = 'iendswith'
24 REGEX = 'iregex'
25
26
27 class SearchIndex:
28 """
29 Base class for building search indexes.
30
31 Attributes:
32 model: The model class for which this index is used.
33 category: The label of the group under which this indexer is categorized (for form field display). If none,
34 the name of the model's app will be used.
35 fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.
36 display_attrs: An iterable of additional object attributes to include when displaying search results.
37 """
38 model = None
39 category = None
40 fields = ()
41 display_attrs = ()
42
43 @staticmethod
44 def get_field_type(instance, field_name):
45 """
46 Return the data type of the specified model field.
47 """
48 field_cls = instance._meta.get_field(field_name).__class__
49 if issubclass(field_cls, (models.FloatField, models.DecimalField)):
50 return FieldTypes.FLOAT
51 if issubclass(field_cls, IPAddressField):
52 return FieldTypes.INET
53 if issubclass(field_cls, IPNetworkField):
54 return FieldTypes.CIDR
55 if issubclass(field_cls, models.IntegerField):
56 return FieldTypes.INTEGER
57 return FieldTypes.STRING
58
59 @staticmethod
60 def get_field_value(instance, field_name):
61 """
62 Return the value of the specified model field as a string.
63 """
64 return str(getattr(instance, field_name))
65
66 @classmethod
67 def get_category(cls):
68 return cls.category or cls.model._meta.app_config.verbose_name
69
70 @classmethod
71 def to_cache(cls, instance, custom_fields=None):
72 """
73 Return a list of ObjectFieldValue representing the instance fields to be cached.
74
75 Args:
76 instance: The instance being cached.
77 custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields
78 defined for the model will be included. (This can also be provided during bulk caching to avoid looking
79 up the available custom fields for each instance.)
80 """
81 values = []
82
83 # Capture built-in fields
84 for name, weight in cls.fields:
85 type_ = cls.get_field_type(instance, name)
86 value = cls.get_field_value(instance, name)
87 if type_ and value:
88 values.append(
89 ObjectFieldValue(name, type_, weight, value)
90 )
91
92 # Capture custom fields
93 if getattr(instance, 'custom_field_data', None):
94 if custom_fields is None:
95 custom_fields = instance.custom_fields
96 for cf in custom_fields:
97 type_ = cf.search_type
98 value = instance.custom_field_data.get(cf.name)
99 weight = cf.search_weight
100 if type_ and value and weight:
101 values.append(
102 ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)
103 )
104
105 return values
106
107
108 def get_indexer(model):
109 """
110 Get the SearchIndex class for the given model.
111 """
112 label = f'{model._meta.app_label}.{model._meta.model_name}'
113
114 return registry['search'][label]
115
116
117 def register_search(cls):
118 """
119 Decorator for registering a SearchIndex class.
120 """
121 model = cls.model
122 label = f'{model._meta.app_label}.{model._meta.model_name}'
123 registry['search'][label] = cls
124
125 return cls
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/netbox/netbox/search/__init__.py b/netbox/netbox/search/__init__.py
--- a/netbox/netbox/search/__init__.py
+++ b/netbox/netbox/search/__init__.py
@@ -59,9 +59,10 @@
@staticmethod
def get_field_value(instance, field_name):
"""
- Return the value of the specified model field as a string.
+ Return the value of the specified model field as a string (or None).
"""
- return str(getattr(instance, field_name))
+ if value := getattr(instance, field_name):
+ return str(value)
@classmethod
def get_category(cls):
|
{"golden_diff": "diff --git a/netbox/netbox/search/__init__.py b/netbox/netbox/search/__init__.py\n--- a/netbox/netbox/search/__init__.py\n+++ b/netbox/netbox/search/__init__.py\n@@ -59,9 +59,10 @@\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n- Return the value of the specified model field as a string.\n+ Return the value of the specified model field as a string (or None).\n \"\"\"\n- return str(getattr(instance, field_name))\n+ if value := getattr(instance, field_name):\n+ return str(value)\n \n @classmethod\n def get_category(cls):\n", "issue": "Empty search entries are being created for device asset tags\n### Deployment Type\n\nNetBox Cloud\n\n### NetBox Version\n\nv3.7.4\n\n### Python Version\n\n3.11\n\n### Steps to Reproduce\n\n1. Create a new device and note its database ID\r\n2. In the NetBox shell, inspect all search entries associated with it:\r\n\r\n```python\r\nct = ContentType.objects.get_for_model(Device)\r\ndevice_id = 107\r\nentries = CachedValue.objects.filter(object_type=ct, object_id=device_id)\r\nfor entry in entries:\r\n print(f'{entry.field}: {entry.value}')\r\n```\n\n### Expected Behavior\n\nOnly fields which have a meaningful value set should have search entries created.\n\n### Observed Behavior\n\nAfter creating a device with a description, I see three entries for it:\r\n\r\n```\r\nasset_tag: None\r\nname: device1\r\ndescription: asdasdasd\r\n```\r\n\r\nThe value of `asset_tag` is null.\n", "before_files": [{"content": "from collections import namedtuple\n\nfrom django.db import models\n\nfrom ipam.fields import IPAddressField, IPNetworkField\nfrom netbox.registry import registry\n\nObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))\n\n\nclass FieldTypes:\n FLOAT = 'float'\n INTEGER = 'int'\n STRING = 'str'\n INET = 'inet'\n CIDR = 'cidr'\n\n\nclass LookupTypes:\n PARTIAL = 'icontains'\n EXACT = 'iexact'\n STARTSWITH = 'istartswith'\n ENDSWITH = 'iendswith'\n REGEX = 'iregex'\n\n\nclass SearchIndex:\n \"\"\"\n Base class for building search indexes.\n\n Attributes:\n model: The model class for which this index is used.\n category: The label of the group under which this indexer is categorized (for form field display). 
If none,\n the name of the model's app will be used.\n fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.\n display_attrs: An iterable of additional object attributes to include when displaying search results.\n \"\"\"\n model = None\n category = None\n fields = ()\n display_attrs = ()\n\n @staticmethod\n def get_field_type(instance, field_name):\n \"\"\"\n Return the data type of the specified model field.\n \"\"\"\n field_cls = instance._meta.get_field(field_name).__class__\n if issubclass(field_cls, (models.FloatField, models.DecimalField)):\n return FieldTypes.FLOAT\n if issubclass(field_cls, IPAddressField):\n return FieldTypes.INET\n if issubclass(field_cls, IPNetworkField):\n return FieldTypes.CIDR\n if issubclass(field_cls, models.IntegerField):\n return FieldTypes.INTEGER\n return FieldTypes.STRING\n\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n Return the value of the specified model field as a string.\n \"\"\"\n return str(getattr(instance, field_name))\n\n @classmethod\n def get_category(cls):\n return cls.category or cls.model._meta.app_config.verbose_name\n\n @classmethod\n def to_cache(cls, instance, custom_fields=None):\n \"\"\"\n Return a list of ObjectFieldValue representing the instance fields to be cached.\n\n Args:\n instance: The instance being cached.\n custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields\n defined for the model will be included. (This can also be provided during bulk caching to avoid looking\n up the available custom fields for each instance.)\n \"\"\"\n values = []\n\n # Capture built-in fields\n for name, weight in cls.fields:\n type_ = cls.get_field_type(instance, name)\n value = cls.get_field_value(instance, name)\n if type_ and value:\n values.append(\n ObjectFieldValue(name, type_, weight, value)\n )\n\n # Capture custom fields\n if getattr(instance, 'custom_field_data', None):\n if custom_fields is None:\n custom_fields = instance.custom_fields\n for cf in custom_fields:\n type_ = cf.search_type\n value = instance.custom_field_data.get(cf.name)\n weight = cf.search_weight\n if type_ and value and weight:\n values.append(\n ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)\n )\n\n return values\n\n\ndef get_indexer(model):\n \"\"\"\n Get the SearchIndex class for the given model.\n \"\"\"\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n\n return registry['search'][label]\n\n\ndef register_search(cls):\n \"\"\"\n Decorator for registering a SearchIndex class.\n \"\"\"\n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls\n", "path": "netbox/netbox/search/__init__.py"}], "after_files": [{"content": "from collections import namedtuple\n\nfrom django.db import models\n\nfrom ipam.fields import IPAddressField, IPNetworkField\nfrom netbox.registry import registry\n\nObjectFieldValue = namedtuple('ObjectFieldValue', ('name', 'type', 'weight', 'value'))\n\n\nclass FieldTypes:\n FLOAT = 'float'\n INTEGER = 'int'\n STRING = 'str'\n INET = 'inet'\n CIDR = 'cidr'\n\n\nclass LookupTypes:\n PARTIAL = 'icontains'\n EXACT = 'iexact'\n STARTSWITH = 'istartswith'\n ENDSWITH = 'iendswith'\n REGEX = 'iregex'\n\n\nclass SearchIndex:\n \"\"\"\n Base class for building search indexes.\n\n Attributes:\n model: The model class for which this index is used.\n category: The label of the group under which this indexer is categorized 
(for form field display). If none,\n the name of the model's app will be used.\n fields: An iterable of two-tuples defining the model fields to be indexed and the weight associated with each.\n display_attrs: An iterable of additional object attributes to include when displaying search results.\n \"\"\"\n model = None\n category = None\n fields = ()\n display_attrs = ()\n\n @staticmethod\n def get_field_type(instance, field_name):\n \"\"\"\n Return the data type of the specified model field.\n \"\"\"\n field_cls = instance._meta.get_field(field_name).__class__\n if issubclass(field_cls, (models.FloatField, models.DecimalField)):\n return FieldTypes.FLOAT\n if issubclass(field_cls, IPAddressField):\n return FieldTypes.INET\n if issubclass(field_cls, IPNetworkField):\n return FieldTypes.CIDR\n if issubclass(field_cls, models.IntegerField):\n return FieldTypes.INTEGER\n return FieldTypes.STRING\n\n @staticmethod\n def get_field_value(instance, field_name):\n \"\"\"\n Return the value of the specified model field as a string (or None).\n \"\"\"\n if value := getattr(instance, field_name):\n return str(value)\n\n @classmethod\n def get_category(cls):\n return cls.category or cls.model._meta.app_config.verbose_name\n\n @classmethod\n def to_cache(cls, instance, custom_fields=None):\n \"\"\"\n Return a list of ObjectFieldValue representing the instance fields to be cached.\n\n Args:\n instance: The instance being cached.\n custom_fields: An iterable of CustomFields to include when caching the instance. If None, all custom fields\n defined for the model will be included. (This can also be provided during bulk caching to avoid looking\n up the available custom fields for each instance.)\n \"\"\"\n values = []\n\n # Capture built-in fields\n for name, weight in cls.fields:\n type_ = cls.get_field_type(instance, name)\n value = cls.get_field_value(instance, name)\n if type_ and value:\n values.append(\n ObjectFieldValue(name, type_, weight, value)\n )\n\n # Capture custom fields\n if getattr(instance, 'custom_field_data', None):\n if custom_fields is None:\n custom_fields = instance.custom_fields\n for cf in custom_fields:\n type_ = cf.search_type\n value = instance.custom_field_data.get(cf.name)\n weight = cf.search_weight\n if type_ and value and weight:\n values.append(\n ObjectFieldValue(f'cf_{cf.name}', type_, weight, value)\n )\n\n return values\n\n\ndef get_indexer(model):\n \"\"\"\n Get the SearchIndex class for the given model.\n \"\"\"\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n\n return registry['search'][label]\n\n\ndef register_search(cls):\n \"\"\"\n Decorator for registering a SearchIndex class.\n \"\"\"\n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls\n", "path": "netbox/netbox/search/__init__.py"}]}
| 1,573 | 149 |
gh_patches_debug_11540
|
rasdani/github-patches
|
git_diff
|
plotly__dash-1493
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] dash doesn't get imported when a file named "org.py", "dash.py", or "test.py" with specific content is present in the current directory // "AttributeError: module 'dash' has no attribute 'Dash'"
**Describe your context**
```
dash (1.9.1)
dash-core-components (1.8.1)
dash-html-components (1.0.2)
dash-renderer (1.2.4)
dash-table (4.6.1)
```
**Describe the bug**
If a file named ``org.py`` is present in the current directory with the following content:
```
import dash_core_components as dcc
```
then dash doesn't import and I get the following message:
```
>>> import dash
Dash was not successfully imported. Make sure you don't have a file named
'dash.py' in your current directory.
```
**Expected behavior**
dash should import without any error.
**Additional info**
- The org.py is never imported
- If I rename the file to a different name dash get imported without any problem.
- The problem is shown also with ``import dash_html_components as html``
- The problem is shown on both Windows and Linux
- Tested with python3.4, python3.6, python3.8
**Steps to replicate the problem on Linux**
```
$ mkdir mytest
$ cd mytest
$ echo "import dash_core_components as dcc" > org.py
$ python3 -m venv venv
$ . venv/bin/activate
(venv) $ pip install dash
(venv) $ python
Python 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import dash
Dash was not successfully imported. Make sure you don't have a file named
'dash.py' in your current directory.
(venv) $
```
if I rename the file the import works:
```
(venv) $ mv org.py othername.py
(venv) $ python
Python 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import dash
>>>
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/__init__.py`
Content:
```
1 from .dash import Dash, no_update # noqa: F401
2 from . import dependencies # noqa: F401
3 from . import development # noqa: F401
4 from . import exceptions # noqa: F401
5 from . import resources # noqa: F401
6 from .version import __version__ # noqa: F401
7 from ._callback_context import callback_context # noqa: F401
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dash/__init__.py b/dash/__init__.py
--- a/dash/__init__.py
+++ b/dash/__init__.py
@@ -1,7 +1,11 @@
-from .dash import Dash, no_update # noqa: F401
-from . import dependencies # noqa: F401
-from . import development # noqa: F401
-from . import exceptions # noqa: F401
-from . import resources # noqa: F401
-from .version import __version__ # noqa: F401
-from ._callback_context import callback_context # noqa: F401
+# pylint: disable=C0413
+# __plotly_dash is for the "make sure you don't have a dash.py" check
+# must come before any other imports.
+__plotly_dash = True
+from .dash import Dash, no_update # noqa: F401,E402
+from . import dependencies # noqa: F401,E402
+from . import development # noqa: F401,E402
+from . import exceptions # noqa: F401,E402
+from . import resources # noqa: F401,E402
+from .version import __version__ # noqa: F401,E402
+from ._callback_context import callback_context # noqa: F401,E402
|
{"golden_diff": "diff --git a/dash/__init__.py b/dash/__init__.py\n--- a/dash/__init__.py\n+++ b/dash/__init__.py\n@@ -1,7 +1,11 @@\n-from .dash import Dash, no_update # noqa: F401\n-from . import dependencies # noqa: F401\n-from . import development # noqa: F401\n-from . import exceptions # noqa: F401\n-from . import resources # noqa: F401\n-from .version import __version__ # noqa: F401\n-from ._callback_context import callback_context # noqa: F401\n+# pylint: disable=C0413\n+# __plotly_dash is for the \"make sure you don't have a dash.py\" check\n+# must come before any other imports.\n+__plotly_dash = True\n+from .dash import Dash, no_update # noqa: F401,E402\n+from . import dependencies # noqa: F401,E402\n+from . import development # noqa: F401,E402\n+from . import exceptions # noqa: F401,E402\n+from . import resources # noqa: F401,E402\n+from .version import __version__ # noqa: F401,E402\n+from ._callback_context import callback_context # noqa: F401,E402\n", "issue": "[BUG] dash doesn't get imported when a file named \"org.py\", \"dash.py\", or \"test.py\" with specific content is present in the current directory // \"AttributeError: module 'dash' has no attribute 'Dash'\"\n**Describe your context**\r\n\r\n```\r\ndash (1.9.1)\r\ndash-core-components (1.8.1)\r\ndash-html-components (1.0.2)\r\ndash-renderer (1.2.4)\r\ndash-table (4.6.1)\r\n\r\n```\r\n\r\n**Describe the bug**\r\n\r\nIf a file named ``org.py`` is present in the current directory with the following content:\r\n\r\n```\r\nimport dash_core_components as dcc\r\n```\r\n\r\nthen dash doesn't import and I get the following message:\r\n```\r\n>>> import dash\r\nDash was not successfully imported. Make sure you don't have a file named\r\n'dash.py' in your current directory.\r\n```\r\n\r\n**Expected behavior**\r\ndash should import without any error.\r\n\r\n**Additional info**\r\n- The org.py is never imported\r\n- If I rename the file to a different name dash get imported without any problem.\r\n- The problem is shown also with ``import dash_html_components as html```\r\n- The problem is shown either on Windows and in Linux\r\n- Tested with python3.4, python3.6, python3.8\r\n\r\n**Steps to replicate the problem on Linux**\r\n```\r\n$ mkdir mytest\r\n$ cd mytest\r\n$ echo \"import dash_core_components as dcc\" > org.py\r\n$ python3 -m venv venv\r\n$ . venv/bin/activate\r\n(venv) $ pip install dash\r\n(venv) $ python\r\nPython 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import dash\r\nDash was not successfully imported. Make sure you don't have a file named\r\n'dash.py' in your current directory.\r\n(venv) $\r\n```\r\n\r\nif I rename the file the import works:\r\n```\r\n(venv) $ mv org.py othername.py\r\n(venv) $ python\r\nPython 3.4.6 (default, Mar 01 2017, 16:52:22) [GCC] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import dash\r\n>>>\r\n```\r\n\n", "before_files": [{"content": "from .dash import Dash, no_update # noqa: F401\nfrom . import dependencies # noqa: F401\nfrom . import development # noqa: F401\nfrom . import exceptions # noqa: F401\nfrom . 
import resources # noqa: F401\nfrom .version import __version__ # noqa: F401\nfrom ._callback_context import callback_context # noqa: F401\n", "path": "dash/__init__.py"}], "after_files": [{"content": "# pylint: disable=C0413\n# __plotly_dash is for the \"make sure you don't have a dash.py\" check\n# must come before any other imports.\n__plotly_dash = True\nfrom .dash import Dash, no_update # noqa: F401,E402\nfrom . import dependencies # noqa: F401,E402\nfrom . import development # noqa: F401,E402\nfrom . import exceptions # noqa: F401,E402\nfrom . import resources # noqa: F401,E402\nfrom .version import __version__ # noqa: F401,E402\nfrom ._callback_context import callback_context # noqa: F401,E402\n", "path": "dash/__init__.py"}]}
| 893 | 331 |
gh_patches_debug_25606
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-521
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: all_members_are_administrators fails
I don't know if the Telegram API changed, but the parameter that tells whether all members in a group are administrators has changed to `all_members_are_administrators`. Chats fail to update with this parameter.
### Steps to reproduce
1. Create a group with "all members are administrators" enabled
2. Add a bot to it
3. send the bot a message
4. ` assert print(update.message.chat.all_members_are_admins)==True`
### Expected behaviour
It should pass the assert
### Actual behaviour
This fails the assert
### Way to fix
rename to `all_members_are_administrators`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/chat.py`
Content:
```
1 #!/usr/bin/env python
2 # pylint: disable=C0103,W0622
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2016
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram Chat."""
21
22 from telegram import TelegramObject
23
24
25 class Chat(TelegramObject):
26 """This object represents a Telegram Chat.
27
28 Attributes:
29 id (int):
30 type (str): Can be 'private', 'group', 'supergroup' or 'channel'
31 title (str): Title, for channels and group chats
32 username (str): Username, for private chats and channels if available
33 first_name (str): First name of the other party in a private chat
34 last_name (str): Last name of the other party in a private chat
35 all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.
36
37 Args:
38 id (int):
39 type (str):
40 title (Optional[str]):
41 username(Optional[str]):
42 first_name(Optional[str]):
43 last_name(Optional[str]):
44 bot (Optional[Bot]): The Bot to use for instance methods
45 **kwargs (dict): Arbitrary keyword arguments.
46
47 """
48 PRIVATE = 'private'
49 GROUP = 'group'
50 SUPERGROUP = 'supergroup'
51 CHANNEL = 'channel'
52
53 def __init__(self,
54 id,
55 type,
56 title='',
57 username='',
58 first_name='',
59 last_name='',
60 all_members_are_admins=False,
61 bot=None,
62 **kwargs):
63 # Required
64 self.id = int(id)
65 self.type = type
66 # Optionals
67 self.title = title
68 self.username = username
69 self.first_name = first_name
70 self.last_name = last_name
71 self.all_members_are_admins = all_members_are_admins
72
73 self.bot = bot
74
75 @staticmethod
76 def de_json(data, bot):
77 """
78 Args:
79 data (dict):
80 bot (telegram.Bot):
81
82 Returns:
83 telegram.Chat:
84 """
85 if not data:
86 return None
87
88 return Chat(bot=bot, **data)
89
90 def send_action(self, *args, **kwargs):
91 """Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``"""
92 return self.bot.sendChatAction(self.id, *args, **kwargs)
93
94 def leave(self, *args, **kwargs):
95 """Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``"""
96 return self.bot.leaveChat(self.id, *args, **kwargs)
97
98 def get_administrators(self, *args, **kwargs):
99 """Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``"""
100 return self.bot.getChatAdministrators(self.id, *args, **kwargs)
101
102 def get_members_count(self, *args, **kwargs):
103 """Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``"""
104 return self.bot.getChatMembersCount(self.id, *args, **kwargs)
105
106 def get_member(self, *args, **kwargs):
107 """Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``"""
108 return self.bot.getChatMember(self.id, *args, **kwargs)
109
110 def kick_member(self, *args, **kwargs):
111 """Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``"""
112 return self.bot.kickChatMember(self.id, *args, **kwargs)
113
114 def unban_member(self, *args, **kwargs):
115 """Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``"""
116 return self.bot.unbanChatMember(self.id, *args, **kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram/chat.py b/telegram/chat.py
--- a/telegram/chat.py
+++ b/telegram/chat.py
@@ -32,7 +32,7 @@
username (str): Username, for private chats and channels if available
first_name (str): First name of the other party in a private chat
last_name (str): Last name of the other party in a private chat
- all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.
+ all_members_are_administrators (bool): True if group has 'All Members Are Administrators'
Args:
id (int):
@@ -57,7 +57,7 @@
username='',
first_name='',
last_name='',
- all_members_are_admins=False,
+ all_members_are_administrators=False,
bot=None,
**kwargs):
# Required
@@ -68,7 +68,7 @@
self.username = username
self.first_name = first_name
self.last_name = last_name
- self.all_members_are_admins = all_members_are_admins
+ self.all_members_are_administrators = all_members_are_administrators
self.bot = bot
|
{"golden_diff": "diff --git a/telegram/chat.py b/telegram/chat.py\n--- a/telegram/chat.py\n+++ b/telegram/chat.py\n@@ -32,7 +32,7 @@\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n- all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.\n+ all_members_are_administrators (bool): True if group has 'All Members Are Administrators'\n \n Args:\n id (int):\n@@ -57,7 +57,7 @@\n username='',\n first_name='',\n last_name='',\n- all_members_are_admins=False,\n+ all_members_are_administrators=False,\n bot=None,\n **kwargs):\n # Required\n@@ -68,7 +68,7 @@\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n- self.all_members_are_admins = all_members_are_admins\n+ self.all_members_are_administrators = all_members_are_administrators\n \n self.bot = bot\n", "issue": "BUG: all_members_are_administrators fails\nI don;t know if telegram api changed but the parameter to tell if all administrators in a group are admin has changed: to `all_members_are_administrators` Chat's fail to update with this parameter\r\n\r\n### Steps to reproduce\r\n1. Create a group with \"all members are administrators enabled\r\n2. Add a bot to it\r\n3. send the bot a message\r\n4. ` assert print(update.message.chat.all_members_are_admins)==True`\r\n\r\n### Expected behaviour\r\nIt should pass the assert\r\n\r\n### Actual behaviour\r\nThis failes the assert\r\n\r\n### Way to fix\r\nrename to `all_members_are_administrators`\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram Chat.\"\"\"\n\nfrom telegram import TelegramObject\n\n\nclass Chat(TelegramObject):\n \"\"\"This object represents a Telegram Chat.\n\n Attributes:\n id (int):\n type (str): Can be 'private', 'group', 'supergroup' or 'channel'\n title (str): Title, for channels and group chats\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n all_members_are_admins (bool): True if a group has 'All Members Are Admins' enabled.\n\n Args:\n id (int):\n type (str):\n title (Optional[str]):\n username(Optional[str]):\n first_name(Optional[str]):\n last_name(Optional[str]):\n bot (Optional[Bot]): The Bot to use for instance methods\n **kwargs (dict): Arbitrary keyword arguments.\n\n \"\"\"\n PRIVATE = 'private'\n GROUP = 'group'\n SUPERGROUP = 'supergroup'\n CHANNEL = 'channel'\n\n def __init__(self,\n id,\n type,\n title='',\n username='',\n first_name='',\n last_name='',\n all_members_are_admins=False,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.type = type\n # Optionals\n self.title = title\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.all_members_are_admins = all_members_are_admins\n\n self.bot = bot\n\n @staticmethod\n def de_json(data, bot):\n \"\"\"\n Args:\n data (dict):\n bot (telegram.Bot):\n\n Returns:\n telegram.Chat:\n \"\"\"\n if not data:\n return None\n\n return Chat(bot=bot, **data)\n\n def send_action(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.sendChatAction(self.id, *args, **kwargs)\n\n def leave(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.leaveChat(self.id, *args, **kwargs)\n\n def get_administrators(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatAdministrators(self.id, *args, **kwargs)\n\n def get_members_count(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMembersCount(self.id, *args, **kwargs)\n\n def get_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMember(self.id, *args, **kwargs)\n\n def kick_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.kickChatMember(self.id, *args, **kwargs)\n\n def unban_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.unbanChatMember(self.id, *args, **kwargs)\n", "path": "telegram/chat.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2016\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is 
distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram Chat.\"\"\"\n\nfrom telegram import TelegramObject\n\n\nclass Chat(TelegramObject):\n \"\"\"This object represents a Telegram Chat.\n\n Attributes:\n id (int):\n type (str): Can be 'private', 'group', 'supergroup' or 'channel'\n title (str): Title, for channels and group chats\n username (str): Username, for private chats and channels if available\n first_name (str): First name of the other party in a private chat\n last_name (str): Last name of the other party in a private chat\n all_members_are_administrators (bool): True if group has 'All Members Are Administrators'\n\n Args:\n id (int):\n type (str):\n title (Optional[str]):\n username(Optional[str]):\n first_name(Optional[str]):\n last_name(Optional[str]):\n bot (Optional[Bot]): The Bot to use for instance methods\n **kwargs (dict): Arbitrary keyword arguments.\n\n \"\"\"\n PRIVATE = 'private'\n GROUP = 'group'\n SUPERGROUP = 'supergroup'\n CHANNEL = 'channel'\n\n def __init__(self,\n id,\n type,\n title='',\n username='',\n first_name='',\n last_name='',\n all_members_are_administrators=False,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.type = type\n # Optionals\n self.title = title\n self.username = username\n self.first_name = first_name\n self.last_name = last_name\n self.all_members_are_administrators = all_members_are_administrators\n\n self.bot = bot\n\n @staticmethod\n def de_json(data, bot):\n \"\"\"\n Args:\n data (dict):\n bot (telegram.Bot):\n\n Returns:\n telegram.Chat:\n \"\"\"\n if not data:\n return None\n\n return Chat(bot=bot, **data)\n\n def send_action(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.sendChatAction(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.sendChatAction(self.id, *args, **kwargs)\n\n def leave(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.leaveChat(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.leaveChat(self.id, *args, **kwargs)\n\n def get_administrators(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatAdministrators(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatAdministrators(self.id, *args, **kwargs)\n\n def get_members_count(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMembersCount(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMembersCount(self.id, *args, **kwargs)\n\n def get_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.getChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.getChatMember(self.id, *args, **kwargs)\n\n def kick_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.kickChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.kickChatMember(self.id, *args, **kwargs)\n\n def unban_member(self, *args, **kwargs):\n \"\"\"Shortcut for ``bot.unbanChatMember(update.message.chat.id, *args, **kwargs)``\"\"\"\n return self.bot.unbanChatMember(self.id, *args, **kwargs)\n", "path": "telegram/chat.py"}]}
| 1,635 | 270 |
gh_patches_debug_32416
|
rasdani/github-patches
|
git_diff
|
linz__geostore-1651
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use latest of each STAC extension version
### Enabler
So that we don't have to manually update the code to use the latest version, we want to automatically use the latest version available in the relevant Git submodule.
Need to check what happens when a file is submitted that references an old version of a STAC schema.
#### Acceptance Criteria
- [ ] Dependabot PRs for any of the STAC submodules run tests with the latest version of all the extensions in that submodule
- [ ] Add a note to the release documentation about notifying users which STAC extension versions are supported
#### Additional context
This avoids manual work like [this PR to use the latest LINZ STAC extensions](https://github.com/linz/geostore/pull/1444).
Caveat: We currently only support one version of each extension. When extensions release breaking changes this could affect our existing users, and we need to notify them.
#### Tasks
<!-- Tasks needed to complete this enabler -->
- [ ] ...
- [ ] ...
#### Definition of Ready
- [ ] This story is **ready** to work on
- [ ] Negotiable (team can decide how to design and implement)
- [ ] Valuable (from a user perspective)
- [ ] Estimate value applied (agreed by team)
- [ ] Small (so as to fit within an iteration)
- [ ] Testable (in principle, even if there isn't a test for it yet)
- [ ] Environments are ready to meet definition of done
- [ ] Resources required to implement will be ready
- [ ] Everyone understands and agrees with the tasks to complete the story
- [ ] Release value (e.g. Iteration 3) applied
- [ ] Sprint value (e.g. Aug 1 - Aug 15) applied
#### Definition of Done
- [ ] This story is **done**:
- [ ] Acceptance criteria completed
- [ ] Automated tests are passing
- [ ] Code is peer reviewed and pushed to master
- [ ] Deployed successfully to test environment
- [ ] Checked against [CODING guidelines](https://github.com/linz/geostore/blob/master/CODING.md)
- [ ] Relevant new tasks are added to backlog and communicated to the team
- [ ] Important decisions recorded in the issue ticket
- [ ] Readme/Changelog/Diagrams are updated
- [ ] Product Owner has approved acceptance criteria as complete
- [ ] Meets non-functional requirements:
- [ ] Scalability (data): Can scale to 300TB of data and 100,000,000 files and ability to
increase 10% every year
- [ ] Scability (users): Can scale to 100 concurrent users
- [ ] Cost: Data can be stored at < 0.5 NZD per GB per year
- [ ] Performance: A large dataset (500 GB and 50,000 files - e.g. Akl aerial imagery) can be
validated, imported and stored within 24 hours
- [ ] Accessibility: Can be used from LINZ networks and the public internet
- [ ] Availability: System available 24 hours a day and 7 days a week, this does not include
maintenance windows < 4 hours and does not include operational support
- [ ] Recoverability: RPO of fully imported datasets < 4 hours, RTO of a single 3 TB dataset <
12 hours
<!-- Please add one or more of these labels: 'spike', 'refactor', 'architecture', 'infrastructure', 'compliance' -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geostore/check_stac_metadata/stac_validators.py`
Content:
```
1 from functools import cached_property
2 from json import load
3 from os.path import dirname, join
4
5 from jsonschema import Draft7Validator, FormatChecker, RefResolver
6 from jsonschema._utils import URIDict
7 from jsonschema.validators import extend
8
9 from ..stac_format import LINZ_STAC_EXTENSIONS_LOCAL_PATH
10 from ..types import JsonObject
11
12
13 class Schema:
14 def __init__(self, path: str):
15 self.path = path
16
17 @cached_property
18 def as_dict(self) -> JsonObject:
19 with open(join(dirname(__file__), self.path), encoding="utf-8") as file_pointer:
20 result: JsonObject = load(file_pointer)
21 return result
22
23 @cached_property
24 def schema_id(self) -> str:
25 id_: str = self.as_dict["$id"]
26 return id_
27
28 @cached_property
29 def uri(self) -> str:
30 uri_: str = URIDict().normalize(self.schema_id)
31 return uri_
32
33
34 FILE_STAC_SCHEMA_PATH = "file/v2.0.0/schema.json"
35 PROJECTION_STAC_SCHEMA_PATH = "projection/v1.0.0/schema.json"
36 VERSION_STAC_SCHEMA_PATH = "version/v1.0.0/schema.json"
37 FILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)
38
39 STAC_VERSION = "1.0.0"
40 STAC_SPEC_PATH = f"stac-spec/v{STAC_VERSION}"
41 CATALOG_SCHEMA = Schema(f"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json")
42 LINZ_STAC_EXTENSIONS_URL_PATH = "v0.0.14"
43 LINZ_SCHEMA_URL_DIRECTORY = f"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz"
44 LINZ_SCHEMA_URL_PATH = f"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json"
45 LINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))
46 STAC_ITEM_SPEC_PATH = f"{STAC_SPEC_PATH}/item-spec/json-schema"
47 ITEM_SCHEMA = Schema(f"{STAC_ITEM_SPEC_PATH}/item.json")
48 QUALITY_SCHEMA_PATH = f"{LINZ_STAC_EXTENSIONS_URL_PATH}/quality/schema.json"
49
50 schema_store = {}
51 for schema in [
52 CATALOG_SCHEMA,
53 Schema(f"{STAC_SPEC_PATH}/collection-spec/json-schema/collection.json"),
54 FILE_SCHEMA,
55 Schema("geojson-spec/Feature.json"),
56 Schema("geojson-spec/Geometry.json"),
57 ITEM_SCHEMA,
58 Schema(f"{STAC_ITEM_SPEC_PATH}/basics.json"),
59 Schema(f"{STAC_ITEM_SPEC_PATH}/datetime.json"),
60 Schema(f"{STAC_ITEM_SPEC_PATH}/instrument.json"),
61 Schema(f"{STAC_ITEM_SPEC_PATH}/licensing.json"),
62 Schema(f"{STAC_ITEM_SPEC_PATH}/provider.json"),
63 LINZ_SCHEMA,
64 Schema(PROJECTION_STAC_SCHEMA_PATH),
65 Schema(VERSION_STAC_SCHEMA_PATH),
66 Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, QUALITY_SCHEMA_PATH)),
67 ]:
68 # Normalize URLs the same way as jsonschema does
69 schema_store[schema.uri] = schema.as_dict
70
71 BaseSTACValidator = extend(Draft7Validator)
72 BaseSTACValidator.format_checker = FormatChecker()
73
74 STACCatalogSchemaValidator = extend(BaseSTACValidator)(
75 resolver=RefResolver.from_schema(CATALOG_SCHEMA.as_dict, store=schema_store),
76 schema=CATALOG_SCHEMA.as_dict,
77 )
78
79 STACCollectionSchemaValidator = extend(BaseSTACValidator)(
80 resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),
81 schema=LINZ_SCHEMA.as_dict,
82 )
83
84 STACItemSchemaValidator = extend(BaseSTACValidator)(
85 resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),
86 schema=LINZ_SCHEMA.as_dict,
87 )
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geostore/check_stac_metadata/stac_validators.py b/geostore/check_stac_metadata/stac_validators.py
--- a/geostore/check_stac_metadata/stac_validators.py
+++ b/geostore/check_stac_metadata/stac_validators.py
@@ -1,6 +1,9 @@
-from functools import cached_property
+from distutils.version import StrictVersion
+from functools import cached_property, lru_cache
from json import load
+from os import scandir
from os.path import dirname, join
+from re import fullmatch
from jsonschema import Draft7Validator, FormatChecker, RefResolver
from jsonschema._utils import URIDict
@@ -31,15 +34,28 @@
return uri_
+@lru_cache
+def get_latest_extension_schema_version(extension_path: str) -> str:
+ directories = scandir(join(dirname(__file__), extension_path))
+ versions = []
+ for directory in directories:
+ if directory.is_dir() and fullmatch(r"v\d+\.\d+\.\d+", directory.name):
+ versions.append(directory.name[1:])
+ return sorted(versions, key=StrictVersion, reverse=True)[0]
+
+
FILE_STAC_SCHEMA_PATH = "file/v2.0.0/schema.json"
PROJECTION_STAC_SCHEMA_PATH = "projection/v1.0.0/schema.json"
VERSION_STAC_SCHEMA_PATH = "version/v1.0.0/schema.json"
FILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)
-STAC_VERSION = "1.0.0"
-STAC_SPEC_PATH = f"stac-spec/v{STAC_VERSION}"
+STAC_SPEC_EXTENSION_PATH = "stac-spec"
+STAC_VERSION = get_latest_extension_schema_version(STAC_SPEC_EXTENSION_PATH)
+STAC_SPEC_PATH = f"{STAC_SPEC_EXTENSION_PATH}/v{STAC_VERSION}"
CATALOG_SCHEMA = Schema(f"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json")
-LINZ_STAC_EXTENSIONS_URL_PATH = "v0.0.14"
+LINZ_STAC_EXTENSIONS_URL_PATH = (
+ f"v{get_latest_extension_schema_version(LINZ_STAC_EXTENSIONS_LOCAL_PATH)}"
+)
LINZ_SCHEMA_URL_DIRECTORY = f"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz"
LINZ_SCHEMA_URL_PATH = f"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json"
LINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))
|
{"golden_diff": "diff --git a/geostore/check_stac_metadata/stac_validators.py b/geostore/check_stac_metadata/stac_validators.py\n--- a/geostore/check_stac_metadata/stac_validators.py\n+++ b/geostore/check_stac_metadata/stac_validators.py\n@@ -1,6 +1,9 @@\n-from functools import cached_property\n+from distutils.version import StrictVersion\n+from functools import cached_property, lru_cache\n from json import load\n+from os import scandir\n from os.path import dirname, join\n+from re import fullmatch\n \n from jsonschema import Draft7Validator, FormatChecker, RefResolver\n from jsonschema._utils import URIDict\n@@ -31,15 +34,28 @@\n return uri_\n \n \n+@lru_cache\n+def get_latest_extension_schema_version(extension_path: str) -> str:\n+ directories = scandir(join(dirname(__file__), extension_path))\n+ versions = []\n+ for directory in directories:\n+ if directory.is_dir() and fullmatch(r\"v\\d+\\.\\d+\\.\\d+\", directory.name):\n+ versions.append(directory.name[1:])\n+ return sorted(versions, key=StrictVersion, reverse=True)[0]\n+\n+\n FILE_STAC_SCHEMA_PATH = \"file/v2.0.0/schema.json\"\n PROJECTION_STAC_SCHEMA_PATH = \"projection/v1.0.0/schema.json\"\n VERSION_STAC_SCHEMA_PATH = \"version/v1.0.0/schema.json\"\n FILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)\n \n-STAC_VERSION = \"1.0.0\"\n-STAC_SPEC_PATH = f\"stac-spec/v{STAC_VERSION}\"\n+STAC_SPEC_EXTENSION_PATH = \"stac-spec\"\n+STAC_VERSION = get_latest_extension_schema_version(STAC_SPEC_EXTENSION_PATH)\n+STAC_SPEC_PATH = f\"{STAC_SPEC_EXTENSION_PATH}/v{STAC_VERSION}\"\n CATALOG_SCHEMA = Schema(f\"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json\")\n-LINZ_STAC_EXTENSIONS_URL_PATH = \"v0.0.14\"\n+LINZ_STAC_EXTENSIONS_URL_PATH = (\n+ f\"v{get_latest_extension_schema_version(LINZ_STAC_EXTENSIONS_LOCAL_PATH)}\"\n+)\n LINZ_SCHEMA_URL_DIRECTORY = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz\"\n LINZ_SCHEMA_URL_PATH = f\"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json\"\n LINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))\n", "issue": "Use latest of each STAC extension version\n### Enabler\r\n\r\nSo that we don't have to manually update the code to use the latest version, we want to automatically use the latest version available in the relevant Git submodule.\r\n\r\nNeed to check what happens when a file is submitted that references and old version of a stac schema\r\n\r\n#### Acceptance Criteria\r\n\r\n- [ ] Dependabot PRs for any of the STAC submodules run tests with the latest version of all the extensions in that submodule\r\n- [ ] Add a note to the release documentation about notifying users which STAC extension versions are supported\r\n\r\n#### Additional context\r\n\r\nThis avoids manual work like [this PR to use the latest LINZ STAC extensions](https://github.com/linz/geostore/pull/1444).\r\n\r\nCaveat: We currently only support one version of each extension. 
When extensions release breaking changes this could affect our existing users, and we need to notify them.\r\n\r\n#### Tasks\r\n\r\n<!-- Tasks needed to complete this enabler -->\r\n\r\n- [ ] ...\r\n- [ ] ...\r\n\r\n#### Definition of Ready\r\n\r\n- [ ] This story is **ready** to work on\r\n - [ ] Negotiable (team can decide how to design and implement)\r\n - [ ] Valuable (from a user perspective)\r\n - [ ] Estimate value applied (agreed by team)\r\n - [ ] Small (so as to fit within an iteration)\r\n - [ ] Testable (in principle, even if there isn't a test for it yet)\r\n - [ ] Environments are ready to meet definition of done\r\n - [ ] Resources required to implement will be ready\r\n - [ ] Everyone understands and agrees with the tasks to complete the story\r\n - [ ] Release value (e.g. Iteration 3) applied\r\n - [ ] Sprint value (e.g. Aug 1 - Aug 15) applied\r\n\r\n#### Definition of Done\r\n\r\n- [ ] This story is **done**:\r\n - [ ] Acceptance criteria completed\r\n - [ ] Automated tests are passing\r\n - [ ] Code is peer reviewed and pushed to master\r\n - [ ] Deployed successfully to test environment\r\n - [ ] Checked against [CODING guidelines](https://github.com/linz/geostore/blob/master/CODING.md)\r\n - [ ] Relevant new tasks are added to backlog and communicated to the team\r\n - [ ] Important decisions recorded in the issue ticket\r\n - [ ] Readme/Changelog/Diagrams are updated\r\n - [ ] Product Owner has approved acceptance criteria as complete\r\n - [ ] Meets non-functional requirements:\r\n - [ ] Scalability (data): Can scale to 300TB of data and 100,000,000 files and ability to\r\n increase 10% every year\r\n - [ ] Scability (users): Can scale to 100 concurrent users\r\n - [ ] Cost: Data can be stored at < 0.5 NZD per GB per year\r\n - [ ] Performance: A large dataset (500 GB and 50,000 files - e.g. 
Akl aerial imagery) can be\r\n validated, imported and stored within 24 hours\r\n - [ ] Accessibility: Can be used from LINZ networks and the public internet\r\n - [ ] Availability: System available 24 hours a day and 7 days a week, this does not include\r\n maintenance windows < 4 hours and does not include operational support\r\n - [ ] Recoverability: RPO of fully imported datasets < 4 hours, RTO of a single 3 TB dataset <\r\n 12 hours\r\n\r\n<!-- Please add one or more of these labels: 'spike', 'refactor', 'architecture', 'infrastructure', 'compliance' -->\r\n\n", "before_files": [{"content": "from functools import cached_property\nfrom json import load\nfrom os.path import dirname, join\n\nfrom jsonschema import Draft7Validator, FormatChecker, RefResolver\nfrom jsonschema._utils import URIDict\nfrom jsonschema.validators import extend\n\nfrom ..stac_format import LINZ_STAC_EXTENSIONS_LOCAL_PATH\nfrom ..types import JsonObject\n\n\nclass Schema:\n def __init__(self, path: str):\n self.path = path\n\n @cached_property\n def as_dict(self) -> JsonObject:\n with open(join(dirname(__file__), self.path), encoding=\"utf-8\") as file_pointer:\n result: JsonObject = load(file_pointer)\n return result\n\n @cached_property\n def schema_id(self) -> str:\n id_: str = self.as_dict[\"$id\"]\n return id_\n\n @cached_property\n def uri(self) -> str:\n uri_: str = URIDict().normalize(self.schema_id)\n return uri_\n\n\nFILE_STAC_SCHEMA_PATH = \"file/v2.0.0/schema.json\"\nPROJECTION_STAC_SCHEMA_PATH = \"projection/v1.0.0/schema.json\"\nVERSION_STAC_SCHEMA_PATH = \"version/v1.0.0/schema.json\"\nFILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)\n\nSTAC_VERSION = \"1.0.0\"\nSTAC_SPEC_PATH = f\"stac-spec/v{STAC_VERSION}\"\nCATALOG_SCHEMA = Schema(f\"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json\")\nLINZ_STAC_EXTENSIONS_URL_PATH = \"v0.0.14\"\nLINZ_SCHEMA_URL_DIRECTORY = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz\"\nLINZ_SCHEMA_URL_PATH = f\"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json\"\nLINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))\nSTAC_ITEM_SPEC_PATH = f\"{STAC_SPEC_PATH}/item-spec/json-schema\"\nITEM_SCHEMA = Schema(f\"{STAC_ITEM_SPEC_PATH}/item.json\")\nQUALITY_SCHEMA_PATH = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/quality/schema.json\"\n\nschema_store = {}\nfor schema in [\n CATALOG_SCHEMA,\n Schema(f\"{STAC_SPEC_PATH}/collection-spec/json-schema/collection.json\"),\n FILE_SCHEMA,\n Schema(\"geojson-spec/Feature.json\"),\n Schema(\"geojson-spec/Geometry.json\"),\n ITEM_SCHEMA,\n Schema(f\"{STAC_ITEM_SPEC_PATH}/basics.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/datetime.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/instrument.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/licensing.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/provider.json\"),\n LINZ_SCHEMA,\n Schema(PROJECTION_STAC_SCHEMA_PATH),\n Schema(VERSION_STAC_SCHEMA_PATH),\n Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, QUALITY_SCHEMA_PATH)),\n]:\n # Normalize URLs the same way as jsonschema does\n schema_store[schema.uri] = schema.as_dict\n\nBaseSTACValidator = extend(Draft7Validator)\nBaseSTACValidator.format_checker = FormatChecker()\n\nSTACCatalogSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(CATALOG_SCHEMA.as_dict, store=schema_store),\n schema=CATALOG_SCHEMA.as_dict,\n)\n\nSTACCollectionSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n\nSTACItemSchemaValidator = 
extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n", "path": "geostore/check_stac_metadata/stac_validators.py"}], "after_files": [{"content": "from distutils.version import StrictVersion\nfrom functools import cached_property, lru_cache\nfrom json import load\nfrom os import scandir\nfrom os.path import dirname, join\nfrom re import fullmatch\n\nfrom jsonschema import Draft7Validator, FormatChecker, RefResolver\nfrom jsonschema._utils import URIDict\nfrom jsonschema.validators import extend\n\nfrom ..stac_format import LINZ_STAC_EXTENSIONS_LOCAL_PATH\nfrom ..types import JsonObject\n\n\nclass Schema:\n def __init__(self, path: str):\n self.path = path\n\n @cached_property\n def as_dict(self) -> JsonObject:\n with open(join(dirname(__file__), self.path), encoding=\"utf-8\") as file_pointer:\n result: JsonObject = load(file_pointer)\n return result\n\n @cached_property\n def schema_id(self) -> str:\n id_: str = self.as_dict[\"$id\"]\n return id_\n\n @cached_property\n def uri(self) -> str:\n uri_: str = URIDict().normalize(self.schema_id)\n return uri_\n\n\n@lru_cache\ndef get_latest_extension_schema_version(extension_path: str) -> str:\n directories = scandir(join(dirname(__file__), extension_path))\n versions = []\n for directory in directories:\n if directory.is_dir() and fullmatch(r\"v\\d+\\.\\d+\\.\\d+\", directory.name):\n versions.append(directory.name[1:])\n return sorted(versions, key=StrictVersion, reverse=True)[0]\n\n\nFILE_STAC_SCHEMA_PATH = \"file/v2.0.0/schema.json\"\nPROJECTION_STAC_SCHEMA_PATH = \"projection/v1.0.0/schema.json\"\nVERSION_STAC_SCHEMA_PATH = \"version/v1.0.0/schema.json\"\nFILE_SCHEMA = Schema(FILE_STAC_SCHEMA_PATH)\n\nSTAC_SPEC_EXTENSION_PATH = \"stac-spec\"\nSTAC_VERSION = get_latest_extension_schema_version(STAC_SPEC_EXTENSION_PATH)\nSTAC_SPEC_PATH = f\"{STAC_SPEC_EXTENSION_PATH}/v{STAC_VERSION}\"\nCATALOG_SCHEMA = Schema(f\"{STAC_SPEC_PATH}/catalog-spec/json-schema/catalog.json\")\nLINZ_STAC_EXTENSIONS_URL_PATH = (\n f\"v{get_latest_extension_schema_version(LINZ_STAC_EXTENSIONS_LOCAL_PATH)}\"\n)\nLINZ_SCHEMA_URL_DIRECTORY = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/linz\"\nLINZ_SCHEMA_URL_PATH = f\"{LINZ_SCHEMA_URL_DIRECTORY}/schema.json\"\nLINZ_SCHEMA = Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, LINZ_SCHEMA_URL_PATH))\nSTAC_ITEM_SPEC_PATH = f\"{STAC_SPEC_PATH}/item-spec/json-schema\"\nITEM_SCHEMA = Schema(f\"{STAC_ITEM_SPEC_PATH}/item.json\")\nQUALITY_SCHEMA_PATH = f\"{LINZ_STAC_EXTENSIONS_URL_PATH}/quality/schema.json\"\n\nschema_store = {}\nfor schema in [\n CATALOG_SCHEMA,\n Schema(f\"{STAC_SPEC_PATH}/collection-spec/json-schema/collection.json\"),\n FILE_SCHEMA,\n Schema(\"geojson-spec/Feature.json\"),\n Schema(\"geojson-spec/Geometry.json\"),\n ITEM_SCHEMA,\n Schema(f\"{STAC_ITEM_SPEC_PATH}/basics.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/datetime.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/instrument.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/licensing.json\"),\n Schema(f\"{STAC_ITEM_SPEC_PATH}/provider.json\"),\n LINZ_SCHEMA,\n Schema(PROJECTION_STAC_SCHEMA_PATH),\n Schema(VERSION_STAC_SCHEMA_PATH),\n Schema(join(LINZ_STAC_EXTENSIONS_LOCAL_PATH, QUALITY_SCHEMA_PATH)),\n]:\n # Normalize URLs the same way as jsonschema does\n schema_store[schema.uri] = schema.as_dict\n\nBaseSTACValidator = extend(Draft7Validator)\nBaseSTACValidator.format_checker = FormatChecker()\n\nSTACCatalogSchemaValidator = extend(BaseSTACValidator)(\n 
resolver=RefResolver.from_schema(CATALOG_SCHEMA.as_dict, store=schema_store),\n schema=CATALOG_SCHEMA.as_dict,\n)\n\nSTACCollectionSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n\nSTACItemSchemaValidator = extend(BaseSTACValidator)(\n resolver=RefResolver.from_schema(LINZ_SCHEMA.as_dict, store=schema_store),\n schema=LINZ_SCHEMA.as_dict,\n)\n", "path": "geostore/check_stac_metadata/stac_validators.py"}]}
| 2,018 | 529 |
gh_patches_debug_14246
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1554
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
raises_unsupported_algorithm gives very unhelpful errors
When the error tag is wrong you get errors along the lines of `assert <object object at 0xf0000000> is not <object object as 0xb0000000>`. This is not very helpful, it's not even particularly obvious that the error tag is actually what's wrong until you go and read the code.
Should probably generate a useful error message or somehow give the tag objects a more useful `repr` output.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/exceptions.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7
8 class _Reasons(object):
9 BACKEND_MISSING_INTERFACE = object()
10 UNSUPPORTED_HASH = object()
11 UNSUPPORTED_CIPHER = object()
12 UNSUPPORTED_PADDING = object()
13 UNSUPPORTED_MGF = object()
14 UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()
15 UNSUPPORTED_ELLIPTIC_CURVE = object()
16 UNSUPPORTED_SERIALIZATION = object()
17 UNSUPPORTED_X509 = object()
18
19
20 class UnsupportedAlgorithm(Exception):
21 def __init__(self, message, reason=None):
22 super(UnsupportedAlgorithm, self).__init__(message)
23 self._reason = reason
24
25
26 class AlreadyFinalized(Exception):
27 pass
28
29
30 class AlreadyUpdated(Exception):
31 pass
32
33
34 class NotYetFinalized(Exception):
35 pass
36
37
38 class InvalidTag(Exception):
39 pass
40
41
42 class InvalidSignature(Exception):
43 pass
44
45
46 class InternalError(Exception):
47 pass
48
49
50 class InvalidKey(Exception):
51 pass
52
53
54 class InvalidToken(Exception):
55 pass
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cryptography/exceptions.py b/src/cryptography/exceptions.py
--- a/src/cryptography/exceptions.py
+++ b/src/cryptography/exceptions.py
@@ -4,17 +4,19 @@
from __future__ import absolute_import, division, print_function
-
-class _Reasons(object):
- BACKEND_MISSING_INTERFACE = object()
- UNSUPPORTED_HASH = object()
- UNSUPPORTED_CIPHER = object()
- UNSUPPORTED_PADDING = object()
- UNSUPPORTED_MGF = object()
- UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()
- UNSUPPORTED_ELLIPTIC_CURVE = object()
- UNSUPPORTED_SERIALIZATION = object()
- UNSUPPORTED_X509 = object()
+from enum import Enum
+
+
+class _Reasons(Enum):
+ BACKEND_MISSING_INTERFACE = 0
+ UNSUPPORTED_HASH = 1
+ UNSUPPORTED_CIPHER = 2
+ UNSUPPORTED_PADDING = 3
+ UNSUPPORTED_MGF = 4
+ UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5
+ UNSUPPORTED_ELLIPTIC_CURVE = 6
+ UNSUPPORTED_SERIALIZATION = 7
+ UNSUPPORTED_X509 = 8
class UnsupportedAlgorithm(Exception):
|
{"golden_diff": "diff --git a/src/cryptography/exceptions.py b/src/cryptography/exceptions.py\n--- a/src/cryptography/exceptions.py\n+++ b/src/cryptography/exceptions.py\n@@ -4,17 +4,19 @@\n \n from __future__ import absolute_import, division, print_function\n \n-\n-class _Reasons(object):\n- BACKEND_MISSING_INTERFACE = object()\n- UNSUPPORTED_HASH = object()\n- UNSUPPORTED_CIPHER = object()\n- UNSUPPORTED_PADDING = object()\n- UNSUPPORTED_MGF = object()\n- UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()\n- UNSUPPORTED_ELLIPTIC_CURVE = object()\n- UNSUPPORTED_SERIALIZATION = object()\n- UNSUPPORTED_X509 = object()\n+from enum import Enum\n+\n+\n+class _Reasons(Enum):\n+ BACKEND_MISSING_INTERFACE = 0\n+ UNSUPPORTED_HASH = 1\n+ UNSUPPORTED_CIPHER = 2\n+ UNSUPPORTED_PADDING = 3\n+ UNSUPPORTED_MGF = 4\n+ UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5\n+ UNSUPPORTED_ELLIPTIC_CURVE = 6\n+ UNSUPPORTED_SERIALIZATION = 7\n+ UNSUPPORTED_X509 = 8\n \n \n class UnsupportedAlgorithm(Exception):\n", "issue": "raises_unsupported_algorithm gives very unhelpful errors\nWhen the error tag is wrong you get errors along the lines of `assert <object object at 0xf0000000> is not <object object as 0xb0000000>`. This is not very helpful, it's not even particularly obvious that the error tag is actually what's wrong until you go and read the code.\n\nShould probably generate a useful error message or somehow give the tag objects a more useful `repr` output.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n\nclass _Reasons(object):\n BACKEND_MISSING_INTERFACE = object()\n UNSUPPORTED_HASH = object()\n UNSUPPORTED_CIPHER = object()\n UNSUPPORTED_PADDING = object()\n UNSUPPORTED_MGF = object()\n UNSUPPORTED_PUBLIC_KEY_ALGORITHM = object()\n UNSUPPORTED_ELLIPTIC_CURVE = object()\n UNSUPPORTED_SERIALIZATION = object()\n UNSUPPORTED_X509 = object()\n\n\nclass UnsupportedAlgorithm(Exception):\n def __init__(self, message, reason=None):\n super(UnsupportedAlgorithm, self).__init__(message)\n self._reason = reason\n\n\nclass AlreadyFinalized(Exception):\n pass\n\n\nclass AlreadyUpdated(Exception):\n pass\n\n\nclass NotYetFinalized(Exception):\n pass\n\n\nclass InvalidTag(Exception):\n pass\n\n\nclass InvalidSignature(Exception):\n pass\n\n\nclass InternalError(Exception):\n pass\n\n\nclass InvalidKey(Exception):\n pass\n\n\nclass InvalidToken(Exception):\n pass\n", "path": "src/cryptography/exceptions.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom enum import Enum\n\n\nclass _Reasons(Enum):\n BACKEND_MISSING_INTERFACE = 0\n UNSUPPORTED_HASH = 1\n UNSUPPORTED_CIPHER = 2\n UNSUPPORTED_PADDING = 3\n UNSUPPORTED_MGF = 4\n UNSUPPORTED_PUBLIC_KEY_ALGORITHM = 5\n UNSUPPORTED_ELLIPTIC_CURVE = 6\n UNSUPPORTED_SERIALIZATION = 7\n UNSUPPORTED_X509 = 8\n\n\nclass UnsupportedAlgorithm(Exception):\n def __init__(self, message, reason=None):\n super(UnsupportedAlgorithm, self).__init__(message)\n self._reason = reason\n\n\nclass AlreadyFinalized(Exception):\n pass\n\n\nclass AlreadyUpdated(Exception):\n pass\n\n\nclass NotYetFinalized(Exception):\n pass\n\n\nclass InvalidTag(Exception):\n pass\n\n\nclass InvalidSignature(Exception):\n pass\n\n\nclass InternalError(Exception):\n pass\n\n\nclass InvalidKey(Exception):\n pass\n\n\nclass InvalidToken(Exception):\n pass\n", "path": "src/cryptography/exceptions.py"}]}
| 737 | 275 |
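The accepted patch replaces anonymous `object()` sentinels with `enum.Enum` members, whose `repr()` carries the member name. The standalone sketch below (not code from the cryptography repository) shows why that turns the unreadable `assert <object object at 0x...> is not <object object at 0x...>` failures quoted in the issue into self-describing ones.

```python
# Illustrative only: contrasts the pre-patch and post-patch sentinel styles.
from enum import Enum


class OldReasons:                  # pre-patch style: anonymous sentinels
    UNSUPPORTED_HASH = object()
    UNSUPPORTED_CIPHER = object()


class NewReasons(Enum):            # post-patch style: named Enum members
    UNSUPPORTED_HASH = 1
    UNSUPPORTED_CIPHER = 2


print(repr(OldReasons.UNSUPPORTED_HASH))   # <object object at 0x7f...>, unhelpful
print(repr(NewReasons.UNSUPPORTED_HASH))   # <NewReasons.UNSUPPORTED_HASH: 1>

# A comparison between the wrong tags now fails with a message naming both
# members instead of two anonymous addresses.
assert NewReasons.UNSUPPORTED_HASH is not NewReasons.UNSUPPORTED_CIPHER
```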
gh_patches_debug_3149
|
rasdani/github-patches
|
git_diff
|
huggingface__dataset-viewer-479
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use main instead of master to load the datasets
The main branch in https://github.com/huggingface/datasets is now `main`, not `master` anymore. Note that it's backward compatible, so no need to hurry
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `services/worker/src/worker/constants.py`
Content:
```
1 from typing import Optional
2
3 DEFAULT_ASSETS_BASE_URL: str = "assets"
4 DEFAULT_ASSETS_DIRECTORY: None = None
5 DEFAULT_DATASETS_REVISION: str = "master"
6 DEFAULT_HF_TOKEN: Optional[str] = None
7 DEFAULT_LOG_LEVEL: str = "INFO"
8 DEFAULT_MAX_JOB_RETRIES: int = 3
9 DEFAULT_MAX_JOBS_PER_DATASET: int = 1
10 DEFAULT_MAX_LOAD_PCT: int = 70
11 DEFAULT_MAX_MEMORY_PCT: int = 80
12 DEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000
13 DEFAULT_MIN_CELL_BYTES: int = 100
14 DEFAULT_MONGO_CACHE_DATABASE: str = "datasets_server_cache"
15 DEFAULT_MONGO_QUEUE_DATABASE: str = "datasets_server_queue"
16 DEFAULT_MONGO_URL: str = "mongodb://localhost:27018"
17 DEFAULT_ROWS_MAX_BYTES: int = 1_000_000
18 DEFAULT_ROWS_MAX_NUMBER: int = 100
19 DEFAULT_ROWS_MIN_NUMBER: int = 10
20 DEFAULT_WORKER_SLEEP_SECONDS: int = 15
21 DEFAULT_WORKER_QUEUE: str = "datasets"
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/services/worker/src/worker/constants.py b/services/worker/src/worker/constants.py
--- a/services/worker/src/worker/constants.py
+++ b/services/worker/src/worker/constants.py
@@ -2,7 +2,7 @@
DEFAULT_ASSETS_BASE_URL: str = "assets"
DEFAULT_ASSETS_DIRECTORY: None = None
-DEFAULT_DATASETS_REVISION: str = "master"
+DEFAULT_DATASETS_REVISION: str = "main"
DEFAULT_HF_TOKEN: Optional[str] = None
DEFAULT_LOG_LEVEL: str = "INFO"
DEFAULT_MAX_JOB_RETRIES: int = 3
|
{"golden_diff": "diff --git a/services/worker/src/worker/constants.py b/services/worker/src/worker/constants.py\n--- a/services/worker/src/worker/constants.py\n+++ b/services/worker/src/worker/constants.py\n@@ -2,7 +2,7 @@\n \n DEFAULT_ASSETS_BASE_URL: str = \"assets\"\n DEFAULT_ASSETS_DIRECTORY: None = None\n-DEFAULT_DATASETS_REVISION: str = \"master\"\n+DEFAULT_DATASETS_REVISION: str = \"main\"\n DEFAULT_HF_TOKEN: Optional[str] = None\n DEFAULT_LOG_LEVEL: str = \"INFO\"\n DEFAULT_MAX_JOB_RETRIES: int = 3\n", "issue": "Use main instead of master to load the datasets\nThe main branch in https://github.com/huggingface/datasets is now `main`, not `master` anymore. Note that it's backward compatible, so no need to hurry\n", "before_files": [{"content": "from typing import Optional\n\nDEFAULT_ASSETS_BASE_URL: str = \"assets\"\nDEFAULT_ASSETS_DIRECTORY: None = None\nDEFAULT_DATASETS_REVISION: str = \"master\"\nDEFAULT_HF_TOKEN: Optional[str] = None\nDEFAULT_LOG_LEVEL: str = \"INFO\"\nDEFAULT_MAX_JOB_RETRIES: int = 3\nDEFAULT_MAX_JOBS_PER_DATASET: int = 1\nDEFAULT_MAX_LOAD_PCT: int = 70\nDEFAULT_MAX_MEMORY_PCT: int = 80\nDEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000\nDEFAULT_MIN_CELL_BYTES: int = 100\nDEFAULT_MONGO_CACHE_DATABASE: str = \"datasets_server_cache\"\nDEFAULT_MONGO_QUEUE_DATABASE: str = \"datasets_server_queue\"\nDEFAULT_MONGO_URL: str = \"mongodb://localhost:27018\"\nDEFAULT_ROWS_MAX_BYTES: int = 1_000_000\nDEFAULT_ROWS_MAX_NUMBER: int = 100\nDEFAULT_ROWS_MIN_NUMBER: int = 10\nDEFAULT_WORKER_SLEEP_SECONDS: int = 15\nDEFAULT_WORKER_QUEUE: str = \"datasets\"\n", "path": "services/worker/src/worker/constants.py"}], "after_files": [{"content": "from typing import Optional\n\nDEFAULT_ASSETS_BASE_URL: str = \"assets\"\nDEFAULT_ASSETS_DIRECTORY: None = None\nDEFAULT_DATASETS_REVISION: str = \"main\"\nDEFAULT_HF_TOKEN: Optional[str] = None\nDEFAULT_LOG_LEVEL: str = \"INFO\"\nDEFAULT_MAX_JOB_RETRIES: int = 3\nDEFAULT_MAX_JOBS_PER_DATASET: int = 1\nDEFAULT_MAX_LOAD_PCT: int = 70\nDEFAULT_MAX_MEMORY_PCT: int = 80\nDEFAULT_MAX_SIZE_FALLBACK: int = 100_000_000\nDEFAULT_MIN_CELL_BYTES: int = 100\nDEFAULT_MONGO_CACHE_DATABASE: str = \"datasets_server_cache\"\nDEFAULT_MONGO_QUEUE_DATABASE: str = \"datasets_server_queue\"\nDEFAULT_MONGO_URL: str = \"mongodb://localhost:27018\"\nDEFAULT_ROWS_MAX_BYTES: int = 1_000_000\nDEFAULT_ROWS_MAX_NUMBER: int = 100\nDEFAULT_ROWS_MIN_NUMBER: int = 10\nDEFAULT_WORKER_SLEEP_SECONDS: int = 15\nDEFAULT_WORKER_QUEUE: str = \"datasets\"\n", "path": "services/worker/src/worker/constants.py"}]}
| 590 | 132 |
gh_patches_debug_11689
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-1140
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry fails to install p4python due to read-only files
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: Windows 10
- **Poetry version**: poetry 0.12.2
- **Link of a [pyproject.toml Gist](https://gist.github.com/epage/5f28e3b1e5eeb9a30697363e369a5fde)
- **Link of a [backtrace Gist](https://gist.github.com/epage/2584ad981ff5d9f175d55212b0192987)
## Issue
In digging into the problem, it seems that p4python's files are all marked read-only, causing windows to error when trying to delete them via `shutil.rmtree` which is invoked by poetry's custom temp directory handling.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/utils/helpers.py`
Content:
```
1 import os
2 import re
3 import shutil
4 import stat
5 import tempfile
6
7 from contextlib import contextmanager
8 from typing import List
9 from typing import Optional
10
11 from poetry.config.config import Config
12 from poetry.utils._compat import Path
13 from poetry.version import Version
14
15
16 try:
17 from collections.abc import Mapping
18 except ImportError:
19 from collections import Mapping
20
21
22 _canonicalize_regex = re.compile("[-_]+")
23
24
25 def canonicalize_name(name): # type: (str) -> str
26 return _canonicalize_regex.sub("-", name).lower()
27
28
29 def module_name(name): # type: (str) -> str
30 return canonicalize_name(name).replace(".", "_").replace("-", "_")
31
32
33 def normalize_version(version): # type: (str) -> str
34 return str(Version(version))
35
36
37 @contextmanager
38 def temporary_directory(*args, **kwargs):
39 try:
40 from tempfile import TemporaryDirectory
41
42 with TemporaryDirectory(*args, **kwargs) as name:
43 yield name
44 except ImportError:
45 name = tempfile.mkdtemp(*args, **kwargs)
46
47 yield name
48
49 shutil.rmtree(name)
50
51
52 def parse_requires(requires): # type: (str) -> List[str]
53 lines = requires.split("\n")
54
55 requires_dist = []
56 in_section = False
57 current_marker = None
58 for line in lines:
59 line = line.strip()
60 if not line:
61 if in_section:
62 in_section = False
63
64 continue
65
66 if line.startswith("["):
67 # extras or conditional dependencies
68 marker = line.lstrip("[").rstrip("]")
69 if ":" not in marker:
70 extra, marker = marker, None
71 else:
72 extra, marker = marker.split(":")
73
74 if extra:
75 if marker:
76 marker = '{} and extra == "{}"'.format(marker, extra)
77 else:
78 marker = 'extra == "{}"'.format(extra)
79
80 if marker:
81 current_marker = marker
82
83 continue
84
85 if current_marker:
86 line = "{} ; {}".format(line, current_marker)
87
88 requires_dist.append(line)
89
90 return requires_dist
91
92
93 def get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]
94 cert = config.get("certificates.{}.cert".format(repository_name))
95 if cert:
96 return Path(cert)
97 else:
98 return None
99
100
101 def get_client_cert(config, repository_name): # type: (Config, str) -> Optional[Path]
102 client_cert = config.get("certificates.{}.client-cert".format(repository_name))
103 if client_cert:
104 return Path(client_cert)
105 else:
106 return None
107
108
109 def _on_rm_error(func, path, exc_info):
110 if not os.path.exists(path):
111 return
112
113 os.chmod(path, stat.S_IWRITE)
114 func(path)
115
116
117 def safe_rmtree(path):
118 if Path(path).is_symlink():
119 return os.unlink(str(path))
120
121 shutil.rmtree(path, onerror=_on_rm_error)
122
123
124 def merge_dicts(d1, d2):
125 for k, v in d2.items():
126 if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):
127 merge_dicts(d1[k], d2[k])
128 else:
129 d1[k] = d2[k]
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/poetry/utils/helpers.py b/poetry/utils/helpers.py
--- a/poetry/utils/helpers.py
+++ b/poetry/utils/helpers.py
@@ -34,19 +34,18 @@
return str(Version(version))
+def _del_ro(action, name, exc):
+ os.chmod(name, stat.S_IWRITE)
+ os.remove(name)
+
+
@contextmanager
def temporary_directory(*args, **kwargs):
- try:
- from tempfile import TemporaryDirectory
-
- with TemporaryDirectory(*args, **kwargs) as name:
- yield name
- except ImportError:
- name = tempfile.mkdtemp(*args, **kwargs)
+ name = tempfile.mkdtemp(*args, **kwargs)
- yield name
+ yield name
- shutil.rmtree(name)
+ shutil.rmtree(name, onerror=_del_ro)
def parse_requires(requires): # type: (str) -> List[str]
|
{"golden_diff": "diff --git a/poetry/utils/helpers.py b/poetry/utils/helpers.py\n--- a/poetry/utils/helpers.py\n+++ b/poetry/utils/helpers.py\n@@ -34,19 +34,18 @@\n return str(Version(version))\n \n \n+def _del_ro(action, name, exc):\n+ os.chmod(name, stat.S_IWRITE)\n+ os.remove(name)\n+\n+\n @contextmanager\n def temporary_directory(*args, **kwargs):\n- try:\n- from tempfile import TemporaryDirectory\n-\n- with TemporaryDirectory(*args, **kwargs) as name:\n- yield name\n- except ImportError:\n- name = tempfile.mkdtemp(*args, **kwargs)\n+ name = tempfile.mkdtemp(*args, **kwargs)\n \n- yield name\n+ yield name\n \n- shutil.rmtree(name)\n+ shutil.rmtree(name, onerror=_del_ro)\n \n \n def parse_requires(requires): # type: (str) -> List[str]\n", "issue": "Poetry fails to install p4python due to read-only files\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Windows 10\r\n- **Poetry version**: poetry 0.12.2\r\n- **Link of a [pyproject.toml Gist](https://gist.github.com/epage/5f28e3b1e5eeb9a30697363e369a5fde)\r\n- **Link of a [backtrace Gist](https://gist.github.com/epage/2584ad981ff5d9f175d55212b0192987)\r\n\r\n## Issue\r\n\r\nIn digging into the problem, it seems that p4python's files are all marked read-only, causing windows to error when trying to delete them via `shutil.rmtree` which is invoked by poetry's custom temp directory handling.\n", "before_files": [{"content": "import os\nimport re\nimport shutil\nimport stat\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom typing import List\nfrom typing import Optional\n\nfrom poetry.config.config import Config\nfrom poetry.utils._compat import Path\nfrom poetry.version import Version\n\n\ntry:\n from collections.abc import Mapping\nexcept ImportError:\n from collections import Mapping\n\n\n_canonicalize_regex = re.compile(\"[-_]+\")\n\n\ndef canonicalize_name(name): # type: (str) -> str\n return _canonicalize_regex.sub(\"-\", name).lower()\n\n\ndef module_name(name): # type: (str) -> str\n return canonicalize_name(name).replace(\".\", \"_\").replace(\"-\", \"_\")\n\n\ndef normalize_version(version): # type: (str) -> str\n return str(Version(version))\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n try:\n from tempfile import TemporaryDirectory\n\n with TemporaryDirectory(*args, **kwargs) as name:\n yield name\n except ImportError:\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name)\n\n\ndef parse_requires(requires): # type: (str) -> List[str]\n lines = requires.split(\"\\n\")\n\n requires_dist = []\n in_section = False\n current_marker = None\n for line in lines:\n line = line.strip()\n if not line:\n if in_section:\n in_section = False\n\n continue\n\n if 
line.startswith(\"[\"):\n # extras or conditional dependencies\n marker = line.lstrip(\"[\").rstrip(\"]\")\n if \":\" not in marker:\n extra, marker = marker, None\n else:\n extra, marker = marker.split(\":\")\n\n if extra:\n if marker:\n marker = '{} and extra == \"{}\"'.format(marker, extra)\n else:\n marker = 'extra == \"{}\"'.format(extra)\n\n if marker:\n current_marker = marker\n\n continue\n\n if current_marker:\n line = \"{} ; {}\".format(line, current_marker)\n\n requires_dist.append(line)\n\n return requires_dist\n\n\ndef get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n cert = config.get(\"certificates.{}.cert\".format(repository_name))\n if cert:\n return Path(cert)\n else:\n return None\n\n\ndef get_client_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n client_cert = config.get(\"certificates.{}.client-cert\".format(repository_name))\n if client_cert:\n return Path(client_cert)\n else:\n return None\n\n\ndef _on_rm_error(func, path, exc_info):\n if not os.path.exists(path):\n return\n\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef safe_rmtree(path):\n if Path(path).is_symlink():\n return os.unlink(str(path))\n\n shutil.rmtree(path, onerror=_on_rm_error)\n\n\ndef merge_dicts(d1, d2):\n for k, v in d2.items():\n if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):\n merge_dicts(d1[k], d2[k])\n else:\n d1[k] = d2[k]\n", "path": "poetry/utils/helpers.py"}], "after_files": [{"content": "import os\nimport re\nimport shutil\nimport stat\nimport tempfile\n\nfrom contextlib import contextmanager\nfrom typing import List\nfrom typing import Optional\n\nfrom poetry.config.config import Config\nfrom poetry.utils._compat import Path\nfrom poetry.version import Version\n\n\ntry:\n from collections.abc import Mapping\nexcept ImportError:\n from collections import Mapping\n\n\n_canonicalize_regex = re.compile(\"[-_]+\")\n\n\ndef canonicalize_name(name): # type: (str) -> str\n return _canonicalize_regex.sub(\"-\", name).lower()\n\n\ndef module_name(name): # type: (str) -> str\n return canonicalize_name(name).replace(\".\", \"_\").replace(\"-\", \"_\")\n\n\ndef normalize_version(version): # type: (str) -> str\n return str(Version(version))\n\n\ndef _del_ro(action, name, exc):\n os.chmod(name, stat.S_IWRITE)\n os.remove(name)\n\n\n@contextmanager\ndef temporary_directory(*args, **kwargs):\n name = tempfile.mkdtemp(*args, **kwargs)\n\n yield name\n\n shutil.rmtree(name, onerror=_del_ro)\n\n\ndef parse_requires(requires): # type: (str) -> List[str]\n lines = requires.split(\"\\n\")\n\n requires_dist = []\n in_section = False\n current_marker = None\n for line in lines:\n line = line.strip()\n if not line:\n if in_section:\n in_section = False\n\n continue\n\n if line.startswith(\"[\"):\n # extras or conditional dependencies\n marker = line.lstrip(\"[\").rstrip(\"]\")\n if \":\" not in marker:\n extra, marker = marker, None\n else:\n extra, marker = marker.split(\":\")\n\n if extra:\n if marker:\n marker = '{} and extra == \"{}\"'.format(marker, extra)\n else:\n marker = 'extra == \"{}\"'.format(extra)\n\n if marker:\n current_marker = marker\n\n continue\n\n if current_marker:\n line = \"{} ; {}\".format(line, current_marker)\n\n requires_dist.append(line)\n\n return requires_dist\n\n\ndef get_cert(config, repository_name): # type: (Config, str) -> Optional[Path]\n cert = config.get(\"certificates.{}.cert\".format(repository_name))\n if cert:\n return Path(cert)\n else:\n return None\n\n\ndef get_client_cert(config, 
repository_name): # type: (Config, str) -> Optional[Path]\n client_cert = config.get(\"certificates.{}.client-cert\".format(repository_name))\n if client_cert:\n return Path(client_cert)\n else:\n return None\n\n\ndef _on_rm_error(func, path, exc_info):\n if not os.path.exists(path):\n return\n\n os.chmod(path, stat.S_IWRITE)\n func(path)\n\n\ndef safe_rmtree(path):\n if Path(path).is_symlink():\n return os.unlink(str(path))\n\n shutil.rmtree(path, onerror=_on_rm_error)\n\n\ndef merge_dicts(d1, d2):\n for k, v in d2.items():\n if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):\n merge_dicts(d1[k], d2[k])\n else:\n d1[k] = d2[k]\n", "path": "poetry/utils/helpers.py"}]}
| 1,656 | 214 |
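The fix above works around Windows' refusal to delete read-only files (the p4python case from the issue) by handing `shutil.rmtree` an error hook that clears the read-only bit and retries the failed operation. Here is a self-contained sketch of that pattern, illustrative rather than poetry's own code.

```python
# Creates a temporary tree containing a read-only file, then removes it the
# way the patch does: rmtree retries each failed path after chmod-ing it.
import os
import shutil
import stat
import tempfile


def clear_readonly_and_retry(func, path, exc_info):
    # Called by shutil.rmtree for every path it failed to delete.
    os.chmod(path, stat.S_IWRITE)
    func(path)


tmp = tempfile.mkdtemp()
locked = os.path.join(tmp, "readonly.txt")
with open(locked, "w") as fh:
    fh.write("data")
os.chmod(locked, stat.S_IREAD)  # simulate p4python's read-only files

# A plain shutil.rmtree(tmp) raises PermissionError on Windows at this point.
shutil.rmtree(tmp, onerror=clear_readonly_and_retry)
print(os.path.exists(tmp))  # False
```

On Python 3.12 and later the `onerror` parameter is deprecated in favour of the equivalent `onexc`, but the pattern is the same.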
gh_patches_debug_23207
|
rasdani/github-patches
|
git_diff
|
getsentry__snuba-1794
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Snuba cleanup for sentry onpremise
### Environment
Sentry self-hosted 21.3.0 (based on docker-compose from here https://github.com/getsentry/onpremise/blob/21.3.0/docker-compose.yml)
### Steps to Reproduce
1) Setup all containers and up snuba-cleanup container
2) Check logs for snuba-cleanup: Every 5 minutes in log - `Dropped 0 partitions on None`
It looks like variable CLICKHOUSE_HOST is ignored here
https://github.com/getsentry/snuba/blob/41d7fe76aaf8a594e8f6e84015607dcde3f67ad4/snuba/cli/cleanup.py#L13
After manual run command in container - `snuba cleanup --clickhouse-host CLICKHOUSE_HOST_HERE --dry-run True`
i got `Dropped 0 partitions on CLICKHOUSE_HOST_HERE`
### Expected Result
Pass variable https://github.com/getsentry/onpremise/blob/bdd2686021cfea07507bc07d2756ac34a775c680/docker-compose.yml#L44 into cleanup command
### Actual Result
variable is `None` instead of clickhouse host
I'm not sure whether this is a bug or not.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snuba/cli/cleanup.py`
Content:
```
1 from typing import Optional
2
3 import click
4
5 from snuba.clusters.cluster import ClickhouseClientSettings
6 from snuba.datasets.storages import StorageKey
7 from snuba.datasets.storages.factory import get_writable_storage
8 from snuba.environment import setup_logging
9
10
11 @click.command()
12 @click.option(
13 "--clickhouse-host", help="Clickhouse server to write to.",
14 )
15 @click.option(
16 "--clickhouse-port", type=int, help="Clickhouse native port to write to.",
17 )
18 @click.option(
19 "--dry-run",
20 type=bool,
21 default=True,
22 help="If true, only print which partitions would be dropped.",
23 )
24 @click.option(
25 "--storage",
26 "storage_name",
27 default="events",
28 type=click.Choice(["events", "errors", "transactions"]),
29 help="The storage to target",
30 )
31 @click.option("--log-level", help="Logging level to use.")
32 def cleanup(
33 *,
34 clickhouse_host: Optional[str],
35 clickhouse_port: Optional[int],
36 dry_run: bool,
37 storage_name: str,
38 log_level: Optional[str] = None,
39 ) -> None:
40 """
41 Deletes stale partitions for ClickHouse tables
42 """
43
44 setup_logging(log_level)
45
46 from snuba.cleanup import run_cleanup, logger
47 from snuba.clickhouse.native import ClickhousePool
48
49 storage = get_writable_storage(StorageKey(storage_name))
50
51 (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()
52
53 database = storage.get_cluster().get_database()
54
55 if clickhouse_host and clickhouse_port:
56 connection = ClickhousePool(
57 clickhouse_host,
58 clickhouse_port,
59 clickhouse_user,
60 clickhouse_password,
61 database,
62 )
63 elif not storage.get_cluster().is_single_node():
64 raise click.ClickException("Provide ClickHouse host and port for cleanup")
65 else:
66 connection = storage.get_cluster().get_query_connection(
67 ClickhouseClientSettings.CLEANUP
68 )
69
70 num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)
71 logger.info("Dropped %s partitions on %s" % (num_dropped, clickhouse_host))
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/snuba/cli/cleanup.py b/snuba/cli/cleanup.py
--- a/snuba/cli/cleanup.py
+++ b/snuba/cli/cleanup.py
@@ -50,7 +50,8 @@
(clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()
- database = storage.get_cluster().get_database()
+ cluster = storage.get_cluster()
+ database = cluster.get_database()
if clickhouse_host and clickhouse_port:
connection = ClickhousePool(
@@ -60,12 +61,12 @@
clickhouse_password,
database,
)
- elif not storage.get_cluster().is_single_node():
+ elif not cluster.is_single_node():
raise click.ClickException("Provide ClickHouse host and port for cleanup")
else:
- connection = storage.get_cluster().get_query_connection(
+ connection = cluster.get_query_connection(
ClickhouseClientSettings.CLEANUP
)
num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)
- logger.info("Dropped %s partitions on %s" % (num_dropped, clickhouse_host))
+ logger.info("Dropped %s partitions on %s" % (num_dropped, cluster))
|
{"golden_diff": "diff --git a/snuba/cli/cleanup.py b/snuba/cli/cleanup.py\n--- a/snuba/cli/cleanup.py\n+++ b/snuba/cli/cleanup.py\n@@ -50,7 +50,8 @@\n \n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n \n- database = storage.get_cluster().get_database()\n+ cluster = storage.get_cluster()\n+ database = cluster.get_database()\n \n if clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n@@ -60,12 +61,12 @@\n clickhouse_password,\n database,\n )\n- elif not storage.get_cluster().is_single_node():\n+ elif not cluster.is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n- connection = storage.get_cluster().get_query_connection(\n+ connection = cluster.get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n \n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n- logger.info(\"Dropped %s partitions on %s\" % (num_dropped, clickhouse_host))\n+ logger.info(\"Dropped %s partitions on %s\" % (num_dropped, cluster))\n", "issue": "Snuba cleanup for sentry onpremise\n### Environment\r\n\r\nSentry self-hosted 21.3.0 (based on docker-compose from here https://github.com/getsentry/onpremise/blob/21.3.0/docker-compose.yml)\r\n\r\n### Steps to Reproduce\r\n\r\n1) Setup all containers and up snuba-cleanup container\r\n2) Check logs for snuba-cleanup: Every 5 minutes in log - `Dropped 0 partitions on None`\r\nIt looks like variable CLICKHOUSE_HOST is ignored here\r\nhttps://github.com/getsentry/snuba/blob/41d7fe76aaf8a594e8f6e84015607dcde3f67ad4/snuba/cli/cleanup.py#L13\r\nAfter manual run command in container - `snuba cleanup --clickhouse-host CLICKHOUSE_HOST_HERE --dry-run True`\r\ni got `Dropped 0 partitions on CLICKHOUSE_HOST_HERE`\r\n\r\n### Expected Result\r\n\r\nPass variable https://github.com/getsentry/onpremise/blob/bdd2686021cfea07507bc07d2756ac34a775c680/docker-compose.yml#L44 into cleanup command\r\n\r\n### Actual Result\r\n\r\nvariable is `None` instead of clickhouse host\r\n\r\nI'am not sure, bug this or not.\n", "before_files": [{"content": "from typing import Optional\n\nimport click\n\nfrom snuba.clusters.cluster import ClickhouseClientSettings\nfrom snuba.datasets.storages import StorageKey\nfrom snuba.datasets.storages.factory import get_writable_storage\nfrom snuba.environment import setup_logging\n\n\[email protected]()\[email protected](\n \"--clickhouse-host\", help=\"Clickhouse server to write to.\",\n)\[email protected](\n \"--clickhouse-port\", type=int, help=\"Clickhouse native port to write to.\",\n)\[email protected](\n \"--dry-run\",\n type=bool,\n default=True,\n help=\"If true, only print which partitions would be dropped.\",\n)\[email protected](\n \"--storage\",\n \"storage_name\",\n default=\"events\",\n type=click.Choice([\"events\", \"errors\", \"transactions\"]),\n help=\"The storage to target\",\n)\[email protected](\"--log-level\", help=\"Logging level to use.\")\ndef cleanup(\n *,\n clickhouse_host: Optional[str],\n clickhouse_port: Optional[int],\n dry_run: bool,\n storage_name: str,\n log_level: Optional[str] = None,\n) -> None:\n \"\"\"\n Deletes stale partitions for ClickHouse tables\n \"\"\"\n\n setup_logging(log_level)\n\n from snuba.cleanup import run_cleanup, logger\n from snuba.clickhouse.native import ClickhousePool\n\n storage = get_writable_storage(StorageKey(storage_name))\n\n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n\n database = storage.get_cluster().get_database()\n\n if 
clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n clickhouse_host,\n clickhouse_port,\n clickhouse_user,\n clickhouse_password,\n database,\n )\n elif not storage.get_cluster().is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n connection = storage.get_cluster().get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n\n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n logger.info(\"Dropped %s partitions on %s\" % (num_dropped, clickhouse_host))\n", "path": "snuba/cli/cleanup.py"}], "after_files": [{"content": "from typing import Optional\n\nimport click\n\nfrom snuba.clusters.cluster import ClickhouseClientSettings\nfrom snuba.datasets.storages import StorageKey\nfrom snuba.datasets.storages.factory import get_writable_storage\nfrom snuba.environment import setup_logging\n\n\[email protected]()\[email protected](\n \"--clickhouse-host\", help=\"Clickhouse server to write to.\",\n)\[email protected](\n \"--clickhouse-port\", type=int, help=\"Clickhouse native port to write to.\",\n)\[email protected](\n \"--dry-run\",\n type=bool,\n default=True,\n help=\"If true, only print which partitions would be dropped.\",\n)\[email protected](\n \"--storage\",\n \"storage_name\",\n default=\"events\",\n type=click.Choice([\"events\", \"errors\", \"transactions\"]),\n help=\"The storage to target\",\n)\[email protected](\"--log-level\", help=\"Logging level to use.\")\ndef cleanup(\n *,\n clickhouse_host: Optional[str],\n clickhouse_port: Optional[int],\n dry_run: bool,\n storage_name: str,\n log_level: Optional[str] = None,\n) -> None:\n \"\"\"\n Deletes stale partitions for ClickHouse tables\n \"\"\"\n\n setup_logging(log_level)\n\n from snuba.cleanup import run_cleanup, logger\n from snuba.clickhouse.native import ClickhousePool\n\n storage = get_writable_storage(StorageKey(storage_name))\n\n (clickhouse_user, clickhouse_password,) = storage.get_cluster().get_credentials()\n\n cluster = storage.get_cluster()\n database = cluster.get_database()\n\n if clickhouse_host and clickhouse_port:\n connection = ClickhousePool(\n clickhouse_host,\n clickhouse_port,\n clickhouse_user,\n clickhouse_password,\n database,\n )\n elif not cluster.is_single_node():\n raise click.ClickException(\"Provide ClickHouse host and port for cleanup\")\n else:\n connection = cluster.get_query_connection(\n ClickhouseClientSettings.CLEANUP\n )\n\n num_dropped = run_cleanup(connection, storage, database, dry_run=dry_run)\n logger.info(\"Dropped %s partitions on %s\" % (num_dropped, cluster))\n", "path": "snuba/cli/cleanup.py"}]}
| 1,154 | 274 |
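The accepted change resolves the cluster once and interpolates that object into the log line instead of the `--clickhouse-host` option, which is `None` whenever the connection is built from the single-node cluster settings (exactly the docker-compose situation reported). The toy sketch below shows the logging pitfall; the `Cluster` class and its `__str__` are invented so the example prints something readable, and none of this is snuba's actual code.

```python
from typing import Optional


class Cluster:
    """Stand-in for a configured single-node ClickHouse cluster."""

    def __init__(self, address: str) -> None:
        self.address = address

    def __str__(self) -> str:
        return f"cluster({self.address})"


def cleanup(clickhouse_host: Optional[str], cluster: Cluster) -> None:
    # Pre-patch style: logs the CLI option, which may be None.
    print("before: Dropped 0 partitions on %s" % clickhouse_host)
    # Post-patch style: logs the object the connection actually came from.
    print("after:  Dropped 0 partitions on %s" % cluster)


cleanup(clickhouse_host=None, cluster=Cluster("clickhouse:9000"))
# before: Dropped 0 partitions on None
# after:  Dropped 0 partitions on cluster(clickhouse:9000)
```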
gh_patches_debug_11354
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-2523
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Nikola generates invalid html5 when markdown footnote extension is used
The [default output type](http://pythonhosted.org/Markdown/reference.html#markdown) of the python markdown library is xhtml1. The 4 templates that ship with Nikola have <!DOCTYPE html> which defines them as html5, so I'm assuming that we're intending to generate html5.
When the footnote markdown extension is used, it generates invalid html5 according to the w3c validator.
`<a class="footnote-ref" href="..." rev="footnote">...</a>`
(rev="footnote" is valid html4, but not html5)
The markdown library indicates that this is invalid html5 (https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/footnotes.py#L149) so we can trigger the correct behaviour by setting the output_format.
Given the markdown library does not make much use of the output_format variable, I don't think this is likely to materially change the output for many people at all - https://github.com/waylan/Python-Markdown/search?utf8=%E2%9C%93&q=output_format)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/markdown/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2016 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Implementation of compile_html based on markdown."""
28
29 from __future__ import unicode_literals
30
31 import io
32 import os
33
34 try:
35 from markdown import markdown
36 except ImportError:
37 markdown = None # NOQA
38 nikola_extension = None
39 gist_extension = None
40 podcast_extension = None
41
42 from nikola.plugin_categories import PageCompiler
43 from nikola.utils import makedirs, req_missing, write_metadata
44
45
46 class CompileMarkdown(PageCompiler):
47 """Compile Markdown into HTML."""
48
49 name = "markdown"
50 friendly_name = "Markdown"
51 demote_headers = True
52 extensions = []
53 site = None
54
55 def set_site(self, site):
56 """Set Nikola site."""
57 super(CompileMarkdown, self).set_site(site)
58 self.config_dependencies = []
59 for plugin_info in self.get_compiler_extensions():
60 self.config_dependencies.append(plugin_info.name)
61 self.extensions.append(plugin_info.plugin_object)
62 plugin_info.plugin_object.short_help = plugin_info.description
63
64 self.config_dependencies.append(str(sorted(site.config.get("MARKDOWN_EXTENSIONS"))))
65
66 def compile_html(self, source, dest, is_two_file=True):
67 """Compile source file into HTML and save as dest."""
68 if markdown is None:
69 req_missing(['markdown'], 'build this site (compile Markdown)')
70 makedirs(os.path.dirname(dest))
71 self.extensions += self.site.config.get("MARKDOWN_EXTENSIONS")
72 try:
73 post = self.site.post_per_input_file[source]
74 except KeyError:
75 post = None
76 with io.open(dest, "w+", encoding="utf8") as out_file:
77 with io.open(source, "r", encoding="utf8") as in_file:
78 data = in_file.read()
79 if not is_two_file:
80 _, data = self.split_metadata(data)
81 output = markdown(data, self.extensions)
82 output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))
83 out_file.write(output)
84 if post is None:
85 if shortcode_deps:
86 self.logger.error(
87 "Cannot save dependencies for post {0} due to unregistered source file name",
88 source)
89 else:
90 post._depfile[dest] += shortcode_deps
91
92 def create_post(self, path, **kw):
93 """Create a new post."""
94 content = kw.pop('content', None)
95 onefile = kw.pop('onefile', False)
96 # is_page is not used by create_post as of now.
97 kw.pop('is_page', False)
98
99 metadata = {}
100 metadata.update(self.default_metadata)
101 metadata.update(kw)
102 makedirs(os.path.dirname(path))
103 if not content.endswith('\n'):
104 content += '\n'
105 with io.open(path, "w+", encoding="utf8") as fd:
106 if onefile:
107 fd.write('<!-- \n')
108 fd.write(write_metadata(metadata))
109 fd.write('-->\n\n')
110 fd.write(content)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py
--- a/nikola/plugins/compile/markdown/__init__.py
+++ b/nikola/plugins/compile/markdown/__init__.py
@@ -78,7 +78,7 @@
data = in_file.read()
if not is_two_file:
_, data = self.split_metadata(data)
- output = markdown(data, self.extensions)
+ output = markdown(data, self.extensions, output_format="html5")
output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))
out_file.write(output)
if post is None:
|
{"golden_diff": "diff --git a/nikola/plugins/compile/markdown/__init__.py b/nikola/plugins/compile/markdown/__init__.py\n--- a/nikola/plugins/compile/markdown/__init__.py\n+++ b/nikola/plugins/compile/markdown/__init__.py\n@@ -78,7 +78,7 @@\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n- output = markdown(data, self.extensions)\n+ output = markdown(data, self.extensions, output_format=\"html5\")\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n", "issue": "Nikola generates invalid html5 when markdown footnote extension is used\nThe [default output type](http://pythonhosted.org/Markdown/reference.html#markdown) of the python markdown library is xhtml1. The 4 templates that ship with Nikola have <!DOCTYPE html> which defines them as html5, so I'm assuming that we're intending to generate html5.\n\nWhen the footnote markdown extension is used, it generates invalid html5 according to the w3c validator.\n\n`<a class=\"footnote-ref\" href=\"...\" rev=\"footnote\">...</a>`\n\n(rev=\"footnote\" is valid html4, but not html5)\n\nThe markdown library indicates that this is invalid html5 (https://github.com/waylan/Python-Markdown/blob/master/markdown/extensions/footnotes.py#L149) so we can trigger the correct behaviour by setting the output_format.\n\nGiven the markdown library does not make much use of the output_format variable, I don't think this is likely to materially change the output for many people at all - https://github.com/waylan/Python-Markdown/search?utf8=%E2%9C%93&q=output_format)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on markdown.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport io\nimport os\n\ntry:\n from markdown import markdown\nexcept ImportError:\n markdown = None # NOQA\n nikola_extension = None\n gist_extension = None\n podcast_extension = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing, write_metadata\n\n\nclass CompileMarkdown(PageCompiler):\n \"\"\"Compile Markdown into HTML.\"\"\"\n\n name = \"markdown\"\n friendly_name = \"Markdown\"\n demote_headers = True\n extensions = []\n site = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n super(CompileMarkdown, self).set_site(site)\n self.config_dependencies = []\n for plugin_info in self.get_compiler_extensions():\n self.config_dependencies.append(plugin_info.name)\n self.extensions.append(plugin_info.plugin_object)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.config_dependencies.append(str(sorted(site.config.get(\"MARKDOWN_EXTENSIONS\"))))\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile source file into HTML and save as dest.\"\"\"\n if markdown is None:\n req_missing(['markdown'], 'build this site (compile Markdown)')\n makedirs(os.path.dirname(dest))\n self.extensions += self.site.config.get(\"MARKDOWN_EXTENSIONS\")\n try:\n post = self.site.post_per_input_file[source]\n except KeyError:\n post = None\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n output = markdown(data, self.extensions)\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} due to unregistered source file name\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write('<!-- \\n')\n fd.write(write_metadata(metadata))\n fd.write('-->\\n\\n')\n fd.write(content)\n", "path": "nikola/plugins/compile/markdown/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2016 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above 
copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on markdown.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport io\nimport os\n\ntry:\n from markdown import markdown\nexcept ImportError:\n markdown = None # NOQA\n nikola_extension = None\n gist_extension = None\n podcast_extension = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing, write_metadata\n\n\nclass CompileMarkdown(PageCompiler):\n \"\"\"Compile Markdown into HTML.\"\"\"\n\n name = \"markdown\"\n friendly_name = \"Markdown\"\n demote_headers = True\n extensions = []\n site = None\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n super(CompileMarkdown, self).set_site(site)\n self.config_dependencies = []\n for plugin_info in self.get_compiler_extensions():\n self.config_dependencies.append(plugin_info.name)\n self.extensions.append(plugin_info.plugin_object)\n plugin_info.plugin_object.short_help = plugin_info.description\n\n self.config_dependencies.append(str(sorted(site.config.get(\"MARKDOWN_EXTENSIONS\"))))\n\n def compile_html(self, source, dest, is_two_file=True):\n \"\"\"Compile source file into HTML and save as dest.\"\"\"\n if markdown is None:\n req_missing(['markdown'], 'build this site (compile Markdown)')\n makedirs(os.path.dirname(dest))\n self.extensions += self.site.config.get(\"MARKDOWN_EXTENSIONS\")\n try:\n post = self.site.post_per_input_file[source]\n except KeyError:\n post = None\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n data = in_file.read()\n if not is_two_file:\n _, data = self.split_metadata(data)\n output = markdown(data, self.extensions, output_format=\"html5\")\n output, shortcode_deps = self.site.apply_shortcodes(output, filename=source, with_dependencies=True, extra_context=dict(post=post))\n out_file.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} due to unregistered source file name\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write('<!-- \\n')\n fd.write(write_metadata(metadata))\n fd.write('-->\\n\\n')\n fd.write(content)\n", "path": "nikola/plugins/compile/markdown/__init__.py"}]}
| 1,619 | 160 |
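The one-line fix asks Python-Markdown for HTML5 serialisation, which suppresses the `rev="footnote"` attribute that the W3C validator rejects. The sketch below assumes a Markdown release contemporary with the issue (the 2.x series); newer 3.x releases changed the footnote markup and the accepted `output_format` values, so the exact strings will differ there.

```python
# Illustrative comparison of default vs. html5 output with the footnote extension.
from markdown import markdown

text = "Body text.[^1]\n\n[^1]: A footnote."

default_out = markdown(text, extensions=["markdown.extensions.footnotes"])
html5_out = markdown(
    text,
    extensions=["markdown.extensions.footnotes"],
    output_format="html5",
)

print('rev="footnote"' in default_out)  # True on Markdown 2.x: invalid HTML5
print('rev="footnote"' in html5_out)    # False: passes the HTML5 validator
```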
gh_patches_debug_10501
|
rasdani/github-patches
|
git_diff
|
pypa__virtualenv-1964
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'NoneType' object has no attribute 'group' with virtualenv 20.0.32 on CygWin
**Issue**
We are also testing on CygWin (using appveyor), and since this morning, tox fails creating a virtualenv with an AttributeError. Unfortunately, tox does not display the entire traceback, but just the exception.
Since virtualenv 20.0.32 was released just 4h ago, I suspect that to be the culprit.
From https://ci.appveyor.com/project/andy-maier/pywbem/builds/35526352/job/l3k6a2vb39bweqsw#L936:
```
if "%UNIX_PATH%"=="C:\cygwin64\bin" ( bash -c "which tox && tox -vv -e %TOX_ENV% && echo appveyor.yml: tox rc=$?" )
/usr/bin/tox
using tox.ini: /cygdrive/c/projects/pywbem/tox.ini (pid 1822)
using tox-3.20.0 from /usr/lib/python3.8/site-packages/tox/__init__.py (pid 1822)
skipping sdist step
cygwin64_py38 uses /usr/bin/python3.8.exe
cygwin64_py38 start: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38
cygwin64_py38 cannot reuse: no previous config /cygdrive/c/projects/pywbem/.tox/cygwin64_py38/.tox-config1
cygwin64_py38 create: /cygdrive/c/projects/pywbem/.tox/cygwin64_py38
setting PATH=/cygdrive/c/projects/pywbem/.tox/cygwin64_py38/bin:/usr/bin:/cygdrive/c/Windows/system32:/cygdrive/c/Windows:/cygdrive/c/ProgramData/chocolatey/bin
[1825] /cygdrive/c/projects/pywbem/.tox$ /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38
AttributeError: 'NoneType' object has no attribute 'group'
ERROR: invocation failed (exit code 1)
ERROR: InvocationError for command /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 (exited with code 1)
cygwin64_py38 finish: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 after 4.23 seconds
```
I am setting up a direct invocation of virtualenv in that environment, in order to get the full traceback, and will post that here.
**Environment**
Provide at least:
- OS: CygWin64
- ``pip list`` of the host python where ``virtualenv`` is installed:
```console
See next comment, below
```
**Output of the virtual environment creation**
Make sure to run the creation with `-vvv --with-traceback`:
```console
See next comment, below
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/virtualenv/activation/via_template.py`
Content:
```
1 from __future__ import absolute_import, unicode_literals
2
3 import os
4 import re
5 import sys
6 import sysconfig
7 from abc import ABCMeta, abstractmethod
8
9 from six import add_metaclass
10
11 from virtualenv.util.six import ensure_text
12
13 from .activator import Activator
14
15 if sys.version_info >= (3, 7):
16 from importlib.resources import read_binary
17 else:
18 from importlib_resources import read_binary
19
20
21 @add_metaclass(ABCMeta)
22 class ViaTemplateActivator(Activator):
23 @abstractmethod
24 def templates(self):
25 raise NotImplementedError
26
27 def generate(self, creator):
28 dest_folder = creator.bin_dir
29 replacements = self.replacements(creator, dest_folder)
30 generated = self._generate(replacements, self.templates(), dest_folder, creator)
31 if self.flag_prompt is not None:
32 creator.pyenv_cfg["prompt"] = self.flag_prompt
33 return generated
34
35 def replacements(self, creator, dest_folder):
36 current_platform = sysconfig.get_platform()
37 platforms = ["mingw", "cygwin", "msys"]
38 if any(platform in current_platform for platform in platforms):
39 pattern = re.compile("^([A-Za-z]):(.*)")
40 match = pattern.match(str(creator.dest))
41 virtual_env = "/" + match.group(1).lower() + match.group(2)
42 else:
43 virtual_env = str(creator.dest)
44 return {
45 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt,
46 "__VIRTUAL_ENV__": ensure_text(virtual_env),
47 "__VIRTUAL_NAME__": creator.env_name,
48 "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),
49 "__PATH_SEP__": ensure_text(os.pathsep),
50 }
51
52 def _generate(self, replacements, templates, to_folder, creator):
53 generated = []
54 for template in templates:
55 text = self.instantiate_template(replacements, template, creator)
56 dest = to_folder / self.as_name(template)
57 # use write_bytes to avoid platform specific line normalization (\n -> \r\n)
58 dest.write_bytes(text.encode("utf-8"))
59 generated.append(dest)
60 return generated
61
62 def as_name(self, template):
63 return template.name
64
65 def instantiate_template(self, replacements, template, creator):
66 # read content as binary to avoid platform specific line normalization (\n -> \r\n)
67 binary = read_binary(self.__module__, str(template))
68 text = binary.decode("utf-8", errors="strict")
69 for key, value in replacements.items():
70 value = self._repr_unicode(creator, value)
71 text = text.replace(key, value)
72 return text
73
74 @staticmethod
75 def _repr_unicode(creator, value):
76 # by default we just let it be unicode
77 return value
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py
--- a/src/virtualenv/activation/via_template.py
+++ b/src/virtualenv/activation/via_template.py
@@ -38,7 +38,10 @@
if any(platform in current_platform for platform in platforms):
pattern = re.compile("^([A-Za-z]):(.*)")
match = pattern.match(str(creator.dest))
- virtual_env = "/" + match.group(1).lower() + match.group(2)
+ if match:
+ virtual_env = "/" + match.group(1).lower() + match.group(2)
+ else:
+ virtual_env = str(creator.dest)
else:
virtual_env = str(creator.dest)
return {
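
A minimal sketch of why the unguarded `match.group(1)` call fails on Cygwin, assuming the POSIX-style destination path reported in the tox log above (the snippet is illustrative, not part of the dataset record):

```python
import re

# The drive-letter pattern from via_template.py only matches Windows-style paths.
pattern = re.compile("^([A-Za-z]):(.*)")

print(pattern.match(r"C:\projects\pywbem\.tox\cygwin64_py38"))          # <re.Match object ...>
print(pattern.match("/cygdrive/c/projects/pywbem/.tox/cygwin64_py38"))  # None

# Calling .group(1) on that None is what raised
# "AttributeError: 'NoneType' object has no attribute 'group'".
```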
|
{"golden_diff": "diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -38,7 +38,10 @@\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n- virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n+ if match:\n+ virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n+ else:\n+ virtual_env = str(creator.dest)\n else:\n virtual_env = str(creator.dest)\n return {\n", "issue": "AttributeError: 'NoneType' object has no attribute 'group' with virtualenv 20.0.32 on CygWin\n**Issue**\r\nWe are also testing on CygWin (using appveyor), and since this morning, tox fails creating a virtualenv with an AttributeError. Unfortunately, tox does not display the entire traceback, but just the exception.\r\nSince virtualenv 20.0.32 was released just 4h ago, I suspect that to be the culprit.\r\n\r\nFrom https://ci.appveyor.com/project/andy-maier/pywbem/builds/35526352/job/l3k6a2vb39bweqsw#L936:\r\n```\r\nif \"%UNIX_PATH%\"==\"C:\\cygwin64\\bin\" ( bash -c \"which tox && tox -vv -e %TOX_ENV% && echo appveyor.yml: tox rc=$?\" )\r\n/usr/bin/tox\r\nusing tox.ini: /cygdrive/c/projects/pywbem/tox.ini (pid 1822)\r\nusing tox-3.20.0 from /usr/lib/python3.8/site-packages/tox/__init__.py (pid 1822)\r\nskipping sdist step\r\ncygwin64_py38 uses /usr/bin/python3.8.exe\r\ncygwin64_py38 start: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38\r\ncygwin64_py38 cannot reuse: no previous config /cygdrive/c/projects/pywbem/.tox/cygwin64_py38/.tox-config1\r\ncygwin64_py38 create: /cygdrive/c/projects/pywbem/.tox/cygwin64_py38\r\nsetting PATH=/cygdrive/c/projects/pywbem/.tox/cygwin64_py38/bin:/usr/bin:/cygdrive/c/Windows/system32:/cygdrive/c/Windows:/cygdrive/c/ProgramData/chocolatey/bin\r\n[1825] /cygdrive/c/projects/pywbem/.tox$ /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38\r\nAttributeError: 'NoneType' object has no attribute 'group'\r\nERROR: invocation failed (exit code 1)\r\nERROR: InvocationError for command /usr/bin/python3.8.exe -m virtualenv --no-download --python /usr/bin/python3.8.exe cygwin64_py38 (exited with code 1)\r\ncygwin64_py38 finish: getenv /cygdrive/c/projects/pywbem/.tox/cygwin64_py38 after 4.23 seconds\r\n```\r\n\r\nI am setting up a direct invocation of virtualenv in that environment, in order to get the full traceback, and will post that here.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: CygWin64\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n ```console\r\n See next comment, below\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nMake sure to run the creation with `-vvv --with-traceback`:\r\n\r\n```console\r\nSee next comment, below\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport re\nimport sys\nimport sysconfig\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_binary\nelse:\n from importlib_resources import read_binary\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, 
creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n generated = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return generated\n\n def replacements(self, creator, dest_folder):\n current_platform = sysconfig.get_platform()\n platforms = [\"mingw\", \"cygwin\", \"msys\"]\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n else:\n virtual_env = str(creator.dest)\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(virtual_env),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n generated = []\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n # use write_bytes to avoid platform specific line normalization (\\n -> \\r\\n)\n dest.write_bytes(text.encode(\"utf-8\"))\n generated.append(dest)\n return generated\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read content as binary to avoid platform specific line normalization (\\n -> \\r\\n)\n binary = read_binary(self.__module__, str(template))\n text = binary.decode(\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport re\nimport sys\nimport sysconfig\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_binary\nelse:\n from importlib_resources import read_binary\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n generated = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return generated\n\n def replacements(self, creator, dest_folder):\n current_platform = sysconfig.get_platform()\n platforms = [\"mingw\", \"cygwin\", \"msys\"]\n if any(platform in current_platform for platform in platforms):\n pattern = re.compile(\"^([A-Za-z]):(.*)\")\n match = pattern.match(str(creator.dest))\n if match:\n virtual_env = \"/\" + match.group(1).lower() + match.group(2)\n else:\n virtual_env = str(creator.dest)\n else:\n virtual_env = str(creator.dest)\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(virtual_env),\n \"__VIRTUAL_NAME__\": creator.env_name,\n 
\"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n generated = []\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n # use write_bytes to avoid platform specific line normalization (\\n -> \\r\\n)\n dest.write_bytes(text.encode(\"utf-8\"))\n generated.append(dest)\n return generated\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read content as binary to avoid platform specific line normalization (\\n -> \\r\\n)\n binary = read_binary(self.__module__, str(template))\n text = binary.decode(\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]}
| 1,734 | 175 |
gh_patches_debug_22280
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-878
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix download_mmdet_configs
No need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/models/mmdet/download_configs.py`
Content:
```
1 __all__ = ["download_mmdet_configs"]
2
3 from icevision.imports import *
4 from icevision.utils import *
5
6 VERSION = "v2.10.0"
7 BASE_URL = "https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags"
8
9
10 def download_mmdet_configs() -> Path:
11 save_dir = get_root_dir() / f"mmdetection_configs"
12 save_dir.mkdir(parents=True, exist_ok=True)
13
14 download_path = save_dir / f"{VERSION}.zip"
15 if not download_path.exists():
16 logger.info("Downloading mmdet configs")
17
18 download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
19
20 return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py
--- a/icevision/models/mmdet/download_configs.py
+++ b/icevision/models/mmdet/download_configs.py
@@ -9,12 +9,26 @@
def download_mmdet_configs() -> Path:
save_dir = get_root_dir() / f"mmdetection_configs"
- save_dir.mkdir(parents=True, exist_ok=True)
+ mmdet_config_path = save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
download_path = save_dir / f"{VERSION}.zip"
- if not download_path.exists():
- logger.info("Downloading mmdet configs")
- download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
+ if mmdet_config_path.exists():
+ logger.info(
+ f"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}"
+ )
+ elif download_path.exists():
+ # The zip file was downloaded by not extracted yet
+ # Extract zip file
+ logger.info(f"Extracting the {VERSION}.zip file.")
+ save_dir = Path(download_path).parent
+ shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))
+ else:
+ save_dir.mkdir(parents=True, exist_ok=True)
- return save_dir / f"mmdetection_configs-{VERSION[1:]}/configs"
+ download_path = save_dir / f"{VERSION}.zip"
+ if not download_path.exists():
+ logger.info("Downloading mmdet configs")
+ download_and_extract(f"{BASE_URL}/{VERSION}", download_path)
+
+ return mmdet_config_path
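
A minimal, self-contained sketch of the extract-if-already-downloaded behaviour the patch introduces; the function name and error handling below are illustrative, not the library's API:

```python
from pathlib import Path
import shutil

def ensure_configs(save_dir: Path, version: str = "v2.10.0") -> Path:
    config_dir = save_dir / f"mmdetection_configs-{version[1:]}/configs"
    zip_path = save_dir / f"{version}.zip"
    if config_dir.exists():
        # Already extracted (e.g. an offline Kaggle kernel): nothing to do.
        return config_dir
    if zip_path.exists():
        # Archive was downloaded earlier but never unpacked: extract it locally.
        shutil.unpack_archive(filename=str(zip_path), extract_dir=str(save_dir))
        return config_dir
    raise FileNotFoundError(f"Neither {config_dir} nor {zip_path} exists; a download would be required")
```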
|
{"golden_diff": "diff --git a/icevision/models/mmdet/download_configs.py b/icevision/models/mmdet/download_configs.py\n--- a/icevision/models/mmdet/download_configs.py\n+++ b/icevision/models/mmdet/download_configs.py\n@@ -9,12 +9,26 @@\n \n def download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n- save_dir.mkdir(parents=True, exist_ok=True)\n \n+ mmdet_config_path = save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n download_path = save_dir / f\"{VERSION}.zip\"\n- if not download_path.exists():\n- logger.info(\"Downloading mmdet configs\")\n \n- download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n+ if mmdet_config_path.exists():\n+ logger.info(\n+ f\"The mmdet config folder already exists. No need to downloaded it. Path : {mmdet_config_path}\"\n+ )\n+ elif download_path.exists():\n+ # The zip file was downloaded by not extracted yet\n+ # Extract zip file\n+ logger.info(f\"Extracting the {VERSION}.zip file.\")\n+ save_dir = Path(download_path).parent\n+ shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))\n+ else:\n+ save_dir.mkdir(parents=True, exist_ok=True)\n \n- return save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n+ download_path = save_dir / f\"{VERSION}.zip\"\n+ if not download_path.exists():\n+ logger.info(\"Downloading mmdet configs\")\n+ download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n+\n+ return mmdet_config_path\n", "issue": "Fix download_mmdet_configs\nNo need to download the zip file if it exists. This will solve the issue encountered in the Kaggle offline installation.\r\n\n", "before_files": [{"content": "__all__ = [\"download_mmdet_configs\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\n\nVERSION = \"v2.10.0\"\nBASE_URL = \"https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags\"\n\n\ndef download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n save_dir.mkdir(parents=True, exist_ok=True)\n\n download_path = save_dir / f\"{VERSION}.zip\"\n if not download_path.exists():\n logger.info(\"Downloading mmdet configs\")\n\n download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n\n return save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n", "path": "icevision/models/mmdet/download_configs.py"}], "after_files": [{"content": "__all__ = [\"download_mmdet_configs\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\n\nVERSION = \"v2.10.0\"\nBASE_URL = \"https://codeload.github.com/airctic/mmdetection_configs/zip/refs/tags\"\n\n\ndef download_mmdet_configs() -> Path:\n save_dir = get_root_dir() / f\"mmdetection_configs\"\n\n mmdet_config_path = save_dir / f\"mmdetection_configs-{VERSION[1:]}/configs\"\n download_path = save_dir / f\"{VERSION}.zip\"\n\n if mmdet_config_path.exists():\n logger.info(\n f\"The mmdet config folder already exists. No need to downloaded it. 
Path : {mmdet_config_path}\"\n )\n elif download_path.exists():\n # The zip file was downloaded by not extracted yet\n # Extract zip file\n logger.info(f\"Extracting the {VERSION}.zip file.\")\n save_dir = Path(download_path).parent\n shutil.unpack_archive(filename=str(download_path), extract_dir=str(save_dir))\n else:\n save_dir.mkdir(parents=True, exist_ok=True)\n\n download_path = save_dir / f\"{VERSION}.zip\"\n if not download_path.exists():\n logger.info(\"Downloading mmdet configs\")\n download_and_extract(f\"{BASE_URL}/{VERSION}\", download_path)\n\n return mmdet_config_path\n", "path": "icevision/models/mmdet/download_configs.py"}]}
| 496 | 388 |
gh_patches_debug_28282
|
rasdani/github-patches
|
git_diff
|
getpelican__pelican-1040
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow nice URLs on devserver
When you publish the website, it is quite easy to implement nice URLs. For example, an nginx configuration like this would provide you with nice URLs for your website:
```
index index.php index.html index.htm index.shtml;
root /var/www/example.com/public_html;
try_files $uri $uri.html $uri/index.html index.html;
```
Now the problem is that the devserver does not have the same behaviour, and so far I am not aware of any way to configure it to do this.
```
/output/index.html http://localhost:8000
/output/contact.html http://localhost:8000/contact
/output/sales/index.html http://localhost:8000/sales
/output/sales/index.html http://localhost:8000/sales/ (yes the same)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/server.py`
Content:
```
1 from __future__ import print_function
2 import sys
3 try:
4 import SimpleHTTPServer as srvmod
5 except ImportError:
6 import http.server as srvmod # NOQA
7
8 try:
9 import SocketServer as socketserver
10 except ImportError:
11 import socketserver # NOQA
12
13 PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000
14
15 Handler = srvmod.SimpleHTTPRequestHandler
16
17 try:
18 httpd = socketserver.TCPServer(("", PORT), Handler)
19 except OSError as e:
20 print("Could not listen on port", PORT)
21 sys.exit(getattr(e, 'exitcode', 1))
22
23
24 print("serving at port", PORT)
25 try:
26 httpd.serve_forever()
27 except KeyboardInterrupt as e:
28 print("shutting down server")
29 httpd.socket.close()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pelican/server.py b/pelican/server.py
--- a/pelican/server.py
+++ b/pelican/server.py
@@ -1,5 +1,7 @@
from __future__ import print_function
+import os
import sys
+import logging
try:
import SimpleHTTPServer as srvmod
except ImportError:
@@ -11,19 +13,36 @@
import socketserver # NOQA
PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000
+SUFFIXES = ['','.html','/index.html']
-Handler = srvmod.SimpleHTTPRequestHandler
+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):
+ def do_GET(self):
+ # we are trying to detect the file by having a fallback mechanism
+ r = None
+ for suffix in SUFFIXES:
+ if not hasattr(self,'original_path'):
+ self.original_path = self.path
+ self.path = self.original_path + suffix
+ path = self.translate_path(self.path)
+ if os.path.exists(path):
+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)
+ if r is not None:
+ break
+ logging.warning("Unable to find %s file." % self.path)
+ return r
+
+Handler = ComplexHTTPRequestHandler
try:
httpd = socketserver.TCPServer(("", PORT), Handler)
except OSError as e:
- print("Could not listen on port", PORT)
+ logging.error("Could not listen on port %s" % PORT)
sys.exit(getattr(e, 'exitcode', 1))
-print("serving at port", PORT)
+logging.info("serving at port %s" % PORT)
try:
httpd.serve_forever()
except KeyboardInterrupt as e:
- print("shutting down server")
- httpd.socket.close()
+ logging.info("shutting down server")
+ httpd.socket.close()
\ No newline at end of file
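
The handler above mirrors nginx's `try_files $uri $uri.html $uri/index.html` fallback. A minimal sketch of the same resolution logic outside the server, with an illustrative function name:

```python
import os
from typing import Optional

SUFFIXES = ["", ".html", "/index.html"]

def resolve(document_root: str, url_path: str) -> Optional[str]:
    # Try the raw path, then path.html, then path/index.html, like nginx try_files.
    for suffix in SUFFIXES:
        relative = (url_path.strip("/") + suffix).lstrip("/")
        candidate = os.path.join(document_root, relative)
        if os.path.isfile(candidate):
            return candidate
    return None

# e.g. resolve("output", "/contact") -> "output/contact.html" if that file exists,
#      resolve("output", "/sales")   -> "output/sales/index.html"
```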
|
{"golden_diff": "diff --git a/pelican/server.py b/pelican/server.py\n--- a/pelican/server.py\n+++ b/pelican/server.py\n@@ -1,5 +1,7 @@\n from __future__ import print_function\n+import os\n import sys\n+import logging\n try:\n import SimpleHTTPServer as srvmod\n except ImportError:\n@@ -11,19 +13,36 @@\n import socketserver # NOQA\n \n PORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n+SUFFIXES = ['','.html','/index.html']\n \n-Handler = srvmod.SimpleHTTPRequestHandler\n+class ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n+ def do_GET(self):\n+ # we are trying to detect the file by having a fallback mechanism\n+ r = None\n+ for suffix in SUFFIXES:\n+ if not hasattr(self,'original_path'):\n+ self.original_path = self.path\n+ self.path = self.original_path + suffix\n+ path = self.translate_path(self.path)\n+ if os.path.exists(path):\n+ r = srvmod.SimpleHTTPRequestHandler.do_GET(self)\n+ if r is not None:\n+ break\n+ logging.warning(\"Unable to find %s file.\" % self.path)\n+ return r\n+\n+Handler = ComplexHTTPRequestHandler\n \n try:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\n except OSError as e:\n- print(\"Could not listen on port\", PORT)\n+ logging.error(\"Could not listen on port %s\" % PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n \n \n-print(\"serving at port\", PORT)\n+logging.info(\"serving at port %s\" % PORT)\n try:\n httpd.serve_forever()\n except KeyboardInterrupt as e:\n- print(\"shutting down server\")\n- httpd.socket.close()\n+ logging.info(\"shutting down server\")\n+ httpd.socket.close()\n\\ No newline at end of file\n", "issue": "Allow nice URLs on devserver\nWhen you publish the website is quite easy to implement nice URLs. For example an nginx configuraiton like this would provide you with nice URLs for your website:\n\n```\nindex index.php index.html index.htm index.shtml;\nroot /var/www/example.com/public_html;\ntry_files $uri $uri.html $uri/index.html index.html;\n```\n\nNow the problem is tha the devserver does not have the save behaviour, and so far I am now aware of any way to configure it to do this.\n\n```\n/output/index.html http://localhost:8000\n/output/contact.html http://localhost:8000/contact\n/output/sales/index.html http://localhost:8000/sales\n/output/sales/index.html http://localhost:8000/sales/ (yes the same) \n```\n\n", "before_files": [{"content": "from __future__ import print_function\nimport sys\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\n\nHandler = srvmod.SimpleHTTPRequestHandler\n\ntry:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\nexcept OSError as e:\n print(\"Could not listen on port\", PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nprint(\"serving at port\", PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n print(\"shutting down server\")\n httpd.socket.close()\n", "path": "pelican/server.py"}], "after_files": [{"content": "from __future__ import print_function\nimport os\nimport sys\nimport logging\ntry:\n import SimpleHTTPServer as srvmod\nexcept ImportError:\n import http.server as srvmod # NOQA\n\ntry:\n import SocketServer as socketserver\nexcept ImportError:\n import socketserver # NOQA\n\nPORT = len(sys.argv) == 2 and int(sys.argv[1]) or 8000\nSUFFIXES = ['','.html','/index.html']\n\nclass ComplexHTTPRequestHandler(srvmod.SimpleHTTPRequestHandler):\n def 
do_GET(self):\n # we are trying to detect the file by having a fallback mechanism\n r = None\n for suffix in SUFFIXES:\n if not hasattr(self,'original_path'):\n self.original_path = self.path\n self.path = self.original_path + suffix\n path = self.translate_path(self.path)\n if os.path.exists(path):\n r = srvmod.SimpleHTTPRequestHandler.do_GET(self)\n if r is not None:\n break\n logging.warning(\"Unable to find %s file.\" % self.path)\n return r\n\nHandler = ComplexHTTPRequestHandler\n\ntry:\n httpd = socketserver.TCPServer((\"\", PORT), Handler)\nexcept OSError as e:\n logging.error(\"Could not listen on port %s\" % PORT)\n sys.exit(getattr(e, 'exitcode', 1))\n\n\nlogging.info(\"serving at port %s\" % PORT)\ntry:\n httpd.serve_forever()\nexcept KeyboardInterrupt as e:\n logging.info(\"shutting down server\")\n httpd.socket.close()", "path": "pelican/server.py"}]}
| 671 | 440 |
gh_patches_debug_25372
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-739
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logical Failure when combining TimeLimit Wrapper with IterateOverBenchmarks
## 🐛 Bug
If an environment is first wrapped with TimeLimit before IterateOverBenchmarks, it will not return "done" as True.
## To Reproduce
Steps to reproduce the behavior:
```
env = TimeLimit(env, step_limit)
env = CycleOverBenchmarks(env, benchmarks)
_, done, _, _ = env.reset()
while not done:
_, done, _, _ = env.step(0)
```
This will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior.
## Additional context
Assign it to me; I will fix it when I get time.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `compiler_gym/wrappers/time_limit.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 from typing import Optional
6
7 from compiler_gym.envs import CompilerEnv
8 from compiler_gym.util.gym_type_hints import ActionType
9 from compiler_gym.wrappers.core import CompilerEnvWrapper
10
11
12 class TimeLimit(CompilerEnvWrapper):
13 """A step-limited wrapper that is compatible with CompilerGym.
14
15 Example usage:
16
17 >>> env = TimeLimit(env, max_episode_steps=3)
18 >>> env.reset()
19 >>> _, _, done, _ = env.step(0)
20 >>> _, _, done, _ = env.step(0)
21 >>> _, _, done, _ = env.step(0)
22 >>> done
23 True
24 """
25
26 def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):
27 super().__init__(env=env)
28 if max_episode_steps is None and self.env.spec is not None:
29 max_episode_steps = env.spec.max_episode_steps
30 if self.env.spec is not None:
31 self.env.spec.max_episode_steps = max_episode_steps
32 self._max_episode_steps = max_episode_steps
33 self._elapsed_steps = None
34
35 def step(self, action: ActionType, **kwargs):
36 assert (
37 self._elapsed_steps is not None
38 ), "Cannot call env.step() before calling reset()"
39 observation, reward, done, info = self.env.step(action, **kwargs)
40 self._elapsed_steps += 1
41 if self._elapsed_steps >= self._max_episode_steps:
42 info["TimeLimit.truncated"] = not done
43 done = True
44 return observation, reward, done, info
45
46 def reset(self, **kwargs):
47 self._elapsed_steps = 0
48 return self.env.reset(**kwargs)
49
50 def fork(self) -> "TimeLimit":
51 """Fork the wrapped environment.
52
53 The time limit state of the forked environment is the same as the source
54 state.
55 """
56 fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)
57 fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access
58 return fkd
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py
--- a/compiler_gym/wrappers/time_limit.py
+++ b/compiler_gym/wrappers/time_limit.py
@@ -2,7 +2,7 @@
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Optional
+from typing import Iterable, Optional
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
@@ -32,12 +32,13 @@
self._max_episode_steps = max_episode_steps
self._elapsed_steps = None
- def step(self, action: ActionType, **kwargs):
+ def multistep(self, actions: Iterable[ActionType], **kwargs):
+ actions = list(actions)
assert (
self._elapsed_steps is not None
), "Cannot call env.step() before calling reset()"
- observation, reward, done, info = self.env.step(action, **kwargs)
- self._elapsed_steps += 1
+ observation, reward, done, info = self.env.multistep(actions, **kwargs)
+ self._elapsed_steps += len(actions)
if self._elapsed_steps >= self._max_episode_steps:
info["TimeLimit.truncated"] = not done
done = True
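
Judging by the patch, outer wrappers such as `CycleOverBenchmarks` drive the wrapped environment through `multistep()`, which the original `step()`-only override never intercepted; counting elapsed steps in `multistep()` closes that gap. A hedged usage sketch with the wrapper ordering from the issue (assumes the LLVM environment and cBench dataset are installed; note that `reset()` returns only an observation in the standard gym API, unlike the four-tuple in the issue snippet):

```python
import compiler_gym
from compiler_gym.wrappers import CycleOverBenchmarks, TimeLimit

env = compiler_gym.make("llvm-v0")
benchmarks = list(env.datasets["cbench-v1"].benchmarks())[:3]  # illustrative benchmark list
env = TimeLimit(env, max_episode_steps=10)   # inner wrapper, as in the issue
env = CycleOverBenchmarks(env, benchmarks)   # outer wrapper
env.reset()
done = False
while not done:
    _, _, done, _ = env.step(0)              # with the patched TimeLimit this loop terminates
env.close()
```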
|
{"golden_diff": "diff --git a/compiler_gym/wrappers/time_limit.py b/compiler_gym/wrappers/time_limit.py\n--- a/compiler_gym/wrappers/time_limit.py\n+++ b/compiler_gym/wrappers/time_limit.py\n@@ -2,7 +2,7 @@\n #\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n-from typing import Optional\n+from typing import Iterable, Optional\n \n from compiler_gym.envs import CompilerEnv\n from compiler_gym.util.gym_type_hints import ActionType\n@@ -32,12 +32,13 @@\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n \n- def step(self, action: ActionType, **kwargs):\n+ def multistep(self, actions: Iterable[ActionType], **kwargs):\n+ actions = list(actions)\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n- observation, reward, done, info = self.env.step(action, **kwargs)\n- self._elapsed_steps += 1\n+ observation, reward, done, info = self.env.multistep(actions, **kwargs)\n+ self._elapsed_steps += len(actions)\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n", "issue": "Logical Failure when combing TimeLimit Wrapper with IterateOverBenchmarks\n## \ud83d\udc1b Bug\r\n\r\nIf an environment is first wrapper with TimeLimit before IterateOverBenchmarks, it will not return \"done\" as True. \r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```\r\nenv = TimeLimit(env, step_limit) \r\nenv = CycleOverBenchmarks(env, benchmarks) \r\n_, done, _, _ = env.reset()\r\nwhile not done:\r\n _, done, _, _ = env.step(0) \r\n```\r\nThis will not finish. However, if the TimeLimit happens after the Cycle, it has normal behavior. \r\n\r\n\r\n## Additional context\r\n\r\nAssign it to me, I will fix it when I got time. \n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Optional\n\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.gym_type_hints import ActionType\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\n\nclass TimeLimit(CompilerEnvWrapper):\n \"\"\"A step-limited wrapper that is compatible with CompilerGym.\n\n Example usage:\n\n >>> env = TimeLimit(env, max_episode_steps=3)\n >>> env.reset()\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> done\n True\n \"\"\"\n\n def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):\n super().__init__(env=env)\n if max_episode_steps is None and self.env.spec is not None:\n max_episode_steps = env.spec.max_episode_steps\n if self.env.spec is not None:\n self.env.spec.max_episode_steps = max_episode_steps\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n\n def step(self, action: ActionType, **kwargs):\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n observation, reward, done, info = self.env.step(action, **kwargs)\n self._elapsed_steps += 1\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n def fork(self) -> \"TimeLimit\":\n \"\"\"Fork the wrapped environment.\n\n The time limit state of the forked environment is the same as the source\n state.\n \"\"\"\n fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)\n fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access\n return fkd\n", "path": "compiler_gym/wrappers/time_limit.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nfrom typing import Iterable, Optional\n\nfrom compiler_gym.envs import CompilerEnv\nfrom compiler_gym.util.gym_type_hints import ActionType\nfrom compiler_gym.wrappers.core import CompilerEnvWrapper\n\n\nclass TimeLimit(CompilerEnvWrapper):\n \"\"\"A step-limited wrapper that is compatible with CompilerGym.\n\n Example usage:\n\n >>> env = TimeLimit(env, max_episode_steps=3)\n >>> env.reset()\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> _, _, done, _ = env.step(0)\n >>> done\n True\n \"\"\"\n\n def __init__(self, env: CompilerEnv, max_episode_steps: Optional[int] = None):\n super().__init__(env=env)\n if max_episode_steps is None and self.env.spec is not None:\n max_episode_steps = env.spec.max_episode_steps\n if self.env.spec is not None:\n self.env.spec.max_episode_steps = max_episode_steps\n self._max_episode_steps = max_episode_steps\n self._elapsed_steps = None\n\n def multistep(self, actions: Iterable[ActionType], **kwargs):\n actions = list(actions)\n assert (\n self._elapsed_steps is not None\n ), \"Cannot call env.step() before calling reset()\"\n observation, reward, done, info = self.env.multistep(actions, **kwargs)\n self._elapsed_steps += len(actions)\n if self._elapsed_steps >= self._max_episode_steps:\n info[\"TimeLimit.truncated\"] = not done\n done = True\n return observation, reward, done, info\n\n def reset(self, **kwargs):\n self._elapsed_steps = 0\n return self.env.reset(**kwargs)\n\n def fork(self) -> \"TimeLimit\":\n \"\"\"Fork the wrapped environment.\n\n The time limit state of the forked environment is the same as the source\n state.\n \"\"\"\n fkd = type(self)(env=self.env.fork(), max_episode_steps=self._max_episode_steps)\n fkd._elapsed_steps = self._elapsed_steps # pylint: disable=protected-access\n return fkd\n", "path": "compiler_gym/wrappers/time_limit.py"}]}
| 1,020 | 304 |