problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_20873 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-2223 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix translations in package
The compilemessages step for geotrek and mapentity is missing somewhere
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/python3
2 import os
3 import distutils.command.build
4 from setuptools import setup, find_packages
5
6 here = os.path.abspath(os.path.dirname(__file__))
7
8
9 class BuildCommand(distutils.command.build.build):
10 def run(self):
11 print("before")
12 distutils.command.build.build.run(self)
13 print("after")
14 from django.core.management import call_command
15 curdir = os.getcwd()
16 os.chdir(os.path.join(curdir, 'geotrek'))
17 call_command('compilemessages')
18 os.chdir(os.path.join(curdir, 'mapentity'))
19 call_command('compilemessages')
20 os.chdir(curdir)
21
22
23 setup(
24 name='geotrek',
25 version=open(os.path.join(here, 'VERSION')).read().strip(),
26 author='Makina Corpus',
27 author_email='[email protected]',
28 url='http://makina-corpus.com',
29 description="Geotrek",
30 long_description=(open(os.path.join(here, 'README.rst')).read() + '\n\n'
31 + open(os.path.join(here, 'docs', 'changelog.rst')).read()),
32 scripts=['manage.py'],
33 install_requires=[
34 # pinned by requirements.txt
35 'psycopg2',
36 'docutils',
37 'GDAL',
38 'Pillow',
39 'easy-thumbnails',
40 'simplekml',
41 'pygal',
42 'django-extended-choices',
43 'django-multiselectfield',
44 'geojson',
45 'tif2geojson',
46 'pytz',
47 'djangorestframework-gis',
48 'drf-dynamic-fields',
49 'django-rest-swagger',
50 'django-embed-video',
51 'xlrd',
52 'landez',
53 'redis',
54 'celery',
55 'django-celery-results',
56 'requests[security]',
57 'drf-extensions',
58 'django-colorfield',
59 'factory_boy',
60 ],
61 cmdclass={"build": BuildCommand},
62 include_package_data=True,
63 license='BSD, see LICENSE file.',
64 packages=find_packages(),
65 classifiers=['Natural Language :: English',
66 'Environment :: Web Environment',
67 'Framework :: Django',
68 'Development Status :: 5 - Production/Stable',
69 'Programming Language :: Python :: 2.7'],
70 )
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,23 +1,24 @@
#!/usr/bin/python3
import os
import distutils.command.build
+from pathlib import Path
from setuptools import setup, find_packages
+from shutil import copy
here = os.path.abspath(os.path.dirname(__file__))
class BuildCommand(distutils.command.build.build):
def run(self):
- print("before")
distutils.command.build.build.run(self)
- print("after")
from django.core.management import call_command
curdir = os.getcwd()
- os.chdir(os.path.join(curdir, 'geotrek'))
- call_command('compilemessages')
- os.chdir(os.path.join(curdir, 'mapentity'))
- call_command('compilemessages')
- os.chdir(curdir)
+ for subdir in ('geotrek', 'mapentity'):
+ os.chdir(subdir)
+ call_command('compilemessages')
+ for path in Path('.').rglob('*.mo'):
+ copy(path, os.path.join(curdir, self.build_lib, subdir, path))
+ os.chdir(curdir)
setup(
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,23 +1,24 @@\n #!/usr/bin/python3\n import os\n import distutils.command.build\n+from pathlib import Path\n from setuptools import setup, find_packages\n+from shutil import copy\n \n here = os.path.abspath(os.path.dirname(__file__))\n \n \n class BuildCommand(distutils.command.build.build):\n def run(self):\n- print(\"before\")\n distutils.command.build.build.run(self)\n- print(\"after\")\n from django.core.management import call_command\n curdir = os.getcwd()\n- os.chdir(os.path.join(curdir, 'geotrek'))\n- call_command('compilemessages')\n- os.chdir(os.path.join(curdir, 'mapentity'))\n- call_command('compilemessages')\n- os.chdir(curdir)\n+ for subdir in ('geotrek', 'mapentity'):\n+ os.chdir(subdir)\n+ call_command('compilemessages')\n+ for path in Path('.').rglob('*.mo'):\n+ copy(path, os.path.join(curdir, self.build_lib, subdir, path))\n+ os.chdir(curdir)\n \n \n setup(\n", "issue": "Fix translations in package\nThe compilemessages step for geotrek and mapentity is missing somewhere\n", "before_files": [{"content": "#!/usr/bin/python3\nimport os\nimport distutils.command.build\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass BuildCommand(distutils.command.build.build):\n def run(self):\n print(\"before\")\n distutils.command.build.build.run(self)\n print(\"after\")\n from django.core.management import call_command\n curdir = os.getcwd()\n os.chdir(os.path.join(curdir, 'geotrek'))\n call_command('compilemessages')\n os.chdir(os.path.join(curdir, 'mapentity'))\n call_command('compilemessages')\n os.chdir(curdir)\n\n\nsetup(\n name='geotrek',\n version=open(os.path.join(here, 'VERSION')).read().strip(),\n author='Makina Corpus',\n author_email='[email protected]',\n url='http://makina-corpus.com',\n description=\"Geotrek\",\n long_description=(open(os.path.join(here, 'README.rst')).read() + '\\n\\n'\n + open(os.path.join(here, 'docs', 'changelog.rst')).read()),\n scripts=['manage.py'],\n install_requires=[\n # pinned by requirements.txt\n 'psycopg2',\n 'docutils',\n 'GDAL',\n 'Pillow',\n 'easy-thumbnails',\n 'simplekml',\n 'pygal',\n 'django-extended-choices',\n 'django-multiselectfield',\n 'geojson',\n 'tif2geojson',\n 'pytz',\n 'djangorestframework-gis',\n 'drf-dynamic-fields',\n 'django-rest-swagger',\n 'django-embed-video',\n 'xlrd',\n 'landez',\n 'redis',\n 'celery',\n 'django-celery-results',\n 'requests[security]',\n 'drf-extensions',\n 'django-colorfield',\n 'factory_boy',\n ],\n cmdclass={\"build\": BuildCommand},\n include_package_data=True,\n license='BSD, see LICENSE file.',\n packages=find_packages(),\n classifiers=['Natural Language :: English',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/python3\nimport os\nimport distutils.command.build\nfrom pathlib import Path\nfrom setuptools import setup, find_packages\nfrom shutil import copy\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass BuildCommand(distutils.command.build.build):\n def run(self):\n distutils.command.build.build.run(self)\n from django.core.management import call_command\n curdir = os.getcwd()\n for subdir in ('geotrek', 'mapentity'):\n os.chdir(subdir)\n call_command('compilemessages')\n for path in Path('.').rglob('*.mo'):\n copy(path, os.path.join(curdir, self.build_lib, 
subdir, path))\n os.chdir(curdir)\n\n\nsetup(\n name='geotrek',\n version=open(os.path.join(here, 'VERSION')).read().strip(),\n author='Makina Corpus',\n author_email='[email protected]',\n url='http://makina-corpus.com',\n description=\"Geotrek\",\n long_description=(open(os.path.join(here, 'README.rst')).read() + '\\n\\n'\n + open(os.path.join(here, 'docs', 'changelog.rst')).read()),\n scripts=['manage.py'],\n install_requires=[\n # pinned by requirements.txt\n 'psycopg2',\n 'docutils',\n 'GDAL',\n 'Pillow',\n 'easy-thumbnails',\n 'simplekml',\n 'pygal',\n 'django-extended-choices',\n 'django-multiselectfield',\n 'geojson',\n 'tif2geojson',\n 'pytz',\n 'djangorestframework-gis',\n 'drf-dynamic-fields',\n 'django-rest-swagger',\n 'django-embed-video',\n 'xlrd',\n 'landez',\n 'redis',\n 'celery',\n 'django-celery-results',\n 'requests[security]',\n 'drf-extensions',\n 'django-colorfield',\n 'factory_boy',\n ],\n cmdclass={\"build\": BuildCommand},\n include_package_data=True,\n license='BSD, see LICENSE file.',\n packages=find_packages(),\n classifiers=['Natural Language :: English',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7'],\n)\n", "path": "setup.py"}]}
| 898 | 255 |
gh_patches_debug_26330 | rasdani/github-patches | git_diff | streamlink__streamlink-1583 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Vaughnlive changed IP's to break Streamlink
This will be a very brief bug report... As of tonight the head vaughnlive.py references IPs which were disconnected by vaughn to thwart streamlinking. I've observed vaughn serving video now from "66.90.93.44","66.90.93.35" and have personally gotten it to work overwriting the IP's in rtmp_server_map with those two alternating. I would submit the commit but I think some more testing is needed as I only use streamlink with one occasional stream and don't know how far those IPs will get more frequent SL users.
#1187 contains lengthy discussion on the history of the war vaughn has waged against streamlink, this is probably not the last time the IPs will change.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/vaughnlive.py`
Content:
```
1 import random
2 import re
3 import itertools
4 import ssl
5 import websocket
6
7 from streamlink.plugin import Plugin
8 from streamlink.plugin.api import useragents, http
9 from streamlink.stream import RTMPStream
10
11 _url_re = re.compile(r"""
12 http(s)?://(\w+\.)?
13 (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv
14 (/embed/video)?
15 /(?P<channel>[^/&?]+)
16 """, re.VERBOSE)
17
18
19 class VLWebSocket(websocket.WebSocket):
20 def __init__(self, **_):
21 self.session = _.pop("session")
22 self.logger = self.session.logger.new_module("plugins.vaughnlive.websocket")
23 sslopt = _.pop("sslopt", {})
24 sslopt["cert_reqs"] = ssl.CERT_NONE
25 super(VLWebSocket, self).__init__(sslopt=sslopt, **_)
26
27 def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT):
28 self.logger.debug("Sending message: {0}", payload)
29 return super(VLWebSocket, self).send(payload + "\n\x00", opcode)
30
31 def recv(self):
32 d = super(VLWebSocket, self).recv().replace("\n", "").replace("\x00", "")
33 return d.split(" ", 1)
34
35
36 class VaughnLive(Plugin):
37 servers = ["wss://sapi-ws-{0}x{1:02}.vaughnlive.tv".format(x, y) for x, y in itertools.product(range(1, 3),
38 range(1, 6))]
39 origin = "https://vaughnlive.tv"
40 rtmp_server_map = {
41 "594140c69edad": "66.90.93.42",
42 "585c4cab1bef1": "66.90.93.34",
43 "5940d648b3929": "66.90.93.42",
44 "5941854b39bc4": "198.255.0.10"
45 }
46 name_remap = {"#vl": "live", "#btv": "btv", "#pt": "pt", "#igb": "instagib", "#vtv": "vtv"}
47 domain_map = {"vaughnlive": "#vl", "breakers": "#btv", "instagib": "#igb", "vapers": "#vtv", "pearltime": "#pt"}
48
49 @classmethod
50 def can_handle_url(cls, url):
51 return _url_re.match(url)
52
53 def api_url(self):
54 return random.choice(self.servers)
55
56 def parse_ack(self, action, message):
57 if action.endswith("3"):
58 channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(";")
59 is_live = is_live == "1"
60 viewers = int(viewers)
61 self.logger.debug("Viewers: {0}, isLive={1}", viewers, is_live)
62 domain, channel = channel.split("-", 1)
63 return is_live, server, domain, channel, token, ingest
64 else:
65 self.logger.error("Unhandled action format: {0}", action)
66
67 def _get_info(self, stream_name):
68 server = self.api_url()
69 self.logger.debug("Connecting to API: {0}", server)
70 ws = websocket.create_connection(server,
71 header=["User-Agent: {0}".format(useragents.CHROME)],
72 origin=self.origin,
73 class_=VLWebSocket,
74 session=self.session)
75 ws.send("MVN LOAD3 {0}".format(stream_name))
76 action, message = ws.recv()
77 return self.parse_ack(action, message)
78
79 def _get_rtmp_streams(self, server, domain, channel, token):
80 rtmp_server = self.rtmp_server_map.get(server, server)
81
82 url = "rtmp://{0}/live?{1}".format(rtmp_server, token)
83
84 yield "live", RTMPStream(self.session, params={
85 "rtmp": url,
86 "pageUrl": self.url,
87 "playpath": "{0}_{1}".format(self.name_remap.get(domain, "live"), channel),
88 "live": True
89 })
90
91 def _get_streams(self):
92 m = _url_re.match(self.url)
93 if m:
94 stream_name = "{0}-{1}".format(self.domain_map[(m.group("domain").lower())],
95 m.group("channel"))
96
97 is_live, server, domain, channel, token, ingest = self._get_info(stream_name)
98
99 if not is_live:
100 self.logger.info("Stream is currently off air")
101 else:
102 for s in self._get_rtmp_streams(server, domain, channel, token):
103 yield s
104
105
106 __plugin__ = VaughnLive
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/vaughnlive.py b/src/streamlink/plugins/vaughnlive.py
--- a/src/streamlink/plugins/vaughnlive.py
+++ b/src/streamlink/plugins/vaughnlive.py
@@ -38,10 +38,10 @@
range(1, 6))]
origin = "https://vaughnlive.tv"
rtmp_server_map = {
- "594140c69edad": "66.90.93.42",
- "585c4cab1bef1": "66.90.93.34",
- "5940d648b3929": "66.90.93.42",
- "5941854b39bc4": "198.255.0.10"
+ "594140c69edad": "192.240.105.171:1935",
+ "585c4cab1bef1": "192.240.105.171:1935",
+ "5940d648b3929": "192.240.105.171:1935",
+ "5941854b39bc4": "192.240.105.171:1935"
}
name_remap = {"#vl": "live", "#btv": "btv", "#pt": "pt", "#igb": "instagib", "#vtv": "vtv"}
domain_map = {"vaughnlive": "#vl", "breakers": "#btv", "instagib": "#igb", "vapers": "#vtv", "pearltime": "#pt"}
@@ -99,6 +99,7 @@
if not is_live:
self.logger.info("Stream is currently off air")
else:
+ self.logger.info("Stream powered by VaughnSoft - remember to support them.")
for s in self._get_rtmp_streams(server, domain, channel, token):
yield s
|
{"golden_diff": "diff --git a/src/streamlink/plugins/vaughnlive.py b/src/streamlink/plugins/vaughnlive.py\n--- a/src/streamlink/plugins/vaughnlive.py\n+++ b/src/streamlink/plugins/vaughnlive.py\n@@ -38,10 +38,10 @@\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n- \"594140c69edad\": \"66.90.93.42\",\n- \"585c4cab1bef1\": \"66.90.93.34\",\n- \"5940d648b3929\": \"66.90.93.42\",\n- \"5941854b39bc4\": \"198.255.0.10\"\n+ \"594140c69edad\": \"192.240.105.171:1935\",\n+ \"585c4cab1bef1\": \"192.240.105.171:1935\",\n+ \"5940d648b3929\": \"192.240.105.171:1935\",\n+ \"5941854b39bc4\": \"192.240.105.171:1935\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": \"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", \"pearltime\": \"#pt\"}\n@@ -99,6 +99,7 @@\n if not is_live:\n self.logger.info(\"Stream is currently off air\")\n else:\n+ self.logger.info(\"Stream powered by VaughnSoft - remember to support them.\")\n for s in self._get_rtmp_streams(server, domain, channel, token):\n yield s\n", "issue": "Vaughnlive changed IP's to break Streamlink\nThis will be a very brief bug report... As of tonight the head vaughnlive.py references IPs which were disconnected by vaughn to thwart streamlinking. I've observed vaughn serving video now from \"66.90.93.44\",\"66.90.93.35\" and have personally gotten it to work overwriting the IP's in rtmp_server_map with those two alternating. I would submit the commit but I think some more testing is needed as I only use streamlink with one occasional stream and don't know how far those IPs will get more frequent SL users.\r\n\r\n #1187 contains lengthy discussion on the history of the war vaughn has waged against streamlink, this is probably not the last time the IPs will change.\n", "before_files": [{"content": "import random\nimport re\nimport itertools\nimport ssl\nimport websocket\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, http\nfrom streamlink.stream import RTMPStream\n\n_url_re = re.compile(r\"\"\"\n http(s)?://(\\w+\\.)?\n (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv\n (/embed/video)?\n /(?P<channel>[^/&?]+)\n\"\"\", re.VERBOSE)\n\n\nclass VLWebSocket(websocket.WebSocket):\n def __init__(self, **_):\n self.session = _.pop(\"session\")\n self.logger = self.session.logger.new_module(\"plugins.vaughnlive.websocket\")\n sslopt = _.pop(\"sslopt\", {})\n sslopt[\"cert_reqs\"] = ssl.CERT_NONE\n super(VLWebSocket, self).__init__(sslopt=sslopt, **_)\n\n def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT):\n self.logger.debug(\"Sending message: {0}\", payload)\n return super(VLWebSocket, self).send(payload + \"\\n\\x00\", opcode)\n\n def recv(self):\n d = super(VLWebSocket, self).recv().replace(\"\\n\", \"\").replace(\"\\x00\", \"\")\n return d.split(\" \", 1)\n\n\nclass VaughnLive(Plugin):\n servers = [\"wss://sapi-ws-{0}x{1:02}.vaughnlive.tv\".format(x, y) for x, y in itertools.product(range(1, 3),\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n \"594140c69edad\": \"66.90.93.42\",\n \"585c4cab1bef1\": \"66.90.93.34\",\n \"5940d648b3929\": \"66.90.93.42\",\n \"5941854b39bc4\": \"198.255.0.10\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": \"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", 
\"pearltime\": \"#pt\"}\n\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def api_url(self):\n return random.choice(self.servers)\n\n def parse_ack(self, action, message):\n if action.endswith(\"3\"):\n channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(\";\")\n is_live = is_live == \"1\"\n viewers = int(viewers)\n self.logger.debug(\"Viewers: {0}, isLive={1}\", viewers, is_live)\n domain, channel = channel.split(\"-\", 1)\n return is_live, server, domain, channel, token, ingest\n else:\n self.logger.error(\"Unhandled action format: {0}\", action)\n\n def _get_info(self, stream_name):\n server = self.api_url()\n self.logger.debug(\"Connecting to API: {0}\", server)\n ws = websocket.create_connection(server,\n header=[\"User-Agent: {0}\".format(useragents.CHROME)],\n origin=self.origin,\n class_=VLWebSocket,\n session=self.session)\n ws.send(\"MVN LOAD3 {0}\".format(stream_name))\n action, message = ws.recv()\n return self.parse_ack(action, message)\n\n def _get_rtmp_streams(self, server, domain, channel, token):\n rtmp_server = self.rtmp_server_map.get(server, server)\n\n url = \"rtmp://{0}/live?{1}\".format(rtmp_server, token)\n\n yield \"live\", RTMPStream(self.session, params={\n \"rtmp\": url,\n \"pageUrl\": self.url,\n \"playpath\": \"{0}_{1}\".format(self.name_remap.get(domain, \"live\"), channel),\n \"live\": True\n })\n\n def _get_streams(self):\n m = _url_re.match(self.url)\n if m:\n stream_name = \"{0}-{1}\".format(self.domain_map[(m.group(\"domain\").lower())],\n m.group(\"channel\"))\n\n is_live, server, domain, channel, token, ingest = self._get_info(stream_name)\n\n if not is_live:\n self.logger.info(\"Stream is currently off air\")\n else:\n for s in self._get_rtmp_streams(server, domain, channel, token):\n yield s\n\n\n__plugin__ = VaughnLive\n", "path": "src/streamlink/plugins/vaughnlive.py"}], "after_files": [{"content": "import random\nimport re\nimport itertools\nimport ssl\nimport websocket\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, http\nfrom streamlink.stream import RTMPStream\n\n_url_re = re.compile(r\"\"\"\n http(s)?://(\\w+\\.)?\n (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv\n (/embed/video)?\n /(?P<channel>[^/&?]+)\n\"\"\", re.VERBOSE)\n\n\nclass VLWebSocket(websocket.WebSocket):\n def __init__(self, **_):\n self.session = _.pop(\"session\")\n self.logger = self.session.logger.new_module(\"plugins.vaughnlive.websocket\")\n sslopt = _.pop(\"sslopt\", {})\n sslopt[\"cert_reqs\"] = ssl.CERT_NONE\n super(VLWebSocket, self).__init__(sslopt=sslopt, **_)\n\n def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT):\n self.logger.debug(\"Sending message: {0}\", payload)\n return super(VLWebSocket, self).send(payload + \"\\n\\x00\", opcode)\n\n def recv(self):\n d = super(VLWebSocket, self).recv().replace(\"\\n\", \"\").replace(\"\\x00\", \"\")\n return d.split(\" \", 1)\n\n\nclass VaughnLive(Plugin):\n servers = [\"wss://sapi-ws-{0}x{1:02}.vaughnlive.tv\".format(x, y) for x, y in itertools.product(range(1, 3),\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n \"594140c69edad\": \"192.240.105.171:1935\",\n \"585c4cab1bef1\": \"192.240.105.171:1935\",\n \"5940d648b3929\": \"192.240.105.171:1935\",\n \"5941854b39bc4\": \"192.240.105.171:1935\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": 
\"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", \"pearltime\": \"#pt\"}\n\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def api_url(self):\n return random.choice(self.servers)\n\n def parse_ack(self, action, message):\n if action.endswith(\"3\"):\n channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(\";\")\n is_live = is_live == \"1\"\n viewers = int(viewers)\n self.logger.debug(\"Viewers: {0}, isLive={1}\", viewers, is_live)\n domain, channel = channel.split(\"-\", 1)\n return is_live, server, domain, channel, token, ingest\n else:\n self.logger.error(\"Unhandled action format: {0}\", action)\n\n def _get_info(self, stream_name):\n server = self.api_url()\n self.logger.debug(\"Connecting to API: {0}\", server)\n ws = websocket.create_connection(server,\n header=[\"User-Agent: {0}\".format(useragents.CHROME)],\n origin=self.origin,\n class_=VLWebSocket,\n session=self.session)\n ws.send(\"MVN LOAD3 {0}\".format(stream_name))\n action, message = ws.recv()\n return self.parse_ack(action, message)\n\n def _get_rtmp_streams(self, server, domain, channel, token):\n rtmp_server = self.rtmp_server_map.get(server, server)\n\n url = \"rtmp://{0}/live?{1}\".format(rtmp_server, token)\n\n yield \"live\", RTMPStream(self.session, params={\n \"rtmp\": url,\n \"pageUrl\": self.url,\n \"playpath\": \"{0}_{1}\".format(self.name_remap.get(domain, \"live\"), channel),\n \"live\": True\n })\n\n def _get_streams(self):\n m = _url_re.match(self.url)\n if m:\n stream_name = \"{0}-{1}\".format(self.domain_map[(m.group(\"domain\").lower())],\n m.group(\"channel\"))\n\n is_live, server, domain, channel, token, ingest = self._get_info(stream_name)\n\n if not is_live:\n self.logger.info(\"Stream is currently off air\")\n else:\n self.logger.info(\"Stream powered by VaughnSoft - remember to support them.\")\n for s in self._get_rtmp_streams(server, domain, channel, token):\n yield s\n\n\n__plugin__ = VaughnLive\n", "path": "src/streamlink/plugins/vaughnlive.py"}]}
| 1,737 | 518 |
gh_patches_debug_26363 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-786 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement showing and changing a column's type
## Problem
<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->
Users might want to change the data type of an existing column on their table.
## Proposed solution
<!-- A clear and concise description of your proposed solution or feature. -->
The ["Working with Columns" design spec](https://wiki.mathesar.org/en/design/specs/working-with-columns) has a solution for showing and changing column types, which we need to implement on the frontend.
Please note that we're only implementing changing the Mathesar data type in this milestone. Options specific to individual data types will be implemented in the next milestone.
Number data types should save as `NUMERIC`.
Text data types should save as `VARCHAR`.
Date/time data types can be disabled for now since they're not fully implemented on the backend.
## Additional context
<!-- Add any other context or screenshots about the feature request here.-->
- Backend work:
- #532 to get the list of types
- #199 to get valid target types and change types
- Design issue: #324
- Design discussion: #436
- #269
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/views.py`
Content:
```
1 from django.shortcuts import render, redirect, get_object_or_404
2
3 from mathesar.models import Database, Schema, Table
4 from mathesar.api.serializers.databases import DatabaseSerializer
5 from mathesar.api.serializers.schemas import SchemaSerializer
6 from mathesar.api.serializers.tables import TableSerializer
7
8
9 def get_schema_list(request, database):
10 schema_serializer = SchemaSerializer(
11 Schema.objects.filter(database=database),
12 many=True,
13 context={'request': request}
14 )
15 return schema_serializer.data
16
17
18 def get_database_list(request):
19 database_serializer = DatabaseSerializer(
20 Database.objects.all(),
21 many=True,
22 context={'request': request}
23 )
24 return database_serializer.data
25
26
27 def get_table_list(request, schema):
28 if schema is None:
29 return []
30 table_serializer = TableSerializer(
31 Table.objects.filter(schema=schema),
32 many=True,
33 context={'request': request}
34 )
35 return table_serializer.data
36
37
38 def get_common_data(request, database, schema=None):
39 return {
40 'current_db': database.name if database else None,
41 'current_schema': schema.id if schema else None,
42 'schemas': get_schema_list(request, database),
43 'databases': get_database_list(request),
44 'tables': get_table_list(request, schema)
45 }
46
47
48 def get_current_database(request, db_name):
49 # if there's a DB name passed in, try to retrieve the database, or return a 404 error.
50 if db_name is not None:
51 return get_object_or_404(Database, name=db_name)
52 else:
53 try:
54 # Try to get the first database available
55 return Database.objects.order_by('id').first()
56 except Database.DoesNotExist:
57 return None
58
59
60 def get_current_schema(request, schema_id, database):
61 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.
62 if schema_id is not None:
63 return get_object_or_404(Schema, id=schema_id)
64 else:
65 try:
66 # Try to get the first schema in the DB
67 return Schema.objects.filter(database=database).order_by('id').first()
68 except Schema.DoesNotExist:
69 return None
70
71
72 def render_schema(request, database, schema):
73 # if there's no schema available, redirect to the schemas page.
74 if not schema:
75 return redirect('schemas', db_name=database.name)
76 else:
77 # We are redirecting so that the correct URL is passed to the frontend.
78 return redirect('schema_home', db_name=database.name, schema_id=schema.id)
79
80
81 def home(request):
82 database = get_current_database(request, None)
83 schema = get_current_schema(request, None, database)
84 return render_schema(request, database, schema)
85
86
87 def db_home(request, db_name):
88 database = get_current_database(request, db_name)
89 schema = get_current_schema(request, None, database)
90 return render_schema(request, database, schema)
91
92
93 def schema_home(request, db_name, schema_id):
94 database = get_current_database(request, db_name)
95 schema = get_current_schema(request, schema_id, database)
96 return render(request, 'mathesar/index.html', {
97 'common_data': get_common_data(request, database, schema)
98 })
99
100
101 def schemas(request, db_name):
102 database = get_current_database(request, db_name)
103 schema = get_current_schema(request, None, database)
104 return render(request, 'mathesar/index.html', {
105 'common_data': get_common_data(request, database, schema)
106 })
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mathesar/views.py b/mathesar/views.py
--- a/mathesar/views.py
+++ b/mathesar/views.py
@@ -1,7 +1,7 @@
from django.shortcuts import render, redirect, get_object_or_404
from mathesar.models import Database, Schema, Table
-from mathesar.api.serializers.databases import DatabaseSerializer
+from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer
from mathesar.api.serializers.schemas import SchemaSerializer
from mathesar.api.serializers.tables import TableSerializer
@@ -35,13 +35,25 @@
return table_serializer.data
+def get_type_list(request, database):
+ if database is None:
+ return []
+ type_serializer = TypeSerializer(
+ database.supported_types,
+ many=True,
+ context={'request': request}
+ )
+ return type_serializer.data
+
+
def get_common_data(request, database, schema=None):
return {
'current_db': database.name if database else None,
'current_schema': schema.id if schema else None,
'schemas': get_schema_list(request, database),
'databases': get_database_list(request),
- 'tables': get_table_list(request, schema)
+ 'tables': get_table_list(request, schema),
+ 'abstract_types': get_type_list(request, database)
}
|
{"golden_diff": "diff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -1,7 +1,7 @@\n from django.shortcuts import render, redirect, get_object_or_404\n \n from mathesar.models import Database, Schema, Table\n-from mathesar.api.serializers.databases import DatabaseSerializer\n+from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\n from mathesar.api.serializers.schemas import SchemaSerializer\n from mathesar.api.serializers.tables import TableSerializer\n \n@@ -35,13 +35,25 @@\n return table_serializer.data\n \n \n+def get_type_list(request, database):\n+ if database is None:\n+ return []\n+ type_serializer = TypeSerializer(\n+ database.supported_types,\n+ many=True,\n+ context={'request': request}\n+ )\n+ return type_serializer.data\n+\n+\n def get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n- 'tables': get_table_list(request, schema)\n+ 'tables': get_table_list(request, schema),\n+ 'abstract_types': get_type_list(request, database)\n }\n", "issue": "Implement showing and changing a column's type\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers might want to change the data type of an existing column on their table.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nThe [\"Working with Columns\" design spec](https://wiki.mathesar.org/en/design/specs/working-with-columns) has a solution for showing and changing column types, which we need to implement on the frontend.\r\n\r\nPlease note that we're only implementing changing the Mathesar data type in this milestone. 
Options specific to individual data types will be implemented in the next milestone.\r\n\r\nNumber data types should save as `NUMERIC`.\r\nText data types should save as `VARCHAR`.\r\nDate/time data types can be disabled for now since they're not fully implemented on the backend.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- Backend work:\r\n - #532 to get the list of types \r\n - #199 to get valid target types and change types \r\n- Design issue: #324 \r\n- Design discussion: #436\r\n- #269 \n", "before_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\n\nfrom mathesar.models import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema)\n }\n\n\ndef get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\ndef home(request):\n database = get_current_database(request, None)\n schema = get_current_schema(request, None, database)\n return render_schema(request, database, schema)\n\n\ndef db_home(request, db_name):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, None, database)\n return render_schema(request, database, schema)\n\n\ndef schema_home(request, db_name, schema_id):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 
'common_data': get_common_data(request, database, schema)\n })\n\n\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, None, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n", "path": "mathesar/views.py"}], "after_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\n\nfrom mathesar.models import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n database.supported_types,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'abstract_types': get_type_list(request, database)\n }\n\n\ndef get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\ndef home(request):\n database = get_current_database(request, None)\n schema = get_current_schema(request, None, database)\n return render_schema(request, database, schema)\n\n\ndef db_home(request, db_name):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, None, database)\n return render_schema(request, database, schema)\n\n\ndef schema_home(request, db_name, schema_id):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, 
schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, None, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n", "path": "mathesar/views.py"}]}
| 1,490 | 295 |
gh_patches_debug_34062 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1871 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]: Chichester District Council is not working
### I Have A Problem With:
A specific source
### What's Your Problem
The source has stopped working since Tuesday 13th February 2024. All the collection days no longer show on the calendar at all. The Chichester District Council website still shows me the days.
### Source (if relevant)
chichester_gov_uk
### Logs
```Shell
This error originated from a custom integration.
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
Integration: waste_collection_schedule (documentation)
First occurred: 11:36:47 (1 occurrences)
Last logged: 11:36:47
fetch failed for source Chichester District Council: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py", line 37, in fetch form_url = form["action"] ~~~~^^^^^^^^^^ TypeError: 'NoneType' object is not subscriptable
```
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: chichester_gov_uk
args:
uprn: 10002466648
```
### Checklist Source Error
- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [X] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py`
Content:
```
1 from datetime import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection
6
7 TITLE = "Chichester District Council"
8 DESCRIPTION = "Source for chichester.gov.uk services for Chichester"
9 URL = "chichester.gov.uk"
10
11 TEST_CASES = {
12 "Test_001": {"uprn": "010002476348"},
13 "Test_002": {"uprn": "100062612654"},
14 "Test_003": {"uprn": "100061745708"},
15 }
16
17 ICON_MAP = {
18 "General Waste": "mdi:trash-can",
19 "Recycling": "mdi:recycle",
20 "Garden Recycling": "mdi:leaf",
21 }
22
23
24 class Source:
25 def __init__(self, uprn):
26 self._uprn = uprn
27
28 def fetch(self):
29 session = requests.Session()
30 # Start a session
31 r = session.get("https://www.chichester.gov.uk/checkyourbinday")
32 r.raise_for_status()
33 soup = BeautifulSoup(r.text, features="html.parser")
34
35 # Extract form submission url
36 form = soup.find("form", attrs={"id": "WASTECOLLECTIONCALENDARV2_FORM"})
37 form_url = form["action"]
38
39 # Submit form
40 form_data = {
41 "WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT": "Submit",
42 "WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN": self._uprn,
43 }
44 r = session.post(form_url, data=form_data)
45 r.raise_for_status()
46
47 # Extract collection dates
48 soup = BeautifulSoup(r.text, features="html.parser")
49 entries = []
50 data = soup.find_all("div", attrs={"class": "bin-days"})
51 for bin in data:
52 if "print-only" in bin["class"]:
53 continue
54
55 type = bin.find("span").contents[0].replace("bin", "").strip().title()
56 list_items = bin.find_all("li")
57 if list_items:
58 for item in list_items:
59 date = datetime.strptime(item.text, "%d %B %Y").date()
60 entries.append(
61 Collection(
62 date=date,
63 t=type,
64 icon=ICON_MAP.get(type),
65 )
66 )
67
68 return entries
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py
@@ -33,13 +33,13 @@
soup = BeautifulSoup(r.text, features="html.parser")
# Extract form submission url
- form = soup.find("form", attrs={"id": "WASTECOLLECTIONCALENDARV2_FORM"})
+ form = soup.find("form", attrs={"id": "WASTECOLLECTIONCALENDARV5_FORM"})
form_url = form["action"]
# Submit form
form_data = {
- "WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT": "Submit",
- "WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN": self._uprn,
+ "WASTECOLLECTIONCALENDARV5_FORMACTION_NEXT": "Submit",
+ "WASTECOLLECTIONCALENDARV5_CALENDAR_UPRN": self._uprn,
}
r = session.post(form_url, data=form_data)
r.raise_for_status()
@@ -47,16 +47,18 @@
# Extract collection dates
soup = BeautifulSoup(r.text, features="html.parser")
entries = []
- data = soup.find_all("div", attrs={"class": "bin-days"})
- for bin in data:
- if "print-only" in bin["class"]:
- continue
-
- type = bin.find("span").contents[0].replace("bin", "").strip().title()
- list_items = bin.find_all("li")
- if list_items:
- for item in list_items:
- date = datetime.strptime(item.text, "%d %B %Y").date()
+ tables = soup.find_all("table", attrs={"class": "bin-collection-dates"})
+ # Data is presented in two tables side-by-side
+ for table in tables:
+ # Each collection is a table row
+ data = table.find_all("tr")
+ for bin in data:
+ cells = bin.find_all("td")
+ # Ignore the header row
+ if len(cells) == 2:
+ date = datetime.strptime(cells[0].text, "%d %B %Y").date()
+ # Maintain backwards compatibility - it used to be General Waste and now it is General waste
+ type = cells[1].text.title()
entries.append(
Collection(
date=date,
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\n@@ -33,13 +33,13 @@\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n \n # Extract form submission url\n- form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV2_FORM\"})\n+ form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV5_FORM\"})\n form_url = form[\"action\"]\n \n # Submit form\n form_data = {\n- \"WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT\": \"Submit\",\n- \"WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN\": self._uprn,\n+ \"WASTECOLLECTIONCALENDARV5_FORMACTION_NEXT\": \"Submit\",\n+ \"WASTECOLLECTIONCALENDARV5_CALENDAR_UPRN\": self._uprn,\n }\n r = session.post(form_url, data=form_data)\n r.raise_for_status()\n@@ -47,16 +47,18 @@\n # Extract collection dates\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n entries = []\n- data = soup.find_all(\"div\", attrs={\"class\": \"bin-days\"})\n- for bin in data:\n- if \"print-only\" in bin[\"class\"]:\n- continue\n-\n- type = bin.find(\"span\").contents[0].replace(\"bin\", \"\").strip().title()\n- list_items = bin.find_all(\"li\")\n- if list_items:\n- for item in list_items:\n- date = datetime.strptime(item.text, \"%d %B %Y\").date()\n+ tables = soup.find_all(\"table\", attrs={\"class\": \"bin-collection-dates\"})\n+ # Data is presented in two tables side-by-side\n+ for table in tables:\n+ # Each collection is a table row\n+ data = table.find_all(\"tr\")\n+ for bin in data:\n+ cells = bin.find_all(\"td\")\n+ # Ignore the header row\n+ if len(cells) == 2:\n+ date = datetime.strptime(cells[0].text, \"%d %B %Y\").date()\n+ # Maintain backwards compatibility - it used to be General Waste and now it is General waste\n+ type = cells[1].text.title()\n entries.append(\n Collection(\n date=date,\n", "issue": "[Bug]: Chichester District Council is not working\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe source has stopped working since Tuesday 13th February 2024. All the collection days no longer show on the calendar at all. 
The Chichester District Council website still shows me the days.\n\n### Source (if relevant)\n\nchichester_gov_uk\n\n### Logs\n\n```Shell\nThis error originated from a custom integration.\r\n\r\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nIntegration: waste_collection_schedule (documentation)\r\nFirst occurred: 11:36:47 (1 occurrences)\r\nLast logged: 11:36:47\r\n\r\nfetch failed for source Chichester District Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\", line 37, in fetch form_url = form[\"action\"] ~~~~^^^^^^^^^^ TypeError: 'NoneType' object is not subscriptable\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: chichester_gov_uk\r\n args:\r\n uprn: 10002466648\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Chichester District Council\"\nDESCRIPTION = \"Source for chichester.gov.uk services for Chichester\"\nURL = \"chichester.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"010002476348\"},\n \"Test_002\": {\"uprn\": \"100062612654\"},\n \"Test_003\": {\"uprn\": \"100061745708\"},\n}\n\nICON_MAP = {\n \"General Waste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Garden Recycling\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = uprn\n\n def fetch(self):\n session = requests.Session()\n # Start a session\n r = session.get(\"https://www.chichester.gov.uk/checkyourbinday\")\n r.raise_for_status()\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n # Extract form submission url\n form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV2_FORM\"})\n form_url = form[\"action\"]\n\n # Submit form\n form_data = {\n \"WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT\": \"Submit\",\n \"WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN\": self._uprn,\n }\n r = session.post(form_url, data=form_data)\n r.raise_for_status()\n\n # Extract collection dates\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n entries = []\n data = soup.find_all(\"div\", attrs={\"class\": \"bin-days\"})\n for bin in data:\n if \"print-only\" in 
bin[\"class\"]:\n continue\n\n type = bin.find(\"span\").contents[0].replace(\"bin\", \"\").strip().title()\n list_items = bin.find_all(\"li\")\n if list_items:\n for item in list_items:\n date = datetime.strptime(item.text, \"%d %B %Y\").date()\n entries.append(\n Collection(\n date=date,\n t=type,\n icon=ICON_MAP.get(type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py"}], "after_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Chichester District Council\"\nDESCRIPTION = \"Source for chichester.gov.uk services for Chichester\"\nURL = \"chichester.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"010002476348\"},\n \"Test_002\": {\"uprn\": \"100062612654\"},\n \"Test_003\": {\"uprn\": \"100061745708\"},\n}\n\nICON_MAP = {\n \"General Waste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Garden Recycling\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = uprn\n\n def fetch(self):\n session = requests.Session()\n # Start a session\n r = session.get(\"https://www.chichester.gov.uk/checkyourbinday\")\n r.raise_for_status()\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n # Extract form submission url\n form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV5_FORM\"})\n form_url = form[\"action\"]\n\n # Submit form\n form_data = {\n \"WASTECOLLECTIONCALENDARV5_FORMACTION_NEXT\": \"Submit\",\n \"WASTECOLLECTIONCALENDARV5_CALENDAR_UPRN\": self._uprn,\n }\n r = session.post(form_url, data=form_data)\n r.raise_for_status()\n\n # Extract collection dates\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n entries = []\n tables = soup.find_all(\"table\", attrs={\"class\": \"bin-collection-dates\"})\n # Data is presented in two tables side-by-side\n for table in tables:\n # Each collection is a table row\n data = table.find_all(\"tr\")\n for bin in data:\n cells = bin.find_all(\"td\")\n # Ignore the header row\n if len(cells) == 2:\n date = datetime.strptime(cells[0].text, \"%d %B %Y\").date()\n # Maintain backwards compatibility - it used to be General Waste and now it is General waste\n type = cells[1].text.title()\n entries.append(\n Collection(\n date=date,\n t=type,\n icon=ICON_MAP.get(type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py"}]}
| 1,465 | 596 |
gh_patches_debug_899 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1063 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User.full_name doesn't handle non-ASCII (in Python 2?)
### Steps to reproduce
```python
updater = ext.Updater(token=settings.telegram_token())
def F(bot, update):
user = update.effective_user
print repr(user.first_name), repr(user.last_name)
print '%s %s' % (user.first_name, user.last_name)
print user.full_name
updater.dispatcher.add_handler(ext.MessageHandler(0, F))
updater.start_polling()
updater.idle()
```
### Expected behaviour
```
u'Dan\u2022iel' u'Reed'
Dan•iel Reed
Dan•iel Reed
```
### Actual behaviour
```
u'Dan\u2022iel' u'Reed'
Dan•iel Reed
ERROR dispatcher.py:301] An uncaught error was raised while processing the update
Traceback (most recent call last):
File "local/lib/python2.7/site-packages/telegram/ext/dispatcher.py", line 279, in process_update
handler.handle_update(update, self)
File "local/lib/python2.7/site-packages/telegram/ext/messagehandler.py", line 169, in handle_update
return self.callback(dispatcher.bot, update, **optional_args)
File "<stdin>", line 5, in F
File "local/lib/python2.7/site-packages/telegram/user.py", line 91, in full_name
return '{} {}'.format(self.first_name, self.last_name)
UnicodeEncodeError: 'ascii' codec can't encode character u'\u2022' in position 3: ordinal not in range(128)
```
### Configuration
**Operating System:**
**Version of Python, python-telegram-bot & dependencies:**
```
python-telegram-bot 10.0.1
certifi 2018.01.18
future 0.16.0
Python 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0]
```
I'm a little rushed, but this works for me:
```python
@property
def full_name(self):
"""
:obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available)
:attr:`last_name`.
"""
if self.last_name:
! return u'{} {}'.format(self.first_name, self.last_name)
return self.first_name
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/user.py`
Content:
```
1 #!/usr/bin/env python
2 # pylint: disable=C0103,W0622
3 #
4 # A library that provides a Python interface to the Telegram Bot API
5 # Copyright (C) 2015-2018
6 # Leandro Toledo de Souza <[email protected]>
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU Lesser Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU Lesser Public License for more details.
17 #
18 # You should have received a copy of the GNU Lesser Public License
19 # along with this program. If not, see [http://www.gnu.org/licenses/].
20 """This module contains an object that represents a Telegram User."""
21
22 from telegram import TelegramObject
23 from telegram.utils.helpers import mention_html as util_mention_html
24 from telegram.utils.helpers import mention_markdown as util_mention_markdown
25
26
27 class User(TelegramObject):
28 """This object represents a Telegram user or bot.
29
30 Attributes:
31 id (:obj:`int`): Unique identifier for this user or bot.
32 is_bot (:obj:`bool`): True, if this user is a bot
33 first_name (:obj:`str`): User's or bot's first name.
34 last_name (:obj:`str`): Optional. User's or bot's last name.
35 username (:obj:`str`): Optional. User's or bot's username.
36 language_code (:obj:`str`): Optional. IETF language tag of the user's language.
37 bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.
38
39 Args:
40 id (:obj:`int`): Unique identifier for this user or bot.
41 is_bot (:obj:`bool`): True, if this user is a bot
42 first_name (:obj:`str`): User's or bot's first name.
43 last_name (:obj:`str`, optional): User's or bot's last name.
44 username (:obj:`str`, optional): User's or bot's username.
45 language_code (:obj:`str`, optional): IETF language tag of the user's language.
46 bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.
47
48 """
49
50 def __init__(self,
51 id,
52 first_name,
53 is_bot,
54 last_name=None,
55 username=None,
56 language_code=None,
57 bot=None,
58 **kwargs):
59 # Required
60 self.id = int(id)
61 self.first_name = first_name
62 self.is_bot = is_bot
63 # Optionals
64 self.last_name = last_name
65 self.username = username
66 self.language_code = language_code
67
68 self.bot = bot
69
70 self._id_attrs = (self.id,)
71
72 @property
73 def name(self):
74 """
75 :obj:`str`: Convenience property. If available, returns the user's :attr:`username`
76 prefixed with "@". If :attr:`username` is not available, returns :attr:`full_name`.
77
78 """
79 if self.username:
80 return '@{}'.format(self.username)
81 return self.full_name
82
83 @property
84 def full_name(self):
85 """
86 :obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available)
87 :attr:`last_name`.
88
89 """
90 if self.last_name:
91 return '{} {}'.format(self.first_name, self.last_name)
92 return self.first_name
93
94 @classmethod
95 def de_json(cls, data, bot):
96 if not data:
97 return None
98
99 data = super(User, cls).de_json(data, bot)
100
101 return cls(bot=bot, **data)
102
103 def get_profile_photos(self, *args, **kwargs):
104 """
105 Shortcut for::
106
107 bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)
108
109 """
110
111 return self.bot.get_user_profile_photos(self.id, *args, **kwargs)
112
113 @classmethod
114 def de_list(cls, data, bot):
115 if not data:
116 return []
117
118 users = list()
119 for user in data:
120 users.append(cls.de_json(user, bot))
121
122 return users
123
124 def mention_markdown(self, name=None):
125 """
126 Args:
127 name (:obj:`str`): If provided, will overwrite the user's name.
128
129 Returns:
130 :obj:`str`: The inline mention for the user as markdown.
131 """
132 if not name:
133 return util_mention_markdown(self.id, self.name)
134 else:
135 return util_mention_markdown(self.id, name)
136
137 def mention_html(self, name=None):
138 """
139 Args:
140 name (:obj:`str`): If provided, will overwrite the user's name.
141
142 Returns:
143 :obj:`str`: The inline mention for the user as HTML.
144 """
145 if not name:
146 return util_mention_html(self.id, self.name)
147 else:
148 return util_mention_html(self.id, name)
149
150 def send_message(self, *args, **kwargs):
151 """Shortcut for::
152
153 bot.send_message(User.chat_id, *args, **kwargs)
154
155 Where User is the current instance.
156
157 Returns:
158 :class:`telegram.Message`: On success, instance representing the message posted.
159
160 """
161 return self.bot.send_message(self.id, *args, **kwargs)
162
163 def send_photo(self, *args, **kwargs):
164 """Shortcut for::
165
166 bot.send_photo(User.chat_id, *args, **kwargs)
167
168 Where User is the current instance.
169
170 Returns:
171 :class:`telegram.Message`: On success, instance representing the message posted.
172
173 """
174 return self.bot.send_photo(self.id, *args, **kwargs)
175
176 def send_audio(self, *args, **kwargs):
177 """Shortcut for::
178
179 bot.send_audio(User.chat_id, *args, **kwargs)
180
181 Where User is the current instance.
182
183 Returns:
184 :class:`telegram.Message`: On success, instance representing the message posted.
185
186 """
187 return self.bot.send_audio(self.id, *args, **kwargs)
188
189 def send_document(self, *args, **kwargs):
190 """Shortcut for::
191
192 bot.send_document(User.chat_id, *args, **kwargs)
193
194 Where User is the current instance.
195
196 Returns:
197 :class:`telegram.Message`: On success, instance representing the message posted.
198
199 """
200 return self.bot.send_document(self.id, *args, **kwargs)
201
202 def send_sticker(self, *args, **kwargs):
203 """Shortcut for::
204
205 bot.send_sticker(User.chat_id, *args, **kwargs)
206
207 Where User is the current instance.
208
209 Returns:
210 :class:`telegram.Message`: On success, instance representing the message posted.
211
212 """
213 return self.bot.send_sticker(self.id, *args, **kwargs)
214
215 def send_video(self, *args, **kwargs):
216 """Shortcut for::
217
218 bot.send_video(User.chat_id, *args, **kwargs)
219
220 Where User is the current instance.
221
222 Returns:
223 :class:`telegram.Message`: On success, instance representing the message posted.
224
225 """
226 return self.bot.send_video(self.id, *args, **kwargs)
227
228 def send_video_note(self, *args, **kwargs):
229 """Shortcut for::
230
231 bot.send_video_note(User.chat_id, *args, **kwargs)
232
233 Where User is the current instance.
234
235 Returns:
236 :class:`telegram.Message`: On success, instance representing the message posted.
237
238 """
239 return self.bot.send_video_note(self.id, *args, **kwargs)
240
241 def send_voice(self, *args, **kwargs):
242 """Shortcut for::
243
244 bot.send_voice(User.chat_id, *args, **kwargs)
245
246 Where User is the current instance.
247
248 Returns:
249 :class:`telegram.Message`: On success, instance representing the message posted.
250
251 """
252 return self.bot.send_voice(self.id, *args, **kwargs)
253
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram/user.py b/telegram/user.py
--- a/telegram/user.py
+++ b/telegram/user.py
@@ -88,7 +88,7 @@
"""
if self.last_name:
- return '{} {}'.format(self.first_name, self.last_name)
+ return u'{} {}'.format(self.first_name, self.last_name)
return self.first_name
@classmethod
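A minimal sketch of why the one-character change above matters, assuming Python 2.7 (the names below are made up for the demo; the behaviour is the same one shown in the issue's traceback):

```python
# -*- coding: utf-8 -*-
# Hypothetical Python 2.7 demo of byte-string format vs unicode format.
first, last = u'Dan\u2022iel', u'Reed'

try:
    '{} {}'.format(first, last)       # old code: str.format implicitly ASCII-encodes the args
except UnicodeEncodeError:
    print('byte-string format fails on non-ASCII names')

full_name = u'{} {}'.format(first, last)  # patched code: result stays unicode end to end
print(repr(full_name))                    # u'Dan\u2022iel Reed'
```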
|
{"golden_diff": "diff --git a/telegram/user.py b/telegram/user.py\n--- a/telegram/user.py\n+++ b/telegram/user.py\n@@ -88,7 +88,7 @@\n \n \"\"\"\n if self.last_name:\n- return '{} {}'.format(self.first_name, self.last_name)\n+ return u'{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n \n @classmethod\n", "issue": "User.full_name doesn't handle non-ASCII (in Python 2?)\n### Steps to reproduce\r\n```python\r\nupdater = ext.Updater(token=settings.telegram_token())\r\ndef F(bot, update):\r\n user = update.effective_user\r\n print repr(user.first_name), repr(user.last_name)\r\n print '%s %s' % (user.first_name, user.last_name)\r\n print user.full_name\r\n\r\nupdater.dispatcher.add_handler(ext.MessageHandler(0, F))\r\nupdater.start_polling()\r\nupdater.idle()\r\n```\r\n\r\n### Expected behaviour\r\n```\r\nu'Dan\\u2022iel' u'Reed'\r\nDan\u2022iel Reed\r\nDan\u2022iel Reed\r\n```\r\n\r\n### Actual behaviour\r\n```\r\nu'Dan\\u2022iel' u'Reed'\r\nDan\u2022iel Reed\r\nERROR dispatcher.py:301] An uncaught error was raised while processing the update\r\nTraceback (most recent call last):\r\n File \"local/lib/python2.7/site-packages/telegram/ext/dispatcher.py\", line 279, in process_update\r\n handler.handle_update(update, self)\r\n File \"local/lib/python2.7/site-packages/telegram/ext/messagehandler.py\", line 169, in handle_update\r\n return self.callback(dispatcher.bot, update, **optional_args)\r\n File \"<stdin>\", line 5, in F\r\n File \"local/lib/python2.7/site-packages/telegram/user.py\", line 91, in full_name\r\n return '{} {}'.format(self.first_name, self.last_name)\r\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\u2022' in position 3: ordinal not in range(128)\r\n```\r\n\r\n### Configuration\r\n**Operating System:**\r\n\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n```\r\npython-telegram-bot 10.0.1\r\ncertifi 2018.01.18\r\nfuture 0.16.0\r\nPython 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0]\r\n```\r\n\r\nI'm a little rushed, but this is works for me:\r\n\r\n```python\r\n @property\r\n def full_name(self):\r\n \"\"\"\r\n :obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available)\r\n :attr:`last_name`.\r\n\r\n \"\"\"\r\n if self.last_name:\r\n! return u'{} {}'.format(self.first_name, self.last_name)\r\n return self.first_name\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram User.\"\"\"\n\nfrom telegram import TelegramObject\nfrom telegram.utils.helpers import mention_html as util_mention_html\nfrom telegram.utils.helpers import mention_markdown as util_mention_markdown\n\n\nclass User(TelegramObject):\n \"\"\"This object represents a Telegram user or bot.\n\n Attributes:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`): Optional. User's or bot's last name.\n username (:obj:`str`): Optional. User's or bot's username.\n language_code (:obj:`str`): Optional. IETF language tag of the user's language.\n bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.\n\n Args:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`, optional): User's or bot's last name.\n username (:obj:`str`, optional): User's or bot's username.\n language_code (:obj:`str`, optional): IETF language tag of the user's language.\n bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.\n\n \"\"\"\n\n def __init__(self,\n id,\n first_name,\n is_bot,\n last_name=None,\n username=None,\n language_code=None,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.first_name = first_name\n self.is_bot = is_bot\n # Optionals\n self.last_name = last_name\n self.username = username\n self.language_code = language_code\n\n self.bot = bot\n\n self._id_attrs = (self.id,)\n\n @property\n def name(self):\n \"\"\"\n :obj:`str`: Convenience property. If available, returns the user's :attr:`username`\n prefixed with \"@\". If :attr:`username` is not available, returns :attr:`full_name`.\n\n \"\"\"\n if self.username:\n return '@{}'.format(self.username)\n return self.full_name\n\n @property\n def full_name(self):\n \"\"\"\n :obj:`str`: Convenience property. 
The user's :attr:`first_name`, followed by (if available)\n :attr:`last_name`.\n\n \"\"\"\n if self.last_name:\n return '{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n data = super(User, cls).de_json(data, bot)\n\n return cls(bot=bot, **data)\n\n def get_profile_photos(self, *args, **kwargs):\n \"\"\"\n Shortcut for::\n\n bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)\n\n \"\"\"\n\n return self.bot.get_user_profile_photos(self.id, *args, **kwargs)\n\n @classmethod\n def de_list(cls, data, bot):\n if not data:\n return []\n\n users = list()\n for user in data:\n users.append(cls.de_json(user, bot))\n\n return users\n\n def mention_markdown(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): If provided, will overwrite the user's name.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if not name:\n return util_mention_markdown(self.id, self.name)\n else:\n return util_mention_markdown(self.id, name)\n\n def mention_html(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): If provided, will overwrite the user's name.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n \"\"\"\n if not name:\n return util_mention_html(self.id, self.name)\n else:\n return util_mention_html(self.id, name)\n\n def send_message(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_message(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_message(self.id, *args, **kwargs)\n\n def send_photo(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_photo(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_photo(self.id, *args, **kwargs)\n\n def send_audio(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_audio(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_audio(self.id, *args, **kwargs)\n\n def send_document(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_document(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_document(self.id, *args, **kwargs)\n\n def send_sticker(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_sticker(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_sticker(self.id, *args, **kwargs)\n\n def send_video(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video(self.id, *args, **kwargs)\n\n def send_video_note(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video_note(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance 
representing the message posted.\n\n \"\"\"\n return self.bot.send_video_note(self.id, *args, **kwargs)\n\n def send_voice(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_voice(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_voice(self.id, *args, **kwargs)\n", "path": "telegram/user.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram User.\"\"\"\n\nfrom telegram import TelegramObject\nfrom telegram.utils.helpers import mention_html as util_mention_html\nfrom telegram.utils.helpers import mention_markdown as util_mention_markdown\n\n\nclass User(TelegramObject):\n \"\"\"This object represents a Telegram user or bot.\n\n Attributes:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`): Optional. User's or bot's last name.\n username (:obj:`str`): Optional. User's or bot's username.\n language_code (:obj:`str`): Optional. IETF language tag of the user's language.\n bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.\n\n Args:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`, optional): User's or bot's last name.\n username (:obj:`str`, optional): User's or bot's username.\n language_code (:obj:`str`, optional): IETF language tag of the user's language.\n bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.\n\n \"\"\"\n\n def __init__(self,\n id,\n first_name,\n is_bot,\n last_name=None,\n username=None,\n language_code=None,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.first_name = first_name\n self.is_bot = is_bot\n # Optionals\n self.last_name = last_name\n self.username = username\n self.language_code = language_code\n\n self.bot = bot\n\n self._id_attrs = (self.id,)\n\n @property\n def name(self):\n \"\"\"\n :obj:`str`: Convenience property. If available, returns the user's :attr:`username`\n prefixed with \"@\". If :attr:`username` is not available, returns :attr:`full_name`.\n\n \"\"\"\n if self.username:\n return '@{}'.format(self.username)\n return self.full_name\n\n @property\n def full_name(self):\n \"\"\"\n :obj:`str`: Convenience property. 
The user's :attr:`first_name`, followed by (if available)\n :attr:`last_name`.\n\n \"\"\"\n if self.last_name:\n return u'{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n data = super(User, cls).de_json(data, bot)\n\n return cls(bot=bot, **data)\n\n def get_profile_photos(self, *args, **kwargs):\n \"\"\"\n Shortcut for::\n\n bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)\n\n \"\"\"\n\n return self.bot.get_user_profile_photos(self.id, *args, **kwargs)\n\n @classmethod\n def de_list(cls, data, bot):\n if not data:\n return []\n\n users = list()\n for user in data:\n users.append(cls.de_json(user, bot))\n\n return users\n\n def mention_markdown(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): If provided, will overwrite the user's name.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if not name:\n return util_mention_markdown(self.id, self.name)\n else:\n return util_mention_markdown(self.id, name)\n\n def mention_html(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): If provided, will overwrite the user's name.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n \"\"\"\n if not name:\n return util_mention_html(self.id, self.name)\n else:\n return util_mention_html(self.id, name)\n\n def send_message(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_message(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_message(self.id, *args, **kwargs)\n\n def send_photo(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_photo(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_photo(self.id, *args, **kwargs)\n\n def send_audio(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_audio(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_audio(self.id, *args, **kwargs)\n\n def send_document(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_document(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_document(self.id, *args, **kwargs)\n\n def send_sticker(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_sticker(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_sticker(self.id, *args, **kwargs)\n\n def send_video(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video(self.id, *args, **kwargs)\n\n def send_video_note(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video_note(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance 
representing the message posted.\n\n \"\"\"\n return self.bot.send_video_note(self.id, *args, **kwargs)\n\n def send_voice(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_voice(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_voice(self.id, *args, **kwargs)\n", "path": "telegram/user.py"}]}
| 3,240 | 89 |
gh_patches_debug_2252
|
rasdani/github-patches
|
git_diff
|
fonttools__fonttools-337
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I find that the font's line height is bigger than in the original font.
I have tried pyftsubset with the command-line option --no-recalc-bounds,
but the generated subset font's line height is still bigger than in the original font.
I used an HTML @font-face rule to render the font:
@font-face {
font-family: 'freetype';
src: url('font.ttf') format('truetype');
}
The font file is Microsoft's Chinese font liti.ttf.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Lib/fontTools/ttLib/tables/_v_h_e_a.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2 from fontTools.misc.py23 import *
3 from fontTools.misc import sstruct
4 from fontTools.misc.textTools import safeEval
5 from . import DefaultTable
6
7 vheaFormat = """
8 > # big endian
9 tableVersion: 16.16F
10 ascent: h
11 descent: h
12 lineGap: h
13 advanceHeightMax: H
14 minTopSideBearing: h
15 minBottomSideBearing: h
16 yMaxExtent: h
17 caretSlopeRise: h
18 caretSlopeRun: h
19 reserved0: h
20 reserved1: h
21 reserved2: h
22 reserved3: h
23 reserved4: h
24 metricDataFormat: h
25 numberOfVMetrics: H
26 """
27
28 class table__v_h_e_a(DefaultTable.DefaultTable):
29
30 # Note: Keep in sync with table__h_h_e_a
31
32 dependencies = ['vmtx', 'glyf']
33
34 def decompile(self, data, ttFont):
35 sstruct.unpack(vheaFormat, data, self)
36
37 def compile(self, ttFont):
38 self.recalc(ttFont)
39 return sstruct.pack(vheaFormat, self)
40
41 def recalc(self, ttFont):
42 vtmxTable = ttFont['vmtx']
43 if 'glyf' in ttFont:
44 glyfTable = ttFont['glyf']
45 INFINITY = 100000
46 advanceHeightMax = 0
47 minTopSideBearing = +INFINITY # arbitrary big number
48 minBottomSideBearing = +INFINITY # arbitrary big number
49 yMaxExtent = -INFINITY # arbitrary big negative number
50
51 for name in ttFont.getGlyphOrder():
52 height, tsb = vtmxTable[name]
53 advanceHeightMax = max(advanceHeightMax, height)
54 g = glyfTable[name]
55 if g.numberOfContours == 0:
56 continue
57 if g.numberOfContours < 0 and not hasattr(g, "yMax"):
58 # Composite glyph without extents set.
59 # Calculate those.
60 g.recalcBounds(glyfTable)
61 minTopSideBearing = min(minTopSideBearing, tsb)
62 bsb = height - tsb - (g.yMax - g.yMin)
63 minBottomSideBearing = min(minBottomSideBearing, bsb)
64 extent = tsb + (g.yMax - g.yMin)
65 yMaxExtent = max(yMaxExtent, extent)
66
67 if yMaxExtent == -INFINITY:
68 # No glyph has outlines.
69 minTopSideBearing = 0
70 minBottomSideBearing = 0
71 yMaxExtent = 0
72
73 self.advanceHeightMax = advanceHeightMax
74 self.minTopSideBearing = minTopSideBearing
75 self.minBottomSideBearing = minBottomSideBearing
76 self.yMaxExtent = yMaxExtent
77 else:
78 # XXX CFF recalc...
79 pass
80
81 def toXML(self, writer, ttFont):
82 formatstring, names, fixes = sstruct.getformat(vheaFormat)
83 for name in names:
84 value = getattr(self, name)
85 writer.simpletag(name, value=value)
86 writer.newline()
87
88 def fromXML(self, name, attrs, content, ttFont):
89 setattr(self, name, safeEval(attrs["value"]))
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
--- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py
+++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
@@ -35,7 +35,8 @@
sstruct.unpack(vheaFormat, data, self)
def compile(self, ttFont):
- self.recalc(ttFont)
+ if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
+ self.recalc(ttFont)
return sstruct.pack(vheaFormat, self)
def recalc(self, ttFont):
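With the patch above, the `vhea` table is only recalculated when the font's `recalcBBoxes` flag is set, so vertical metrics from the source font survive subsetting. A rough sketch of how a caller might subset while preserving the original metrics, assuming current fontTools APIs (file names and sample text are placeholders):

```python
from fontTools import subset
from fontTools.ttLib import TTFont

# Load without recalculating bounding boxes or derived metrics.
font = TTFont("liti.ttf", recalcBBoxes=False)

options = subset.Options()
options.recalc_bounds = False  # same intent as pyftsubset --no-recalc-bounds

subsetter = subset.Subsetter(options=options)
subsetter.populate(text=u"example text")  # placeholder text to keep
subsetter.subset(font)
font.save("liti.subset.ttf")
```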
|
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py\n--- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py\n+++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py\n@@ -35,7 +35,8 @@\n \t\tsstruct.unpack(vheaFormat, data, self)\n \n \tdef compile(self, ttFont):\n-\t\tself.recalc(ttFont)\n+\t\tif ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:\n+\t\t\tself.recalc(ttFont)\n \t\treturn sstruct.pack(vheaFormat, self)\n \n \tdef recalc(self, ttFont):\n", "issue": "I find the font's line height is bigger than original font.\n I have tried pyftsubset with command line option --no-recalc-bounds ,\nbut the generated subfont's line height is still bigger than original font.\n\nI used html font-face render font.\n@font-face {\n font-family: 'freetype';\n src: url('font.ttf') format('truetype');\n }\n\nthe font file is microsoft chinese liti.ttf.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\n\nvheaFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion:\t\t16.16F\n\t\tascent:\t\t\th\n\t\tdescent:\t\th\n\t\tlineGap:\t\th\n\t\tadvanceHeightMax:\tH\n\t\tminTopSideBearing:\th\n\t\tminBottomSideBearing:\th\n\t\tyMaxExtent:\t\th\n\t\tcaretSlopeRise:\t\th\n\t\tcaretSlopeRun:\t\th\n\t\treserved0:\t\th\n\t\treserved1:\t\th\n\t\treserved2:\t\th\n\t\treserved3:\t\th\n\t\treserved4:\t\th\n\t\tmetricDataFormat:\th\n\t\tnumberOfVMetrics:\tH\n\"\"\"\n\nclass table__v_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__h_h_e_a\n\n\tdependencies = ['vmtx', 'glyf']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(vheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tself.recalc(ttFont)\n\t\treturn sstruct.pack(vheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tvtmxTable = ttFont['vmtx']\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tINFINITY = 100000\n\t\t\tadvanceHeightMax = 0\n\t\t\tminTopSideBearing = +INFINITY # arbitrary big number\n\t\t\tminBottomSideBearing = +INFINITY # arbitrary big number\n\t\t\tyMaxExtent = -INFINITY # arbitrary big negative number\n\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\theight, tsb = vtmxTable[name]\n\t\t\t\tadvanceHeightMax = max(advanceHeightMax, height)\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"yMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tminTopSideBearing = min(minTopSideBearing, tsb)\n\t\t\t\tbsb = height - tsb - (g.yMax - g.yMin)\n\t\t\t\tminBottomSideBearing = min(minBottomSideBearing, bsb)\n\t\t\t\textent = tsb + (g.yMax - g.yMin)\n\t\t\t\tyMaxExtent = max(yMaxExtent, extent)\n\n\t\t\tif yMaxExtent == -INFINITY:\n\t\t\t\t# No glyph has outlines.\n\t\t\t\tminTopSideBearing = 0\n\t\t\t\tminBottomSideBearing = 0\n\t\t\t\tyMaxExtent = 0\n\n\t\t\tself.advanceHeightMax = advanceHeightMax\n\t\t\tself.minTopSideBearing = minTopSideBearing\n\t\t\tself.minBottomSideBearing = minBottomSideBearing\n\t\t\tself.yMaxExtent = yMaxExtent\n\t\telse:\n\t\t\t# XXX CFF recalc...\n\t\t\tpass\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(vheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\twriter.simpletag(name, 
value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_v_h_e_a.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\n\nvheaFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion:\t\t16.16F\n\t\tascent:\t\t\th\n\t\tdescent:\t\th\n\t\tlineGap:\t\th\n\t\tadvanceHeightMax:\tH\n\t\tminTopSideBearing:\th\n\t\tminBottomSideBearing:\th\n\t\tyMaxExtent:\t\th\n\t\tcaretSlopeRise:\t\th\n\t\tcaretSlopeRun:\t\th\n\t\treserved0:\t\th\n\t\treserved1:\t\th\n\t\treserved2:\t\th\n\t\treserved3:\t\th\n\t\treserved4:\t\th\n\t\tmetricDataFormat:\th\n\t\tnumberOfVMetrics:\tH\n\"\"\"\n\nclass table__v_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__h_h_e_a\n\n\tdependencies = ['vmtx', 'glyf']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(vheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tif ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:\n\t\t\tself.recalc(ttFont)\n\t\treturn sstruct.pack(vheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tvtmxTable = ttFont['vmtx']\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tINFINITY = 100000\n\t\t\tadvanceHeightMax = 0\n\t\t\tminTopSideBearing = +INFINITY # arbitrary big number\n\t\t\tminBottomSideBearing = +INFINITY # arbitrary big number\n\t\t\tyMaxExtent = -INFINITY # arbitrary big negative number\n\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\theight, tsb = vtmxTable[name]\n\t\t\t\tadvanceHeightMax = max(advanceHeightMax, height)\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"yMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tminTopSideBearing = min(minTopSideBearing, tsb)\n\t\t\t\tbsb = height - tsb - (g.yMax - g.yMin)\n\t\t\t\tminBottomSideBearing = min(minBottomSideBearing, bsb)\n\t\t\t\textent = tsb + (g.yMax - g.yMin)\n\t\t\t\tyMaxExtent = max(yMaxExtent, extent)\n\n\t\t\tif yMaxExtent == -INFINITY:\n\t\t\t\t# No glyph has outlines.\n\t\t\t\tminTopSideBearing = 0\n\t\t\t\tminBottomSideBearing = 0\n\t\t\t\tyMaxExtent = 0\n\n\t\t\tself.advanceHeightMax = advanceHeightMax\n\t\t\tself.minTopSideBearing = minTopSideBearing\n\t\t\tself.minBottomSideBearing = minBottomSideBearing\n\t\t\tself.yMaxExtent = yMaxExtent\n\t\telse:\n\t\t\t# XXX CFF recalc...\n\t\t\tpass\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(vheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\twriter.simpletag(name, value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_v_h_e_a.py"}]}
| 1,316 | 161 |
gh_patches_debug_22699
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-3592
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prevent full disk
### Describe the bug
Sometimes the server's storage gets full, because for some reason filepond uploads aren't being deleted. Today this caused the server to crash (because the full server disk broke redis). We should prevent this from happening in multiple ways:
- Make sure old uploads get deleted. It would be nice to find out why the uploads aren't already being deleted, but we should also periodically remove old files from the media volume.
- Maybe limit the volume size such that it getting full does not influence the rest of the server. But docker doesn't really support that nicely. We could make a separate volume for it on the host and bind-mount it I guess.
### How to reproduce
<!-- Steps to reproduce the behaviour -->
1. Upload lots of albums to a docker deployment
2. See the media volume get larger.
### Expected behaviour
Stuff is cleaned up once it's processed and periodically.
--- END ISSUE ---
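One way to cover the "periodically remove old files" part is a scheduled task that drops stale filepond uploads. This is only a sketch, not the project's actual code: the `uploaded` timestamp field name and the one-day cutoff are assumptions that would need checking against the installed django-drf-filepond version.

```python
# Hypothetical periodic cleanup task (sketch only).
from datetime import timedelta

from celery import shared_task
from django.utils import timezone
from django_drf_filepond.models import TemporaryUpload


@shared_task
def clean_up_stale_filepond_uploads():
    # Assumption: TemporaryUpload rows carry an `uploaded` timestamp field.
    cutoff = timezone.now() - timedelta(days=1)
    for upload in TemporaryUpload.objects.filter(uploaded__lt=cutoff):
        upload.file.delete()  # remove the stored file from the media volume
        upload.delete()       # remove the database row
```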
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/photos/tasks.py`
Content:
```
1 from django.db import transaction
2 from django.dispatch import Signal
3
4 from celery import shared_task
5 from django_drf_filepond.models import TemporaryUpload
6 from django_filepond_widget.fields import FilePondFile
7
8 from photos.models import Album
9
10 from .services import extract_archive
11
12 album_uploaded = Signal()
13
14
15 @shared_task
16 def process_album_upload(archive_upload_id: str, album_id: int):
17 try:
18 album = Album.objects.get(id=album_id)
19 except Album.DoesNotExist:
20 return
21
22 archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file
23 try:
24 with transaction.atomic():
25 # We make the upload atomic separately, so we can keep using the db if it fails.
26 # See https://docs.djangoproject.com/en/4.2/topics/db/transactions/#handling-exceptions-within-postgresql-transactions.
27 extract_archive(album, archive)
28 album.is_processing = False
29 album.save()
30
31 # Send signal to notify that an album has been uploaded. This is used
32 # by facedetection, and possibly in the future to notify the uploader.
33 album_uploaded.send(sender=None, album=album)
34 finally:
35 if isinstance(archive, FilePondFile):
36 archive.remove()
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/photos/tasks.py b/website/photos/tasks.py
--- a/website/photos/tasks.py
+++ b/website/photos/tasks.py
@@ -3,7 +3,6 @@
from celery import shared_task
from django_drf_filepond.models import TemporaryUpload
-from django_filepond_widget.fields import FilePondFile
from photos.models import Album
@@ -19,7 +18,8 @@
except Album.DoesNotExist:
return
- archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file
+ upload = TemporaryUpload.objects.get(upload_id=archive_upload_id)
+ archive = upload.file
try:
with transaction.atomic():
# We make the upload atomic separately, so we can keep using the db if it fails.
@@ -32,5 +32,5 @@
# by facedetection, and possibly in the future to notify the uploader.
album_uploaded.send(sender=None, album=album)
finally:
- if isinstance(archive, FilePondFile):
- archive.remove()
+ archive.delete()
+ upload.delete()
|
{"golden_diff": "diff --git a/website/photos/tasks.py b/website/photos/tasks.py\n--- a/website/photos/tasks.py\n+++ b/website/photos/tasks.py\n@@ -3,7 +3,6 @@\n \n from celery import shared_task\n from django_drf_filepond.models import TemporaryUpload\n-from django_filepond_widget.fields import FilePondFile\n \n from photos.models import Album\n \n@@ -19,7 +18,8 @@\n except Album.DoesNotExist:\n return\n \n- archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file\n+ upload = TemporaryUpload.objects.get(upload_id=archive_upload_id)\n+ archive = upload.file\n try:\n with transaction.atomic():\n # We make the upload atomic separately, so we can keep using the db if it fails.\n@@ -32,5 +32,5 @@\n # by facedetection, and possibly in the future to notify the uploader.\n album_uploaded.send(sender=None, album=album)\n finally:\n- if isinstance(archive, FilePondFile):\n- archive.remove()\n+ archive.delete()\n+ upload.delete()\n", "issue": "Prevent full disk\n### Describe the bug\r\nSometimes the server's storage gets full, because for some reason filepond uploads aren't being deleted. Today this caused the server to crash (because the full server disk broke redis). We should prevent this from happening in multiple ways:\r\n\r\n- Make old uploads be deleted. Would be nice to find out why the uploads aren't being deleted already. But we should also (additionally) periodically remove old files from the media volume.\r\n- Maybe limit the volume size such that it getting full does not influence the rest of the server. But docker doesn't really support that nicely. We could make a separate volume for it on the host and bind-mount it I guess.\r\n\r\n### How to reproduce\r\n<!-- Steps to reproduce the behaviour -->\r\n1. Upload lots of albums to a docker deployment\r\n2. See the media volume get larger.\r\n\r\n### Expected behaviour\r\nStuff is cleaned up once it's processed and periodically.\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db import transaction\nfrom django.dispatch import Signal\n\nfrom celery import shared_task\nfrom django_drf_filepond.models import TemporaryUpload\nfrom django_filepond_widget.fields import FilePondFile\n\nfrom photos.models import Album\n\nfrom .services import extract_archive\n\nalbum_uploaded = Signal()\n\n\n@shared_task\ndef process_album_upload(archive_upload_id: str, album_id: int):\n try:\n album = Album.objects.get(id=album_id)\n except Album.DoesNotExist:\n return\n\n archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file\n try:\n with transaction.atomic():\n # We make the upload atomic separately, so we can keep using the db if it fails.\n # See https://docs.djangoproject.com/en/4.2/topics/db/transactions/#handling-exceptions-within-postgresql-transactions.\n extract_archive(album, archive)\n album.is_processing = False\n album.save()\n\n # Send signal to notify that an album has been uploaded. 
This is used\n # by facedetection, and possibly in the future to notify the uploader.\n album_uploaded.send(sender=None, album=album)\n finally:\n if isinstance(archive, FilePondFile):\n archive.remove()\n", "path": "website/photos/tasks.py"}], "after_files": [{"content": "from django.db import transaction\nfrom django.dispatch import Signal\n\nfrom celery import shared_task\nfrom django_drf_filepond.models import TemporaryUpload\n\nfrom photos.models import Album\n\nfrom .services import extract_archive\n\nalbum_uploaded = Signal()\n\n\n@shared_task\ndef process_album_upload(archive_upload_id: str, album_id: int):\n try:\n album = Album.objects.get(id=album_id)\n except Album.DoesNotExist:\n return\n\n upload = TemporaryUpload.objects.get(upload_id=archive_upload_id)\n archive = upload.file\n try:\n with transaction.atomic():\n # We make the upload atomic separately, so we can keep using the db if it fails.\n # See https://docs.djangoproject.com/en/4.2/topics/db/transactions/#handling-exceptions-within-postgresql-transactions.\n extract_archive(album, archive)\n album.is_processing = False\n album.save()\n\n # Send signal to notify that an album has been uploaded. This is used\n # by facedetection, and possibly in the future to notify the uploader.\n album_uploaded.send(sender=None, album=album)\n finally:\n archive.delete()\n upload.delete()\n", "path": "website/photos/tasks.py"}]}
| 775 | 234 |
gh_patches_debug_16164
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-1631
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make spawn_pipeline not depend on the order of tasks in the yaml file
Currently, if a task is defined in the yaml file before its dependencies, the spawn_pipeline script fails with:
```
Traceback (most recent call last):
File "/code/spawn_pipeline.py", line 132, in <module>
main()
File "/code/spawn_pipeline.py", line 110, in main
new_dependencies.append(id_mapping[dependency])
KeyError: 'regressor-finder'
```
So things like https://github.com/mozilla/bugbug/commit/aaa67b3b0a1db7530cbf88df644aff076fcd2e4e are needed.
We should make the spawn_pipeline script not depend on the order of definition of tasks in the yaml file.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `infra/spawn_pipeline.py`
Content:
```
1 #!/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2019 Mozilla
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """
19 This script triggers the data pipeline for the bugbug project
20 """
21
22 import argparse
23 import os
24 import sys
25
26 import jsone
27 import requests.packages.urllib3
28 import taskcluster
29 import yaml
30
31 requests.packages.urllib3.disable_warnings()
32
33 TASKCLUSTER_DEFAULT_URL = "https://community-tc.services.mozilla.com"
34
35
36 def get_taskcluster_options():
37 """
38 Helper to get the Taskcluster setup options
39 according to current environment (local or Taskcluster)
40 """
41 options = taskcluster.optionsFromEnvironment()
42 proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL")
43
44 if proxy_url is not None:
45 # Always use proxy url when available
46 options["rootUrl"] = proxy_url
47
48 if "rootUrl" not in options:
49 # Always have a value in root url
50 options["rootUrl"] = TASKCLUSTER_DEFAULT_URL
51
52 return options
53
54
55 def main():
56 parser = argparse.ArgumentParser(description="Spawn tasks for bugbug data pipeline")
57 parser.add_argument("data_pipeline_json")
58
59 args = parser.parse_args()
60 decision_task_id = os.environ.get("TASK_ID")
61 options = get_taskcluster_options()
62 add_self = False
63 if decision_task_id:
64 add_self = True
65 task_group_id = decision_task_id
66 else:
67 task_group_id = taskcluster.utils.slugId()
68 keys = {"taskGroupId": task_group_id}
69
70 id_mapping = {}
71
72 # First pass, do the template rendering and dependencies resolution
73 tasks = []
74
75 with open(args.data_pipeline_json) as pipeline_file:
76 raw_tasks = yaml.safe_load(pipeline_file.read())
77
78 version = os.getenv("TAG", "latest")
79 context = {"version": version}
80 rendered = jsone.render(raw_tasks, context)
81
82 for task in rendered["tasks"]:
83 # We need to generate new unique task ids for taskcluster to be happy
84 # but need to identify dependencies across tasks. So we create a
85 # mapping between an internal ID and the generate ID
86
87 task_id = taskcluster.utils.slugId()
88 task_internal_id = task.pop("ID")
89
90 if task_internal_id in id_mapping:
91 raise ValueError(f"Conflicting IDs {task_internal_id}")
92
93 id_mapping[task_internal_id] = task_id
94
95 for key, value in keys.items():
96 task[key] = value
97
98 task_payload = task["payload"]
99
100 if "env" in task_payload and task_payload["env"]:
101 task_payload["env"]["TAG"] = version
102 else:
103 task_payload["env"] = {
104 "TAG": version,
105 }
106
107 # Process the dependencies
108 new_dependencies = []
109 for dependency in task.get("dependencies", []):
110 new_dependencies.append(id_mapping[dependency])
111
112 if add_self:
113 new_dependencies.append(decision_task_id)
114
115 task["dependencies"] = new_dependencies
116
117 tasks.append((task_id, task))
118
119 # Now sends them
120 queue = taskcluster.Queue(options)
121 try:
122 for task_id, task_payload in tasks:
123 queue.createTask(task_id, task_payload)
124
125 print(f"https://community-tc.services.mozilla.com/tasks/groups/{task_group_id}")
126 except taskcluster.exceptions.TaskclusterAuthFailure as e:
127 print(f"TaskclusterAuthFailure: {e.body}", file=sys.stderr)
128 raise
129
130
131 if __name__ == "__main__":
132 main()
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/infra/spawn_pipeline.py b/infra/spawn_pipeline.py
--- a/infra/spawn_pipeline.py
+++ b/infra/spawn_pipeline.py
@@ -85,13 +85,19 @@
# mapping between an internal ID and the generate ID
task_id = taskcluster.utils.slugId()
- task_internal_id = task.pop("ID")
+ task_internal_id = task["ID"]
if task_internal_id in id_mapping:
raise ValueError(f"Conflicting IDs {task_internal_id}")
+ # Store each task ID in the id_mapping dictionary before processing dependencies.
+ # This way, tasks can be defined in any order.
id_mapping[task_internal_id] = task_id
+ for task in rendered["tasks"]:
+ task_internal_id = task.pop("ID")
+ task_id = id_mapping[task_internal_id]
+
for key, value in keys.items():
task[key] = value
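To see why the two-pass structure in the patch removes the ordering constraint, here is a small self-contained sketch with made-up task data (not the real pipeline YAML): every internal ID gets an external ID first, so a dependency can be resolved even when it points at a task defined later in the file.

```python
import uuid

# Hypothetical input: "B" depends on "A" but is listed first.
tasks = [
    {"ID": "B", "dependencies": ["A"]},
    {"ID": "A", "dependencies": []},
]

# Pass 1: assign an external ID to every internal ID.
id_mapping = {task["ID"]: str(uuid.uuid4()) for task in tasks}

# Pass 2: rewrite dependencies; definition order no longer matters.
for task in tasks:
    task["dependencies"] = [id_mapping[dep] for dep in task["dependencies"]]

print(tasks)
```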
|
{"golden_diff": "diff --git a/infra/spawn_pipeline.py b/infra/spawn_pipeline.py\n--- a/infra/spawn_pipeline.py\n+++ b/infra/spawn_pipeline.py\n@@ -85,13 +85,19 @@\n # mapping between an internal ID and the generate ID\n \n task_id = taskcluster.utils.slugId()\n- task_internal_id = task.pop(\"ID\")\n+ task_internal_id = task[\"ID\"]\n \n if task_internal_id in id_mapping:\n raise ValueError(f\"Conflicting IDs {task_internal_id}\")\n \n+ # Store each task ID in the id_mapping dictionary before processing dependencies.\n+ # This way, tasks can be defined in any order.\n id_mapping[task_internal_id] = task_id\n \n+ for task in rendered[\"tasks\"]:\n+ task_internal_id = task.pop(\"ID\")\n+ task_id = id_mapping[task_internal_id]\n+\n for key, value in keys.items():\n task[key] = value\n", "issue": "Make spawn_pipeline not depend on the order of tasks in the yaml file\nCurrently, if a task is defined in the yaml file before its dependencies, the spawn_pipeline script fails with:\r\n```\r\nTraceback (most recent call last):\r\n File \"/code/spawn_pipeline.py\", line 132, in <module>\r\n main()\r\n File \"/code/spawn_pipeline.py\", line 110, in main\r\n new_dependencies.append(id_mapping[dependency])\r\nKeyError: 'regressor-finder'\r\n```\r\n\r\nSo things like https://github.com/mozilla/bugbug/commit/aaa67b3b0a1db7530cbf88df644aff076fcd2e4e are needed.\r\n\r\nWe should make the spawn_pipeline script not depend on the order of definition of tasks in the yaml file.\n", "before_files": [{"content": "#!/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Mozilla\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis script triggers the data pipeline for the bugbug project\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport jsone\nimport requests.packages.urllib3\nimport taskcluster\nimport yaml\n\nrequests.packages.urllib3.disable_warnings()\n\nTASKCLUSTER_DEFAULT_URL = \"https://community-tc.services.mozilla.com\"\n\n\ndef get_taskcluster_options():\n \"\"\"\n Helper to get the Taskcluster setup options\n according to current environment (local or Taskcluster)\n \"\"\"\n options = taskcluster.optionsFromEnvironment()\n proxy_url = os.environ.get(\"TASKCLUSTER_PROXY_URL\")\n\n if proxy_url is not None:\n # Always use proxy url when available\n options[\"rootUrl\"] = proxy_url\n\n if \"rootUrl\" not in options:\n # Always have a value in root url\n options[\"rootUrl\"] = TASKCLUSTER_DEFAULT_URL\n\n return options\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Spawn tasks for bugbug data pipeline\")\n parser.add_argument(\"data_pipeline_json\")\n\n args = parser.parse_args()\n decision_task_id = os.environ.get(\"TASK_ID\")\n options = get_taskcluster_options()\n add_self = False\n if decision_task_id:\n add_self = True\n task_group_id = decision_task_id\n else:\n task_group_id = taskcluster.utils.slugId()\n keys = {\"taskGroupId\": task_group_id}\n\n id_mapping = {}\n\n # First pass, do the template rendering and dependencies resolution\n tasks = []\n\n with 
open(args.data_pipeline_json) as pipeline_file:\n raw_tasks = yaml.safe_load(pipeline_file.read())\n\n version = os.getenv(\"TAG\", \"latest\")\n context = {\"version\": version}\n rendered = jsone.render(raw_tasks, context)\n\n for task in rendered[\"tasks\"]:\n # We need to generate new unique task ids for taskcluster to be happy\n # but need to identify dependencies across tasks. So we create a\n # mapping between an internal ID and the generate ID\n\n task_id = taskcluster.utils.slugId()\n task_internal_id = task.pop(\"ID\")\n\n if task_internal_id in id_mapping:\n raise ValueError(f\"Conflicting IDs {task_internal_id}\")\n\n id_mapping[task_internal_id] = task_id\n\n for key, value in keys.items():\n task[key] = value\n\n task_payload = task[\"payload\"]\n\n if \"env\" in task_payload and task_payload[\"env\"]:\n task_payload[\"env\"][\"TAG\"] = version\n else:\n task_payload[\"env\"] = {\n \"TAG\": version,\n }\n\n # Process the dependencies\n new_dependencies = []\n for dependency in task.get(\"dependencies\", []):\n new_dependencies.append(id_mapping[dependency])\n\n if add_self:\n new_dependencies.append(decision_task_id)\n\n task[\"dependencies\"] = new_dependencies\n\n tasks.append((task_id, task))\n\n # Now sends them\n queue = taskcluster.Queue(options)\n try:\n for task_id, task_payload in tasks:\n queue.createTask(task_id, task_payload)\n\n print(f\"https://community-tc.services.mozilla.com/tasks/groups/{task_group_id}\")\n except taskcluster.exceptions.TaskclusterAuthFailure as e:\n print(f\"TaskclusterAuthFailure: {e.body}\", file=sys.stderr)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "infra/spawn_pipeline.py"}], "after_files": [{"content": "#!/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Mozilla\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis script triggers the data pipeline for the bugbug project\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport jsone\nimport requests.packages.urllib3\nimport taskcluster\nimport yaml\n\nrequests.packages.urllib3.disable_warnings()\n\nTASKCLUSTER_DEFAULT_URL = \"https://community-tc.services.mozilla.com\"\n\n\ndef get_taskcluster_options():\n \"\"\"\n Helper to get the Taskcluster setup options\n according to current environment (local or Taskcluster)\n \"\"\"\n options = taskcluster.optionsFromEnvironment()\n proxy_url = os.environ.get(\"TASKCLUSTER_PROXY_URL\")\n\n if proxy_url is not None:\n # Always use proxy url when available\n options[\"rootUrl\"] = proxy_url\n\n if \"rootUrl\" not in options:\n # Always have a value in root url\n options[\"rootUrl\"] = TASKCLUSTER_DEFAULT_URL\n\n return options\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Spawn tasks for bugbug data pipeline\")\n parser.add_argument(\"data_pipeline_json\")\n\n args = parser.parse_args()\n decision_task_id = os.environ.get(\"TASK_ID\")\n options = get_taskcluster_options()\n add_self = False\n if decision_task_id:\n add_self = True\n task_group_id = decision_task_id\n else:\n 
task_group_id = taskcluster.utils.slugId()\n keys = {\"taskGroupId\": task_group_id}\n\n id_mapping = {}\n\n # First pass, do the template rendering and dependencies resolution\n tasks = []\n\n with open(args.data_pipeline_json) as pipeline_file:\n raw_tasks = yaml.safe_load(pipeline_file.read())\n\n version = os.getenv(\"TAG\", \"latest\")\n context = {\"version\": version}\n rendered = jsone.render(raw_tasks, context)\n\n for task in rendered[\"tasks\"]:\n # We need to generate new unique task ids for taskcluster to be happy\n # but need to identify dependencies across tasks. So we create a\n # mapping between an internal ID and the generate ID\n\n task_id = taskcluster.utils.slugId()\n task_internal_id = task[\"ID\"]\n\n if task_internal_id in id_mapping:\n raise ValueError(f\"Conflicting IDs {task_internal_id}\")\n\n # Store each task ID in the id_mapping dictionary before processing dependencies.\n # This way, tasks can be defined in any order.\n id_mapping[task_internal_id] = task_id\n\n for task in rendered[\"tasks\"]:\n task_internal_id = task.pop(\"ID\")\n task_id = id_mapping[task_internal_id]\n\n for key, value in keys.items():\n task[key] = value\n\n task_payload = task[\"payload\"]\n\n if \"env\" in task_payload and task_payload[\"env\"]:\n task_payload[\"env\"][\"TAG\"] = version\n else:\n task_payload[\"env\"] = {\n \"TAG\": version,\n }\n\n # Process the dependencies\n new_dependencies = []\n for dependency in task.get(\"dependencies\", []):\n new_dependencies.append(id_mapping[dependency])\n\n if add_self:\n new_dependencies.append(decision_task_id)\n\n task[\"dependencies\"] = new_dependencies\n\n tasks.append((task_id, task))\n\n # Now sends them\n queue = taskcluster.Queue(options)\n try:\n for task_id, task_payload in tasks:\n queue.createTask(task_id, task_payload)\n\n print(f\"https://community-tc.services.mozilla.com/tasks/groups/{task_group_id}\")\n except taskcluster.exceptions.TaskclusterAuthFailure as e:\n print(f\"TaskclusterAuthFailure: {e.body}\", file=sys.stderr)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "infra/spawn_pipeline.py"}]}
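An aside for readers skimming this record: a self-contained sketch of the two-pass ID resolution the patch above introduces. The task definitions and the use of `uuid` for generated IDs are illustrative assumptions (the real code uses `taskcluster.utils.slugId()`), but the ordering argument is the same — map every internal ID first, then resolve dependencies, so YAML definition order no longer matters.

```python
import uuid

# Hypothetical task definitions: "upload" is declared before the task it
# depends on, which is exactly the ordering a single-pass lookup rejects.
tasks = [
    {"ID": "upload", "dependencies": ["build"]},
    {"ID": "build", "dependencies": []},
]

# Pass 1: assign every internal ID a generated task ID up front.
id_mapping = {task["ID"]: str(uuid.uuid4()) for task in tasks}

# Pass 2: rewrite dependencies using the completed mapping.
for task in tasks:
    task["taskId"] = id_mapping[task["ID"]]
    task["dependencies"] = [id_mapping[dep] for dep in task["dependencies"]]

print(tasks[0]["dependencies"] == [tasks[1]["taskId"]])  # True
```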
| 1,607 | 208 |
gh_patches_debug_11927
|
rasdani/github-patches
|
git_diff
|
pytorch__text-280
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError in Python 2.7
https://github.com/pytorch/text/blob/a2795e5731d1b7c0298a1b5087bb8142e1c39d0b/torchtext/datasets/imdb.py#L32
In Python 2.7, it reports `TypeError: 'encoding' is an invalid keyword argument for this function`.

I replaced `open` with `io.open` to fix it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/datasets/imdb.py`
Content:
```
1 import os
2 import glob
3
4 from .. import data
5
6
7 class IMDB(data.Dataset):
8
9 urls = ['http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz']
10 name = 'imdb'
11 dirname = 'aclImdb'
12
13 @staticmethod
14 def sort_key(ex):
15 return len(ex.text)
16
17 def __init__(self, path, text_field, label_field, **kwargs):
18 """Create an IMDB dataset instance given a path and fields.
19
20 Arguments:
21 path: Path to the dataset's highest level directory
22 text_field: The field that will be used for text data.
23 label_field: The field that will be used for label data.
24 Remaining keyword arguments: Passed to the constructor of
25 data.Dataset.
26 """
27 fields = [('text', text_field), ('label', label_field)]
28 examples = []
29
30 for label in ['pos', 'neg']:
31 for fname in glob.iglob(os.path.join(path, label, '*.txt')):
32 with open(fname, 'r', encoding="utf-8") as f:
33 text = f.readline()
34 examples.append(data.Example.fromlist([text, label], fields))
35
36 super(IMDB, self).__init__(examples, fields, **kwargs)
37
38 @classmethod
39 def splits(cls, text_field, label_field, root='.data',
40 train='train', test='test', **kwargs):
41 """Create dataset objects for splits of the IMDB dataset.
42
43 Arguments:
44 text_field: The field that will be used for the sentence.
45 label_field: The field that will be used for label data.
46 root: Root dataset storage directory. Default is '.data'.
47 train: The directory that contains the training examples
48 test: The directory that contains the test examples
49 Remaining keyword arguments: Passed to the splits method of
50 Dataset.
51 """
52 return super(IMDB, cls).splits(
53 root=root, text_field=text_field, label_field=label_field,
54 train=train, validation=None, test=test, **kwargs)
55
56 @classmethod
57 def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):
58 """Creater iterator objects for splits of the IMDB dataset.
59
60 Arguments:
61 batch_size: Batch_size
62 device: Device to create batches on. Use - 1 for CPU and None for
63 the currently active GPU device.
64 root: The root directory that contains the imdb dataset subdirectory
65 vectors: one of the available pretrained vectors or a list with each
66 element one of the available pretrained vectors (see Vocab.load_vectors)
67
68 Remaining keyword arguments: Passed to the splits method.
69 """
70 TEXT = data.Field()
71 LABEL = data.Field(sequential=False)
72
73 train, test = cls.splits(TEXT, LABEL, root=root, **kwargs)
74
75 TEXT.build_vocab(train, vectors=vectors)
76 LABEL.build_vocab(train)
77
78 return data.BucketIterator.splits(
79 (train, test), batch_size=batch_size, device=device)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchtext/datasets/imdb.py b/torchtext/datasets/imdb.py
--- a/torchtext/datasets/imdb.py
+++ b/torchtext/datasets/imdb.py
@@ -1,5 +1,6 @@
import os
import glob
+import io
from .. import data
@@ -29,7 +30,7 @@
for label in ['pos', 'neg']:
for fname in glob.iglob(os.path.join(path, label, '*.txt')):
- with open(fname, 'r', encoding="utf-8") as f:
+ with io.open(fname, 'r', encoding="utf-8") as f:
text = f.readline()
examples.append(data.Example.fromlist([text, label], fields))
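For context on the Python 2/3 difference behind this record, a minimal sketch of the portable pattern the patch adopts; the file name and contents are placeholders invented for illustration.

```python
import io

# Write a small UTF-8 file first so the example is self-contained.
with io.open("review.txt", "w", encoding="utf-8") as f:
    f.write(u"a great movie \u2013 loved it\n")

def read_first_line(fname):
    # io.open exists on both Python 2.7 and 3.x and accepts `encoding`,
    # unlike the Python 2 built-in open(); it returns unicode text on both.
    with io.open(fname, "r", encoding="utf-8") as f:
        return f.readline()

print(read_first_line("review.txt"))
```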
|
{"golden_diff": "diff --git a/torchtext/datasets/imdb.py b/torchtext/datasets/imdb.py\n--- a/torchtext/datasets/imdb.py\n+++ b/torchtext/datasets/imdb.py\n@@ -1,5 +1,6 @@\n import os\n import glob\n+import io\n \n from .. import data\n \n@@ -29,7 +30,7 @@\n \n for label in ['pos', 'neg']:\n for fname in glob.iglob(os.path.join(path, label, '*.txt')):\n- with open(fname, 'r', encoding=\"utf-8\") as f:\n+ with io.open(fname, 'r', encoding=\"utf-8\") as f:\n text = f.readline()\n examples.append(data.Example.fromlist([text, label], fields))\n", "issue": "TypeError in Python 2.7\nhttps://github.com/pytorch/text/blob/a2795e5731d1b7c0298a1b5087bb8142e1c39d0b/torchtext/datasets/imdb.py#L32\r\n\r\nIn python 2.7, it will report that `TypeError: 'encoding' is an invalid keyword argument for this function`.\r\n\r\nI replace `open` with `io.open` to fix it.\n", "before_files": [{"content": "import os\nimport glob\n\nfrom .. import data\n\n\nclass IMDB(data.Dataset):\n\n urls = ['http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz']\n name = 'imdb'\n dirname = 'aclImdb'\n\n @staticmethod\n def sort_key(ex):\n return len(ex.text)\n\n def __init__(self, path, text_field, label_field, **kwargs):\n \"\"\"Create an IMDB dataset instance given a path and fields.\n\n Arguments:\n path: Path to the dataset's highest level directory\n text_field: The field that will be used for text data.\n label_field: The field that will be used for label data.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n fields = [('text', text_field), ('label', label_field)]\n examples = []\n\n for label in ['pos', 'neg']:\n for fname in glob.iglob(os.path.join(path, label, '*.txt')):\n with open(fname, 'r', encoding=\"utf-8\") as f:\n text = f.readline()\n examples.append(data.Example.fromlist([text, label], fields))\n\n super(IMDB, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, text_field, label_field, root='.data',\n train='train', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of the IMDB dataset.\n\n Arguments:\n text_field: The field that will be used for the sentence.\n label_field: The field that will be used for label data.\n root: Root dataset storage directory. Default is '.data'.\n train: The directory that contains the training examples\n test: The directory that contains the test examples\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(IMDB, cls).splits(\n root=root, text_field=text_field, label_field=label_field,\n train=train, validation=None, test=test, **kwargs)\n\n @classmethod\n def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):\n \"\"\"Creater iterator objects for splits of the IMDB dataset.\n\n Arguments:\n batch_size: Batch_size\n device: Device to create batches on. 
Use - 1 for CPU and None for\n the currently active GPU device.\n root: The root directory that contains the imdb dataset subdirectory\n vectors: one of the available pretrained vectors or a list with each\n element one of the available pretrained vectors (see Vocab.load_vectors)\n\n Remaining keyword arguments: Passed to the splits method.\n \"\"\"\n TEXT = data.Field()\n LABEL = data.Field(sequential=False)\n\n train, test = cls.splits(TEXT, LABEL, root=root, **kwargs)\n\n TEXT.build_vocab(train, vectors=vectors)\n LABEL.build_vocab(train)\n\n return data.BucketIterator.splits(\n (train, test), batch_size=batch_size, device=device)\n", "path": "torchtext/datasets/imdb.py"}], "after_files": [{"content": "import os\nimport glob\nimport io\n\nfrom .. import data\n\n\nclass IMDB(data.Dataset):\n\n urls = ['http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz']\n name = 'imdb'\n dirname = 'aclImdb'\n\n @staticmethod\n def sort_key(ex):\n return len(ex.text)\n\n def __init__(self, path, text_field, label_field, **kwargs):\n \"\"\"Create an IMDB dataset instance given a path and fields.\n\n Arguments:\n path: Path to the dataset's highest level directory\n text_field: The field that will be used for text data.\n label_field: The field that will be used for label data.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n fields = [('text', text_field), ('label', label_field)]\n examples = []\n\n for label in ['pos', 'neg']:\n for fname in glob.iglob(os.path.join(path, label, '*.txt')):\n with io.open(fname, 'r', encoding=\"utf-8\") as f:\n text = f.readline()\n examples.append(data.Example.fromlist([text, label], fields))\n\n super(IMDB, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, text_field, label_field, root='.data',\n train='train', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of the IMDB dataset.\n\n Arguments:\n text_field: The field that will be used for the sentence.\n label_field: The field that will be used for label data.\n root: Root dataset storage directory. Default is '.data'.\n train: The directory that contains the training examples\n test: The directory that contains the test examples\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(IMDB, cls).splits(\n root=root, text_field=text_field, label_field=label_field,\n train=train, validation=None, test=test, **kwargs)\n\n @classmethod\n def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):\n \"\"\"Creater iterator objects for splits of the IMDB dataset.\n\n Arguments:\n batch_size: Batch_size\n device: Device to create batches on. Use - 1 for CPU and None for\n the currently active GPU device.\n root: The root directory that contains the imdb dataset subdirectory\n vectors: one of the available pretrained vectors or a list with each\n element one of the available pretrained vectors (see Vocab.load_vectors)\n\n Remaining keyword arguments: Passed to the splits method.\n \"\"\"\n TEXT = data.Field()\n LABEL = data.Field(sequential=False)\n\n train, test = cls.splits(TEXT, LABEL, root=root, **kwargs)\n\n TEXT.build_vocab(train, vectors=vectors)\n LABEL.build_vocab(train)\n\n return data.BucketIterator.splits(\n (train, test), batch_size=batch_size, device=device)\n", "path": "torchtext/datasets/imdb.py"}]}
| 1,179 | 168 |
gh_patches_debug_12119
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-647
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
what have done to static.py?
Last Friday everything was OK and my static file test worked fine.

Today, when I pip install sanic==0.5.1,
it raises a 404 error.

When I pip install sanic==0.5.0,
everything is OK again.

It seems like the code below has some problem:
if not file_path.startswith(root_path):
raise FileNotFound('File not found',
path=file_or_directory,
relative_url=file_uri)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/static.py`
Content:
```
1 from mimetypes import guess_type
2 from os import path
3 from re import sub
4 from time import strftime, gmtime
5 from urllib.parse import unquote
6
7 from aiofiles.os import stat
8
9 from sanic.exceptions import (
10 ContentRangeError,
11 FileNotFound,
12 HeaderNotFound,
13 InvalidUsage,
14 )
15 from sanic.handlers import ContentRangeHandler
16 from sanic.response import file, HTTPResponse
17
18
19 def register(app, uri, file_or_directory, pattern,
20 use_modified_since, use_content_range):
21 # TODO: Though sanic is not a file server, I feel like we should at least
22 # make a good effort here. Modified-since is nice, but we could
23 # also look into etags, expires, and caching
24 """
25 Register a static directory handler with Sanic by adding a route to the
26 router and registering a handler.
27
28 :param app: Sanic
29 :param file_or_directory: File or directory path to serve from
30 :param uri: URL to serve from
31 :param pattern: regular expression used to match files in the URL
32 :param use_modified_since: If true, send file modified time, and return
33 not modified if the browser's matches the
34 server's
35 :param use_content_range: If true, process header for range requests
36 and sends the file part that is requested
37 """
38 # If we're not trying to match a file directly,
39 # serve from the folder
40 if not path.isfile(file_or_directory):
41 uri += '<file_uri:' + pattern + '>'
42
43 async def _handler(request, file_uri=None):
44 # Using this to determine if the URL is trying to break out of the path
45 # served. os.path.realpath seems to be very slow
46 if file_uri and '../' in file_uri:
47 raise InvalidUsage("Invalid URL")
48 # Merge served directory and requested file if provided
49 # Strip all / that in the beginning of the URL to help prevent python
50 # from herping a derp and treating the uri as an absolute path
51 root_path = file_path = file_or_directory
52 if file_uri:
53 file_path = path.join(
54 file_or_directory, sub('^[/]*', '', file_uri))
55
56 # URL decode the path sent by the browser otherwise we won't be able to
57 # match filenames which got encoded (filenames with spaces etc)
58 file_path = path.abspath(unquote(file_path))
59 if not file_path.startswith(root_path):
60 raise FileNotFound('File not found',
61 path=file_or_directory,
62 relative_url=file_uri)
63 try:
64 headers = {}
65 # Check if the client has been sent this file before
66 # and it has not been modified since
67 stats = None
68 if use_modified_since:
69 stats = await stat(file_path)
70 modified_since = strftime(
71 '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))
72 if request.headers.get('If-Modified-Since') == modified_since:
73 return HTTPResponse(status=304)
74 headers['Last-Modified'] = modified_since
75 _range = None
76 if use_content_range:
77 _range = None
78 if not stats:
79 stats = await stat(file_path)
80 headers['Accept-Ranges'] = 'bytes'
81 headers['Content-Length'] = str(stats.st_size)
82 if request.method != 'HEAD':
83 try:
84 _range = ContentRangeHandler(request, stats)
85 except HeaderNotFound:
86 pass
87 else:
88 del headers['Content-Length']
89 for key, value in _range.headers.items():
90 headers[key] = value
91 if request.method == 'HEAD':
92 return HTTPResponse(
93 headers=headers,
94 content_type=guess_type(file_path)[0] or 'text/plain')
95 else:
96 return await file(file_path, headers=headers, _range=_range)
97 except ContentRangeError:
98 raise
99 except Exception:
100 raise FileNotFound('File not found',
101 path=file_or_directory,
102 relative_url=file_uri)
103
104 app.route(uri, methods=['GET', 'HEAD'])(_handler)
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sanic/static.py b/sanic/static.py
--- a/sanic/static.py
+++ b/sanic/static.py
@@ -56,7 +56,7 @@
# URL decode the path sent by the browser otherwise we won't be able to
# match filenames which got encoded (filenames with spaces etc)
file_path = path.abspath(unquote(file_path))
- if not file_path.startswith(root_path):
+ if not file_path.startswith(path.abspath(unquote(root_path))):
raise FileNotFound('File not found',
path=file_or_directory,
relative_url=file_uri)
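A small standalone sketch of the containment check this patch corrects, with made-up paths and no Sanic imports; it assumes the same `startswith`-based comparison as the patch and simply normalizes both sides before comparing.

```python
from os import path
from urllib.parse import unquote

def resolve_static(root, requested):
    # Normalize BOTH sides: with a relative root such as "./static", the raw
    # root string never prefixes the absolute, URL-decoded file path, so every
    # request would be rejected even though the file exists.
    root_abs = path.abspath(unquote(root))
    file_path = path.abspath(unquote(path.join(root, requested.lstrip("/"))))
    if not file_path.startswith(root_abs):
        raise FileNotFoundError(requested)
    return file_path

print(resolve_static("./static", "css/site.css"))  # resolves inside the root
# resolve_static("./static", "../secret.txt")      # would raise FileNotFoundError
```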
|
{"golden_diff": "diff --git a/sanic/static.py b/sanic/static.py\n--- a/sanic/static.py\n+++ b/sanic/static.py\n@@ -56,7 +56,7 @@\n # URL decode the path sent by the browser otherwise we won't be able to\n # match filenames which got encoded (filenames with spaces etc)\n file_path = path.abspath(unquote(file_path))\n- if not file_path.startswith(root_path):\n+ if not file_path.startswith(path.abspath(unquote(root_path))):\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n", "issue": "what have done to static.py?\nOn last Friday,everything is ok,my static file test works fine.\r\n\r\nToday,when I pip install sanic==0.5.1\r\nIt raise 404 error.\r\n\r\nwhen I pip install sanic==0.5.0\r\neverything is ok again.\r\n\r\nseems like the code blow has some problem?\r\nif not file_path.startswith(root_path):\r\n raise FileNotFound('File not found',\r\n path=file_or_directory,\r\n relative_url=file_uri)\n", "before_files": [{"content": "from mimetypes import guess_type\nfrom os import path\nfrom re import sub\nfrom time import strftime, gmtime\nfrom urllib.parse import unquote\n\nfrom aiofiles.os import stat\n\nfrom sanic.exceptions import (\n ContentRangeError,\n FileNotFound,\n HeaderNotFound,\n InvalidUsage,\n)\nfrom sanic.handlers import ContentRangeHandler\nfrom sanic.response import file, HTTPResponse\n\n\ndef register(app, uri, file_or_directory, pattern,\n use_modified_since, use_content_range):\n # TODO: Though sanic is not a file server, I feel like we should at least\n # make a good effort here. Modified-since is nice, but we could\n # also look into etags, expires, and caching\n \"\"\"\n Register a static directory handler with Sanic by adding a route to the\n router and registering a handler.\n\n :param app: Sanic\n :param file_or_directory: File or directory path to serve from\n :param uri: URL to serve from\n :param pattern: regular expression used to match files in the URL\n :param use_modified_since: If true, send file modified time, and return\n not modified if the browser's matches the\n server's\n :param use_content_range: If true, process header for range requests\n and sends the file part that is requested\n \"\"\"\n # If we're not trying to match a file directly,\n # serve from the folder\n if not path.isfile(file_or_directory):\n uri += '<file_uri:' + pattern + '>'\n\n async def _handler(request, file_uri=None):\n # Using this to determine if the URL is trying to break out of the path\n # served. 
os.path.realpath seems to be very slow\n if file_uri and '../' in file_uri:\n raise InvalidUsage(\"Invalid URL\")\n # Merge served directory and requested file if provided\n # Strip all / that in the beginning of the URL to help prevent python\n # from herping a derp and treating the uri as an absolute path\n root_path = file_path = file_or_directory\n if file_uri:\n file_path = path.join(\n file_or_directory, sub('^[/]*', '', file_uri))\n\n # URL decode the path sent by the browser otherwise we won't be able to\n # match filenames which got encoded (filenames with spaces etc)\n file_path = path.abspath(unquote(file_path))\n if not file_path.startswith(root_path):\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n try:\n headers = {}\n # Check if the client has been sent this file before\n # and it has not been modified since\n stats = None\n if use_modified_since:\n stats = await stat(file_path)\n modified_since = strftime(\n '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))\n if request.headers.get('If-Modified-Since') == modified_since:\n return HTTPResponse(status=304)\n headers['Last-Modified'] = modified_since\n _range = None\n if use_content_range:\n _range = None\n if not stats:\n stats = await stat(file_path)\n headers['Accept-Ranges'] = 'bytes'\n headers['Content-Length'] = str(stats.st_size)\n if request.method != 'HEAD':\n try:\n _range = ContentRangeHandler(request, stats)\n except HeaderNotFound:\n pass\n else:\n del headers['Content-Length']\n for key, value in _range.headers.items():\n headers[key] = value\n if request.method == 'HEAD':\n return HTTPResponse(\n headers=headers,\n content_type=guess_type(file_path)[0] or 'text/plain')\n else:\n return await file(file_path, headers=headers, _range=_range)\n except ContentRangeError:\n raise\n except Exception:\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n\n app.route(uri, methods=['GET', 'HEAD'])(_handler)\n", "path": "sanic/static.py"}], "after_files": [{"content": "from mimetypes import guess_type\nfrom os import path\nfrom re import sub\nfrom time import strftime, gmtime\nfrom urllib.parse import unquote\n\nfrom aiofiles.os import stat\n\nfrom sanic.exceptions import (\n ContentRangeError,\n FileNotFound,\n HeaderNotFound,\n InvalidUsage,\n)\nfrom sanic.handlers import ContentRangeHandler\nfrom sanic.response import file, HTTPResponse\n\n\ndef register(app, uri, file_or_directory, pattern,\n use_modified_since, use_content_range):\n # TODO: Though sanic is not a file server, I feel like we should at least\n # make a good effort here. 
Modified-since is nice, but we could\n # also look into etags, expires, and caching\n \"\"\"\n Register a static directory handler with Sanic by adding a route to the\n router and registering a handler.\n\n :param app: Sanic\n :param file_or_directory: File or directory path to serve from\n :param uri: URL to serve from\n :param pattern: regular expression used to match files in the URL\n :param use_modified_since: If true, send file modified time, and return\n not modified if the browser's matches the\n server's\n :param use_content_range: If true, process header for range requests\n and sends the file part that is requested\n \"\"\"\n # If we're not trying to match a file directly,\n # serve from the folder\n if not path.isfile(file_or_directory):\n uri += '<file_uri:' + pattern + '>'\n\n async def _handler(request, file_uri=None):\n # Using this to determine if the URL is trying to break out of the path\n # served. os.path.realpath seems to be very slow\n if file_uri and '../' in file_uri:\n raise InvalidUsage(\"Invalid URL\")\n # Merge served directory and requested file if provided\n # Strip all / that in the beginning of the URL to help prevent python\n # from herping a derp and treating the uri as an absolute path\n root_path = file_path = file_or_directory\n if file_uri:\n file_path = path.join(\n file_or_directory, sub('^[/]*', '', file_uri))\n\n # URL decode the path sent by the browser otherwise we won't be able to\n # match filenames which got encoded (filenames with spaces etc)\n file_path = path.abspath(unquote(file_path))\n if not file_path.startswith(path.abspath(unquote(root_path))):\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n try:\n headers = {}\n # Check if the client has been sent this file before\n # and it has not been modified since\n stats = None\n if use_modified_since:\n stats = await stat(file_path)\n modified_since = strftime(\n '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))\n if request.headers.get('If-Modified-Since') == modified_since:\n return HTTPResponse(status=304)\n headers['Last-Modified'] = modified_since\n _range = None\n if use_content_range:\n _range = None\n if not stats:\n stats = await stat(file_path)\n headers['Accept-Ranges'] = 'bytes'\n headers['Content-Length'] = str(stats.st_size)\n if request.method != 'HEAD':\n try:\n _range = ContentRangeHandler(request, stats)\n except HeaderNotFound:\n pass\n else:\n del headers['Content-Length']\n for key, value in _range.headers.items():\n headers[key] = value\n if request.method == 'HEAD':\n return HTTPResponse(\n headers=headers,\n content_type=guess_type(file_path)[0] or 'text/plain')\n else:\n return await file(file_path, headers=headers, _range=_range)\n except ContentRangeError:\n raise\n except Exception:\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n\n app.route(uri, methods=['GET', 'HEAD'])(_handler)\n", "path": "sanic/static.py"}]}
| 1,452 | 128 |
gh_patches_debug_10725
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-5811
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[mypy] Need help to fix all `mypy` errors in the codebase
# Just one left to fix...
https://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5
* [x] other/least_recently_used.py
* [x] other/lfu_cache.py #5755
* [x] other/lru_cache.py #5755
---
__UPDATE:__ Our GitHub Actions now run `mypy --ignore-missing-imports` excluding those directories that fail that test.
* https://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5
Currently, we are not running `mypy` in our regular CI tests, as there are a lot of errors in the entire codebase which need to be fixed. This won't be a one-person job, so we are asking for help from you. I cannot paste the entire message here as there are around 600 of them, so here's just a gist of it:
```console
$ mypy --ignore-missing-imports .
strings/word_occurrence.py:17: error: Need type annotation for 'occurrence'
strings/min_cost_string_conversion.py:36: error: No overload variant of "__setitem__" of "list" matches argument types "int", "str"
strings/min_cost_string_conversion.py:36: note: Possible overload variants:
strings/min_cost_string_conversion.py:36: note: def __setitem__(self, int, int) -> None
strings/min_cost_string_conversion.py:36: note: def __setitem__(self, slice, Iterable[int]) -> None
strings/min_cost_string_conversion.py:40: error: No overload variant of "__setitem__" of "list" matches argument types "int", "str"
strings/min_cost_string_conversion.py:40: note: Possible overload variants:
strings/min_cost_string_conversion.py:40: note: def __setitem__(self, int, int) -> None
strings/min_cost_string_conversion.py:40: note: def __setitem__(self, slice, Iterable[int]) -> None
...
backtracking/n_queens_math.py:109: error: List comprehension has incompatible type List[str]; expected List[int]
backtracking/n_queens_math.py:110: error: Argument 1 to "append" of "list" has incompatible type "List[int]"; expected "List[str]"
backtracking/n_queens_math.py:149: error: Need type annotation for 'boards' (hint: "boards: List[<type>] = ...")
backtracking/minimax.py:15: error: "list" is not subscriptable, use "typing.List" instead
backtracking/knight_tour.py:6: error: "tuple" is not subscriptable, use "typing.Tuple" instead
backtracking/knight_tour.py:6: error: "list" is not subscriptable, use "typing.List" instead
...
```
# Guidelines to follow:
- Please make sure you read the [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) first.
- Please submit a fix for a maximum of 3 files at a time (1 file is also acceptable).
- As we are not running `mypy` in our CI tests, the user who is submitting a pull request should run it on their local machine and ensure there are no errors in their submission.
- Please ensure your pull request title contains the word `mypy` in it. If possible use this template for your pull request title:
```
[mypy] Fix type annotations for <filenames>
```
### Which errors to fix?
Please follow the below steps to produce all the errors in this library:
- Fork this repository if you haven't already.
- Clone the forked repository on your local machine using the command:
```
git clone --depth 1 https://github.com/TheAlgorithms/Python.git
```
Then you need to install all the necessary requirements:
```
cd python/
python -m pip install --upgrade pip
python -m pip install -r requirements.txt
python -m pip install mypy
```
Then run either of the two commands:
- `mypy --ignore-missing-imports .` -> To produce all the error messages for the entire codebase.
- `mypy --ignore-missing-imports <filepath1> <filepath2> ...` -> To produce error messages for the mentioned file.
### How to fix the errors?
- Make a separate branch for your fix with the command:
```
git checkout -b mypy-fix
```
- Make changes to the selected files.
- Push it to your forked copy and open a pull request with the appropriate title as mentioned above.
### Focus on one directory at a time:
```
.
├── [x] arithmetic_analysis
├── [x] backtracking
├── [x] bit_manipulation
├── [x] blockchain
├── [x] boolean_algebra
├── [x] cellular_automata
├── [x] ciphers
├── [x] compression
├── [x] computer_vision
├── [x] conversions
├── [ ] data_structures
├── [x] digital_image_processing
├── [x] divide_and_conquer
├── [ ] dynamic_programming
├── [x] electronics
├── [x] file_transfer
├── [x] fractals
├── [x] fuzzy_logic
├── [x] genetic_algorithm
├── [x] geodesy
├── [x] graphics
├── [ ] graphs
├── [x] hashes
├── [x] knapsack
├── [x] linear_algebra
├── [x] machine_learning
├── [ ] maths
├── [ ] matrix
├── [x] networking_flow
├── [x] neural_network
├── [ ] other
├── [ ] project_euler
├── [x] quantum
├── [x] scheduling
├── [x] scripts
├── [ ] searches
├── [x] sorts
├── [ ] strings
└── [x] web_programming
```
### Pre-requisites:
- You should be familiar with `mypy`: https://mypy.readthedocs.io
- You should be familiar with Python type hints: https://docs.python.org/3/library/typing.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `other/least_recently_used.py`
Content:
```
1 import sys
2 from abc import abstractmethod
3 from collections import deque
4
5
6 class LRUCache:
7 """Page Replacement Algorithm, Least Recently Used (LRU) Caching."""
8
9 dq_store = object() # Cache store of keys
10 key_reference_map = object() # References of the keys in cache
11 _MAX_CAPACITY: int = 10 # Maximum capacity of cache
12
13 @abstractmethod
14 def __init__(self, n: int):
15 """Creates an empty store and map for the keys.
16 The LRUCache is set the size n.
17 """
18 self.dq_store = deque()
19 self.key_reference_map = set()
20 if not n:
21 LRUCache._MAX_CAPACITY = sys.maxsize
22 elif n < 0:
23 raise ValueError("n should be an integer greater than 0.")
24 else:
25 LRUCache._MAX_CAPACITY = n
26
27 def refer(self, x):
28 """
29 Looks for a page in the cache store and adds reference to the set.
30 Remove the least recently used key if the store is full.
31 Update store to reflect recent access.
32 """
33 if x not in self.key_reference_map:
34 if len(self.dq_store) == LRUCache._MAX_CAPACITY:
35 last_element = self.dq_store.pop()
36 self.key_reference_map.remove(last_element)
37 else:
38 index_remove = 0
39 for idx, key in enumerate(self.dq_store):
40 if key == x:
41 index_remove = idx
42 break
43 self.dq_store.remove(index_remove)
44
45 self.dq_store.appendleft(x)
46 self.key_reference_map.add(x)
47
48 def display(self):
49 """
50 Prints all the elements in the store.
51 """
52 for k in self.dq_store:
53 print(k)
54
55
56 if __name__ == "__main__":
57 lru_cache = LRUCache(4)
58 lru_cache.refer(1)
59 lru_cache.refer(2)
60 lru_cache.refer(3)
61 lru_cache.refer(1)
62 lru_cache.refer(4)
63 lru_cache.refer(5)
64 lru_cache.display()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/other/least_recently_used.py b/other/least_recently_used.py
--- a/other/least_recently_used.py
+++ b/other/least_recently_used.py
@@ -1,5 +1,4 @@
import sys
-from abc import abstractmethod
from collections import deque
@@ -10,7 +9,6 @@
key_reference_map = object() # References of the keys in cache
_MAX_CAPACITY: int = 10 # Maximum capacity of cache
- @abstractmethod
def __init__(self, n: int):
"""Creates an empty store and map for the keys.
The LRUCache is set the size n.
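As an aside, a simplified, type-annotated sketch of the same page store once the stray `@abstractmethod` is dropped (the decorator is only meaningful on `ABCMeta`-based classes); the class layout and capacity handling below are illustrative and assume Python 3.9+.

```python
import sys
from collections import deque

class LRUCache:
    """Illustrative, simplified LRU page store with explicit annotations."""

    def __init__(self, n: int) -> None:
        if n < 0:
            raise ValueError("n should be an integer greater than 0.")
        self.dq_store: deque[int] = deque()        # most recent on the left
        self.key_reference_map: set[int] = set()   # keys currently cached
        self.max_capacity: int = sys.maxsize if n == 0 else n

    def refer(self, x: int) -> None:
        if x not in self.key_reference_map:
            if len(self.dq_store) == self.max_capacity:
                # Evict the least recently used key (rightmost).
                self.key_reference_map.remove(self.dq_store.pop())
        else:
            # Already cached: move it to the front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference_map.add(x)

cache = LRUCache(2)
for page in (1, 2, 1, 3):
    cache.refer(page)
print(list(cache.dq_store))  # [3, 1]
```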
|
{"golden_diff": "diff --git a/other/least_recently_used.py b/other/least_recently_used.py\n--- a/other/least_recently_used.py\n+++ b/other/least_recently_used.py\n@@ -1,5 +1,4 @@\n import sys\n-from abc import abstractmethod\n from collections import deque\n \n \n@@ -10,7 +9,6 @@\n key_reference_map = object() # References of the keys in cache\n _MAX_CAPACITY: int = 10 # Maximum capacity of cache\n \n- @abstractmethod\n def __init__(self, n: int):\n \"\"\"Creates an empty store and map for the keys.\n The LRUCache is set the size n.\n", "issue": "[mypy] Need help to fix all `mypy` errors in the codebase\n# Just one left to fix...\r\nhttps://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5\r\n* [x] other/least_recently_used.py\r\n* [x] other/lfu_cache.py #5755\r\n* [x] other/lru_cache.py #5755\r\n\r\n---\r\n\r\n__UPDATE:__ Our GitHub Actions now run `mypy --ignore-missing-imports` excluding those directories that fail that test.\r\n* https://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5\r\n\r\nCurrently, we are not running `mypy` in our regular CI tests as there are a lot of errors in the entire codebase, which needs to be fixed. This won't be a one-person job, so we are asking for help from you. I cannot paste the entire message in here as there are around 600 of them, so here's just a gist of it:\r\n\r\n```console\r\n$ mypy --ignore-missing-imports .\r\nstrings/word_occurrence.py:17: error: Need type annotation for 'occurrence'\r\nstrings/min_cost_string_conversion.py:36: error: No overload variant of \"__setitem__\" of \"list\" matches argument types \"int\", \"str\"\r\nstrings/min_cost_string_conversion.py:36: note: Possible overload variants:\r\nstrings/min_cost_string_conversion.py:36: note: def __setitem__(self, int, int) -> None\r\nstrings/min_cost_string_conversion.py:36: note: def __setitem__(self, slice, Iterable[int]) -> None\r\nstrings/min_cost_string_conversion.py:40: error: No overload variant of \"__setitem__\" of \"list\" matches argument types \"int\", \"str\"\r\nstrings/min_cost_string_conversion.py:40: note: Possible overload variants:\r\nstrings/min_cost_string_conversion.py:40: note: def __setitem__(self, int, int) -> None\r\nstrings/min_cost_string_conversion.py:40: note: def __setitem__(self, slice, Iterable[int]) -> None\r\n...\r\nbacktracking/n_queens_math.py:109: error: List comprehension has incompatible type List[str]; expected List[int]\r\nbacktracking/n_queens_math.py:110: error: Argument 1 to \"append\" of \"list\" has incompatible type \"List[int]\"; expected \"List[str]\"\r\nbacktracking/n_queens_math.py:149: error: Need type annotation for 'boards' (hint: \"boards: List[<type>] = ...\")\r\nbacktracking/minimax.py:15: error: \"list\" is not subscriptable, use \"typing.List\" instead\r\nbacktracking/knight_tour.py:6: error: \"tuple\" is not subscriptable, use \"typing.Tuple\" instead\r\nbacktracking/knight_tour.py:6: error: \"list\" is not subscriptable, use \"typing.List\" instead\r\n...\r\n```\r\n\r\n# Guidelines to follow:\r\n\r\n- Please make sure you read the [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) first.\r\n- Please submit a fix for a maximum of 3 files at a time (1 file is also acceptable).\r\n- As we are not running `mypy` in our CI tests, the user who is submitting a pull request should run it on their local machine and ensure there are no errors in their submission.\r\n- Please ensure your pull request title contains the word `mypy` in it. 
If possible use this template for your pull request title:\r\n```\r\n[mypy] Fix type annotations for <filenames>\r\n```\r\n\r\n### Which errors to fix?\r\n\r\nPlease follow the below steps to produce all the errors in this library:\r\n- Fork this repository if you haven't already.\r\n- Clone the forked repository on your local machine using the command:\r\n\r\n```\r\ngit clone --depth 1 https://github.com/TheAlgorithms/Python.git\r\n```\r\nThen you need to install all the necessary requirements:\r\n```\r\ncd python/\r\npython -m pip install --upgrade pip\r\npython -m pip install -r requirements.txt\r\npython -m pip install mypy\r\n```\r\nThen run either of the two commands:\r\n- `mypy --ignore-missing-imports .` -> To produce all the error messages for the entire codebase.\r\n- `mypy --ignore-missing-imports <filepath1> <filepath2> ...` -> To produce error messages for the mentioned file.\r\n\r\n### How to fix the errors?\r\n\r\n- Make a separate branch for your fix with the command: \r\n```\r\ngit checkout -b mypy-fix\r\n```\r\n- Make changes to the selected files.\r\n- Push it to your forked copy and open a pull request with the appropriate title as mentioned above.\r\n\r\n### Focus on one directory at a time:\r\n\r\n```\r\n.\r\n\u251c\u2500\u2500 [x] arithmetic_analysis\r\n\u251c\u2500\u2500 [x] backtracking\r\n\u251c\u2500\u2500 [x] bit_manipulation\r\n\u251c\u2500\u2500 [x] blockchain\r\n\u251c\u2500\u2500 [x] boolean_algebra\r\n\u251c\u2500\u2500 [x] cellular_automata\r\n\u251c\u2500\u2500 [x] ciphers\r\n\u251c\u2500\u2500 [x] compression\r\n\u251c\u2500\u2500 [x] computer_vision\r\n\u251c\u2500\u2500 [x] conversions\r\n\u251c\u2500\u2500 [ ] data_structures\r\n\u251c\u2500\u2500 [x] digital_image_processing\r\n\u251c\u2500\u2500 [x] divide_and_conquer\r\n\u251c\u2500\u2500 [ ] dynamic_programming\r\n\u251c\u2500\u2500 [x] electronics\r\n\u251c\u2500\u2500 [x] file_transfer\r\n\u251c\u2500\u2500 [x] fractals\r\n\u251c\u2500\u2500 [x] fuzzy_logic\r\n\u251c\u2500\u2500 [x] genetic_algorithm\r\n\u251c\u2500\u2500 [x] geodesy\r\n\u251c\u2500\u2500 [x] graphics\r\n\u251c\u2500\u2500 [ ] graphs\r\n\u251c\u2500\u2500 [x] hashes\r\n\u251c\u2500\u2500 [x] knapsack\r\n\u251c\u2500\u2500 [x] linear_algebra\r\n\u251c\u2500\u2500 [x] machine_learning\r\n\u251c\u2500\u2500 [ ] maths\r\n\u251c\u2500\u2500 [ ] matrix\r\n\u251c\u2500\u2500 [x] networking_flow\r\n\u251c\u2500\u2500 [x] neural_network\r\n\u251c\u2500\u2500 [ ] other\r\n\u251c\u2500\u2500 [ ] project_euler\r\n\u251c\u2500\u2500 [x] quantum\r\n\u251c\u2500\u2500 [x] scheduling\r\n\u251c\u2500\u2500 [x] scripts\r\n\u251c\u2500\u2500 [ ] searches\r\n\u251c\u2500\u2500 [x] sorts\r\n\u251c\u2500\u2500 [ ] strings\r\n\u2514\u2500\u2500 [x] web_programming\r\n```\r\n\r\n### Pre-requisites:\r\n- You should be familiar with `mypy`: https://mypy.readthedocs.io\r\n- You should be familiar with Python type hints: https://docs.python.org/3/library/typing.html\n", "before_files": [{"content": "import sys\nfrom abc import abstractmethod\nfrom collections import deque\n\n\nclass LRUCache:\n \"\"\"Page Replacement Algorithm, Least Recently Used (LRU) Caching.\"\"\"\n\n dq_store = object() # Cache store of keys\n key_reference_map = object() # References of the keys in cache\n _MAX_CAPACITY: int = 10 # Maximum capacity of cache\n\n @abstractmethod\n def __init__(self, n: int):\n \"\"\"Creates an empty store and map for the keys.\n The LRUCache is set the size n.\n \"\"\"\n self.dq_store = deque()\n self.key_reference_map = set()\n if not n:\n 
LRUCache._MAX_CAPACITY = sys.maxsize\n elif n < 0:\n raise ValueError(\"n should be an integer greater than 0.\")\n else:\n LRUCache._MAX_CAPACITY = n\n\n def refer(self, x):\n \"\"\"\n Looks for a page in the cache store and adds reference to the set.\n Remove the least recently used key if the store is full.\n Update store to reflect recent access.\n \"\"\"\n if x not in self.key_reference_map:\n if len(self.dq_store) == LRUCache._MAX_CAPACITY:\n last_element = self.dq_store.pop()\n self.key_reference_map.remove(last_element)\n else:\n index_remove = 0\n for idx, key in enumerate(self.dq_store):\n if key == x:\n index_remove = idx\n break\n self.dq_store.remove(index_remove)\n\n self.dq_store.appendleft(x)\n self.key_reference_map.add(x)\n\n def display(self):\n \"\"\"\n Prints all the elements in the store.\n \"\"\"\n for k in self.dq_store:\n print(k)\n\n\nif __name__ == \"__main__\":\n lru_cache = LRUCache(4)\n lru_cache.refer(1)\n lru_cache.refer(2)\n lru_cache.refer(3)\n lru_cache.refer(1)\n lru_cache.refer(4)\n lru_cache.refer(5)\n lru_cache.display()\n", "path": "other/least_recently_used.py"}], "after_files": [{"content": "import sys\nfrom collections import deque\n\n\nclass LRUCache:\n \"\"\"Page Replacement Algorithm, Least Recently Used (LRU) Caching.\"\"\"\n\n dq_store = object() # Cache store of keys\n key_reference_map = object() # References of the keys in cache\n _MAX_CAPACITY: int = 10 # Maximum capacity of cache\n\n def __init__(self, n: int):\n \"\"\"Creates an empty store and map for the keys.\n The LRUCache is set the size n.\n \"\"\"\n self.dq_store = deque()\n self.key_reference_map = set()\n if not n:\n LRUCache._MAX_CAPACITY = sys.maxsize\n elif n < 0:\n raise ValueError(\"n should be an integer greater than 0.\")\n else:\n LRUCache._MAX_CAPACITY = n\n\n def refer(self, x):\n \"\"\"\n Looks for a page in the cache store and adds reference to the set.\n Remove the least recently used key if the store is full.\n Update store to reflect recent access.\n \"\"\"\n if x not in self.key_reference_map:\n if len(self.dq_store) == LRUCache._MAX_CAPACITY:\n last_element = self.dq_store.pop()\n self.key_reference_map.remove(last_element)\n else:\n index_remove = 0\n for idx, key in enumerate(self.dq_store):\n if key == x:\n index_remove = idx\n break\n self.dq_store.remove(index_remove)\n\n self.dq_store.appendleft(x)\n self.key_reference_map.add(x)\n\n def display(self):\n \"\"\"\n Prints all the elements in the store.\n \"\"\"\n for k in self.dq_store:\n print(k)\n\n\nif __name__ == \"__main__\":\n lru_cache = LRUCache(4)\n lru_cache.refer(1)\n lru_cache.refer(2)\n lru_cache.refer(3)\n lru_cache.refer(1)\n lru_cache.refer(4)\n lru_cache.refer(5)\n lru_cache.display()\n", "path": "other/least_recently_used.py"}]}
| 2,227 | 152 |
gh_patches_debug_11593
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-847
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Partial celery task time limit is not supported
Python 3.8
**Steps to reproduce**
- Create a celery task with only one of the soft/hard time limits
Or use this test to reproduce:
`instrumentation/opentelemetry-instrumentation-celery/tests/test_utils.py`
```python
def test_set_attributes_partial_timelimit_hard_limit(self):
context = {
"correlation_id": "44b7f305",
"delivery_info": {"eager": True},
"eta": "soon",
"expires": "later",
"hostname": "localhost",
"id": "44b7f305",
"reply_to": "44b7f305",
"retries": 4,
"timelimit": ("now", None),
"custom_meta": "custom_value",
"routing_key": "celery",
}
span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext))
utils.set_attributes_from_context(span, context)
self.assertEqual(span.attributes.get("celery.timelimit"), ("now", ""))
```
**What is the expected behavior?**
The time limit that was specified should be instrumented
**What is the actual behavior?**
An exception is raised and no time limit is instrumented
```
[__init__.py:_translate_attributes:164] _translate_key_values(key, value)
exception.trace.1
[exporter.py:_translate_key_values:126] return KeyValue(key=key, value=_translate_value(value))
exception.trace.2
[ exporter.py:_translate_value:104] array_value=ArrayValue(values=[_translate_value(v) for v in value])
exception.trace.3
[ exporter.py:<listcomp>:104] array_value=ArrayValue(values=[_translate_value(v) for v in value])
exception.trace.4
[ exporter.py:_translate_value:119] "Invalid type {} of value {}".format(type(value), value)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16
17 from celery import registry # pylint: disable=no-name-in-module
18
19 from opentelemetry.semconv.trace import SpanAttributes
20
21 logger = logging.getLogger(__name__)
22
23 # Celery Context key
24 CTX_KEY = "__otel_task_span"
25
26 # Celery Context attributes
27 CELERY_CONTEXT_ATTRIBUTES = (
28 "compression",
29 "correlation_id",
30 "countdown",
31 "delivery_info",
32 "declare",
33 "eta",
34 "exchange",
35 "expires",
36 "hostname",
37 "id",
38 "priority",
39 "queue",
40 "reply_to",
41 "retries",
42 "routing_key",
43 "serializer",
44 "timelimit",
45 "origin",
46 "state",
47 )
48
49
50 # pylint:disable=too-many-branches
51 def set_attributes_from_context(span, context):
52 """Helper to extract meta values from a Celery Context"""
53 if not span.is_recording():
54 return
55 for key in CELERY_CONTEXT_ATTRIBUTES:
56 value = context.get(key)
57
58 # Skip this key if it is not set
59 if value is None or value == "":
60 continue
61
62 # Skip `timelimit` if it is not set (it's default/unset value is a
63 # tuple or a list of `None` values
64 if key == "timelimit" and value in [(None, None), [None, None]]:
65 continue
66
67 # Skip `retries` if it's value is `0`
68 if key == "retries" and value == 0:
69 continue
70
71 attribute_name = None
72
73 # Celery 4.0 uses `origin` instead of `hostname`; this change preserves
74 # the same name for the tag despite Celery version
75 if key == "origin":
76 key = "hostname"
77
78 elif key == "delivery_info":
79 # Get also destination from this
80 routing_key = value.get("routing_key")
81 if routing_key is not None:
82 span.set_attribute(
83 SpanAttributes.MESSAGING_DESTINATION, routing_key
84 )
85 value = str(value)
86
87 elif key == "id":
88 attribute_name = SpanAttributes.MESSAGING_MESSAGE_ID
89
90 elif key == "correlation_id":
91 attribute_name = SpanAttributes.MESSAGING_CONVERSATION_ID
92
93 elif key == "routing_key":
94 attribute_name = SpanAttributes.MESSAGING_DESTINATION
95
96 # according to https://docs.celeryproject.org/en/stable/userguide/routing.html#exchange-types
97 elif key == "declare":
98 attribute_name = SpanAttributes.MESSAGING_DESTINATION_KIND
99 for declare in value:
100 if declare.exchange.type == "direct":
101 value = "queue"
102 break
103 if declare.exchange.type == "topic":
104 value = "topic"
105 break
106
107 # set attribute name if not set specially for a key
108 if attribute_name is None:
109 attribute_name = f"celery.{key}"
110
111 span.set_attribute(attribute_name, value)
112
113
114 def attach_span(task, task_id, span, is_publish=False):
115 """Helper to propagate a `Span` for the given `Task` instance. This
116 function uses a `dict` that stores the Span using the
117 `(task_id, is_publish)` as a key. This is useful when information must be
118 propagated from one Celery signal to another.
119
120 We use (task_id, is_publish) for the key to ensure that publishing a
121 task from within another task does not cause any conflicts.
122
123 This mostly happens when either a task fails and a retry policy is in place,
124 or when a task is manually retries (e.g. `task.retry()`), we end up trying
125 to publish a task with the same id as the task currently running.
126
127 Previously publishing the new task would overwrite the existing `celery.run` span
128 in the `dict` causing that span to be forgotten and never finished
129 NOTE: We cannot test for this well yet, because we do not run a celery worker,
130 and cannot run `task.apply_async()`
131 """
132 span_dict = getattr(task, CTX_KEY, None)
133 if span_dict is None:
134 span_dict = {}
135 setattr(task, CTX_KEY, span_dict)
136
137 span_dict[(task_id, is_publish)] = span
138
139
140 def detach_span(task, task_id, is_publish=False):
141 """Helper to remove a `Span` in a Celery task when it's propagated.
142 This function handles tasks where the `Span` is not attached.
143 """
144 span_dict = getattr(task, CTX_KEY, None)
145 if span_dict is None:
146 return
147
148 # See note in `attach_span` for key info
149 span_dict.pop((task_id, is_publish), (None, None))
150
151
152 def retrieve_span(task, task_id, is_publish=False):
153 """Helper to retrieve an active `Span` stored in a `Task`
154 instance
155 """
156 span_dict = getattr(task, CTX_KEY, None)
157 if span_dict is None:
158 return (None, None)
159
160 # See note in `attach_span` for key info
161 return span_dict.get((task_id, is_publish), (None, None))
162
163
164 def retrieve_task(kwargs):
165 task = kwargs.get("task")
166 if task is None:
167 logger.debug("Unable to retrieve task from signal arguments")
168 return task
169
170
171 def retrieve_task_from_sender(kwargs):
172 sender = kwargs.get("sender")
173 if sender is None:
174 logger.debug("Unable to retrieve the sender from signal arguments")
175
176 # before and after publish signals sender is the task name
177 # for retry and failure signals sender is the task object
178 if isinstance(sender, str):
179 sender = registry.tasks.get(sender)
180 if sender is None:
181 logger.debug("Unable to retrieve the task from sender=%s", sender)
182
183 return sender
184
185
186 def retrieve_task_id(kwargs):
187 task_id = kwargs.get("task_id")
188 if task_id is None:
189 logger.debug("Unable to retrieve task_id from signal arguments")
190 return task_id
191
192
193 def retrieve_task_id_from_request(kwargs):
194 # retry signal does not include task_id as argument so use request argument
195 request = kwargs.get("request")
196 if request is None:
197 logger.debug("Unable to retrieve the request from signal arguments")
198
199 task_id = getattr(request, "id")
200 if task_id is None:
201 logger.debug("Unable to retrieve the task_id from the request")
202
203 return task_id
204
205
206 def retrieve_task_id_from_message(kwargs):
207 """Helper to retrieve the `Task` identifier from the message `body`.
208 This helper supports Protocol Version 1 and 2. The Protocol is well
209 detailed in the official documentation:
210 http://docs.celeryproject.org/en/latest/internals/protocol.html
211 """
212 headers = kwargs.get("headers")
213 body = kwargs.get("body")
214 if headers is not None and len(headers) > 0:
215 # Protocol Version 2 (default from Celery 4.0)
216 return headers.get("id")
217 # Protocol Version 1
218 return body.get("id")
219
220
221 def retrieve_reason(kwargs):
222 reason = kwargs.get("reason")
223 if not reason:
224 logger.debug("Unable to retrieve the retry reason")
225 return reason
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py
--- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py
+++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py
@@ -61,8 +61,11 @@
# Skip `timelimit` if it is not set (it's default/unset value is a
# tuple or a list of `None` values
- if key == "timelimit" and value in [(None, None), [None, None]]:
- continue
+ if key == "timelimit":
+ if value in [(None, None), [None, None]]:
+ continue
+ if None in value:
+ value = ["" if tl is None else tl for tl in value]
# Skip `retries` if it's value is `0`
if key == "retries" and value == 0:
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py\n--- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py\n+++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py\n@@ -61,8 +61,11 @@\n \n # Skip `timelimit` if it is not set (it's default/unset value is a\n # tuple or a list of `None` values\n- if key == \"timelimit\" and value in [(None, None), [None, None]]:\n- continue\n+ if key == \"timelimit\":\n+ if value in [(None, None), [None, None]]:\n+ continue\n+ if None in value:\n+ value = [\"\" if tl is None else tl for tl in value]\n \n # Skip `retries` if it's value is `0`\n if key == \"retries\" and value == 0:\n", "issue": "Partial celery task time limit is not support\nPython 3.8\r\n\r\n**Steps to reproduce**\r\n- Create celery task with only one of the time limit soft/hard\r\nOr use this test to reproduce:\r\n\r\n`instrumentation/opentelemetry-instrumentation-celery/tests/test_utils.py`\r\n\r\n```python\r\n def test_set_attributes_partial_timelimit_hard_limit(self):\r\n context = {\r\n \"correlation_id\": \"44b7f305\",\r\n \"delivery_info\": {\"eager\": True},\r\n \"eta\": \"soon\",\r\n \"expires\": \"later\",\r\n \"hostname\": \"localhost\",\r\n \"id\": \"44b7f305\",\r\n \"reply_to\": \"44b7f305\",\r\n \"retries\": 4,\r\n \"timelimit\": (\"now\", None),\r\n \"custom_meta\": \"custom_value\",\r\n \"routing_key\": \"celery\",\r\n }\r\n span = trace._Span(\"name\", mock.Mock(spec=trace_api.SpanContext))\r\n utils.set_attributes_from_context(span, context)\r\n self.assertEqual(span.attributes.get(\"celery.timelimit\"), (\"now\", \"\"))\r\n```\r\n**What is the expected behavior?**\r\nThe time limit that was specify should be instrumented \r\n\r\n**What is the actual behavior?**\r\nException is raised and no time limit is instrumented\r\n```\r\n[__init__.py:_translate_attributes:164] _translate_key_values(key, value)\r\nexception.trace.1\t\r\n[exporter.py:_translate_key_values:126] return KeyValue(key=key, value=_translate_value(value))\r\nexception.trace.2\t\r\n[ exporter.py:_translate_value:104] array_value=ArrayValue(values=[_translate_value(v) for v in value])\r\nexception.trace.3\t\r\n[ exporter.py:<listcomp>:104] array_value=ArrayValue(values=[_translate_value(v) for v in value])\r\nexception.trace.4\t\r\n[ exporter.py:_translate_value:119] \"Invalid type {} of value {}\".format(type(value), value)\r\n```\r\n \r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom celery import registry # pylint: disable=no-name-in-module\n\nfrom opentelemetry.semconv.trace import SpanAttributes\n\nlogger = logging.getLogger(__name__)\n\n# Celery Context key\nCTX_KEY = \"__otel_task_span\"\n\n# Celery Context 
attributes\nCELERY_CONTEXT_ATTRIBUTES = (\n \"compression\",\n \"correlation_id\",\n \"countdown\",\n \"delivery_info\",\n \"declare\",\n \"eta\",\n \"exchange\",\n \"expires\",\n \"hostname\",\n \"id\",\n \"priority\",\n \"queue\",\n \"reply_to\",\n \"retries\",\n \"routing_key\",\n \"serializer\",\n \"timelimit\",\n \"origin\",\n \"state\",\n)\n\n\n# pylint:disable=too-many-branches\ndef set_attributes_from_context(span, context):\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n if not span.is_recording():\n return\n for key in CELERY_CONTEXT_ATTRIBUTES:\n value = context.get(key)\n\n # Skip this key if it is not set\n if value is None or value == \"\":\n continue\n\n # Skip `timelimit` if it is not set (it's default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\" and value in [(None, None), [None, None]]:\n continue\n\n # Skip `retries` if it's value is `0`\n if key == \"retries\" and value == 0:\n continue\n\n attribute_name = None\n\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n if key == \"origin\":\n key = \"hostname\"\n\n elif key == \"delivery_info\":\n # Get also destination from this\n routing_key = value.get(\"routing_key\")\n if routing_key is not None:\n span.set_attribute(\n SpanAttributes.MESSAGING_DESTINATION, routing_key\n )\n value = str(value)\n\n elif key == \"id\":\n attribute_name = SpanAttributes.MESSAGING_MESSAGE_ID\n\n elif key == \"correlation_id\":\n attribute_name = SpanAttributes.MESSAGING_CONVERSATION_ID\n\n elif key == \"routing_key\":\n attribute_name = SpanAttributes.MESSAGING_DESTINATION\n\n # according to https://docs.celeryproject.org/en/stable/userguide/routing.html#exchange-types\n elif key == \"declare\":\n attribute_name = SpanAttributes.MESSAGING_DESTINATION_KIND\n for declare in value:\n if declare.exchange.type == \"direct\":\n value = \"queue\"\n break\n if declare.exchange.type == \"topic\":\n value = \"topic\"\n break\n\n # set attribute name if not set specially for a key\n if attribute_name is None:\n attribute_name = f\"celery.{key}\"\n\n span.set_attribute(attribute_name, value)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `dict` that stores the Span using the\n `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retries (e.g. 
`task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `dict` causing that span to be forgotten and never finished\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n span_dict = {}\n setattr(task, CTX_KEY, span_dict)\n\n span_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n return\n\n # See note in `attach_span` for key info\n span_dict.pop((task_id, is_publish), (None, None))\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n return (None, None)\n\n # See note in `attach_span` for key info\n return span_dict.get((task_id, is_publish), (None, None))\n\n\ndef retrieve_task(kwargs):\n task = kwargs.get(\"task\")\n if task is None:\n logger.debug(\"Unable to retrieve task from signal arguments\")\n return task\n\n\ndef retrieve_task_from_sender(kwargs):\n sender = kwargs.get(\"sender\")\n if sender is None:\n logger.debug(\"Unable to retrieve the sender from signal arguments\")\n\n # before and after publish signals sender is the task name\n # for retry and failure signals sender is the task object\n if isinstance(sender, str):\n sender = registry.tasks.get(sender)\n if sender is None:\n logger.debug(\"Unable to retrieve the task from sender=%s\", sender)\n\n return sender\n\n\ndef retrieve_task_id(kwargs):\n task_id = kwargs.get(\"task_id\")\n if task_id is None:\n logger.debug(\"Unable to retrieve task_id from signal arguments\")\n return task_id\n\n\ndef retrieve_task_id_from_request(kwargs):\n # retry signal does not include task_id as argument so use request argument\n request = kwargs.get(\"request\")\n if request is None:\n logger.debug(\"Unable to retrieve the request from signal arguments\")\n\n task_id = getattr(request, \"id\")\n if task_id is None:\n logger.debug(\"Unable to retrieve the task_id from the request\")\n\n return task_id\n\n\ndef retrieve_task_id_from_message(kwargs):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. 
The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = kwargs.get(\"headers\")\n body = kwargs.get(\"body\")\n if headers is not None and len(headers) > 0:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n # Protocol Version 1\n return body.get(\"id\")\n\n\ndef retrieve_reason(kwargs):\n reason = kwargs.get(\"reason\")\n if not reason:\n logger.debug(\"Unable to retrieve the retry reason\")\n return reason\n", "path": "instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom celery import registry # pylint: disable=no-name-in-module\n\nfrom opentelemetry.semconv.trace import SpanAttributes\n\nlogger = logging.getLogger(__name__)\n\n# Celery Context key\nCTX_KEY = \"__otel_task_span\"\n\n# Celery Context attributes\nCELERY_CONTEXT_ATTRIBUTES = (\n \"compression\",\n \"correlation_id\",\n \"countdown\",\n \"delivery_info\",\n \"declare\",\n \"eta\",\n \"exchange\",\n \"expires\",\n \"hostname\",\n \"id\",\n \"priority\",\n \"queue\",\n \"reply_to\",\n \"retries\",\n \"routing_key\",\n \"serializer\",\n \"timelimit\",\n \"origin\",\n \"state\",\n)\n\n\n# pylint:disable=too-many-branches\ndef set_attributes_from_context(span, context):\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n if not span.is_recording():\n return\n for key in CELERY_CONTEXT_ATTRIBUTES:\n value = context.get(key)\n\n # Skip this key if it is not set\n if value is None or value == \"\":\n continue\n\n # Skip `timelimit` if it is not set (it's default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\":\n if value in [(None, None), [None, None]]:\n continue\n if None in value:\n value = [\"\" if tl is None else tl for tl in value]\n\n # Skip `retries` if it's value is `0`\n if key == \"retries\" and value == 0:\n continue\n\n attribute_name = None\n\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n if key == \"origin\":\n key = \"hostname\"\n\n elif key == \"delivery_info\":\n # Get also destination from this\n routing_key = value.get(\"routing_key\")\n if routing_key is not None:\n span.set_attribute(\n SpanAttributes.MESSAGING_DESTINATION, routing_key\n )\n value = str(value)\n\n elif key == \"id\":\n attribute_name = SpanAttributes.MESSAGING_MESSAGE_ID\n\n elif key == \"correlation_id\":\n attribute_name = SpanAttributes.MESSAGING_CONVERSATION_ID\n\n elif key == \"routing_key\":\n attribute_name = SpanAttributes.MESSAGING_DESTINATION\n\n # according to https://docs.celeryproject.org/en/stable/userguide/routing.html#exchange-types\n elif key == \"declare\":\n attribute_name = SpanAttributes.MESSAGING_DESTINATION_KIND\n for declare in value:\n if declare.exchange.type == \"direct\":\n 
value = \"queue\"\n break\n if declare.exchange.type == \"topic\":\n value = \"topic\"\n break\n\n # set attribute name if not set specially for a key\n if attribute_name is None:\n attribute_name = f\"celery.{key}\"\n\n span.set_attribute(attribute_name, value)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `dict` that stores the Span using the\n `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retries (e.g. `task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `dict` causing that span to be forgotten and never finished\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n span_dict = {}\n setattr(task, CTX_KEY, span_dict)\n\n span_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n return\n\n # See note in `attach_span` for key info\n span_dict.pop((task_id, is_publish), (None, None))\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n return (None, None)\n\n # See note in `attach_span` for key info\n return span_dict.get((task_id, is_publish), (None, None))\n\n\ndef retrieve_task(kwargs):\n task = kwargs.get(\"task\")\n if task is None:\n logger.debug(\"Unable to retrieve task from signal arguments\")\n return task\n\n\ndef retrieve_task_from_sender(kwargs):\n sender = kwargs.get(\"sender\")\n if sender is None:\n logger.debug(\"Unable to retrieve the sender from signal arguments\")\n\n # before and after publish signals sender is the task name\n # for retry and failure signals sender is the task object\n if isinstance(sender, str):\n sender = registry.tasks.get(sender)\n if sender is None:\n logger.debug(\"Unable to retrieve the task from sender=%s\", sender)\n\n return sender\n\n\ndef retrieve_task_id(kwargs):\n task_id = kwargs.get(\"task_id\")\n if task_id is None:\n logger.debug(\"Unable to retrieve task_id from signal arguments\")\n return task_id\n\n\ndef retrieve_task_id_from_request(kwargs):\n # retry signal does not include task_id as argument so use request argument\n request = kwargs.get(\"request\")\n if request is None:\n logger.debug(\"Unable to retrieve the request from signal arguments\")\n\n task_id = getattr(request, \"id\")\n if task_id is None:\n logger.debug(\"Unable to retrieve the task_id from the request\")\n\n return task_id\n\n\ndef retrieve_task_id_from_message(kwargs):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. 
The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = kwargs.get(\"headers\")\n body = kwargs.get(\"body\")\n if headers is not None and len(headers) > 0:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n # Protocol Version 1\n return body.get(\"id\")\n\n\ndef retrieve_reason(kwargs):\n reason = kwargs.get(\"reason\")\n if not reason:\n logger.debug(\"Unable to retrieve the retry reason\")\n return reason\n", "path": "instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py"}]}
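The verification info above turns on a single behavior: a partially set Celery `timelimit` such as `("now", None)` must be exported as `("now", "")` instead of tripping the span exporter on a `None` element. A minimal standalone sketch of that normalization follows; the helper name and the asserts are illustrative only — the library inlines this logic inside `set_attributes_from_context` as shown in the golden diff above.

```python
# Sketch of the timelimit normalization from the golden diff above.
# Assumption: the wrapper function name is ours, not the library's.
def normalize_timelimit(value):
    # A fully unset limit is skipped entirely by the instrumentation.
    if value in [(None, None), [None, None]]:
        return None
    # A partial limit keeps the set value and swaps the missing soft/hard
    # limit for an empty string, so the attribute is a list of valid values.
    if None in value:
        return ["" if tl is None else tl for tl in value]
    return value


assert normalize_timelimit(("now", None)) == ["now", ""]
assert normalize_timelimit((None, None)) is None
assert normalize_timelimit(("1m", "2m")) == ("1m", "2m")
```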
| 2,973 | 267 |
gh_patches_debug_21075
|
rasdani/github-patches
|
git_diff
|
larq__larq-146
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document how to use a quantizer as an activation function
`tf.keras.layers.Activation("ste_sign")`
--- END ISSUE ---
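The usage the issue asks to document is a quantizer referenced by its registered string name, either through a Larq quantizer argument or as a plain Keras activation. A hedged sketch (layer sizes and the input shape are arbitrary placeholders), consistent with the documentation added in the golden diff further down:

```python
import tensorflow as tf
import larq as lq

inputs = tf.keras.Input(shape=(32,))
# Quantizer supplied through the dedicated Larq argument...
x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(inputs)
# ...or applied by name as an ordinary activation layer, which resolves
# because the quantizers are registered as Keras custom objects.
x = tf.keras.layers.Activation("ste_sign")(x)
outputs = lq.layers.QuantDense(10)(x)
model = tf.keras.Model(inputs, outputs)
```

Note that the added documentation recommends the quantizer-argument form: with an `Activation` layer or an `activation=` argument, intermediate layers such as batch normalization or average pooling and shortcut connections can leave the next quantized layer with non-binary inputs.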
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `larq/quantizers.py`
Content:
```
1 """A Quantizer defines the way of transforming a full precision input to a
2 quantized output and the pseudo-gradient method used for the backwards pass."""
3
4 import tensorflow as tf
5 from larq import utils, math
6
7
8 @tf.custom_gradient
9 def _binarize_with_identity_grad(x):
10 def grad(dy):
11 return dy
12
13 return math.sign(x), grad
14
15
16 @tf.custom_gradient
17 def _binarize_with_weighted_grad(x):
18 def grad(dy):
19 return (1 - tf.abs(x)) * 2 * dy
20
21 return math.sign(x), grad
22
23
24 @utils.register_keras_custom_object
25 @utils.set_precision(1)
26 def ste_sign(x):
27 r"""
28 Sign binarization function.
29 \\[
30 q(x) = \begin{cases}
31 -1 & x < 0 \\\
32 1 & x \geq 0
33 \end{cases}
34 \\]
35
36 The gradient is estimated using the Straight-Through Estimator
37 (essentially the binarization is replaced by a clipped identity on the
38 backward pass).
39 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
40 1 & \left|x\right| \leq 1 \\\
41 0 & \left|x\right| > 1
42 \end{cases}\\]
43
44 ```plot-activation
45 quantizers.ste_sign
46 ```
47
48 # Arguments
49 x: Input tensor.
50
51 # Returns
52 Binarized tensor.
53
54 # References
55 - [Binarized Neural Networks: Training Deep Neural Networks with Weights and
56 Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)
57 """
58
59 x = tf.clip_by_value(x, -1, 1)
60
61 return _binarize_with_identity_grad(x)
62
63
64 @utils.register_keras_custom_object
65 @utils.set_precision(1)
66 def magnitude_aware_sign(x):
67 r"""
68 Magnitude-aware sign for Bi-Real Net.
69
70 ```plot-activation
71 quantizers.magnitude_aware_sign
72 ```
73
74 # Arguments
75 x: Input tensor
76
77 # Returns
78 Scaled binarized tensor (with values in $\{-a, a\}$, where $a$ is a float).
79
80 # References
81 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
82 Representational Capability and Advanced Training
83 Algorithm](https://arxiv.org/abs/1808.00278)
84
85 """
86 scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))
87
88 return tf.stop_gradient(scale_factor) * ste_sign(x)
89
90
91 @utils.register_keras_custom_object
92 @utils.set_precision(1)
93 def approx_sign(x):
94 r"""
95 Sign binarization function.
96 \\[
97 q(x) = \begin{cases}
98 -1 & x < 0 \\\
99 1 & x \geq 0
100 \end{cases}
101 \\]
102
103 The gradient is estimated using the ApproxSign method.
104 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
105 (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\
106 0 & \left|x\right| > 1
107 \end{cases}
108 \\]
109
110 ```plot-activation
111 quantizers.approx_sign
112 ```
113
114 # Arguments
115 x: Input tensor.
116
117 # Returns
118 Binarized tensor.
119
120 # References
121 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved
122 Representational Capability and Advanced
123 Training Algorithm](http://arxiv.org/abs/1808.00278)
124 """
125
126 x = tf.clip_by_value(x, -1, 1)
127
128 return _binarize_with_weighted_grad(x)
129
130
131 @utils.register_keras_custom_object
132 @utils.set_precision(2)
133 class SteTern:
134 r"""
135 Ternarization function.
136 \\[
137 q(x) = \begin{cases}
138 +1 & x > \Delta \\\
139 0 & |x| < \Delta \\\
140 -1 & x < - \Delta
141 \end{cases}
142 \\]
143
144 where $\Delta$ is defined as the threshold and can be passed as an argument,
145 or can be calculated as per the Ternary Weight Networks original paper, such that
146
147 \\[
148 \Delta = \frac{0.7}{n} \sum_{i=1}^{n} |W_i|
149 \\]
150 where we assume that $W_i$ is generated from a normal distribution.
151
152 The gradient is estimated using the Straight-Through Estimator
153 (essentially the Ternarization is replaced by a clipped identity on the
154 backward pass).
155 \\[\frac{\partial q(x)}{\partial x} = \begin{cases}
156 1 & \left|x\right| \leq 1 \\\
157 0 & \left|x\right| > 1
158 \end{cases}\\]
159
160 ```plot-activation
161 quantizers.SteTern
162 ```
163
164 # Arguments
165 x: Input tensor.
166 threshold_value: The value for the threshold, $\Delta$.
167 ternary_weight_networks: Boolean of whether to use the Ternary Weight Networks threshold calculation.
168
169 # Returns
170 Ternarized tensor.
171
172 # References
173 - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
174 """
175
176 def __init__(self, threshold_value=0.1, ternary_weight_networks=False):
177 self.threshold_value = threshold_value
178 self.ternary_weight_networks = ternary_weight_networks
179
180 def __call__(self, x):
181 x = tf.clip_by_value(x, -1, 1)
182 if self.ternary_weight_networks:
183 threshold = self.threshold_twn(x)
184 else:
185 threshold = self.threshold_value
186
187 @tf.custom_gradient
188 def _ternarize_with_identity_grad(x):
189 def grad(dy):
190 return dy
191
192 return (tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad)
193
194 return _ternarize_with_identity_grad(x)
195
196 def threshold_twn(self, x):
197 return 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)
198
199 def get_config(self):
200 return {
201 "threshold_value": self.threshold_value,
202 "ternary_weight_networks": self.ternary_weight_networks,
203 }
204
205
206 def serialize(initializer):
207 return tf.keras.utils.serialize_keras_object(initializer)
208
209
210 def deserialize(name, custom_objects=None):
211 return tf.keras.utils.deserialize_keras_object(
212 name,
213 module_objects=globals(),
214 custom_objects=custom_objects,
215 printable_module_name="quantization function",
216 )
217
218
219 def get(identifier):
220 if identifier is None:
221 return None
222 if isinstance(identifier, str):
223 return deserialize(str(identifier))
224 if callable(identifier):
225 return identifier
226 raise ValueError(
227 f"Could not interpret quantization function identifier: {identifier}"
228 )
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/larq/quantizers.py b/larq/quantizers.py
--- a/larq/quantizers.py
+++ b/larq/quantizers.py
@@ -1,5 +1,39 @@
"""A Quantizer defines the way of transforming a full precision input to a
-quantized output and the pseudo-gradient method used for the backwards pass."""
+quantized output and the pseudo-gradient method used for the backwards pass.
+
+Quantizers can either be used through quantizer arguments that are supported
+for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they
+can be used similar to activations, i.e. either through an `Activation` layer,
+or through the `activation` argument supported by all forward layer:
+
+```python
+import tensorflow as tf
+import larq as lq
+...
+x = lq.layers.QuantDense(64, activation=None)(x)
+x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(x)
+```
+
+is equivalent to:
+
+```python
+x = lq.layers.QuantDense(64)(x)
+x = tf.keras.layers.Activation("ste_sign")(x)
+x = lq.layers.QuantDense(64)(x)
+```
+
+as well as:
+
+```python
+x = lq.layers.QuantDense(64, activation="ste_sign")(x)
+x = lq.layers.QuantDense(64)(x)
+```
+
+We highly recommend using the first of these formulations: for the
+other two formulations, intermediate layers - like batch normalization or
+average pooling - and shortcut connections may result in non-binary input
+to the convolutions.
+"""
import tensorflow as tf
from larq import utils, math
|
{"golden_diff": "diff --git a/larq/quantizers.py b/larq/quantizers.py\n--- a/larq/quantizers.py\n+++ b/larq/quantizers.py\n@@ -1,5 +1,39 @@\n \"\"\"A Quantizer defines the way of transforming a full precision input to a\n-quantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n+quantized output and the pseudo-gradient method used for the backwards pass.\n+\n+Quantizers can either be used through quantizer arguments that are supported\n+for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they\n+can be used similar to activations, i.e. either through an `Activation` layer,\n+or through the `activation` argument supported by all forward layer:\n+\n+```python\n+import tensorflow as tf\n+import larq as lq\n+...\n+x = lq.layers.QuantDense(64, activation=None)(x)\n+x = lq.layers.QuantDense(64, input_quantizer=\"ste_sign\")(x)\n+```\n+\n+is equivalent to:\n+\n+```python\n+x = lq.layers.QuantDense(64)(x)\n+x = tf.keras.layers.Activation(\"ste_sign\")(x)\n+x = lq.layers.QuantDense(64)(x)\n+```\n+\n+as well as:\n+\n+```python\n+x = lq.layers.QuantDense(64, activation=\"ste_sign\")(x)\n+x = lq.layers.QuantDense(64)(x)\n+```\n+\n+We highly recommend using the first of these formulations: for the\n+other two formulations, intermediate layers - like batch normalization or\n+average pooling - and shortcut connections may result in non-binary input\n+to the convolutions.\n+\"\"\"\n \n import tensorflow as tf\n from larq import utils, math\n", "issue": "Document how to use a quantizer as an activation function\n`tf.keras.layers.Activation(\"ste_sign\")`\n", "before_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils, math\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return math.sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return math.sign(x), grad\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.ste_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n ```plot-activation\n quantizers.magnitude_aware_sign\n ```\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n 
Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n\n return tf.stop_gradient(scale_factor) * ste_sign(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n ```plot-activation\n quantizers.approx_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(2)\nclass SteTern:\n r\"\"\"\n Ternarization function.\n \\\\[\n q(x) = \\begin{cases}\n +1 & x > \\Delta \\\\\\\n 0 & |x| < \\Delta \\\\\\\n -1 & x < - \\Delta\n \\end{cases}\n \\\\]\n\n where $\\Delta$ is defined as the threshold and can be passed as an argument,\n or can be calculated as per the Ternary Weight Networks original paper, such that\n\n \\\\[\n \\Delta = \\frac{0.7}{n} \\sum_{i=1}^{n} |W_i|\n \\\\]\n where we assume that $W_i$ is generated from a normal distribution.\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the Ternarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.SteTern\n ```\n\n # Arguments\n x: Input tensor.\n threshold_value: The value for the threshold, $\\Delta$.\n ternary_weight_networks: Boolean of whether to use the Ternary Weight Networks threshold calculation.\n\n # Returns\n Ternarized tensor.\n\n # References\n - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)\n \"\"\"\n\n def __init__(self, threshold_value=0.1, ternary_weight_networks=False):\n self.threshold_value = threshold_value\n self.ternary_weight_networks = ternary_weight_networks\n\n def __call__(self, x):\n x = tf.clip_by_value(x, -1, 1)\n if self.ternary_weight_networks:\n threshold = self.threshold_twn(x)\n else:\n threshold = self.threshold_value\n\n @tf.custom_gradient\n def _ternarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return (tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad)\n\n return _ternarize_with_identity_grad(x)\n\n def threshold_twn(self, x):\n return 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)\n\n def get_config(self):\n return {\n \"threshold_value\": self.threshold_value,\n \"ternary_weight_networks\": self.ternary_weight_networks,\n }\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if 
callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}], "after_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\n\nQuantizers can either be used through quantizer arguments that are supported\nfor Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they\ncan be used similar to activations, i.e. either through an `Activation` layer,\nor through the `activation` argument supported by all forward layer:\n\n```python\nimport tensorflow as tf\nimport larq as lq\n...\nx = lq.layers.QuantDense(64, activation=None)(x)\nx = lq.layers.QuantDense(64, input_quantizer=\"ste_sign\")(x)\n```\n\nis equivalent to:\n\n```python\nx = lq.layers.QuantDense(64)(x)\nx = tf.keras.layers.Activation(\"ste_sign\")(x)\nx = lq.layers.QuantDense(64)(x)\n```\n\nas well as:\n\n```python\nx = lq.layers.QuantDense(64, activation=\"ste_sign\")(x)\nx = lq.layers.QuantDense(64)(x)\n```\n\nWe highly recommend using the first of these formulations: for the\nother two formulations, intermediate layers - like batch normalization or\naverage pooling - and shortcut connections may result in non-binary input\nto the convolutions.\n\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils, math\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return math.sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return math.sign(x), grad\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.ste_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n ```plot-activation\n quantizers.magnitude_aware_sign\n ```\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n\n return tf.stop_gradient(scale_factor) * ste_sign(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the 
ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n ```plot-activation\n quantizers.approx_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(2)\nclass SteTern:\n r\"\"\"\n Ternarization function.\n \\\\[\n q(x) = \\begin{cases}\n +1 & x > \\Delta \\\\\\\n 0 & |x| < \\Delta \\\\\\\n -1 & x < - \\Delta\n \\end{cases}\n \\\\]\n\n where $\\Delta$ is defined as the threshold and can be passed as an argument,\n or can be calculated as per the Ternary Weight Networks original paper, such that\n\n \\\\[\n \\Delta = \\frac{0.7}{n} \\sum_{i=1}^{n} |W_i|\n \\\\]\n where we assume that $W_i$ is generated from a normal distribution.\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the Ternarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.SteTern\n ```\n\n # Arguments\n x: Input tensor.\n threshold_value: The value for the threshold, $\\Delta$.\n ternary_weight_networks: Boolean of whether to use the Ternary Weight Networks threshold calculation.\n\n # Returns\n Ternarized tensor.\n\n # References\n - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)\n \"\"\"\n\n def __init__(self, threshold_value=0.1, ternary_weight_networks=False):\n self.threshold_value = threshold_value\n self.ternary_weight_networks = ternary_weight_networks\n\n def __call__(self, x):\n x = tf.clip_by_value(x, -1, 1)\n if self.ternary_weight_networks:\n threshold = self.threshold_twn(x)\n else:\n threshold = self.threshold_value\n\n @tf.custom_gradient\n def _ternarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return (tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad)\n\n return _ternarize_with_identity_grad(x)\n\n def threshold_twn(self, x):\n return 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)\n\n def get_config(self):\n return {\n \"threshold_value\": self.threshold_value,\n \"ternary_weight_networks\": self.ternary_weight_networks,\n }\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}]}
| 2,489 | 402 |
gh_patches_debug_34198
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-3214
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better User deletion
With #3175 we now have a way for users to mark themselves as wanting to be deleted, now we need to actually delete them. This could be done inline during delete, or as a batch job that runs after the web view returns.
We want to actually **delete** the User instance, but we need to confirm a couple things:
* Projects that the user _only_ owns are deleted. This is a [M2m](https://github.com/rtfd/readthedocs.org/blob/master/readthedocs/projects/models.py#L83), so we need to either *remove* them if there are multiple users, or *delete* the project if they are the only owner, so we don't end up with orphaned projects.
--- END ISSUE ---
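The golden diff further down pairs the view change (actually calling `request.user.delete()`) with a `pre_delete` signal receiver that cleans up sole-owner projects first. A trimmed sketch of that receiver is below; the function name is illustrative, and the real patch applies the same annotate/exclude filter to the user's OAuth organizations as well.

```python
from django.conf import settings
from django.db.models import Count
from django.db.models.signals import pre_delete
from django.dispatch import receiver


@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)
def delete_solely_owned_projects(sender, instance, *args, **kwargs):
    # Annotate each of the user's projects with its owner count and keep only
    # those with a single owner; shared projects stay and simply lose this
    # user from the many-to-many relation when the User row is deleted.
    solely_owned = (
        instance.projects.all()
        .annotate(num_users=Count("users"))
        .exclude(num_users__gt=1)
    )
    solely_owned.delete()
```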
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/profiles/views.py`
Content:
```
1 """Views for creating, editing and viewing site-specific user profiles."""
2
3 from __future__ import absolute_import
4
5 from django.contrib import messages
6 from django.contrib.auth import logout
7 from django.contrib.auth.decorators import login_required
8 from django.contrib.auth.models import User
9 from django.core.exceptions import ObjectDoesNotExist
10 from django.core.urlresolvers import reverse
11 from django.http import Http404
12 from django.http import HttpResponseRedirect
13 from django.shortcuts import get_object_or_404, render, redirect
14 from django.shortcuts import render_to_response
15 from django.template import RequestContext
16
17 from readthedocs.core.forms import UserDeleteForm
18
19
20 def create_profile(request, form_class, success_url=None,
21 template_name='profiles/private/create_profile.html',
22 extra_context=None):
23 """
24 Create a profile for the current user, if one doesn't already exist.
25
26 If the user already has a profile, a redirect will be issued to the
27 :view:`profiles.views.edit_profile` view.
28
29 **Optional arguments:**
30
31 ``extra_context``
32 A dictionary of variables to add to the template context. Any
33 callable object in this dictionary will be called to produce
34 the end result which appears in the context.
35
36 ``form_class``
37 The form class to use for validating and creating the user
38 profile. This form class must define a method named
39 ``save()``, implementing the same argument signature as the
40 ``save()`` method of a standard Django ``ModelForm`` (this
41 view will call ``save(commit=False)`` to obtain the profile
42 object, and fill in the user before the final save). If the
43 profile object includes many-to-many relations, the convention
44 established by ``ModelForm`` of using a method named
45 ``save_m2m()`` will be used, and so your form class should
46 also define this method.
47
48 ``success_url``
49 The URL to redirect to after successful profile creation. If
50 this argument is not supplied, this will default to the URL of
51 :view:`profiles.views.profile_detail` for the newly-created
52 profile object.
53
54 ``template_name``
55 The template to use when displaying the profile-creation
56 form. If not supplied, this will default to
57 :template:`profiles/create_profile.html`.
58
59 **Context:**
60
61 ``form``
62 The profile-creation form.
63
64 **Template:**
65
66 ``template_name`` keyword argument, or
67 :template:`profiles/create_profile.html`.
68
69 """
70 try:
71 profile_obj = request.user.profile
72 return HttpResponseRedirect(reverse('profiles_edit_profile'))
73 except ObjectDoesNotExist:
74 pass
75
76 #
77 # We set up success_url here, rather than as the default value for
78 # the argument. Trying to do it as the argument's default would
79 # mean evaluating the call to reverse() at the time this module is
80 # first imported, which introduces a circular dependency: to
81 # perform the reverse lookup we need access to profiles/urls.py,
82 # but profiles/urls.py in turn imports this module.
83 #
84
85 if success_url is None:
86 success_url = reverse('profiles_profile_detail',
87 kwargs={'username': request.user.username})
88 if request.method == 'POST':
89 form = form_class(data=request.POST, files=request.FILES)
90 if form.is_valid():
91 profile_obj = form.save(commit=False)
92 profile_obj.user = request.user
93 profile_obj.save()
94 if hasattr(form, 'save_m2m'):
95 form.save_m2m()
96 return HttpResponseRedirect(success_url)
97 else:
98 form = form_class()
99
100 if extra_context is None:
101 extra_context = {}
102 context = RequestContext(request)
103 for key, value in list(extra_context.items()):
104 context[key] = (value() if callable(value) else value)
105
106 return render_to_response(template_name,
107 {'form': form},
108 context_instance=context)
109 create_profile = login_required(create_profile)
110
111
112 def edit_profile(request, form_class, success_url=None,
113 template_name='profiles/private/edit_profile.html',
114 extra_context=None):
115 """
116 Edit the current user's profile.
117
118 If the user does not already have a profile, a redirect will be issued to
119 the :view:`profiles.views.create_profile` view.
120
121 **Optional arguments:**
122
123 ``extra_context``
124 A dictionary of variables to add to the template context. Any
125 callable object in this dictionary will be called to produce
126 the end result which appears in the context.
127
128 ``form_class``
129 The form class to use for validating and editing the user
130 profile. This form class must operate similarly to a standard
131 Django ``ModelForm`` in that it must accept an instance of the
132 object to be edited as the keyword argument ``instance`` to
133 its constructor, and it must implement a method named
134 ``save()`` which will save the updates to the object.
135
136 ``success_url``
137 The URL to redirect to following a successful edit. If not
138 specified, this will default to the URL of
139 :view:`profiles.views.profile_detail` for the profile object
140 being edited.
141
142 ``template_name``
143 The template to use when displaying the profile-editing
144 form. If not specified, this will default to
145 :template:`profiles/edit_profile.html`.
146
147 **Context:**
148
149 ``form``
150 The form for editing the profile.
151
152 ``profile``
153 The user's current profile.
154
155 **Template:**
156
157 ``template_name`` keyword argument or
158 :template:`profiles/edit_profile.html`.
159
160 """
161 try:
162 profile_obj = request.user.profile
163 except ObjectDoesNotExist:
164 return HttpResponseRedirect(reverse('profiles_profile_create'))
165
166 if success_url is None:
167 success_url = reverse('profiles_profile_detail',
168 kwargs={'username': request.user.username})
169 if request.method == 'POST':
170 form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)
171 if form.is_valid():
172 form.save()
173 return HttpResponseRedirect(success_url)
174 else:
175 form = form_class(instance=profile_obj)
176
177 if extra_context is None:
178 extra_context = {}
179 context = RequestContext(request)
180 for key, value in list(extra_context.items()):
181 context[key] = (value() if callable(value) else value)
182
183 return render_to_response(template_name, {
184 'form': form,
185 'profile': profile_obj,
186 'user': profile_obj.user,
187 }, context_instance=context)
188 edit_profile = login_required(edit_profile)
189
190
191 @login_required()
192 def delete_account(request):
193 form = UserDeleteForm()
194 template_name = 'profiles/private/delete_account.html'
195
196 if request.method == 'POST':
197 form = UserDeleteForm(instance=request.user, data=request.POST)
198 if form.is_valid():
199
200 # Do not delete the account permanently because it may create disaster
201 # Inactive the user instead.
202 request.user.is_active = False
203 request.user.save()
204 logout(request)
205 messages.info(request, 'You have successfully deleted your account')
206
207 return redirect('homepage')
208
209 return render(request, template_name, {'form': form})
210
211
212 def profile_detail(request, username, public_profile_field=None,
213 template_name='profiles/public/profile_detail.html',
214 extra_context=None):
215 """
216 Detail view of a user's profile.
217
218 If the user has not yet created a profile, ``Http404`` will be
219 raised.
220
221 **Required arguments:**
222
223 ``username``
224 The username of the user whose profile is being displayed.
225
226 **Optional arguments:**
227
228 ``extra_context``
229 A dictionary of variables to add to the template context. Any
230 callable object in this dictionary will be called to produce
231 the end result which appears in the context.
232
233 ``public_profile_field``
234 The name of a ``BooleanField`` on the profile model; if the
235 value of that field on the user's profile is ``False``, the
236 ``profile`` variable in the template will be ``None``. Use
237 this feature to allow users to mark their profiles as not
238 being publicly viewable.
239
240 If this argument is not specified, it will be assumed that all
241 users' profiles are publicly viewable.
242
243 ``template_name``
244 The name of the template to use for displaying the profile. If
245 not specified, this will default to
246 :template:`profiles/profile_detail.html`.
247
248 **Context:**
249
250 ``profile``
251 The user's profile, or ``None`` if the user's profile is not
252 publicly viewable (see the description of
253 ``public_profile_field`` above).
254
255 **Template:**
256
257 ``template_name`` keyword argument or
258 :template:`profiles/profile_detail.html`.
259
260 """
261 user = get_object_or_404(User, username=username)
262 try:
263 profile_obj = user.profile
264 except ObjectDoesNotExist:
265 raise Http404
266 if public_profile_field is not None and \
267 not getattr(profile_obj, public_profile_field):
268 profile_obj = None
269
270 if extra_context is None:
271 extra_context = {}
272 context = RequestContext(request)
273 for key, value in list(extra_context.items()):
274 context[key] = (value() if callable(value) else value)
275
276 return render_to_response(template_name,
277 {'profile': profile_obj},
278 context_instance=context)
279
```
Path: `readthedocs/core/signals.py`
Content:
```
1 """Signal handling for core app."""
2
3 from __future__ import absolute_import
4
5 import logging
6
7 from corsheaders import signals
8 from django.dispatch import Signal
9 from django.db.models import Q
10 from future.backports.urllib.parse import urlparse
11
12 from readthedocs.projects.models import Project, Domain
13
14
15 log = logging.getLogger(__name__)
16
17 WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']
18
19
20 webhook_github = Signal(providing_args=['project', 'data', 'event'])
21 webhook_gitlab = Signal(providing_args=['project', 'data', 'event'])
22 webhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])
23
24
25 def decide_if_cors(sender, request, **kwargs): # pylint: disable=unused-argument
26 """
27 Decide whether a request should be given CORS access.
28
29 This checks that:
30 * The URL is whitelisted against our CORS-allowed domains
31 * The Domain exists in our database, and belongs to the project being queried.
32
33 Returns True when a request should be given CORS access.
34 """
35 if 'HTTP_ORIGIN' not in request.META:
36 return False
37 host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]
38 valid_url = False
39 for url in WHITELIST_URLS:
40 if request.path_info.startswith(url):
41 valid_url = True
42
43 if valid_url:
44 project_slug = request.GET.get('project', None)
45 try:
46 project = Project.objects.get(slug=project_slug)
47 except Project.DoesNotExist:
48 log.warning(
49 'Invalid project passed to domain. [{project}:{domain}'.format(
50 project=project_slug,
51 domain=host,
52 )
53 )
54 return False
55
56 domain = Domain.objects.filter(
57 Q(domain__icontains=host),
58 Q(project=project) | Q(project__subprojects__child=project)
59 )
60 if domain.exists():
61 return True
62
63 return False
64
65 signals.check_request_enabled.connect(decide_if_cors)
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/readthedocs/core/signals.py b/readthedocs/core/signals.py
--- a/readthedocs/core/signals.py
+++ b/readthedocs/core/signals.py
@@ -5,13 +5,15 @@
import logging
from corsheaders import signals
+from django.conf import settings
+from django.db.models.signals import pre_delete
from django.dispatch import Signal
-from django.db.models import Q
+from django.db.models import Q, Count
+from django.dispatch import receiver
from future.backports.urllib.parse import urlparse
from readthedocs.projects.models import Project, Domain
-
log = logging.getLogger(__name__)
WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']
@@ -62,4 +64,20 @@
return False
+
+@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)
+def delete_projects_and_organizations(sender, instance, *args, **kwargs):
+ # Here we count the owner list from the projects that the user own
+ # Then exclude the projects where there are more than one owner
+ projects = instance.projects.all().annotate(num_users=Count('users')).exclude(num_users__gt=1)
+
+ # Here we count the users list from the organization that the user belong
+ # Then exclude the organizations where there are more than one user
+ oauth_organizations = (instance.oauth_organizations.annotate(num_users=Count('users'))
+ .exclude(num_users__gt=1))
+
+ projects.delete()
+ oauth_organizations.delete()
+
+
signals.check_request_enabled.connect(decide_if_cors)
diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py
--- a/readthedocs/profiles/views.py
+++ b/readthedocs/profiles/views.py
@@ -196,11 +196,9 @@
if request.method == 'POST':
form = UserDeleteForm(instance=request.user, data=request.POST)
if form.is_valid():
-
- # Do not delete the account permanently because it may create disaster
- # Inactive the user instead.
- request.user.is_active = False
- request.user.save()
+ # Delete the user permanently
+ # It will also delete some projects where he is the only owner
+ request.user.delete()
logout(request)
messages.info(request, 'You have successfully deleted your account')
|
{"golden_diff": "diff --git a/readthedocs/core/signals.py b/readthedocs/core/signals.py\n--- a/readthedocs/core/signals.py\n+++ b/readthedocs/core/signals.py\n@@ -5,13 +5,15 @@\n import logging\n \n from corsheaders import signals\n+from django.conf import settings\n+from django.db.models.signals import pre_delete\n from django.dispatch import Signal\n-from django.db.models import Q\n+from django.db.models import Q, Count\n+from django.dispatch import receiver\n from future.backports.urllib.parse import urlparse\n \n from readthedocs.projects.models import Project, Domain\n \n-\n log = logging.getLogger(__name__)\n \n WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']\n@@ -62,4 +64,20 @@\n \n return False\n \n+\n+@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)\n+def delete_projects_and_organizations(sender, instance, *args, **kwargs):\n+ # Here we count the owner list from the projects that the user own\n+ # Then exclude the projects where there are more than one owner\n+ projects = instance.projects.all().annotate(num_users=Count('users')).exclude(num_users__gt=1)\n+\n+ # Here we count the users list from the organization that the user belong\n+ # Then exclude the organizations where there are more than one user\n+ oauth_organizations = (instance.oauth_organizations.annotate(num_users=Count('users'))\n+ .exclude(num_users__gt=1))\n+\n+ projects.delete()\n+ oauth_organizations.delete()\n+\n+\n signals.check_request_enabled.connect(decide_if_cors)\ndiff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py\n--- a/readthedocs/profiles/views.py\n+++ b/readthedocs/profiles/views.py\n@@ -196,11 +196,9 @@\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n-\n- # Do not delete the account permanently because it may create disaster\n- # Inactive the user instead.\n- request.user.is_active = False\n- request.user.save()\n+ # Delete the user permanently\n+ # It will also delete some projects where he is the only owner\n+ request.user.delete()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n", "issue": "Better User deletion\nWith #3175 we now have a way for users to mark themselves as wanting to be deleted, now we need to actually delete them. This could be done inline during delete, or as a batch job that runs after the web view returns.\r\n\r\nWe want to actually **delete** the User instance, but we need to confirm a couple things:\r\n\r\n* Projects that the user _only_ owns are deleted. This is a [M2m](https://github.com/rtfd/readthedocs.org/blob/master/readthedocs/projects/models.py#L83), so we need to either *remove* them if there are multiple users, or *delete* the project if they are the only owner, so we don't end up with orphaned projects. 
\r\n\n", "before_files": [{"content": "\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom readthedocs.core.forms import UserDeleteForm\n\n\ndef create_profile(request, form_class, success_url=None,\n template_name='profiles/private/create_profile.html',\n extra_context=None):\n \"\"\"\n Create a profile for the current user, if one doesn't already exist.\n\n If the user already has a profile, a redirect will be issued to the\n :view:`profiles.views.edit_profile` view.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and creating the user\n profile. This form class must define a method named\n ``save()``, implementing the same argument signature as the\n ``save()`` method of a standard Django ``ModelForm`` (this\n view will call ``save(commit=False)`` to obtain the profile\n object, and fill in the user before the final save). If the\n profile object includes many-to-many relations, the convention\n established by ``ModelForm`` of using a method named\n ``save_m2m()`` will be used, and so your form class should\n also define this method.\n\n ``success_url``\n The URL to redirect to after successful profile creation. If\n this argument is not supplied, this will default to the URL of\n :view:`profiles.views.profile_detail` for the newly-created\n profile object.\n\n ``template_name``\n The template to use when displaying the profile-creation\n form. If not supplied, this will default to\n :template:`profiles/create_profile.html`.\n\n **Context:**\n\n ``form``\n The profile-creation form.\n\n **Template:**\n\n ``template_name`` keyword argument, or\n :template:`profiles/create_profile.html`.\n\n \"\"\"\n try:\n profile_obj = request.user.profile\n return HttpResponseRedirect(reverse('profiles_edit_profile'))\n except ObjectDoesNotExist:\n pass\n\n #\n # We set up success_url here, rather than as the default value for\n # the argument. 
Trying to do it as the argument's default would\n # mean evaluating the call to reverse() at the time this module is\n # first imported, which introduces a circular dependency: to\n # perform the reverse lookup we need access to profiles/urls.py,\n # but profiles/urls.py in turn imports this module.\n #\n\n if success_url is None:\n success_url = reverse('profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES)\n if form.is_valid():\n profile_obj = form.save(commit=False)\n profile_obj.user = request.user\n profile_obj.save()\n if hasattr(form, 'save_m2m'):\n form.save_m2m()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class()\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name,\n {'form': form},\n context_instance=context)\ncreate_profile = login_required(create_profile)\n\n\ndef edit_profile(request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html',\n extra_context=None):\n \"\"\"\n Edit the current user's profile.\n\n If the user does not already have a profile, a redirect will be issued to\n the :view:`profiles.views.create_profile` view.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and editing the user\n profile. This form class must operate similarly to a standard\n Django ``ModelForm`` in that it must accept an instance of the\n object to be edited as the keyword argument ``instance`` to\n its constructor, and it must implement a method named\n ``save()`` which will save the updates to the object.\n\n ``success_url``\n The URL to redirect to following a successful edit. If not\n specified, this will default to the URL of\n :view:`profiles.views.profile_detail` for the profile object\n being edited.\n\n ``template_name``\n The template to use when displaying the profile-editing\n form. 
If not specified, this will default to\n :template:`profiles/edit_profile.html`.\n\n **Context:**\n\n ``form``\n The form for editing the profile.\n\n ``profile``\n The user's current profile.\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/edit_profile.html`.\n\n \"\"\"\n try:\n profile_obj = request.user.profile\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('profiles_profile_create'))\n\n if success_url is None:\n success_url = reverse('profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(instance=profile_obj)\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name, {\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n }, context_instance=context)\nedit_profile = login_required(edit_profile)\n\n\n@login_required()\ndef delete_account(request):\n form = UserDeleteForm()\n template_name = 'profiles/private/delete_account.html'\n\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n\n # Do not delete the account permanently because it may create disaster\n # Inactive the user instead.\n request.user.is_active = False\n request.user.save()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n\n return redirect('homepage')\n\n return render(request, template_name, {'form': form})\n\n\ndef profile_detail(request, username, public_profile_field=None,\n template_name='profiles/public/profile_detail.html',\n extra_context=None):\n \"\"\"\n Detail view of a user's profile.\n\n If the user has not yet created a profile, ``Http404`` will be\n raised.\n\n **Required arguments:**\n\n ``username``\n The username of the user whose profile is being displayed.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``public_profile_field``\n The name of a ``BooleanField`` on the profile model; if the\n value of that field on the user's profile is ``False``, the\n ``profile`` variable in the template will be ``None``. Use\n this feature to allow users to mark their profiles as not\n being publicly viewable.\n\n If this argument is not specified, it will be assumed that all\n users' profiles are publicly viewable.\n\n ``template_name``\n The name of the template to use for displaying the profile. 
If\n not specified, this will default to\n :template:`profiles/profile_detail.html`.\n\n **Context:**\n\n ``profile``\n The user's profile, or ``None`` if the user's profile is not\n publicly viewable (see the description of\n ``public_profile_field`` above).\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/profile_detail.html`.\n\n \"\"\"\n user = get_object_or_404(User, username=username)\n try:\n profile_obj = user.profile\n except ObjectDoesNotExist:\n raise Http404\n if public_profile_field is not None and \\\n not getattr(profile_obj, public_profile_field):\n profile_obj = None\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name,\n {'profile': profile_obj},\n context_instance=context)\n", "path": "readthedocs/profiles/views.py"}, {"content": "\"\"\"Signal handling for core app.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom corsheaders import signals\nfrom django.dispatch import Signal\nfrom django.db.models import Q\nfrom future.backports.urllib.parse import urlparse\n\nfrom readthedocs.projects.models import Project, Domain\n\n\nlog = logging.getLogger(__name__)\n\nWHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']\n\n\nwebhook_github = Signal(providing_args=['project', 'data', 'event'])\nwebhook_gitlab = Signal(providing_args=['project', 'data', 'event'])\nwebhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])\n\n\ndef decide_if_cors(sender, request, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Decide whether a request should be given CORS access.\n\n This checks that:\n * The URL is whitelisted against our CORS-allowed domains\n * The Domain exists in our database, and belongs to the project being queried.\n\n Returns True when a request should be given CORS access.\n \"\"\"\n if 'HTTP_ORIGIN' not in request.META:\n return False\n host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]\n valid_url = False\n for url in WHITELIST_URLS:\n if request.path_info.startswith(url):\n valid_url = True\n\n if valid_url:\n project_slug = request.GET.get('project', None)\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n log.warning(\n 'Invalid project passed to domain. 
[{project}:{domain}'.format(\n project=project_slug,\n domain=host,\n )\n )\n return False\n\n domain = Domain.objects.filter(\n Q(domain__icontains=host),\n Q(project=project) | Q(project__subprojects__child=project)\n )\n if domain.exists():\n return True\n\n return False\n\nsignals.check_request_enabled.connect(decide_if_cors)\n", "path": "readthedocs/core/signals.py"}], "after_files": [{"content": "\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom readthedocs.core.forms import UserDeleteForm\n\n\ndef create_profile(request, form_class, success_url=None,\n template_name='profiles/private/create_profile.html',\n extra_context=None):\n \"\"\"\n Create a profile for the current user, if one doesn't already exist.\n\n If the user already has a profile, a redirect will be issued to the\n :view:`profiles.views.edit_profile` view.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and creating the user\n profile. This form class must define a method named\n ``save()``, implementing the same argument signature as the\n ``save()`` method of a standard Django ``ModelForm`` (this\n view will call ``save(commit=False)`` to obtain the profile\n object, and fill in the user before the final save). If the\n profile object includes many-to-many relations, the convention\n established by ``ModelForm`` of using a method named\n ``save_m2m()`` will be used, and so your form class should\n also define this method.\n\n ``success_url``\n The URL to redirect to after successful profile creation. If\n this argument is not supplied, this will default to the URL of\n :view:`profiles.views.profile_detail` for the newly-created\n profile object.\n\n ``template_name``\n The template to use when displaying the profile-creation\n form. If not supplied, this will default to\n :template:`profiles/create_profile.html`.\n\n **Context:**\n\n ``form``\n The profile-creation form.\n\n **Template:**\n\n ``template_name`` keyword argument, or\n :template:`profiles/create_profile.html`.\n\n \"\"\"\n try:\n profile_obj = request.user.profile\n return HttpResponseRedirect(reverse('profiles_edit_profile'))\n except ObjectDoesNotExist:\n pass\n\n #\n # We set up success_url here, rather than as the default value for\n # the argument. 
Trying to do it as the argument's default would\n # mean evaluating the call to reverse() at the time this module is\n # first imported, which introduces a circular dependency: to\n # perform the reverse lookup we need access to profiles/urls.py,\n # but profiles/urls.py in turn imports this module.\n #\n\n if success_url is None:\n success_url = reverse('profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES)\n if form.is_valid():\n profile_obj = form.save(commit=False)\n profile_obj.user = request.user\n profile_obj.save()\n if hasattr(form, 'save_m2m'):\n form.save_m2m()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class()\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name,\n {'form': form},\n context_instance=context)\ncreate_profile = login_required(create_profile)\n\n\ndef edit_profile(request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html',\n extra_context=None):\n \"\"\"\n Edit the current user's profile.\n\n If the user does not already have a profile, a redirect will be issued to\n the :view:`profiles.views.create_profile` view.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and editing the user\n profile. This form class must operate similarly to a standard\n Django ``ModelForm`` in that it must accept an instance of the\n object to be edited as the keyword argument ``instance`` to\n its constructor, and it must implement a method named\n ``save()`` which will save the updates to the object.\n\n ``success_url``\n The URL to redirect to following a successful edit. If not\n specified, this will default to the URL of\n :view:`profiles.views.profile_detail` for the profile object\n being edited.\n\n ``template_name``\n The template to use when displaying the profile-editing\n form. 
If not specified, this will default to\n :template:`profiles/edit_profile.html`.\n\n **Context:**\n\n ``form``\n The form for editing the profile.\n\n ``profile``\n The user's current profile.\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/edit_profile.html`.\n\n \"\"\"\n try:\n profile_obj = request.user.profile\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('profiles_profile_create'))\n\n if success_url is None:\n success_url = reverse('profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(instance=profile_obj)\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name, {\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n }, context_instance=context)\nedit_profile = login_required(edit_profile)\n\n\n@login_required()\ndef delete_account(request):\n form = UserDeleteForm()\n template_name = 'profiles/private/delete_account.html'\n\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n # Delete the user permanently\n # It will also delete some projects where he is the only owner\n request.user.delete()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n\n return redirect('homepage')\n\n return render(request, template_name, {'form': form})\n\n\ndef profile_detail(request, username, public_profile_field=None,\n template_name='profiles/public/profile_detail.html',\n extra_context=None):\n \"\"\"\n Detail view of a user's profile.\n\n If the user has not yet created a profile, ``Http404`` will be\n raised.\n\n **Required arguments:**\n\n ``username``\n The username of the user whose profile is being displayed.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``public_profile_field``\n The name of a ``BooleanField`` on the profile model; if the\n value of that field on the user's profile is ``False``, the\n ``profile`` variable in the template will be ``None``. Use\n this feature to allow users to mark their profiles as not\n being publicly viewable.\n\n If this argument is not specified, it will be assumed that all\n users' profiles are publicly viewable.\n\n ``template_name``\n The name of the template to use for displaying the profile. 
If\n not specified, this will default to\n :template:`profiles/profile_detail.html`.\n\n **Context:**\n\n ``profile``\n The user's profile, or ``None`` if the user's profile is not\n publicly viewable (see the description of\n ``public_profile_field`` above).\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/profile_detail.html`.\n\n \"\"\"\n user = get_object_or_404(User, username=username)\n try:\n profile_obj = user.profile\n except ObjectDoesNotExist:\n raise Http404\n if public_profile_field is not None and \\\n not getattr(profile_obj, public_profile_field):\n profile_obj = None\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name,\n {'profile': profile_obj},\n context_instance=context)\n", "path": "readthedocs/profiles/views.py"}, {"content": "\"\"\"Signal handling for core app.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom corsheaders import signals\nfrom django.conf import settings\nfrom django.db.models.signals import pre_delete\nfrom django.dispatch import Signal\nfrom django.db.models import Q, Count\nfrom django.dispatch import receiver\nfrom future.backports.urllib.parse import urlparse\n\nfrom readthedocs.projects.models import Project, Domain\n\nlog = logging.getLogger(__name__)\n\nWHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']\n\n\nwebhook_github = Signal(providing_args=['project', 'data', 'event'])\nwebhook_gitlab = Signal(providing_args=['project', 'data', 'event'])\nwebhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])\n\n\ndef decide_if_cors(sender, request, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Decide whether a request should be given CORS access.\n\n This checks that:\n * The URL is whitelisted against our CORS-allowed domains\n * The Domain exists in our database, and belongs to the project being queried.\n\n Returns True when a request should be given CORS access.\n \"\"\"\n if 'HTTP_ORIGIN' not in request.META:\n return False\n host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]\n valid_url = False\n for url in WHITELIST_URLS:\n if request.path_info.startswith(url):\n valid_url = True\n\n if valid_url:\n project_slug = request.GET.get('project', None)\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n log.warning(\n 'Invalid project passed to domain. 
[{project}:{domain}'.format(\n project=project_slug,\n domain=host,\n )\n )\n return False\n\n domain = Domain.objects.filter(\n Q(domain__icontains=host),\n Q(project=project) | Q(project__subprojects__child=project)\n )\n if domain.exists():\n return True\n\n return False\n\n\n@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)\ndef delete_projects_and_organizations(sender, instance, *args, **kwargs):\n # Here we count the owner list from the projects that the user own\n # Then exclude the projects where there are more than one owner\n projects = instance.projects.all().annotate(num_users=Count('users')).exclude(num_users__gt=1)\n\n # Here we count the users list from the organization that the user belong\n # Then exclude the organizations where there are more than one user\n oauth_organizations = (instance.oauth_organizations.annotate(num_users=Count('users'))\n .exclude(num_users__gt=1))\n\n projects.delete()\n oauth_organizations.delete()\n\n\nsignals.check_request_enabled.connect(decide_if_cors)\n", "path": "readthedocs/core/signals.py"}]}
| 3,741 | 528 |
gh_patches_debug_49285
|
rasdani/github-patches
|
git_diff
|
ansible__awx-12242
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Websocket not working at non-root path
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Summary
Changes from #11342 and #652 are not full
### AWX version
21.0.0
### Select the relevant components
- [X] UI
- [ ] API
- [ ] Docs
### Installation method
kubernetes
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
Deploy AWX with custom `ingress_path: /awx`
### Expected results
websocket should work
### Actual results
`2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect
--- <exception caught here> ---
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__
]
2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect
--- <exception caught here> ---
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__`
### Additional information
It seems that issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py
```
websocket_urlpatterns = [
re_path(r'websocket/$', consumers.EventConsumer),
re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),
]
```
From https://docs.djangoproject.com/en/4.0/ref/urls/:
When a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used).
Replacing with
```
websocket_urlpatterns = [
re_path(r'websocket/', consumers.EventConsumer),
re_path(r'websocket/broadcast/', consumers.BroadcastConsumer),
]
```
solves the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awx/main/routing.py`
Content:
```
1 import redis
2 import logging
3
4 from django.conf import settings
5 from django.urls import re_path
6
7 from channels.auth import AuthMiddlewareStack
8 from channels.routing import ProtocolTypeRouter, URLRouter
9
10 from . import consumers
11
12
13 logger = logging.getLogger('awx.main.routing')
14
15
16 class AWXProtocolTypeRouter(ProtocolTypeRouter):
17 def __init__(self, *args, **kwargs):
18 try:
19 r = redis.Redis.from_url(settings.BROKER_URL)
20 for k in r.scan_iter('asgi:*', 500):
21 logger.debug(f"cleaning up Redis key {k}")
22 r.delete(k)
23 except redis.exceptions.RedisError as e:
24 logger.warning("encountered an error communicating with redis.")
25 raise e
26 super().__init__(*args, **kwargs)
27
28
29 websocket_urlpatterns = [
30 re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
31 re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
32 ]
33
34 application = AWXProtocolTypeRouter(
35 {
36 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),
37 }
38 )
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/awx/main/routing.py b/awx/main/routing.py
--- a/awx/main/routing.py
+++ b/awx/main/routing.py
@@ -27,8 +27,8 @@
websocket_urlpatterns = [
- re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),
- re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),
+ re_path(r'websocket/', consumers.EventConsumer.as_asgi()),
+ re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),
]
application = AWXProtocolTypeRouter(
|
{"golden_diff": "diff --git a/awx/main/routing.py b/awx/main/routing.py\n--- a/awx/main/routing.py\n+++ b/awx/main/routing.py\n@@ -27,8 +27,8 @@\n \n \n websocket_urlpatterns = [\n- re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),\n- re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),\n+ re_path(r'websocket/', consumers.EventConsumer.as_asgi()),\n+ re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),\n ]\n \n application = AWXProtocolTypeRouter(\n", "issue": "Websocket not working at non-root path\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Summary\n\nChanges from #11342 and #652 are not full\n\n### AWX version\n\n21.0.0\n\n### Select the relevant components\n\n- [X] UI\n- [ ] API\n- [ ] Docs\n\n### Installation method\n\nkubernetes\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\nDeploy AWX with custom `ingress_path: /awx`\n\n### Expected results\n\nwebsocket should work\n\n### Actual results\n\n`2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect\r\n--- <exception caught here> ---\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__\r\n]\r\n2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect\r\n--- <exception caught here> 
---\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__`\n\n### Additional information\n\nIt seems that issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py\r\n\r\n```\r\nwebsocket_urlpatterns = [\r\n re_path(r'websocket/$', consumers.EventConsumer),\r\n re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),\r\n]\r\n```\r\nFrom https://docs.djangoproject.com/en/4.0/ref/urls/:\r\n\r\nWhen a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used).\r\n\r\nReplacing with\r\n\r\n```\r\nwebsocket_urlpatterns = [\r\n re_path(r'websocket/', consumers.EventConsumer),\r\n re_path(r'websocket/broadcast/', consumers.BroadcastConsumer),\r\n]\r\n```\r\n\r\nsolves the issue\n", "before_files": [{"content": "import redis\nimport logging\n\nfrom django.conf import settings\nfrom django.urls import re_path\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom . import consumers\n\n\nlogger = logging.getLogger('awx.main.routing')\n\n\nclass AWXProtocolTypeRouter(ProtocolTypeRouter):\n def __init__(self, *args, **kwargs):\n try:\n r = redis.Redis.from_url(settings.BROKER_URL)\n for k in r.scan_iter('asgi:*', 500):\n logger.debug(f\"cleaning up Redis key {k}\")\n r.delete(k)\n except redis.exceptions.RedisError as e:\n logger.warning(\"encountered an error communicating with redis.\")\n raise e\n super().__init__(*args, **kwargs)\n\n\nwebsocket_urlpatterns = [\n re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),\n re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),\n]\n\napplication = AWXProtocolTypeRouter(\n {\n 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),\n }\n)\n", "path": "awx/main/routing.py"}], "after_files": [{"content": "import redis\nimport logging\n\nfrom django.conf import settings\nfrom django.urls import re_path\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom . 
import consumers\n\n\nlogger = logging.getLogger('awx.main.routing')\n\n\nclass AWXProtocolTypeRouter(ProtocolTypeRouter):\n def __init__(self, *args, **kwargs):\n try:\n r = redis.Redis.from_url(settings.BROKER_URL)\n for k in r.scan_iter('asgi:*', 500):\n logger.debug(f\"cleaning up Redis key {k}\")\n r.delete(k)\n except redis.exceptions.RedisError as e:\n logger.warning(\"encountered an error communicating with redis.\")\n raise e\n super().__init__(*args, **kwargs)\n\n\nwebsocket_urlpatterns = [\n re_path(r'websocket/', consumers.EventConsumer.as_asgi()),\n re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),\n]\n\napplication = AWXProtocolTypeRouter(\n {\n 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),\n }\n)\n", "path": "awx/main/routing.py"}]}
| 1,911 | 132 |
gh_patches_debug_18711
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-1766
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add validation for project name
**Is your feature request related to a problem? Please describe.**
A follow up to https://github.com/feast-dev/feast/pull/1752 - I think project name should definitely be validated early on. Otherwise could run into an error message (https://github.com/feast-dev/feast/pull/1752#issue-700598472) that is not obviously related to the project name.
**Describe the solution you'd like**
Validation upon feast apply that checks the project name. Can use the name validation function [here](https://github.com/feast-dev/feast/blob/a548c48927e6f6858d91a93cf356b43fe7c67aad/sdk/python/feast/repo_operations.py#L390).
**Describe alternatives you've considered**
There's probably more validation coverage to be done, and it could be done at a later time if there are higher priorities at the moment.
**Additional context**
@tedhtchang to take this simple addition on per https://github.com/feast-dev/feast/pull/1752#issuecomment-891339661
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/repo_config.py`
Content:
```
1 from pathlib import Path
2 from typing import Any
3
4 import yaml
5 from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator
6 from pydantic.error_wrappers import ErrorWrapper
7 from pydantic.typing import Dict, Optional, Union
8
9 from feast.importer import get_class_from_type
10 from feast.usage import log_exceptions
11
12 # These dict exists so that:
13 # - existing values for the online store type in featurestore.yaml files continue to work in a backwards compatible way
14 # - first party and third party implementations can use the same class loading code path.
15 ONLINE_STORE_CLASS_FOR_TYPE = {
16 "sqlite": "feast.infra.online_stores.sqlite.SqliteOnlineStore",
17 "datastore": "feast.infra.online_stores.datastore.DatastoreOnlineStore",
18 "redis": "feast.infra.online_stores.redis.RedisOnlineStore",
19 "dynamodb": "feast.infra.online_stores.dynamodb.DynamoDBOnlineStore",
20 }
21
22 OFFLINE_STORE_CLASS_FOR_TYPE = {
23 "file": "feast.infra.offline_stores.file.FileOfflineStore",
24 "bigquery": "feast.infra.offline_stores.bigquery.BigQueryOfflineStore",
25 "redshift": "feast.infra.offline_stores.redshift.RedshiftOfflineStore",
26 }
27
28
29 class FeastBaseModel(BaseModel):
30 """ Feast Pydantic Configuration Class """
31
32 class Config:
33 arbitrary_types_allowed = True
34 extra = "allow"
35
36
37 class FeastConfigBaseModel(BaseModel):
38 """ Feast Pydantic Configuration Class """
39
40 class Config:
41 arbitrary_types_allowed = True
42 extra = "forbid"
43
44
45 class RegistryConfig(FeastBaseModel):
46 """ Metadata Store Configuration. Configuration that relates to reading from and writing to the Feast registry."""
47
48 path: StrictStr
49 """ str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI """
50
51 cache_ttl_seconds: StrictInt = 600
52 """int: The cache TTL is the amount of time registry state will be cached in memory. If this TTL is exceeded then
53 the registry will be refreshed when any feature store method asks for access to registry state. The TTL can be
54 set to infinity by setting TTL to 0 seconds, which means the cache will only be loaded once and will never
55 expire. Users can manually refresh the cache by calling feature_store.refresh_registry() """
56
57
58 class RepoConfig(FeastBaseModel):
59 """ Repo config. Typically loaded from `feature_store.yaml` """
60
61 registry: Union[StrictStr, RegistryConfig] = "data/registry.db"
62 """ str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI """
63
64 project: StrictStr
65 """ str: Feast project id. This can be any alphanumeric string up to 16 characters.
66 You can have multiple independent feature repositories deployed to the same cloud
67 provider account, as long as they have different project ids.
68 """
69
70 provider: StrictStr
71 """ str: local or gcp or aws """
72
73 online_store: Any
74 """ OnlineStoreConfig: Online store configuration (optional depending on provider) """
75
76 offline_store: Any
77 """ OfflineStoreConfig: Offline store configuration (optional depending on provider) """
78
79 repo_path: Optional[Path] = None
80
81 def __init__(self, **data: Any):
82 super().__init__(**data)
83
84 if isinstance(self.online_store, Dict):
85 self.online_store = get_online_config_from_type(self.online_store["type"])(
86 **self.online_store
87 )
88 elif isinstance(self.online_store, str):
89 self.online_store = get_online_config_from_type(self.online_store)()
90
91 if isinstance(self.offline_store, Dict):
92 self.offline_store = get_offline_config_from_type(
93 self.offline_store["type"]
94 )(**self.offline_store)
95 elif isinstance(self.offline_store, str):
96 self.offline_store = get_offline_config_from_type(self.offline_store)()
97
98 def get_registry_config(self):
99 if isinstance(self.registry, str):
100 return RegistryConfig(path=self.registry)
101 else:
102 return self.registry
103
104 @root_validator(pre=True)
105 @log_exceptions
106 def _validate_online_store_config(cls, values):
107 # This method will validate whether the online store configurations are set correctly. This explicit validation
108 # is necessary because Pydantic Unions throw very verbose and cryptic exceptions. We also use this method to
109 # impute the default online store type based on the selected provider. For the time being this method should be
110 # considered tech debt until we can implement https://github.com/samuelcolvin/pydantic/issues/619 or a more
111 # granular configuration system
112
113 # Set empty online_store config if it isn't set explicitly
114 if "online_store" not in values:
115 values["online_store"] = dict()
116
117 # Skip if we aren't creating the configuration from a dict
118 if not isinstance(values["online_store"], Dict):
119 return values
120
121 # Make sure that the provider configuration is set. We need it to set the defaults
122 assert "provider" in values
123
124 # Set the default type
125 # This is only direct reference to a provider or online store that we should have
126 # for backwards compatibility.
127 if "type" not in values["online_store"]:
128 if values["provider"] == "local":
129 values["online_store"]["type"] = "sqlite"
130 elif values["provider"] == "gcp":
131 values["online_store"]["type"] = "datastore"
132 elif values["provider"] == "aws":
133 values["online_store"]["type"] = "dynamodb"
134
135 online_store_type = values["online_store"]["type"]
136
137 # Validate the dict to ensure one of the union types match
138 try:
139 online_config_class = get_online_config_from_type(online_store_type)
140 online_config_class(**values["online_store"])
141 except ValidationError as e:
142 raise ValidationError(
143 [ErrorWrapper(e, loc="online_store")], model=RepoConfig,
144 )
145
146 return values
147
148 @root_validator(pre=True)
149 def _validate_offline_store_config(cls, values):
150 # Set empty offline_store config if it isn't set explicitly
151 if "offline_store" not in values:
152 values["offline_store"] = dict()
153
154 # Skip if we aren't creating the configuration from a dict
155 if not isinstance(values["offline_store"], Dict):
156 return values
157
158 # Make sure that the provider configuration is set. We need it to set the defaults
159 assert "provider" in values
160
161 # Set the default type
162 if "type" not in values["offline_store"]:
163 if values["provider"] == "local":
164 values["offline_store"]["type"] = "file"
165 elif values["provider"] == "gcp":
166 values["offline_store"]["type"] = "bigquery"
167 elif values["provider"] == "aws":
168 values["offline_store"]["type"] = "redshift"
169
170 offline_store_type = values["offline_store"]["type"]
171
172 # Validate the dict to ensure one of the union types match
173 try:
174 offline_config_class = get_offline_config_from_type(offline_store_type)
175 offline_config_class(**values["offline_store"])
176 except ValidationError as e:
177 raise ValidationError(
178 [ErrorWrapper(e, loc="offline_store")], model=RepoConfig,
179 )
180
181 return values
182
183
184 class FeastConfigError(Exception):
185 def __init__(self, error_message, config_path):
186 self._error_message = error_message
187 self._config_path = config_path
188 super().__init__(self._error_message)
189
190 def __str__(self) -> str:
191 return f"{self._error_message}\nat {self._config_path}"
192
193 def __repr__(self) -> str:
194 return (
195 f"FeastConfigError({repr(self._error_message)}, {repr(self._config_path)})"
196 )
197
198
199 def get_data_source_class_from_type(data_source_type: str):
200 module_name, config_class_name = data_source_type.rsplit(".", 1)
201 return get_class_from_type(module_name, config_class_name, "Source")
202
203
204 def get_online_config_from_type(online_store_type: str):
205 if online_store_type in ONLINE_STORE_CLASS_FOR_TYPE:
206 online_store_type = ONLINE_STORE_CLASS_FOR_TYPE[online_store_type]
207 else:
208 assert online_store_type.endswith("OnlineStore")
209 module_name, online_store_class_type = online_store_type.rsplit(".", 1)
210 config_class_name = f"{online_store_class_type}Config"
211
212 return get_class_from_type(module_name, config_class_name, config_class_name)
213
214
215 def get_offline_config_from_type(offline_store_type: str):
216 if offline_store_type in OFFLINE_STORE_CLASS_FOR_TYPE:
217 offline_store_type = OFFLINE_STORE_CLASS_FOR_TYPE[offline_store_type]
218 else:
219 assert offline_store_type.endswith("OfflineStore")
220 module_name, offline_store_class_type = offline_store_type.rsplit(".", 1)
221 config_class_name = f"{offline_store_class_type}Config"
222
223 return get_class_from_type(module_name, config_class_name, config_class_name)
224
225
226 def load_repo_config(repo_path: Path) -> RepoConfig:
227 config_path = repo_path / "feature_store.yaml"
228
229 with open(config_path) as f:
230 raw_config = yaml.safe_load(f)
231 try:
232 c = RepoConfig(**raw_config)
233 c.repo_path = repo_path
234 return c
235 except ValidationError as e:
236 raise FeastConfigError(e, config_path)
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/feast/repo_config.py b/sdk/python/feast/repo_config.py
--- a/sdk/python/feast/repo_config.py
+++ b/sdk/python/feast/repo_config.py
@@ -2,7 +2,14 @@
from typing import Any
import yaml
-from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator
+from pydantic import (
+ BaseModel,
+ StrictInt,
+ StrictStr,
+ ValidationError,
+ root_validator,
+ validator,
+)
from pydantic.error_wrappers import ErrorWrapper
from pydantic.typing import Dict, Optional, Union
@@ -180,6 +187,17 @@
return values
+ @validator("project")
+ def _validate_project_name(cls, v):
+ from feast.repo_operations import is_valid_name
+
+ if not is_valid_name(v):
+ raise ValueError(
+ f"Project name, {v}, should only have "
+ f"alphanumerical values and underscores but not start with an underscore."
+ )
+ return v
+
class FeastConfigError(Exception):
def __init__(self, error_message, config_path):
|
{"golden_diff": "diff --git a/sdk/python/feast/repo_config.py b/sdk/python/feast/repo_config.py\n--- a/sdk/python/feast/repo_config.py\n+++ b/sdk/python/feast/repo_config.py\n@@ -2,7 +2,14 @@\n from typing import Any\n \n import yaml\n-from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator\n+from pydantic import (\n+ BaseModel,\n+ StrictInt,\n+ StrictStr,\n+ ValidationError,\n+ root_validator,\n+ validator,\n+)\n from pydantic.error_wrappers import ErrorWrapper\n from pydantic.typing import Dict, Optional, Union\n \n@@ -180,6 +187,17 @@\n \n return values\n \n+ @validator(\"project\")\n+ def _validate_project_name(cls, v):\n+ from feast.repo_operations import is_valid_name\n+\n+ if not is_valid_name(v):\n+ raise ValueError(\n+ f\"Project name, {v}, should only have \"\n+ f\"alphanumerical values and underscores but not start with an underscore.\"\n+ )\n+ return v\n+\n \n class FeastConfigError(Exception):\n def __init__(self, error_message, config_path):\n", "issue": "Add validation for project name\n**Is your feature request related to a problem? Please describe.**\r\nA follow up to https://github.com/feast-dev/feast/pull/1752 - I think project name should definitely be validated early on. Otherwise could run into an error message (https://github.com/feast-dev/feast/pull/1752#issue-700598472) that is not obviously related to the project name.\r\n\r\n**Describe the solution you'd like**\r\nValidation upon feast apply that checks the project name. Can use the name validation function [here](https://github.com/feast-dev/feast/blob/a548c48927e6f6858d91a93cf356b43fe7c67aad/sdk/python/feast/repo_operations.py#L390).\r\n\r\n**Describe alternatives you've considered**\r\nThere's probably more validation coverage to be done, and it could be done at a later time if there are higher priorities at the moment.\r\n\r\n**Additional context**\r\n@tedhtchang to take this simple addition on per https://github.com/feast-dev/feast/pull/1752#issuecomment-891339661\r\n\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Any\n\nimport yaml\nfrom pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator\nfrom pydantic.error_wrappers import ErrorWrapper\nfrom pydantic.typing import Dict, Optional, Union\n\nfrom feast.importer import get_class_from_type\nfrom feast.usage import log_exceptions\n\n# These dict exists so that:\n# - existing values for the online store type in featurestore.yaml files continue to work in a backwards compatible way\n# - first party and third party implementations can use the same class loading code path.\nONLINE_STORE_CLASS_FOR_TYPE = {\n \"sqlite\": \"feast.infra.online_stores.sqlite.SqliteOnlineStore\",\n \"datastore\": \"feast.infra.online_stores.datastore.DatastoreOnlineStore\",\n \"redis\": \"feast.infra.online_stores.redis.RedisOnlineStore\",\n \"dynamodb\": \"feast.infra.online_stores.dynamodb.DynamoDBOnlineStore\",\n}\n\nOFFLINE_STORE_CLASS_FOR_TYPE = {\n \"file\": \"feast.infra.offline_stores.file.FileOfflineStore\",\n \"bigquery\": \"feast.infra.offline_stores.bigquery.BigQueryOfflineStore\",\n \"redshift\": \"feast.infra.offline_stores.redshift.RedshiftOfflineStore\",\n}\n\n\nclass FeastBaseModel(BaseModel):\n \"\"\" Feast Pydantic Configuration Class \"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"allow\"\n\n\nclass FeastConfigBaseModel(BaseModel):\n \"\"\" Feast Pydantic Configuration Class \"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n extra = 
\"forbid\"\n\n\nclass RegistryConfig(FeastBaseModel):\n \"\"\" Metadata Store Configuration. Configuration that relates to reading from and writing to the Feast registry.\"\"\"\n\n path: StrictStr\n \"\"\" str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI \"\"\"\n\n cache_ttl_seconds: StrictInt = 600\n \"\"\"int: The cache TTL is the amount of time registry state will be cached in memory. If this TTL is exceeded then\n the registry will be refreshed when any feature store method asks for access to registry state. The TTL can be\n set to infinity by setting TTL to 0 seconds, which means the cache will only be loaded once and will never\n expire. Users can manually refresh the cache by calling feature_store.refresh_registry() \"\"\"\n\n\nclass RepoConfig(FeastBaseModel):\n \"\"\" Repo config. Typically loaded from `feature_store.yaml` \"\"\"\n\n registry: Union[StrictStr, RegistryConfig] = \"data/registry.db\"\n \"\"\" str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI \"\"\"\n\n project: StrictStr\n \"\"\" str: Feast project id. This can be any alphanumeric string up to 16 characters.\n You can have multiple independent feature repositories deployed to the same cloud\n provider account, as long as they have different project ids.\n \"\"\"\n\n provider: StrictStr\n \"\"\" str: local or gcp or aws \"\"\"\n\n online_store: Any\n \"\"\" OnlineStoreConfig: Online store configuration (optional depending on provider) \"\"\"\n\n offline_store: Any\n \"\"\" OfflineStoreConfig: Offline store configuration (optional depending on provider) \"\"\"\n\n repo_path: Optional[Path] = None\n\n def __init__(self, **data: Any):\n super().__init__(**data)\n\n if isinstance(self.online_store, Dict):\n self.online_store = get_online_config_from_type(self.online_store[\"type\"])(\n **self.online_store\n )\n elif isinstance(self.online_store, str):\n self.online_store = get_online_config_from_type(self.online_store)()\n\n if isinstance(self.offline_store, Dict):\n self.offline_store = get_offline_config_from_type(\n self.offline_store[\"type\"]\n )(**self.offline_store)\n elif isinstance(self.offline_store, str):\n self.offline_store = get_offline_config_from_type(self.offline_store)()\n\n def get_registry_config(self):\n if isinstance(self.registry, str):\n return RegistryConfig(path=self.registry)\n else:\n return self.registry\n\n @root_validator(pre=True)\n @log_exceptions\n def _validate_online_store_config(cls, values):\n # This method will validate whether the online store configurations are set correctly. This explicit validation\n # is necessary because Pydantic Unions throw very verbose and cryptic exceptions. We also use this method to\n # impute the default online store type based on the selected provider. For the time being this method should be\n # considered tech debt until we can implement https://github.com/samuelcolvin/pydantic/issues/619 or a more\n # granular configuration system\n\n # Set empty online_store config if it isn't set explicitly\n if \"online_store\" not in values:\n values[\"online_store\"] = dict()\n\n # Skip if we aren't creating the configuration from a dict\n if not isinstance(values[\"online_store\"], Dict):\n return values\n\n # Make sure that the provider configuration is set. 
We need it to set the defaults\n assert \"provider\" in values\n\n # Set the default type\n # This is only direct reference to a provider or online store that we should have\n # for backwards compatibility.\n if \"type\" not in values[\"online_store\"]:\n if values[\"provider\"] == \"local\":\n values[\"online_store\"][\"type\"] = \"sqlite\"\n elif values[\"provider\"] == \"gcp\":\n values[\"online_store\"][\"type\"] = \"datastore\"\n elif values[\"provider\"] == \"aws\":\n values[\"online_store\"][\"type\"] = \"dynamodb\"\n\n online_store_type = values[\"online_store\"][\"type\"]\n\n # Validate the dict to ensure one of the union types match\n try:\n online_config_class = get_online_config_from_type(online_store_type)\n online_config_class(**values[\"online_store\"])\n except ValidationError as e:\n raise ValidationError(\n [ErrorWrapper(e, loc=\"online_store\")], model=RepoConfig,\n )\n\n return values\n\n @root_validator(pre=True)\n def _validate_offline_store_config(cls, values):\n # Set empty offline_store config if it isn't set explicitly\n if \"offline_store\" not in values:\n values[\"offline_store\"] = dict()\n\n # Skip if we aren't creating the configuration from a dict\n if not isinstance(values[\"offline_store\"], Dict):\n return values\n\n # Make sure that the provider configuration is set. We need it to set the defaults\n assert \"provider\" in values\n\n # Set the default type\n if \"type\" not in values[\"offline_store\"]:\n if values[\"provider\"] == \"local\":\n values[\"offline_store\"][\"type\"] = \"file\"\n elif values[\"provider\"] == \"gcp\":\n values[\"offline_store\"][\"type\"] = \"bigquery\"\n elif values[\"provider\"] == \"aws\":\n values[\"offline_store\"][\"type\"] = \"redshift\"\n\n offline_store_type = values[\"offline_store\"][\"type\"]\n\n # Validate the dict to ensure one of the union types match\n try:\n offline_config_class = get_offline_config_from_type(offline_store_type)\n offline_config_class(**values[\"offline_store\"])\n except ValidationError as e:\n raise ValidationError(\n [ErrorWrapper(e, loc=\"offline_store\")], model=RepoConfig,\n )\n\n return values\n\n\nclass FeastConfigError(Exception):\n def __init__(self, error_message, config_path):\n self._error_message = error_message\n self._config_path = config_path\n super().__init__(self._error_message)\n\n def __str__(self) -> str:\n return f\"{self._error_message}\\nat {self._config_path}\"\n\n def __repr__(self) -> str:\n return (\n f\"FeastConfigError({repr(self._error_message)}, {repr(self._config_path)})\"\n )\n\n\ndef get_data_source_class_from_type(data_source_type: str):\n module_name, config_class_name = data_source_type.rsplit(\".\", 1)\n return get_class_from_type(module_name, config_class_name, \"Source\")\n\n\ndef get_online_config_from_type(online_store_type: str):\n if online_store_type in ONLINE_STORE_CLASS_FOR_TYPE:\n online_store_type = ONLINE_STORE_CLASS_FOR_TYPE[online_store_type]\n else:\n assert online_store_type.endswith(\"OnlineStore\")\n module_name, online_store_class_type = online_store_type.rsplit(\".\", 1)\n config_class_name = f\"{online_store_class_type}Config\"\n\n return get_class_from_type(module_name, config_class_name, config_class_name)\n\n\ndef get_offline_config_from_type(offline_store_type: str):\n if offline_store_type in OFFLINE_STORE_CLASS_FOR_TYPE:\n offline_store_type = OFFLINE_STORE_CLASS_FOR_TYPE[offline_store_type]\n else:\n assert offline_store_type.endswith(\"OfflineStore\")\n module_name, offline_store_class_type = 
offline_store_type.rsplit(\".\", 1)\n config_class_name = f\"{offline_store_class_type}Config\"\n\n return get_class_from_type(module_name, config_class_name, config_class_name)\n\n\ndef load_repo_config(repo_path: Path) -> RepoConfig:\n config_path = repo_path / \"feature_store.yaml\"\n\n with open(config_path) as f:\n raw_config = yaml.safe_load(f)\n try:\n c = RepoConfig(**raw_config)\n c.repo_path = repo_path\n return c\n except ValidationError as e:\n raise FeastConfigError(e, config_path)\n", "path": "sdk/python/feast/repo_config.py"}], "after_files": [{"content": "from pathlib import Path\nfrom typing import Any\n\nimport yaml\nfrom pydantic import (\n BaseModel,\n StrictInt,\n StrictStr,\n ValidationError,\n root_validator,\n validator,\n)\nfrom pydantic.error_wrappers import ErrorWrapper\nfrom pydantic.typing import Dict, Optional, Union\n\nfrom feast.importer import get_class_from_type\nfrom feast.usage import log_exceptions\n\n# These dict exists so that:\n# - existing values for the online store type in featurestore.yaml files continue to work in a backwards compatible way\n# - first party and third party implementations can use the same class loading code path.\nONLINE_STORE_CLASS_FOR_TYPE = {\n \"sqlite\": \"feast.infra.online_stores.sqlite.SqliteOnlineStore\",\n \"datastore\": \"feast.infra.online_stores.datastore.DatastoreOnlineStore\",\n \"redis\": \"feast.infra.online_stores.redis.RedisOnlineStore\",\n \"dynamodb\": \"feast.infra.online_stores.dynamodb.DynamoDBOnlineStore\",\n}\n\nOFFLINE_STORE_CLASS_FOR_TYPE = {\n \"file\": \"feast.infra.offline_stores.file.FileOfflineStore\",\n \"bigquery\": \"feast.infra.offline_stores.bigquery.BigQueryOfflineStore\",\n \"redshift\": \"feast.infra.offline_stores.redshift.RedshiftOfflineStore\",\n}\n\n\nclass FeastBaseModel(BaseModel):\n \"\"\" Feast Pydantic Configuration Class \"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"allow\"\n\n\nclass FeastConfigBaseModel(BaseModel):\n \"\"\" Feast Pydantic Configuration Class \"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"forbid\"\n\n\nclass RegistryConfig(FeastBaseModel):\n \"\"\" Metadata Store Configuration. Configuration that relates to reading from and writing to the Feast registry.\"\"\"\n\n path: StrictStr\n \"\"\" str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI \"\"\"\n\n cache_ttl_seconds: StrictInt = 600\n \"\"\"int: The cache TTL is the amount of time registry state will be cached in memory. If this TTL is exceeded then\n the registry will be refreshed when any feature store method asks for access to registry state. The TTL can be\n set to infinity by setting TTL to 0 seconds, which means the cache will only be loaded once and will never\n expire. Users can manually refresh the cache by calling feature_store.refresh_registry() \"\"\"\n\n\nclass RepoConfig(FeastBaseModel):\n \"\"\" Repo config. Typically loaded from `feature_store.yaml` \"\"\"\n\n registry: Union[StrictStr, RegistryConfig] = \"data/registry.db\"\n \"\"\" str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI \"\"\"\n\n project: StrictStr\n \"\"\" str: Feast project id. 
This can be any alphanumeric string up to 16 characters.\n You can have multiple independent feature repositories deployed to the same cloud\n provider account, as long as they have different project ids.\n \"\"\"\n\n provider: StrictStr\n \"\"\" str: local or gcp or aws \"\"\"\n\n online_store: Any\n \"\"\" OnlineStoreConfig: Online store configuration (optional depending on provider) \"\"\"\n\n offline_store: Any\n \"\"\" OfflineStoreConfig: Offline store configuration (optional depending on provider) \"\"\"\n\n repo_path: Optional[Path] = None\n\n def __init__(self, **data: Any):\n super().__init__(**data)\n\n if isinstance(self.online_store, Dict):\n self.online_store = get_online_config_from_type(self.online_store[\"type\"])(\n **self.online_store\n )\n elif isinstance(self.online_store, str):\n self.online_store = get_online_config_from_type(self.online_store)()\n\n if isinstance(self.offline_store, Dict):\n self.offline_store = get_offline_config_from_type(\n self.offline_store[\"type\"]\n )(**self.offline_store)\n elif isinstance(self.offline_store, str):\n self.offline_store = get_offline_config_from_type(self.offline_store)()\n\n def get_registry_config(self):\n if isinstance(self.registry, str):\n return RegistryConfig(path=self.registry)\n else:\n return self.registry\n\n @root_validator(pre=True)\n @log_exceptions\n def _validate_online_store_config(cls, values):\n # This method will validate whether the online store configurations are set correctly. This explicit validation\n # is necessary because Pydantic Unions throw very verbose and cryptic exceptions. We also use this method to\n # impute the default online store type based on the selected provider. For the time being this method should be\n # considered tech debt until we can implement https://github.com/samuelcolvin/pydantic/issues/619 or a more\n # granular configuration system\n\n # Set empty online_store config if it isn't set explicitly\n if \"online_store\" not in values:\n values[\"online_store\"] = dict()\n\n # Skip if we aren't creating the configuration from a dict\n if not isinstance(values[\"online_store\"], Dict):\n return values\n\n # Make sure that the provider configuration is set. We need it to set the defaults\n assert \"provider\" in values\n\n # Set the default type\n # This is only direct reference to a provider or online store that we should have\n # for backwards compatibility.\n if \"type\" not in values[\"online_store\"]:\n if values[\"provider\"] == \"local\":\n values[\"online_store\"][\"type\"] = \"sqlite\"\n elif values[\"provider\"] == \"gcp\":\n values[\"online_store\"][\"type\"] = \"datastore\"\n elif values[\"provider\"] == \"aws\":\n values[\"online_store\"][\"type\"] = \"dynamodb\"\n\n online_store_type = values[\"online_store\"][\"type\"]\n\n # Validate the dict to ensure one of the union types match\n try:\n online_config_class = get_online_config_from_type(online_store_type)\n online_config_class(**values[\"online_store\"])\n except ValidationError as e:\n raise ValidationError(\n [ErrorWrapper(e, loc=\"online_store\")], model=RepoConfig,\n )\n\n return values\n\n @root_validator(pre=True)\n def _validate_offline_store_config(cls, values):\n # Set empty offline_store config if it isn't set explicitly\n if \"offline_store\" not in values:\n values[\"offline_store\"] = dict()\n\n # Skip if we aren't creating the configuration from a dict\n if not isinstance(values[\"offline_store\"], Dict):\n return values\n\n # Make sure that the provider configuration is set. 
We need it to set the defaults\n assert \"provider\" in values\n\n # Set the default type\n if \"type\" not in values[\"offline_store\"]:\n if values[\"provider\"] == \"local\":\n values[\"offline_store\"][\"type\"] = \"file\"\n elif values[\"provider\"] == \"gcp\":\n values[\"offline_store\"][\"type\"] = \"bigquery\"\n elif values[\"provider\"] == \"aws\":\n values[\"offline_store\"][\"type\"] = \"redshift\"\n\n offline_store_type = values[\"offline_store\"][\"type\"]\n\n # Validate the dict to ensure one of the union types match\n try:\n offline_config_class = get_offline_config_from_type(offline_store_type)\n offline_config_class(**values[\"offline_store\"])\n except ValidationError as e:\n raise ValidationError(\n [ErrorWrapper(e, loc=\"offline_store\")], model=RepoConfig,\n )\n\n return values\n\n @validator(\"project\")\n def _validate_project_name(cls, v):\n from feast.repo_operations import is_valid_name\n\n if not is_valid_name(v):\n raise ValueError(\n f\"Project name, {v}, should only have \"\n f\"alphanumerical values and underscores but not start with an underscore.\"\n )\n return v\n\n\nclass FeastConfigError(Exception):\n def __init__(self, error_message, config_path):\n self._error_message = error_message\n self._config_path = config_path\n super().__init__(self._error_message)\n\n def __str__(self) -> str:\n return f\"{self._error_message}\\nat {self._config_path}\"\n\n def __repr__(self) -> str:\n return (\n f\"FeastConfigError({repr(self._error_message)}, {repr(self._config_path)})\"\n )\n\n\ndef get_data_source_class_from_type(data_source_type: str):\n module_name, config_class_name = data_source_type.rsplit(\".\", 1)\n return get_class_from_type(module_name, config_class_name, \"Source\")\n\n\ndef get_online_config_from_type(online_store_type: str):\n if online_store_type in ONLINE_STORE_CLASS_FOR_TYPE:\n online_store_type = ONLINE_STORE_CLASS_FOR_TYPE[online_store_type]\n else:\n assert online_store_type.endswith(\"OnlineStore\")\n module_name, online_store_class_type = online_store_type.rsplit(\".\", 1)\n config_class_name = f\"{online_store_class_type}Config\"\n\n return get_class_from_type(module_name, config_class_name, config_class_name)\n\n\ndef get_offline_config_from_type(offline_store_type: str):\n if offline_store_type in OFFLINE_STORE_CLASS_FOR_TYPE:\n offline_store_type = OFFLINE_STORE_CLASS_FOR_TYPE[offline_store_type]\n else:\n assert offline_store_type.endswith(\"OfflineStore\")\n module_name, offline_store_class_type = offline_store_type.rsplit(\".\", 1)\n config_class_name = f\"{offline_store_class_type}Config\"\n\n return get_class_from_type(module_name, config_class_name, config_class_name)\n\n\ndef load_repo_config(repo_path: Path) -> RepoConfig:\n config_path = repo_path / \"feature_store.yaml\"\n\n with open(config_path) as f:\n raw_config = yaml.safe_load(f)\n try:\n c = RepoConfig(**raw_config)\n c.repo_path = repo_path\n return c\n except ValidationError as e:\n raise FeastConfigError(e, config_path)\n", "path": "sdk/python/feast/repo_config.py"}]}
| 3,227 | 270 |
gh_patches_debug_20926
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-323
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When server is launched, browser window should open automagically
When guest lecturing at GMU, I observed that users did not read the instructions to visit 127.0.0.1... in their browser. I think Jupyter spoiled them ;-). We should add functionality to have the browser auto-launch when someone kicks off a model.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesa/visualization/ModularVisualization.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 ModularServer
4 =============
5
6 A visualization server which renders a model via one or more elements.
7
8 The concept for the modular visualization server as follows:
9 A visualization is composed of VisualizationElements, each of which defines how
10 to generate some visualization from a model instance and render it on the
11 client. VisualizationElements may be anything from a simple text display to
12 a multilayered HTML5 canvas.
13
14 The actual server is launched with one or more VisualizationElements;
15 it runs the model object through each of them, generating data to be sent to
16 the client. The client page is also generated based on the JavaScript code
17 provided by each element.
18
19 This file consists of the following classes:
20
21 VisualizationElement: Parent class for all other visualization elements, with
22 the minimal necessary options.
23 PageHandler: The handler for the visualization page, generated from a template
24 and built from the various visualization elements.
25 SocketHandler: Handles the websocket connection between the client page and
26 the server.
27 ModularServer: The overall visualization application class which stores and
28 controls the model and visualization instance.
29
30
31 ModularServer should *not* need to be subclassed on a model-by-model basis; it
32 should be primarily a pass-through for VisualizationElement subclasses, which
33 define the actual visualization specifics.
34
35 For example, suppose we have created two visualization elements for our model,
36 called canvasvis and graphvis; we would launch a server with:
37
38 server = ModularServer(MyModel, [canvasvis, graphvis], name="My Model")
39 server.launch()
40
41 The client keeps track of what step it is showing. Clicking the Step button in
42 the browser sends a message requesting the viz_state corresponding to the next
43 step position, which is then sent back to the client via the websocket.
44
45 The websocket protocol is as follows:
46 Each message is a JSON object, with a "type" property which defines the rest of
47 the structure.
48
49 Server -> Client:
50 Send over the model state to visualize.
51 Model state is a list, with each element corresponding to a div; each div
52 is expected to have a render function associated with it, which knows how
53 to render that particular data. The example below includes two elements:
54 the first is data for a CanvasGrid, the second for a raw text display.
55
56 {
57 "type": "viz_state",
58 "data": [{0:[ {"Shape": "circle", "x": 0, "y": 0, "r": 0.5,
59 "Color": "#AAAAAA", "Filled": "true", "Layer": 0,
60 "text": 'A', "text_color": "white" }]},
61 "Shape Count: 1"]
62 }
63
64 Informs the client that the model is over.
65 {"type": "end"}
66
67 Client -> Server:
68 Reset the model.
69 TODO: Allow this to come with parameters
70 {
71 "type": "reset"
72 }
73
74 Get a given state.
75 {
76 "type": "get_step",
77 "step:" index of the step to get.
78 }
79
80 """
81 import os
82
83 import tornado.ioloop
84 import tornado.template
85 import tornado.web
86 import tornado.websocket
87 import tornado.escape
88 import tornado.gen
89
90 # Suppress several pylint warnings for this file.
91 # Attributes being defined outside of init is a Tornado feature.
92 # pylint: disable=attribute-defined-outside-init
93
94
95 class VisualizationElement:
96 """
97 Defines an element of the visualization.
98
99 Attributes:
100 package_includes: A list of external JavaScript files to include that
101 are part of the Mesa packages.
102 local_includes: A list of JavaScript files that are local to the
103 directory that the server is being run in.
104 js_code: A JavaScript code string to instantiate the element.
105
106 Methods:
107 render: Takes a model object, and produces JSON data which can be sent
108 to the client.
109
110 """
111
112 package_includes = []
113 local_includes = []
114 js_code = ''
115 render_args = {}
116
117 def __init__(self):
118 pass
119
120 def render(self, model):
121 """ Build visualization data from a model object.
122
123 Args:
124 model: A model object
125
126 Returns:
127 A JSON-ready object.
128
129 """
130 return "<b>VisualizationElement goes here</b>."
131
132 # =============================================================================
133 # Actual Tornado code starts here:
134
135
136 class PageHandler(tornado.web.RequestHandler):
137 """ Handler for the HTML template which holds the visualization. """
138
139 def get(self):
140 elements = self.application.visualization_elements
141 for i, element in enumerate(elements):
142 element.index = i
143 self.render("modular_template.html", port=self.application.port,
144 model_name=self.application.model_name,
145 package_includes=self.application.package_includes,
146 local_includes=self.application.local_includes,
147 scripts=self.application.js_code)
148
149
150 class SocketHandler(tornado.websocket.WebSocketHandler):
151 """ Handler for websocket. """
152 def open(self):
153 if self.application.verbose:
154 print("Socket opened!")
155
156 def check_origin(self, origin):
157 return True
158
159 def on_message(self, message):
160 """ Receiving a message from the websocket, parse, and act accordingly.
161
162 """
163 if self.application.verbose:
164 print(message)
165 msg = tornado.escape.json_decode(message)
166
167 if msg["type"] == "get_step":
168 self.application.model.step()
169 self.write_message({"type": "viz_state",
170 "data": self.application.render_model()})
171
172 elif msg["type"] == "reset":
173 self.application.reset_model()
174 self.write_message({"type": "viz_state",
175 "data": self.application.render_model()})
176
177 else:
178 if self.application.verbose:
179 print("Unexpected message!")
180
181
182 class ModularServer(tornado.web.Application):
183 """ Main visualization application. """
184 verbose = True
185
186 model_name = "Mesa Model"
187 model_cls = None # A model class
188 portrayal_method = None
189 port = 8888 # Default port to listen on
190 canvas_width = 500
191 canvas_height = 500
192 grid_height = 0
193 grid_width = 0
194
195 max_steps = 100000
196
197 model_args = ()
198 model_kwargs = {}
199
200 # Handlers and other globals:
201 page_handler = (r'/', PageHandler)
202 socket_handler = (r'/ws', SocketHandler)
203 static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,
204 {"path": os.path.dirname(__file__) + "/templates"})
205 local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,
206 {"path": ''})
207
208 handlers = [page_handler, socket_handler, static_handler, local_handler]
209
210 settings = {"debug": True,
211 "template_path": os.path.dirname(__file__) + "/templates"}
212
213 def __init__(self, model_cls, visualization_elements, name="Mesa Model",
214 *args, **kwargs):
215 """ Create a new visualization server with the given elements. """
216 # Prep visualization elements:
217 self.visualization_elements = visualization_elements
218 self.package_includes = set()
219 self.local_includes = set()
220 self.js_code = []
221 for element in self.visualization_elements:
222 for include_file in element.package_includes:
223 self.package_includes.add(include_file)
224 for include_file in element.local_includes:
225 self.local_includes.add(include_file)
226 self.js_code.append(element.js_code)
227
228 # Initializing the model
229 self.model_name = name
230 self.model_cls = model_cls
231
232 self.model_args = args
233 self.model_kwargs = kwargs
234 self.reset_model()
235
236 # Initializing the application itself:
237 super().__init__(self.handlers, **self.settings)
238
239 def reset_model(self):
240 """ Reinstantiate the model object, using the current parameters. """
241 self.model = self.model_cls(*self.model_args, **self.model_kwargs)
242
243 def render_model(self):
244 """ Turn the current state of the model into a dictionary of
245 visualizations
246
247 """
248 visualization_state = []
249 for element in self.visualization_elements:
250 element_state = element.render(self.model)
251 visualization_state.append(element_state)
252 return visualization_state
253
254 def launch(self, port=None):
255 """ Run the app. """
256 if port is not None:
257 self.port = port
258 print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port))
259 self.listen(self.port)
260 tornado.ioloop.IOLoop.instance().start()
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py
--- a/mesa/visualization/ModularVisualization.py
+++ b/mesa/visualization/ModularVisualization.py
@@ -81,12 +81,13 @@
import os
import tornado.ioloop
-import tornado.template
import tornado.web
import tornado.websocket
import tornado.escape
import tornado.gen
+import webbrowser
+
# Suppress several pylint warnings for this file.
# Attributes being defined outside of init is a Tornado feature.
# pylint: disable=attribute-defined-outside-init
@@ -255,6 +256,8 @@
""" Run the app. """
if port is not None:
self.port = port
- print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port))
+ url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)
+ print('Interface starting at {url}'.format(url=url))
self.listen(self.port)
+ webbrowser.open(url)
tornado.ioloop.IOLoop.instance().start()
|
{"golden_diff": "diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py\n--- a/mesa/visualization/ModularVisualization.py\n+++ b/mesa/visualization/ModularVisualization.py\n@@ -81,12 +81,13 @@\n import os\n \n import tornado.ioloop\n-import tornado.template\n import tornado.web\n import tornado.websocket\n import tornado.escape\n import tornado.gen\n \n+import webbrowser\n+\n # Suppress several pylint warnings for this file.\n # Attributes being defined outside of init is a Tornado feature.\n # pylint: disable=attribute-defined-outside-init\n@@ -255,6 +256,8 @@\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n- print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port))\n+ url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)\n+ print('Interface starting at {url}'.format(url=url))\n self.listen(self.port)\n+ webbrowser.open(url)\n tornado.ioloop.IOLoop.instance().start()\n", "issue": "When server is launched, browser window should open automagically \nWhen guest lecturing at GMU, I observed that users did not read the instructions to visit 127.0.0.1... in their browser. I think jupyter spoiled them ;-). We should add functionality to have the browser auto launch when someone kicks off a model.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. 
Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n\"\"\"\nimport os\n\nimport tornado.ioloop\nimport tornado.template\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript files to include that\n are part of the Mesa packages.\n local_includes: A list of JavaScript files that are local to the\n directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = ''\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\" Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\" Handler for the HTML template which holds the visualization. \"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\"modular_template.html\", port=self.application.port,\n model_name=self.application.model_name,\n package_includes=self.application.package_includes,\n local_includes=self.application.local_includes,\n scripts=self.application.js_code)\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\" Handler for websocket. 
\"\"\"\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n\n def check_origin(self, origin):\n return True\n\n def on_message(self, message):\n \"\"\" Receiving a message from the websocket, parse, and act accordingly.\n\n \"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n self.application.model.step()\n self.write_message({\"type\": \"viz_state\",\n \"data\": self.application.render_model()})\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message({\"type\": \"viz_state\",\n \"data\": self.application.render_model()})\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\" Main visualization application. \"\"\"\n verbose = True\n\n model_name = \"Mesa Model\"\n model_cls = None # A model class\n portrayal_method = None\n port = 8888 # Default port to listen on\n canvas_width = 500\n canvas_height = 500\n grid_height = 0\n grid_width = 0\n\n max_steps = 100000\n\n model_args = ()\n model_kwargs = {}\n\n # Handlers and other globals:\n page_handler = (r'/', PageHandler)\n socket_handler = (r'/ws', SocketHandler)\n static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"})\n local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,\n {\"path\": ''})\n\n handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n settings = {\"debug\": True,\n \"template_path\": os.path.dirname(__file__) + \"/templates\"}\n\n def __init__(self, model_cls, visualization_elements, name=\"Mesa Model\",\n *args, **kwargs):\n \"\"\" Create a new visualization server with the given elements. \"\"\"\n # Prep visualization elements:\n self.visualization_elements = visualization_elements\n self.package_includes = set()\n self.local_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n self.package_includes.add(include_file)\n for include_file in element.local_includes:\n self.local_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n\n self.model_args = args\n self.model_kwargs = kwargs\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n def reset_model(self):\n \"\"\" Reinstantiate the model object, using the current parameters. \"\"\"\n self.model = self.model_cls(*self.model_args, **self.model_kwargs)\n\n def render_model(self):\n \"\"\" Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None):\n \"\"\" Run the app. 
\"\"\"\n if port is not None:\n self.port = port\n print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port))\n self.listen(self.port)\n tornado.ioloop.IOLoop.instance().start()\n", "path": "mesa/visualization/ModularVisualization.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. 
The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n\"\"\"\nimport os\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\n\nimport webbrowser\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript files to include that\n are part of the Mesa packages.\n local_includes: A list of JavaScript files that are local to the\n directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = ''\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\" Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\" Handler for the HTML template which holds the visualization. \"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\"modular_template.html\", port=self.application.port,\n model_name=self.application.model_name,\n package_includes=self.application.package_includes,\n local_includes=self.application.local_includes,\n scripts=self.application.js_code)\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\" Handler for websocket. \"\"\"\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n\n def check_origin(self, origin):\n return True\n\n def on_message(self, message):\n \"\"\" Receiving a message from the websocket, parse, and act accordingly.\n\n \"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n self.application.model.step()\n self.write_message({\"type\": \"viz_state\",\n \"data\": self.application.render_model()})\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message({\"type\": \"viz_state\",\n \"data\": self.application.render_model()})\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\" Main visualization application. 
\"\"\"\n verbose = True\n\n model_name = \"Mesa Model\"\n model_cls = None # A model class\n portrayal_method = None\n port = 8888 # Default port to listen on\n canvas_width = 500\n canvas_height = 500\n grid_height = 0\n grid_width = 0\n\n max_steps = 100000\n\n model_args = ()\n model_kwargs = {}\n\n # Handlers and other globals:\n page_handler = (r'/', PageHandler)\n socket_handler = (r'/ws', SocketHandler)\n static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"})\n local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,\n {\"path\": ''})\n\n handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n settings = {\"debug\": True,\n \"template_path\": os.path.dirname(__file__) + \"/templates\"}\n\n def __init__(self, model_cls, visualization_elements, name=\"Mesa Model\",\n *args, **kwargs):\n \"\"\" Create a new visualization server with the given elements. \"\"\"\n # Prep visualization elements:\n self.visualization_elements = visualization_elements\n self.package_includes = set()\n self.local_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n self.package_includes.add(include_file)\n for include_file in element.local_includes:\n self.local_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n\n self.model_args = args\n self.model_kwargs = kwargs\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n def reset_model(self):\n \"\"\" Reinstantiate the model object, using the current parameters. \"\"\"\n self.model = self.model_cls(*self.model_args, **self.model_kwargs)\n\n def render_model(self):\n \"\"\" Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None):\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)\n print('Interface starting at {url}'.format(url=url))\n self.listen(self.port)\n webbrowser.open(url)\n tornado.ioloop.IOLoop.instance().start()\n", "path": "mesa/visualization/ModularVisualization.py"}]}
| 2,841 | 253 |
gh_patches_debug_38158
|
rasdani/github-patches
|
git_diff
|
Flexget__Flexget-171
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Input plugin "imdb_list" currently failing to fetch lists behind authentication
Message: `There was an error during imdb_list input (Unable to get imdb list: 404 Client Error: Not Found), using cache instead."`
Same issue as http://flexget.com/ticket/2313, but even with the most recent fix applied it still fails.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/input/imdb_list.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 import logging
3 import csv
4 import re
5 from cgi import parse_header
6
7 from flexget import plugin
8 from flexget.event import event
9 from flexget.utils import requests
10 from flexget.utils.imdb import make_url
11 from flexget.utils.cached_input import cached
12 from flexget.utils.tools import decode_html
13 from flexget.entry import Entry
14 from flexget.utils.soup import get_soup
15
16 log = logging.getLogger('imdb_list')
17
18 USER_ID_RE = r'^ur\d{7,8}$'
19
20
21 class ImdbList(object):
22 """"Creates an entry for each movie in your imdb list."""
23
24 schema = {
25 'type': 'object',
26 'properties': {
27 'user_id': {
28 'type': 'string',
29 'pattern': USER_ID_RE,
30 'error_pattern': 'user_id must be in the form urXXXXXXX'
31 },
32 'username': {'type': 'string'},
33 'password': {'type': 'string'},
34 'list': {'type': 'string'}
35 },
36 'required': ['list'],
37 'additionalProperties': False
38 }
39
40 @cached('imdb_list', persist='2 hours')
41 def on_task_input(self, task, config):
42 sess = requests.Session()
43 if config.get('username') and config.get('password'):
44
45 log.verbose('Logging in ...')
46
47 # Log in to imdb with our handler
48 params = {'login': config['username'], 'password': config['password']}
49 try:
50 # First get the login page so we can get the hidden input value
51 soup = get_soup(sess.get('https://secure.imdb.com/register-imdb/login').content)
52
53 # Fix for bs4 bug. see #2313 and github#118
54 auxsoup = soup.find('div', id='nb20').next_sibling.next_sibling
55 tag = auxsoup.find('input', attrs={'name': '49e6c'})
56 if tag:
57 params['49e6c'] = tag['value']
58 else:
59 log.warning('Unable to find required info for imdb login, maybe their login method has changed.')
60 # Now we do the actual login with appropriate parameters
61 r = sess.post('https://secure.imdb.com/register-imdb/login', data=params, raise_status=False)
62 except requests.RequestException as e:
63 raise plugin.PluginError('Unable to login to imdb: %s' % e.message)
64
65 # IMDb redirects us upon a successful login.
66 # removed - doesn't happen always?
67 # if r.status_code != 302:
68 # log.warning('It appears logging in to IMDb was unsuccessful.')
69
70 # try to automatically figure out user_id from watchlist redirect url
71 if not 'user_id' in config:
72 log.verbose('Getting user_id ...')
73 try:
74 response = sess.get('http://www.imdb.com/list/watchlist')
75 except requests.RequestException as e:
76 log.error('Error retrieving user ID from imdb: %s' % e.message)
77 user_id = ''
78 else:
79 log.debug('redirected to %s' % response.url)
80 user_id = response.url.split('/')[-2]
81 if re.match(USER_ID_RE, user_id):
82 config['user_id'] = user_id
83 else:
84 raise plugin.PluginError('Couldn\'t figure out user_id, please configure it manually.')
85
86 if not 'user_id' in config:
87 raise plugin.PluginError('Configuration option `user_id` required.')
88
89 log.verbose('Retrieving list %s ...' % config['list'])
90
91 # Get the imdb list in csv format
92 try:
93 url = 'http://www.imdb.com/list/export'
94 params = {'list_id': config['list'], 'author_id': config['user_id']}
95 log.debug('Requesting %s' % url)
96 opener = sess.get(url, params=params)
97 mime_type = parse_header(opener.headers['content-type'])[0]
98 log.debug('mime_type: %s' % mime_type)
99 if mime_type != 'text/csv':
100 raise plugin.PluginError('Didn\'t get CSV export as response. Probably specified list `%s` '
101 'does not exist.' % config['list'])
102 csv_rows = csv.reader(opener.iter_lines())
103 except requests.RequestException as e:
104 raise plugin.PluginError('Unable to get imdb list: %s' % e.message)
105
106 # Create an Entry for each movie in the list
107 entries = []
108 for row in csv_rows:
109 if not row or row[0] == 'position':
110 # Don't use blank rows or the headings row
111 continue
112 try:
113 title = decode_html(row[5]).decode('utf-8')
114 entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title))
115 except IndexError:
116 log.critical('IndexError! Unable to handle row: %s' % row)
117 return entries
118
119
120 @event('plugin.register')
121 def register_plugin():
122 plugin.register(ImdbList, 'imdb_list', api_ver=2)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/flexget/plugins/input/imdb_list.py b/flexget/plugins/input/imdb_list.py
--- a/flexget/plugins/input/imdb_list.py
+++ b/flexget/plugins/input/imdb_list.py
@@ -1,13 +1,13 @@
from __future__ import unicode_literals, division, absolute_import
import logging
-import csv
+import feedparser
import re
from cgi import parse_header
from flexget import plugin
from flexget.event import event
from flexget.utils import requests
-from flexget.utils.imdb import make_url
+from flexget.utils.imdb import make_url, extract_id
from flexget.utils.cached_input import cached
from flexget.utils.tools import decode_html
from flexget.entry import Entry
@@ -88,32 +88,27 @@
log.verbose('Retrieving list %s ...' % config['list'])
- # Get the imdb list in csv format
+ # Get the imdb list in RSS format
try:
- url = 'http://www.imdb.com/list/export'
- params = {'list_id': config['list'], 'author_id': config['user_id']}
+ if config['list'] in ['watchlist', 'ratings', 'checkins']:
+ url = 'http://rss.imdb.com/user/%s/%s' % (config['user_id'], config['list'])
+ else:
+ url = 'http://rss.imdb.com/list/%s' % config['list']
log.debug('Requesting %s' % url)
- opener = sess.get(url, params=params)
- mime_type = parse_header(opener.headers['content-type'])[0]
- log.debug('mime_type: %s' % mime_type)
- if mime_type != 'text/csv':
- raise plugin.PluginError('Didn\'t get CSV export as response. Probably specified list `%s` '
- 'does not exist.' % config['list'])
- csv_rows = csv.reader(opener.iter_lines())
+ try:
+ rss = feedparser.parse(url)
+ except LookupError as e:
+ raise plugin.PluginError('Failed to parse RSS feed for list `%s` correctly: %s' % (config['list'], e))
except requests.RequestException as e:
raise plugin.PluginError('Unable to get imdb list: %s' % e.message)
# Create an Entry for each movie in the list
entries = []
- for row in csv_rows:
- if not row or row[0] == 'position':
- # Don't use blank rows or the headings row
- continue
+ for entry in rss.entries:
try:
- title = decode_html(row[5]).decode('utf-8')
- entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title))
+ entries.append(Entry(title=entry.title, url=entry.link, imdb_id=extract_id(entry.link), imdb_name=entry.title))
except IndexError:
- log.critical('IndexError! Unable to handle row: %s' % row)
+ log.critical('IndexError! Unable to handle RSS entry: %s' % entry)
return entries
|
{"golden_diff": "diff --git a/flexget/plugins/input/imdb_list.py b/flexget/plugins/input/imdb_list.py\n--- a/flexget/plugins/input/imdb_list.py\n+++ b/flexget/plugins/input/imdb_list.py\n@@ -1,13 +1,13 @@\n from __future__ import unicode_literals, division, absolute_import\n import logging\n-import csv\n+import feedparser\n import re\n from cgi import parse_header\n \n from flexget import plugin\n from flexget.event import event\n from flexget.utils import requests\n-from flexget.utils.imdb import make_url\n+from flexget.utils.imdb import make_url, extract_id\n from flexget.utils.cached_input import cached\n from flexget.utils.tools import decode_html\n from flexget.entry import Entry\n@@ -88,32 +88,27 @@\n \n log.verbose('Retrieving list %s ...' % config['list'])\n \n- # Get the imdb list in csv format\n+ # Get the imdb list in RSS format\n try:\n- url = 'http://www.imdb.com/list/export'\n- params = {'list_id': config['list'], 'author_id': config['user_id']}\n+ if config['list'] in ['watchlist', 'ratings', 'checkins']:\n+ url = 'http://rss.imdb.com/user/%s/%s' % (config['user_id'], config['list'])\n+ else:\n+ url = 'http://rss.imdb.com/list/%s' % config['list']\n log.debug('Requesting %s' % url)\n- opener = sess.get(url, params=params)\n- mime_type = parse_header(opener.headers['content-type'])[0]\n- log.debug('mime_type: %s' % mime_type)\n- if mime_type != 'text/csv':\n- raise plugin.PluginError('Didn\\'t get CSV export as response. Probably specified list `%s` '\n- 'does not exist.' % config['list'])\n- csv_rows = csv.reader(opener.iter_lines())\n+ try:\n+ rss = feedparser.parse(url)\n+ except LookupError as e:\n+ raise plugin.PluginError('Failed to parse RSS feed for list `%s` correctly: %s' % (config['list'], e))\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to get imdb list: %s' % e.message)\n \n # Create an Entry for each movie in the list\n entries = []\n- for row in csv_rows:\n- if not row or row[0] == 'position':\n- # Don't use blank rows or the headings row\n- continue\n+ for entry in rss.entries:\n try:\n- title = decode_html(row[5]).decode('utf-8')\n- entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title))\n+ entries.append(Entry(title=entry.title, url=entry.link, imdb_id=extract_id(entry.link), imdb_name=entry.title))\n except IndexError:\n- log.critical('IndexError! Unable to handle row: %s' % row)\n+ log.critical('IndexError! 
Unable to handle RSS entry: %s' % entry)\n return entries\n", "issue": "Input plugin \"imdb_list\" currently failing to fetch lists behind authentication\nMessage: `There was an error during imdb_list input (Unable to get imdb list: 404 Client Error: Not Found), using cache instead.\"`\n\nSame issue as, http://flexget.com/ticket/2313 but even with the most recent fix applied it still fails.\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nimport csv\nimport re\nfrom cgi import parse_header\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils import requests\nfrom flexget.utils.imdb import make_url\nfrom flexget.utils.cached_input import cached\nfrom flexget.utils.tools import decode_html\nfrom flexget.entry import Entry\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('imdb_list')\n\nUSER_ID_RE = r'^ur\\d{7,8}$'\n\n\nclass ImdbList(object):\n \"\"\"\"Creates an entry for each movie in your imdb list.\"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'user_id': {\n 'type': 'string',\n 'pattern': USER_ID_RE,\n 'error_pattern': 'user_id must be in the form urXXXXXXX'\n },\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'list': {'type': 'string'}\n },\n 'required': ['list'],\n 'additionalProperties': False\n }\n\n @cached('imdb_list', persist='2 hours')\n def on_task_input(self, task, config):\n sess = requests.Session()\n if config.get('username') and config.get('password'):\n\n log.verbose('Logging in ...')\n\n # Log in to imdb with our handler\n params = {'login': config['username'], 'password': config['password']}\n try:\n # First get the login page so we can get the hidden input value\n soup = get_soup(sess.get('https://secure.imdb.com/register-imdb/login').content)\n\n # Fix for bs4 bug. see #2313 and github#118\n auxsoup = soup.find('div', id='nb20').next_sibling.next_sibling\n tag = auxsoup.find('input', attrs={'name': '49e6c'})\n if tag:\n params['49e6c'] = tag['value']\n else:\n log.warning('Unable to find required info for imdb login, maybe their login method has changed.')\n # Now we do the actual login with appropriate parameters\n r = sess.post('https://secure.imdb.com/register-imdb/login', data=params, raise_status=False)\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to login to imdb: %s' % e.message)\n\n # IMDb redirects us upon a successful login.\n # removed - doesn't happen always?\n # if r.status_code != 302:\n # log.warning('It appears logging in to IMDb was unsuccessful.')\n\n # try to automatically figure out user_id from watchlist redirect url\n if not 'user_id' in config:\n log.verbose('Getting user_id ...')\n try:\n response = sess.get('http://www.imdb.com/list/watchlist')\n except requests.RequestException as e:\n log.error('Error retrieving user ID from imdb: %s' % e.message)\n user_id = ''\n else:\n log.debug('redirected to %s' % response.url)\n user_id = response.url.split('/')[-2]\n if re.match(USER_ID_RE, user_id):\n config['user_id'] = user_id\n else:\n raise plugin.PluginError('Couldn\\'t figure out user_id, please configure it manually.')\n\n if not 'user_id' in config:\n raise plugin.PluginError('Configuration option `user_id` required.')\n\n log.verbose('Retrieving list %s ...' 
% config['list'])\n\n # Get the imdb list in csv format\n try:\n url = 'http://www.imdb.com/list/export'\n params = {'list_id': config['list'], 'author_id': config['user_id']}\n log.debug('Requesting %s' % url)\n opener = sess.get(url, params=params)\n mime_type = parse_header(opener.headers['content-type'])[0]\n log.debug('mime_type: %s' % mime_type)\n if mime_type != 'text/csv':\n raise plugin.PluginError('Didn\\'t get CSV export as response. Probably specified list `%s` '\n 'does not exist.' % config['list'])\n csv_rows = csv.reader(opener.iter_lines())\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to get imdb list: %s' % e.message)\n\n # Create an Entry for each movie in the list\n entries = []\n for row in csv_rows:\n if not row or row[0] == 'position':\n # Don't use blank rows or the headings row\n continue\n try:\n title = decode_html(row[5]).decode('utf-8')\n entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title))\n except IndexError:\n log.critical('IndexError! Unable to handle row: %s' % row)\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(ImdbList, 'imdb_list', api_ver=2)\n", "path": "flexget/plugins/input/imdb_list.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nimport feedparser\nimport re\nfrom cgi import parse_header\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils import requests\nfrom flexget.utils.imdb import make_url, extract_id\nfrom flexget.utils.cached_input import cached\nfrom flexget.utils.tools import decode_html\nfrom flexget.entry import Entry\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('imdb_list')\n\nUSER_ID_RE = r'^ur\\d{7,8}$'\n\n\nclass ImdbList(object):\n \"\"\"\"Creates an entry for each movie in your imdb list.\"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'user_id': {\n 'type': 'string',\n 'pattern': USER_ID_RE,\n 'error_pattern': 'user_id must be in the form urXXXXXXX'\n },\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'list': {'type': 'string'}\n },\n 'required': ['list'],\n 'additionalProperties': False\n }\n\n @cached('imdb_list', persist='2 hours')\n def on_task_input(self, task, config):\n sess = requests.Session()\n if config.get('username') and config.get('password'):\n\n log.verbose('Logging in ...')\n\n # Log in to imdb with our handler\n params = {'login': config['username'], 'password': config['password']}\n try:\n # First get the login page so we can get the hidden input value\n soup = get_soup(sess.get('https://secure.imdb.com/register-imdb/login').content)\n\n # Fix for bs4 bug. 
see #2313 and github#118\n auxsoup = soup.find('div', id='nb20').next_sibling.next_sibling\n tag = auxsoup.find('input', attrs={'name': '49e6c'})\n if tag:\n params['49e6c'] = tag['value']\n else:\n log.warning('Unable to find required info for imdb login, maybe their login method has changed.')\n # Now we do the actual login with appropriate parameters\n r = sess.post('https://secure.imdb.com/register-imdb/login', data=params, raise_status=False)\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to login to imdb: %s' % e.message)\n\n # IMDb redirects us upon a successful login.\n # removed - doesn't happen always?\n # if r.status_code != 302:\n # log.warning('It appears logging in to IMDb was unsuccessful.')\n\n # try to automatically figure out user_id from watchlist redirect url\n if not 'user_id' in config:\n log.verbose('Getting user_id ...')\n try:\n response = sess.get('http://www.imdb.com/list/watchlist')\n except requests.RequestException as e:\n log.error('Error retrieving user ID from imdb: %s' % e.message)\n user_id = ''\n else:\n log.debug('redirected to %s' % response.url)\n user_id = response.url.split('/')[-2]\n if re.match(USER_ID_RE, user_id):\n config['user_id'] = user_id\n else:\n raise plugin.PluginError('Couldn\\'t figure out user_id, please configure it manually.')\n\n if not 'user_id' in config:\n raise plugin.PluginError('Configuration option `user_id` required.')\n\n log.verbose('Retrieving list %s ...' % config['list'])\n\n # Get the imdb list in RSS format\n try:\n if config['list'] in ['watchlist', 'ratings', 'checkins']:\n url = 'http://rss.imdb.com/user/%s/%s' % (config['user_id'], config['list'])\n else:\n url = 'http://rss.imdb.com/list/%s' % config['list']\n log.debug('Requesting %s' % url)\n try:\n rss = feedparser.parse(url)\n except LookupError as e:\n raise plugin.PluginError('Failed to parse RSS feed for list `%s` correctly: %s' % (config['list'], e))\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to get imdb list: %s' % e.message)\n\n # Create an Entry for each movie in the list\n entries = []\n for entry in rss.entries:\n try:\n entries.append(Entry(title=entry.title, url=entry.link, imdb_id=extract_id(entry.link), imdb_name=entry.title))\n except IndexError:\n log.critical('IndexError! Unable to handle RSS entry: %s' % entry)\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(ImdbList, 'imdb_list', api_ver=2)\n", "path": "flexget/plugins/input/imdb_list.py"}]}
| 1,708 | 704 |
gh_patches_debug_18491
|
rasdani/github-patches
|
git_diff
|
pretalx__pretalx-464
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Setting speaker availability to "all day" for the last day of the conference triggers an error message of "Submitted availability is not within the event timeframe."
## Expected Behavior
Setting the availability to "All Day" should be possible for all days of the conference, even if it ends at some point during this day.
## Current Behavior
Setting the availability to "All Day" on all days of the conference (in our case Monday to Sunday) triggers an error message of "Submitted availability is not within the event timeframe."
## Steps to Reproduce
1. Set the conference date to start at 2018-10-22 and end at 2018-10-28
2. Edit a speaker
3. Set the availability to all day for all conference days
4. At the bottom of the page, you see the error message "Submitted availability is not within the event timeframe."
If needed, we can provide a speaker account on the instance for testing.
## Your Environment
* Version used: 0.7.1
* Environment name and version (e.g. Chrome 39, python 3.5): Firefox 61
* Operating System and version (desktop or mobile): Linux Desktop (Debian Unstable)
* Link to your instance, if in production: https://conference.c3w.at
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretalx/schedule/forms.py`
Content:
```
1 import datetime
2 import json
3
4 import django.forms as forms
5 import pytz
6 from django.db import transaction
7 from django.utils.dateparse import parse_datetime
8 from django.utils.translation import ugettext_lazy as _
9 from i18nfield.forms import I18nModelForm
10
11 from pretalx.common.mixins.forms import ReadOnlyFlag
12 from pretalx.schedule.models import Availability, Room
13
14
15 class AvailabilitiesFormMixin(forms.Form):
16 availabilities = forms.CharField(
17 label=_('Availability'),
18 help_text=_('Please click and drag to mark the availability during the conference.'),
19 widget=forms.TextInput(attrs={'class': 'availabilities-editor-data'}),
20 required=False,
21 )
22
23 def _serialize(self, event, instance):
24 if instance:
25 availabilities = [
26 avail.serialize()
27 for avail in instance.availabilities.all()
28 ]
29 else:
30 availabilities = []
31
32 return json.dumps({
33 'availabilities': availabilities,
34 'event': {
35 'timezone': event.timezone,
36 'date_from': str(event.date_from),
37 'date_to': str(event.date_to),
38 }
39 })
40
41 def __init__(self, *args, event=None, **kwargs):
42 self.event = event
43 initial = kwargs.pop('initial', dict())
44 initial['availabilities'] = self._serialize(self.event, kwargs['instance'])
45 kwargs['initial'] = initial
46 super().__init__(*args, **kwargs)
47
48 def _parse_availabilities_json(self, jsonavailabilities):
49 try:
50 rawdata = json.loads(jsonavailabilities)
51 except ValueError:
52 raise forms.ValidationError("Submitted availabilities are not valid json.")
53
54 try:
55 assert isinstance(rawdata, dict)
56 availabilities = rawdata['availabilities']
57 assert isinstance(availabilities, list)
58 return availabilities
59 except (ValueError, AssertionError, LookupError):
60 raise forms.ValidationError("Submitted json does not comply with format.")
61
62 def _parse_datetime(self, strdate):
63 tz = pytz.timezone(self.event.timezone)
64
65 obj = parse_datetime(strdate)
66 assert obj
67 if obj.tzinfo is None:
68 obj = tz.localize(obj)
69
70 return obj
71
72 def _validate_availability(self, rawavail):
73 try:
74 assert isinstance(rawavail, dict)
75 rawavail.pop('id', None)
76 rawavail.pop('allDay', None)
77 assert len(rawavail) == 2
78 assert 'start' in rawavail
79 assert 'end' in rawavail
80 except AssertionError:
81 raise forms.ValidationError("Submitted availability does not comply with format.")
82
83 try:
84 rawavail['start'] = self._parse_datetime(rawavail['start'])
85 rawavail['end'] = self._parse_datetime(rawavail['end'])
86 except (AssertionError, TypeError, ValueError):
87 raise forms.ValidationError("Submitted availability contains an invalid date.")
88
89 tz = pytz.timezone(self.event.timezone)
90
91 try:
92 timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))
93 assert rawavail['start'] >= timeframe_start
94 timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time()))
95 timeframe_end += datetime.timedelta(days=1)
96 assert rawavail['end'] <= timeframe_end
97 except AssertionError:
98 raise forms.ValidationError("Submitted availability is not within the event timeframe.")
99
100 def clean_availabilities(self):
101 if self.cleaned_data['availabilities'] == '':
102 return None
103
104 rawavailabilities = self._parse_availabilities_json(self.cleaned_data['availabilities'])
105 availabilities = []
106
107 for rawavail in rawavailabilities:
108 self._validate_availability(rawavail)
109 availabilities.append(Availability(event_id=self.event.id, **rawavail))
110
111 return availabilities
112
113 def _set_foreignkeys(self, instance, availabilities):
114 """
115 Set the reference to `instance` in each given availability.
116
117 For example, set the availabilitiy.room_id to instance.id, in case instance of type Room.
118 """
119 reference_name = instance.availabilities.field.name + '_id'
120
121 for avail in availabilities:
122 setattr(avail, reference_name, instance.id)
123
124 def _replace_availabilities(self, instance, availabilities):
125 with transaction.atomic():
126 # TODO: do not recreate objects unnecessarily, give the client the IDs, so we can track modifications and leave unchanged objects alone
127 instance.availabilities.all().delete()
128 Availability.objects.bulk_create(availabilities)
129
130 def save(self, *args, **kwargs):
131 instance = super().save(*args, **kwargs)
132 availabilities = self.cleaned_data['availabilities']
133
134 if availabilities is not None:
135 self._set_foreignkeys(instance, availabilities)
136 self._replace_availabilities(instance, availabilities)
137
138 return instance
139
140
141 class RoomForm(AvailabilitiesFormMixin, ReadOnlyFlag, I18nModelForm):
142
143 def __init__(self, *args, **kwargs):
144 super().__init__(*args, **kwargs)
145 self.fields['name'].widget.attrs['placeholder'] = _('Room I')
146 self.fields['description'].widget.attrs['placeholder'] = _('Description, e.g.: Our main meeting place, Room I, enter from the right.')
147 self.fields['speaker_info'].widget.attrs['placeholder'] = _('Information for speakers, e.g.: Projector has only HDMI input.')
148 self.fields['capacity'].widget.attrs['placeholder'] = '300'
149
150 class Meta:
151 model = Room
152 fields = ['name', 'description', 'speaker_info', 'capacity', 'position']
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pretalx/schedule/forms.py b/src/pretalx/schedule/forms.py
--- a/src/pretalx/schedule/forms.py
+++ b/src/pretalx/schedule/forms.py
@@ -91,8 +91,11 @@
try:
timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))
assert rawavail['start'] >= timeframe_start
- timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time()))
- timeframe_end += datetime.timedelta(days=1)
+
+ # add 1 day, not 24 hours, https://stackoverflow.com/a/25427822/2486196
+ timeframe_end = datetime.datetime.combine(self.event.date_to, datetime.time())
+ timeframe_end = timeframe_end + datetime.timedelta(days=1)
+ timeframe_end = tz.localize(timeframe_end, is_dst=None)
assert rawavail['end'] <= timeframe_end
except AssertionError:
raise forms.ValidationError("Submitted availability is not within the event timeframe.")
|
{"golden_diff": "diff --git a/src/pretalx/schedule/forms.py b/src/pretalx/schedule/forms.py\n--- a/src/pretalx/schedule/forms.py\n+++ b/src/pretalx/schedule/forms.py\n@@ -91,8 +91,11 @@\n try:\n timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))\n assert rawavail['start'] >= timeframe_start\n- timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time()))\n- timeframe_end += datetime.timedelta(days=1)\n+\n+ # add 1 day, not 24 hours, https://stackoverflow.com/a/25427822/2486196\n+ timeframe_end = datetime.datetime.combine(self.event.date_to, datetime.time())\n+ timeframe_end = timeframe_end + datetime.timedelta(days=1)\n+ timeframe_end = tz.localize(timeframe_end, is_dst=None)\n assert rawavail['end'] <= timeframe_end\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability is not within the event timeframe.\")\n", "issue": "Setting speaker availibilty to \"all day\" for the last day of the conference triggers an error message of \"Submitted availability is not within the event timeframe.\"\n## Expected Behavior\r\nSetting the availibilty to \"All Day\" should be possible for all days of the conference, even if it ends at some point during this day.\r\n\r\n## Current Behavior\r\nSetting the availibility to \"All Day\" on all days of the conference (in our case monday to sunday) triggers an error message of \"Submitted availability is not within the event timeframe.\"\r\n\r\n\r\n## Steps to Reproduce\r\n\r\n1. Set the conference date to start at 2018-10-22 and end at 2018-10-28\r\n2. Edit a speaker\r\n3. Set the availability to all day for all conference days\r\n4. On the bottom of the page, you the the error message \"Submitted availability is not within the event timeframe.\"\r\n\r\nif needed, we can provide a speaker account on the instance for testing.\r\n\r\n## Your Environment\r\n* Version used: 0.7.1\r\n* Environment name and version (e.g. 
Chrome 39, python 3.5): Firefox 61\r\n* Operating System and version (desktop or mobile): Linux Desktop (Debian Unstable)\r\n* Link to your instance, if in production: https://conference.c3w.at\r\n\n", "before_files": [{"content": "import datetime\nimport json\n\nimport django.forms as forms\nimport pytz\nfrom django.db import transaction\nfrom django.utils.dateparse import parse_datetime\nfrom django.utils.translation import ugettext_lazy as _\nfrom i18nfield.forms import I18nModelForm\n\nfrom pretalx.common.mixins.forms import ReadOnlyFlag\nfrom pretalx.schedule.models import Availability, Room\n\n\nclass AvailabilitiesFormMixin(forms.Form):\n availabilities = forms.CharField(\n label=_('Availability'),\n help_text=_('Please click and drag to mark the availability during the conference.'),\n widget=forms.TextInput(attrs={'class': 'availabilities-editor-data'}),\n required=False,\n )\n\n def _serialize(self, event, instance):\n if instance:\n availabilities = [\n avail.serialize()\n for avail in instance.availabilities.all()\n ]\n else:\n availabilities = []\n\n return json.dumps({\n 'availabilities': availabilities,\n 'event': {\n 'timezone': event.timezone,\n 'date_from': str(event.date_from),\n 'date_to': str(event.date_to),\n }\n })\n\n def __init__(self, *args, event=None, **kwargs):\n self.event = event\n initial = kwargs.pop('initial', dict())\n initial['availabilities'] = self._serialize(self.event, kwargs['instance'])\n kwargs['initial'] = initial\n super().__init__(*args, **kwargs)\n\n def _parse_availabilities_json(self, jsonavailabilities):\n try:\n rawdata = json.loads(jsonavailabilities)\n except ValueError:\n raise forms.ValidationError(\"Submitted availabilities are not valid json.\")\n\n try:\n assert isinstance(rawdata, dict)\n availabilities = rawdata['availabilities']\n assert isinstance(availabilities, list)\n return availabilities\n except (ValueError, AssertionError, LookupError):\n raise forms.ValidationError(\"Submitted json does not comply with format.\")\n\n def _parse_datetime(self, strdate):\n tz = pytz.timezone(self.event.timezone)\n\n obj = parse_datetime(strdate)\n assert obj\n if obj.tzinfo is None:\n obj = tz.localize(obj)\n\n return obj\n\n def _validate_availability(self, rawavail):\n try:\n assert isinstance(rawavail, dict)\n rawavail.pop('id', None)\n rawavail.pop('allDay', None)\n assert len(rawavail) == 2\n assert 'start' in rawavail\n assert 'end' in rawavail\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability does not comply with format.\")\n\n try:\n rawavail['start'] = self._parse_datetime(rawavail['start'])\n rawavail['end'] = self._parse_datetime(rawavail['end'])\n except (AssertionError, TypeError, ValueError):\n raise forms.ValidationError(\"Submitted availability contains an invalid date.\")\n\n tz = pytz.timezone(self.event.timezone)\n\n try:\n timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))\n assert rawavail['start'] >= timeframe_start\n timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time()))\n timeframe_end += datetime.timedelta(days=1)\n assert rawavail['end'] <= timeframe_end\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability is not within the event timeframe.\")\n\n def clean_availabilities(self):\n if self.cleaned_data['availabilities'] == '':\n return None\n\n rawavailabilities = self._parse_availabilities_json(self.cleaned_data['availabilities'])\n availabilities = []\n\n for rawavail in 
rawavailabilities:\n self._validate_availability(rawavail)\n availabilities.append(Availability(event_id=self.event.id, **rawavail))\n\n return availabilities\n\n def _set_foreignkeys(self, instance, availabilities):\n \"\"\"\n Set the reference to `instance` in each given availability.\n\n For example, set the availabilitiy.room_id to instance.id, in case instance of type Room.\n \"\"\"\n reference_name = instance.availabilities.field.name + '_id'\n\n for avail in availabilities:\n setattr(avail, reference_name, instance.id)\n\n def _replace_availabilities(self, instance, availabilities):\n with transaction.atomic():\n # TODO: do not recreate objects unnecessarily, give the client the IDs, so we can track modifications and leave unchanged objects alone\n instance.availabilities.all().delete()\n Availability.objects.bulk_create(availabilities)\n\n def save(self, *args, **kwargs):\n instance = super().save(*args, **kwargs)\n availabilities = self.cleaned_data['availabilities']\n\n if availabilities is not None:\n self._set_foreignkeys(instance, availabilities)\n self._replace_availabilities(instance, availabilities)\n\n return instance\n\n\nclass RoomForm(AvailabilitiesFormMixin, ReadOnlyFlag, I18nModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs['placeholder'] = _('Room I')\n self.fields['description'].widget.attrs['placeholder'] = _('Description, e.g.: Our main meeting place, Room I, enter from the right.')\n self.fields['speaker_info'].widget.attrs['placeholder'] = _('Information for speakers, e.g.: Projector has only HDMI input.')\n self.fields['capacity'].widget.attrs['placeholder'] = '300'\n\n class Meta:\n model = Room\n fields = ['name', 'description', 'speaker_info', 'capacity', 'position']\n", "path": "src/pretalx/schedule/forms.py"}], "after_files": [{"content": "import datetime\nimport json\n\nimport django.forms as forms\nimport pytz\nfrom django.db import transaction\nfrom django.utils.dateparse import parse_datetime\nfrom django.utils.translation import ugettext_lazy as _\nfrom i18nfield.forms import I18nModelForm\n\nfrom pretalx.common.mixins.forms import ReadOnlyFlag\nfrom pretalx.schedule.models import Availability, Room\n\n\nclass AvailabilitiesFormMixin(forms.Form):\n availabilities = forms.CharField(\n label=_('Availability'),\n help_text=_('Please click and drag to mark the availability during the conference.'),\n widget=forms.TextInput(attrs={'class': 'availabilities-editor-data'}),\n required=False,\n )\n\n def _serialize(self, event, instance):\n if instance:\n availabilities = [\n avail.serialize()\n for avail in instance.availabilities.all()\n ]\n else:\n availabilities = []\n\n return json.dumps({\n 'availabilities': availabilities,\n 'event': {\n 'timezone': event.timezone,\n 'date_from': str(event.date_from),\n 'date_to': str(event.date_to),\n }\n })\n\n def __init__(self, *args, event=None, **kwargs):\n self.event = event\n initial = kwargs.pop('initial', dict())\n initial['availabilities'] = self._serialize(self.event, kwargs['instance'])\n kwargs['initial'] = initial\n super().__init__(*args, **kwargs)\n\n def _parse_availabilities_json(self, jsonavailabilities):\n try:\n rawdata = json.loads(jsonavailabilities)\n except ValueError:\n raise forms.ValidationError(\"Submitted availabilities are not valid json.\")\n\n try:\n assert isinstance(rawdata, dict)\n availabilities = rawdata['availabilities']\n assert isinstance(availabilities, list)\n return availabilities\n except (ValueError, 
AssertionError, LookupError):\n raise forms.ValidationError(\"Submitted json does not comply with format.\")\n\n def _parse_datetime(self, strdate):\n tz = pytz.timezone(self.event.timezone)\n\n obj = parse_datetime(strdate)\n assert obj\n if obj.tzinfo is None:\n obj = tz.localize(obj)\n\n return obj\n\n def _validate_availability(self, rawavail):\n try:\n assert isinstance(rawavail, dict)\n rawavail.pop('id', None)\n rawavail.pop('allDay', None)\n assert len(rawavail) == 2\n assert 'start' in rawavail\n assert 'end' in rawavail\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability does not comply with format.\")\n\n try:\n rawavail['start'] = self._parse_datetime(rawavail['start'])\n rawavail['end'] = self._parse_datetime(rawavail['end'])\n except (AssertionError, TypeError, ValueError):\n raise forms.ValidationError(\"Submitted availability contains an invalid date.\")\n\n tz = pytz.timezone(self.event.timezone)\n\n try:\n timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))\n assert rawavail['start'] >= timeframe_start\n\n # add 1 day, not 24 hours, https://stackoverflow.com/a/25427822/2486196\n timeframe_end = datetime.datetime.combine(self.event.date_to, datetime.time())\n timeframe_end = timeframe_end + datetime.timedelta(days=1)\n timeframe_end = tz.localize(timeframe_end, is_dst=None)\n assert rawavail['end'] <= timeframe_end\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability is not within the event timeframe.\")\n\n def clean_availabilities(self):\n if self.cleaned_data['availabilities'] == '':\n return None\n\n rawavailabilities = self._parse_availabilities_json(self.cleaned_data['availabilities'])\n availabilities = []\n\n for rawavail in rawavailabilities:\n self._validate_availability(rawavail)\n availabilities.append(Availability(event_id=self.event.id, **rawavail))\n\n return availabilities\n\n def _set_foreignkeys(self, instance, availabilities):\n \"\"\"\n Set the reference to `instance` in each given availability.\n\n For example, set the availabilitiy.room_id to instance.id, in case instance of type Room.\n \"\"\"\n reference_name = instance.availabilities.field.name + '_id'\n\n for avail in availabilities:\n setattr(avail, reference_name, instance.id)\n\n def _replace_availabilities(self, instance, availabilities):\n with transaction.atomic():\n # TODO: do not recreate objects unnecessarily, give the client the IDs, so we can track modifications and leave unchanged objects alone\n instance.availabilities.all().delete()\n Availability.objects.bulk_create(availabilities)\n\n def save(self, *args, **kwargs):\n instance = super().save(*args, **kwargs)\n availabilities = self.cleaned_data['availabilities']\n\n if availabilities is not None:\n self._set_foreignkeys(instance, availabilities)\n self._replace_availabilities(instance, availabilities)\n\n return instance\n\n\nclass RoomForm(AvailabilitiesFormMixin, ReadOnlyFlag, I18nModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs['placeholder'] = _('Room I')\n self.fields['description'].widget.attrs['placeholder'] = _('Description, e.g.: Our main meeting place, Room I, enter from the right.')\n self.fields['speaker_info'].widget.attrs['placeholder'] = _('Information for speakers, e.g.: Projector has only HDMI input.')\n self.fields['capacity'].widget.attrs['placeholder'] = '300'\n\n class Meta:\n model = Room\n fields = ['name', 'description', 'speaker_info', 
'capacity', 'position']\n", "path": "src/pretalx/schedule/forms.py"}]}
| 2,087 | 236 |
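Before the next record, a brief editorial aside on the patch above. The sketch below is not part of the dataset record or of the pretalx code base; it only illustrates the "add 1 day, not 24 hours" comment from the golden diff. It assumes `pytz` and the `Europe/Berlin` timezone (the 2018 fall-back date matches the event dates in the issue), and all variable names are made up for the example.

```python
import datetime
import pytz

tz = pytz.timezone('Europe/Berlin')
# Last event day from the issue; DST ends at 03:00 local time on this date.
day_start = datetime.datetime.combine(datetime.date(2018, 10, 28), datetime.time())

# Pre-patch order: localize first, then add a day. The +02:00 (CEST) offset
# captured at midnight of the 28th is carried over to the 29th, where CET applies.
buggy_end = tz.localize(day_start) + datetime.timedelta(days=1)

# Patched order: add the day to the naive datetime, then localize the result.
fixed_end = tz.localize(day_start + datetime.timedelta(days=1), is_dst=None)

print(buggy_end)              # 2018-10-29 00:00:00+02:00
print(fixed_end)              # 2018-10-29 00:00:00+01:00
print(fixed_end - buggy_end)  # 1:00:00
# An "all day" availability ending at local midnight of the 29th localizes to
# the +01:00 offset, so it compares greater than buggy_end and trips the
# "not within the event timeframe" assertion.
```

The rule the patch follows: do the calendar arithmetic on naive datetimes and localize last, otherwise the offset captured before a DST transition leaks into the boundary value.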
gh_patches_debug_19551
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-1074
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CommandHandler should not check filters if not listening to the command
I have a filter to restrict some users from call some commands. I call `AdminFilter` and the relevant code can be seen below (actually, I check the admin users from a database).
```python
class AdminFilter(BaseFilter):
def __init__(self, *admin_users):
self.admin_users = admin_users
def filter(self, message):
if message.from_user.username in self.admin_users:
return True
else:
message.reply_text("You're not an admin!", quote=True)
return False
```
With this filter applied, when a user who is not an admin calls the forbidden command, they receive a warning reply. And here is the issue.
### Steps to reproduce
The following code illustrates the problem. In this scenario I have an administrative command `/jungle` and a normal command `/rain`.
If I am the admin, I can call any command and see no issue, but if I am not an admin, I receive the warning for both commands.
```python
def jungle(bot, update):
bot.send_message(update.message.chat_id, 'Welcome to the Jungle!')
def rain(bot, update):
bot.send_message(update.message.chat_id, 'November Rain...')
dispatcher.add_handler(CommandHandler('jungle', jungle, AdminFilter('wagnermacedo')))
dispatcher.add_handler(CommandHandler('rain', rain))
```
### Expected behaviour
Suppose I'm not the bot administrator; then, in a conversation with the bot, I expect the following:
```
me: /jungle
bot: > reply /jungle
You're not an admin!
```
```
me: /rain
bot: November Rain...
```
### Actual behaviour
What happens instead is that the bot replies that I'm not an admin for both commands.
For `/jungle` this is okay, since it was expected:
```
me: /jungle
bot: > reply /jungle
You're not an admin!
```
But `/rain` hasn't the admin filter, so it should not reply the warning.
```
me: /rain
bot: > reply /rain
You're not an admin!
bot: November Rain...
```
:point_up_2: Surprisingly, the command still works, even though the bot replied that I'm not an admin...
### Configuration
**Version of Python, python-telegram-bot & dependencies:**
I tested with the last version from master (59659ea).
```
$ python -m telegram
python-telegram-bot 10.0.1
certifi 2018.01.18
future 0.16.0
Python 3.4.8 (default, Apr 13 2018, 16:18:01) [GCC 5.4.0 20160609]
```
### Fix
I have already fixed the issue on my machine; I will create a pull request soon.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/ext/commandhandler.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the CommandHandler class."""
20 import warnings
21
22 from future.utils import string_types
23
24 from .handler import Handler
25 from telegram import Update
26
27
28 class CommandHandler(Handler):
29 """Handler class to handle Telegram commands.
30
31 Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the
32 bot's name and/or some additional text.
33
34 Attributes:
35 command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler
36 should listen for.
37 callback (:obj:`callable`): The callback function for this handler.
38 filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these
39 Filters.
40 allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept
41 edited messages.
42 pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed
43 ``args``.
44 pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be
45 passed to the callback function.
46 pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to
47 the callback function.
48 pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to
49 the callback function.
50 pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to
51 the callback function.
52
53 Note:
54 :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you
55 can use to keep any data in will be sent to the :attr:`callback` function.. Related to
56 either the user or the chat that the update was sent in. For each update from the same user
57 or in the same chat, it will be the same ``dict``.
58
59 Args:
60 command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler
61 should listen for.
62 callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.
63 It will be called when the :attr:`check_update` has determined that an update should be
64 processed by this handler.
65 filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from
66 :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in
67 :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise
68 operators (& for and, | for or, ~ for not).
69 allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept
70 edited messages. Default is ``False``.
71 pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the
72 arguments passed to the command as a keyword argument called ``args``. It will contain
73 a list of strings, which is the text following the command split on single or
74 consecutive whitespace characters. Default is ``False``
75 pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
76 ``update_queue`` will be passed to the callback function. It will be the ``Queue``
77 instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`
78 that contains new updates which can be used to insert updates. Default is ``False``.
79 pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
80 ``job_queue`` will be passed to the callback function. It will be a
81 :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`
82 which can be used to schedule new jobs. Default is ``False``.
83 pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
84 ``user_data`` will be passed to the callback function. Default is ``False``.
85 pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
86 ``chat_data`` will be passed to the callback function. Default is ``False``.
87
88 """
89
90 def __init__(self,
91 command,
92 callback,
93 filters=None,
94 allow_edited=False,
95 pass_args=False,
96 pass_update_queue=False,
97 pass_job_queue=False,
98 pass_user_data=False,
99 pass_chat_data=False):
100 super(CommandHandler, self).__init__(
101 callback,
102 pass_update_queue=pass_update_queue,
103 pass_job_queue=pass_job_queue,
104 pass_user_data=pass_user_data,
105 pass_chat_data=pass_chat_data)
106
107 if isinstance(command, string_types):
108 self.command = [command.lower()]
109 else:
110 self.command = [x.lower() for x in command]
111 self.filters = filters
112 self.allow_edited = allow_edited
113 self.pass_args = pass_args
114
115 # We put this up here instead of with the rest of checking code
116 # in check_update since we don't wanna spam a ton
117 if isinstance(self.filters, list):
118 warnings.warn('Using a list of filters in MessageHandler is getting '
119 'deprecated, please use bitwise operators (& and |) '
120 'instead. More info: https://git.io/vPTbc.')
121
122 def check_update(self, update):
123 """Determines whether an update should be passed to this handlers :attr:`callback`.
124
125 Args:
126 update (:class:`telegram.Update`): Incoming telegram update.
127
128 Returns:
129 :obj:`bool`
130
131 """
132 if (isinstance(update, Update)
133 and (update.message or update.edited_message and self.allow_edited)):
134 message = update.message or update.edited_message
135
136 if message.text and message.text.startswith('/') and len(message.text) > 1:
137 first_word = message.text_html.split(None, 1)[0]
138 if len(first_word) > 1 and first_word.startswith('/'):
139 command = first_word[1:].split('@')
140 command.append(
141 message.bot.username) # in case the command was sent without a username
142
143 if self.filters is None:
144 res = True
145 elif isinstance(self.filters, list):
146 res = any(func(message) for func in self.filters)
147 else:
148 res = self.filters(message)
149
150 return res and (command[0].lower() in self.command
151 and command[1].lower() == message.bot.username.lower())
152
153 return False
154
155 def handle_update(self, update, dispatcher):
156 """Send the update to the :attr:`callback`.
157
158 Args:
159 update (:class:`telegram.Update`): Incoming telegram update.
160 dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.
161
162 """
163 optional_args = self.collect_optional_args(dispatcher, update)
164
165 message = update.message or update.edited_message
166
167 if self.pass_args:
168 optional_args['args'] = message.text.split()[1:]
169
170 return self.callback(dispatcher.bot, update, **optional_args)
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/telegram/ext/commandhandler.py b/telegram/ext/commandhandler.py
--- a/telegram/ext/commandhandler.py
+++ b/telegram/ext/commandhandler.py
@@ -140,6 +140,10 @@
command.append(
message.bot.username) # in case the command was sent without a username
+ if not (command[0].lower() in self.command
+ and command[1].lower() == message.bot.username.lower()):
+ return False
+
if self.filters is None:
res = True
elif isinstance(self.filters, list):
@@ -147,8 +151,7 @@
else:
res = self.filters(message)
- return res and (command[0].lower() in self.command
- and command[1].lower() == message.bot.username.lower())
+ return res
return False
|
{"golden_diff": "diff --git a/telegram/ext/commandhandler.py b/telegram/ext/commandhandler.py\n--- a/telegram/ext/commandhandler.py\n+++ b/telegram/ext/commandhandler.py\n@@ -140,6 +140,10 @@\n command.append(\n message.bot.username) # in case the command was sent without a username\n \n+ if not (command[0].lower() in self.command\n+ and command[1].lower() == message.bot.username.lower()):\n+ return False\n+\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n@@ -147,8 +151,7 @@\n else:\n res = self.filters(message)\n \n- return res and (command[0].lower() in self.command\n- and command[1].lower() == message.bot.username.lower())\n+ return res\n \n return False\n", "issue": "CommandHandler should not check filters if not listening to the command\nI have a filter to restrict some users from call some commands. I call `AdminFilter` and the relevant code can be seen below (actually, I check the admin users from a database).\r\n\r\n```python\r\nclass AdminFilter(BaseFilter):\r\n def __init__(self, *admin_users):\r\n self.admin_users = admin_users\r\n\r\n def filter(self, message):\r\n if message.from_user.username in self.admin_users:\r\n return True\r\n else:\r\n message.reply_text(\"You're not an admin!\", quote=True)\r\n return False\r\n```\r\n\r\nWith this filter applied, when an user that is not an admin calls the forbidden command, then receives a reply warning. And here is the issue.\r\n\r\n### Steps to reproduce\r\nThe following code illustrate the problem. In this scenario I have an administrative command `/jungle` and a normal command `/rain`.\r\n\r\nIf I am the admin, I call any command and see no issue, but If am not admin, I receive the warning for both commands.\r\n\r\n```python\r\ndef jungle(bot, update):\r\n bot.send_message(update.message.chat_id, 'Welcome to the Jungle!')\r\n\r\ndef rain(bot, update):\r\n bot.send_message(update.message.chat_id, 'November Rain...')\r\n\r\ndispatcher.add_handler(CommandHandler('jungle', jungle, AdminFilter('wagnermacedo')))\r\ndispatcher.add_handler(CommandHandler('rain', rain))\r\n```\r\n\r\n### Expected behaviour\r\nSuppose I'm not the bot administrator, then in a conversation with the bot, I expect the following:\r\n\r\n```\r\nme: /jungle\r\nbot: > reply /jungle\r\n You're not an admin!\r\n```\r\n```\r\nme: /rain\r\nbot: November Rain...\r\n```\r\n\r\n### Actual behaviour\r\nWhat happens instead is that bot reply that I'm not an admin for both commands.\r\n\r\nFor `/jungle`, it's okay, it was expected\r\n```\r\nme: /jungle\r\nbot: > reply /jungle\r\n You're not an admin!\r\n```\r\n\r\nBut `/rain` hasn't the admin filter, so it should not reply the warning.\r\n```\r\nme: /rain\r\nbot: > reply /rain\r\n You're not an admin!\r\nbot: November Rain...\r\n```\r\n\r\n:point_up_2: Surprisingly the command still works, even replying I'm not admin...\r\n\r\n### Configuration\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\nI tested with the last version from master (59659ea).\r\n\r\n```\r\n$ python -m telegram\r\npython-telegram-bot 10.0.1\r\ncertifi 2018.01.18\r\nfuture 0.16.0\r\nPython 3.4.8 (default, Apr 13 2018, 16:18:01) [GCC 5.4.0 20160609]\r\n```\r\n\r\n### Fix\r\nI have already fixed the issue in my machine, I'm going soon to create a pull request.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can 
redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport warnings\n\nfrom future.utils import string_types\n\nfrom .handler import Handler\nfrom telegram import Update\n\n\nclass CommandHandler(Handler):\n \"\"\"Handler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text.\n\n Attributes:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): The callback function for this handler.\n filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these\n Filters.\n allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept\n edited messages.\n pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed\n ``args``.\n pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be\n passed to the callback function.\n pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to\n the callback function.\n pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to\n the callback function.\n pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to\n the callback function.\n\n Note:\n :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you\n can use to keep any data in will be sent to the :attr:`callback` function.. Related to\n either the user or the chat that the update was sent in. For each update from the same user\n or in the same chat, it will be the same ``dict``.\n\n Args:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.\n It will be called when the :attr:`check_update` has determined that an update should be\n processed by this handler.\n filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise\n operators (& for and, | for or, ~ for not).\n allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept\n edited messages. Default is ``False``.\n pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the\n arguments passed to the command as a keyword argument called ``args``. It will contain\n a list of strings, which is the text following the command split on single or\n consecutive whitespace characters. Default is ``False``\n pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``update_queue`` will be passed to the callback function. 
It will be the ``Queue``\n instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`\n that contains new updates which can be used to insert updates. Default is ``False``.\n pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``job_queue`` will be passed to the callback function. It will be a\n :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`\n which can be used to schedule new jobs. Default is ``False``.\n pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``user_data`` will be passed to the callback function. Default is ``False``.\n pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``chat_data`` will be passed to the callback function. Default is ``False``.\n\n \"\"\"\n\n def __init__(self,\n command,\n callback,\n filters=None,\n allow_edited=False,\n pass_args=False,\n pass_update_queue=False,\n pass_job_queue=False,\n pass_user_data=False,\n pass_chat_data=False):\n super(CommandHandler, self).__init__(\n callback,\n pass_update_queue=pass_update_queue,\n pass_job_queue=pass_job_queue,\n pass_user_data=pass_user_data,\n pass_chat_data=pass_chat_data)\n\n if isinstance(command, string_types):\n self.command = [command.lower()]\n else:\n self.command = [x.lower() for x in command]\n self.filters = filters\n self.allow_edited = allow_edited\n self.pass_args = pass_args\n\n # We put this up here instead of with the rest of checking code\n # in check_update since we don't wanna spam a ton\n if isinstance(self.filters, list):\n warnings.warn('Using a list of filters in MessageHandler is getting '\n 'deprecated, please use bitwise operators (& and |) '\n 'instead. More info: https://git.io/vPTbc.')\n\n def check_update(self, update):\n \"\"\"Determines whether an update should be passed to this handlers :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if (isinstance(update, Update)\n and (update.message or update.edited_message and self.allow_edited)):\n message = update.message or update.edited_message\n\n if message.text and message.text.startswith('/') and len(message.text) > 1:\n first_word = message.text_html.split(None, 1)[0]\n if len(first_word) > 1 and first_word.startswith('/'):\n command = first_word[1:].split('@')\n command.append(\n message.bot.username) # in case the command was sent without a username\n\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n res = any(func(message) for func in self.filters)\n else:\n res = self.filters(message)\n\n return res and (command[0].lower() in self.command\n and command[1].lower() == message.bot.username.lower())\n\n return False\n\n def handle_update(self, update, dispatcher):\n \"\"\"Send the update to the :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.\n\n \"\"\"\n optional_args = self.collect_optional_args(dispatcher, update)\n\n message = update.message or update.edited_message\n\n if self.pass_args:\n optional_args['args'] = message.text.split()[1:]\n\n return self.callback(dispatcher.bot, update, **optional_args)\n", "path": "telegram/ext/commandhandler.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro 
Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport warnings\n\nfrom future.utils import string_types\n\nfrom .handler import Handler\nfrom telegram import Update\n\n\nclass CommandHandler(Handler):\n \"\"\"Handler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text.\n\n Attributes:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): The callback function for this handler.\n filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these\n Filters.\n allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept\n edited messages.\n pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed\n ``args``.\n pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be\n passed to the callback function.\n pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to\n the callback function.\n pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to\n the callback function.\n pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to\n the callback function.\n\n Note:\n :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you\n can use to keep any data in will be sent to the :attr:`callback` function.. Related to\n either the user or the chat that the update was sent in. For each update from the same user\n or in the same chat, it will be the same ``dict``.\n\n Args:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.\n It will be called when the :attr:`check_update` has determined that an update should be\n processed by this handler.\n filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise\n operators (& for and, | for or, ~ for not).\n allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept\n edited messages. Default is ``False``.\n pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the\n arguments passed to the command as a keyword argument called ``args``. It will contain\n a list of strings, which is the text following the command split on single or\n consecutive whitespace characters. 
Default is ``False``\n pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``update_queue`` will be passed to the callback function. It will be the ``Queue``\n instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`\n that contains new updates which can be used to insert updates. Default is ``False``.\n pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``job_queue`` will be passed to the callback function. It will be a\n :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`\n which can be used to schedule new jobs. Default is ``False``.\n pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``user_data`` will be passed to the callback function. Default is ``False``.\n pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``chat_data`` will be passed to the callback function. Default is ``False``.\n\n \"\"\"\n\n def __init__(self,\n command,\n callback,\n filters=None,\n allow_edited=False,\n pass_args=False,\n pass_update_queue=False,\n pass_job_queue=False,\n pass_user_data=False,\n pass_chat_data=False):\n super(CommandHandler, self).__init__(\n callback,\n pass_update_queue=pass_update_queue,\n pass_job_queue=pass_job_queue,\n pass_user_data=pass_user_data,\n pass_chat_data=pass_chat_data)\n\n if isinstance(command, string_types):\n self.command = [command.lower()]\n else:\n self.command = [x.lower() for x in command]\n self.filters = filters\n self.allow_edited = allow_edited\n self.pass_args = pass_args\n\n # We put this up here instead of with the rest of checking code\n # in check_update since we don't wanna spam a ton\n if isinstance(self.filters, list):\n warnings.warn('Using a list of filters in MessageHandler is getting '\n 'deprecated, please use bitwise operators (& and |) '\n 'instead. 
More info: https://git.io/vPTbc.')\n\n def check_update(self, update):\n \"\"\"Determines whether an update should be passed to this handlers :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if (isinstance(update, Update)\n and (update.message or update.edited_message and self.allow_edited)):\n message = update.message or update.edited_message\n\n if message.text and message.text.startswith('/') and len(message.text) > 1:\n first_word = message.text_html.split(None, 1)[0]\n if len(first_word) > 1 and first_word.startswith('/'):\n command = first_word[1:].split('@')\n command.append(\n message.bot.username) # in case the command was sent without a username\n\n if not (command[0].lower() in self.command\n and command[1].lower() == message.bot.username.lower()):\n return False\n\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n res = any(func(message) for func in self.filters)\n else:\n res = self.filters(message)\n\n return res\n\n return False\n\n def handle_update(self, update, dispatcher):\n \"\"\"Send the update to the :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.\n\n \"\"\"\n optional_args = self.collect_optional_args(dispatcher, update)\n\n message = update.message or update.edited_message\n\n if self.pass_args:\n optional_args['args'] = message.text.split()[1:]\n\n return self.callback(dispatcher.bot, update, **optional_args)\n", "path": "telegram/ext/commandhandler.py"}]}
| 2,997 | 193 |
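Again a brief editorial aside before the next record, this time on the ordering change in the golden diff above. The sketch is not python-telegram-bot code; `NoisyAdminFilter`, `check_update_old` and `check_update_new` are invented stand-ins that mimic the issue's `AdminFilter` and the two orderings of the command check, to show why a filter with side effects fires for unrelated commands when it is evaluated before the command match.

```python
class NoisyAdminFilter:
    """Stand-in for the issue's AdminFilter: prints a warning when denying."""
    def __init__(self, *admins):
        self.admins = admins

    def __call__(self, message):
        if message['from'] in self.admins:
            return True
        print("You're not an admin!")  # side effect that the user sees
        return False


def check_update_old(message, command, filters=None):
    # Pre-patch ordering: the filter runs before the command is compared.
    word = message['text'].split()[0].lstrip('/')
    res = filters(message) if filters else True
    return res and word == command


def check_update_new(message, command, filters=None):
    # Patched ordering: bail out before touching the filter if the command differs.
    word = message['text'].split()[0].lstrip('/')
    if word != command:
        return False
    return filters(message) if filters else True


msg = {'from': 'someone_else', 'text': '/rain'}
admin_only = NoisyAdminFilter('wagnermacedo')

check_update_old(msg, 'jungle', admin_only)  # prints the warning although /rain was sent
check_update_new(msg, 'jungle', admin_only)  # returns False without calling the filter
```

Because the dispatcher asks each registered handler in turn whether it wants the update, the old ordering ran the `/jungle` handler's filter on a `/rain` message first, produced the warning reply, and only then let the `/rain` handler answer, which is the behaviour the issue describes.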
gh_patches_debug_21247
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-1727
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No or incorrect output [BUG]
Sometimes when my students run a program they get no output or incorrect output. It is hard to pinpoint when exactly this happens. I think it has to do with pressing "Run Code" while a program is still running. Students do this a lot and I see this issue multiple times per lesson. Running the program keeps on giving incorrect results once this has happened. My workaround is to save the program, refresh the webpage in the browser and try again. This usually works.
One example of this issue arises when I run the code below and press the "Run Code" button multiple times during execution. At one point it will create two figures and raise an error (see screenshot). However, the issue is certainly not limited to drawings; it also happens when using the `repeat` and `ask` commands.
```
hoek = 90
repeat 10 times
turn hoek
forward 50
forward 100
```

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/achievements.py`
Content:
```
1 from website import database
2 from hedyweb import AchievementTranslations
3 from website.auth import requires_login, current_user
4 from flask import request, jsonify, session
5 import hedy
6
7
8 class Achievements:
9
10 def __init__(self):
11 self.DATABASE = database.Database()
12 self.TRANSLATIONS = AchievementTranslations()
13 self.all_commands = self.get_all_commands()
14
15 def get_all_commands(self):
16 commands = []
17 for i in range(1, hedy.HEDY_MAX_LEVEL+1):
18 for command in hedy.commands_per_level.get(i):
19 commands.append(command)
20 return set(commands)
21
22 def initialize_user_data_if_necessary(self):
23 if 'achieved' not in session:
24 achievements_data = self.DATABASE.progress_by_username(current_user()['username'])
25 session['new_achieved'] = []
26 session['new_commands'] = []
27 session['previous_code'] = None
28 session['identical_consecutive_errors'] = 0
29 session['consecutive_errors'] = 0
30 if not achievements_data:
31 achievements_data = {}
32 if 'achieved' in achievements_data:
33 session['achieved'] = achievements_data['achieved']
34 else:
35 session['achieved'] = []
36 if 'commands' in achievements_data:
37 session['commands'] = achievements_data['commands']
38 else:
39 session['commands'] = []
40 if 'run_programs' in achievements_data:
41 session['run_programs'] = achievements_data['run_programs']
42 else:
43 session['run_programs'] = 0
44 if 'saved_programs' in achievements_data:
45 session['saved_programs'] = achievements_data['saved_programs']
46 else:
47 session['saved_programs'] = 0
48 if 'submitted_programs' in achievements_data:
49 session['submitted_programs'] = achievements_data['submitted_programs']
50 else:
51 session['submitted_programs'] = 0
52
53 def routes(self, app, database):
54 global DATABASE
55 DATABASE = database
56
57 @app.route('/achievements', methods=['POST'])
58 @requires_login
59 def push_new_achievement(user):
60 body = request.json
61 if "achievement" in body:
62 self.initialize_user_data_if_necessary()
63 if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']):
64 return jsonify({"achievements": self.verify_pushed_achievement(user.get('username'), body['achievement'])})
65 return jsonify({})
66
67 def increase_count(self, category):
68 self.initialize_user_data_if_necessary()
69 if category == "run":
70 session['run_programs'] += 1
71 elif category == "saved":
72 session['saved_programs'] += 1
73 elif category == "submitted":
74 session['submitted_programs'] += 1
75
76 def add_single_achievement(self, username, achievement):
77 self.initialize_user_data_if_necessary()
78 if achievement not in session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']):
79 return self.verify_pushed_achievement(username, achievement)
80 else:
81 return None
82
83 def verify_run_achievements(self, username, code=None, level=None, response=None):
84 self.initialize_user_data_if_necessary()
85 self.check_programs_run()
86 if code and level:
87 self.check_code_achievements(code, level)
88 if code and response:
89 self.check_response_achievements(code, response)
90
91 if len(session['new_commands']) > 0:
92 for command in session['new_commands']:
93 session['commands'].append(command)
94 session['new_commands'] = []
95 self.DATABASE.add_commands_to_username(username, session['commands'])
96
97 if len(session['new_achieved']) > 0:
98 self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
99 for achievement in session['new_achieved']:
100 session['achieved'].append(achievement)
101 return True
102 return False
103
104 def verify_save_achievements(self, username, adventure=None):
105 self.initialize_user_data_if_necessary()
106 self.check_programs_saved()
107 if adventure and 'adventure_is_worthwhile' not in session['achieved']:
108 session['new_achieved'].append("adventure_is_worthwhile")
109 if len(session['new_achieved']) > 0:
110 self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
111 for achievement in session['new_achieved']:
112 session['achieved'].append(achievement)
113 return True
114 return False
115
116 def verify_submit_achievements(self, username):
117 self.initialize_user_data_if_necessary()
118 self.check_programs_submitted()
119
120 if len(session['new_achieved']) > 0:
121 self.DATABASE.add_achievements_to_username(username, session['new_achieved'])
122 for achievement in session['new_achieved']:
123 session['achieved'].append(achievement)
124 return True
125 return False
126
127 def verify_pushed_achievement(self, username, achievement):
128 self.initialize_user_data_if_necessary()
129 session['new_achieved'] = [achievement]
130 self.DATABASE.add_achievement_to_username(username, achievement)
131 session['achieved'].append(achievement)
132 return self.get_earned_achievements()
133
134 def get_earned_achievements(self):
135 self.initialize_user_data_if_necessary()
136 translations = self.TRANSLATIONS.get_translations(session['lang'])
137 translated_achievements = []
138 for achievement in session['new_achieved']:
139 translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']])
140 session['new_achieved'] = [] #Once we get earned achievements -> empty the array with "waiting" ones
141 session['new_commands'] = []
142 return translated_achievements
143
144 def check_programs_run(self):
145 self.initialize_user_data_if_necessary()
146 if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1:
147 session['new_achieved'].append("getting_started_I")
148 if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10:
149 session['new_achieved'].append("getting_started_II")
150 if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50:
151 session['new_achieved'].append("getting_started_III")
152 if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200:
153 session['new_achieved'].append("getting_started_IV")
154 if 'getting_started_V' not in session['achieved'] and session['run_programs'] >= 500:
155 session['new_achieved'].append("getting_started_V")
156
157 def check_programs_saved(self):
158 self.initialize_user_data_if_necessary()
159 if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1:
160 session['new_achieved'].append("one_to_remember_I")
161 if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5:
162 session['new_achieved'].append("one_to_remember_II")
163 if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10:
164 session['new_achieved'].append("one_to_remember_III")
165 if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25:
166 session['new_achieved'].append("one_to_remember_IV")
167 if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50:
168 session['new_achieved'].append("one_to_remember_V")
169
170 def check_programs_submitted(self):
171 self.initialize_user_data_if_necessary()
172 if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1:
173 session['new_achieved'].append("deadline_daredevil_I")
174 if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3:
175 session['new_achieved'].append("deadline_daredevil_II")
176 if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10:
177 session['new_achieved'].append("deadline_daredevil_III")
178
179 def check_code_achievements(self, code, level):
180 self.initialize_user_data_if_necessary()
181 commands_in_code = hedy.all_commands(code, level, session['lang'])
182 if 'trying_is_key' not in session['achieved']:
183 for command in set(commands_in_code):
184 if command not in session['commands'] and command not in session['new_commands']:
185 session['new_commands'].append(command)
186 if set(session['commands']).union(session['new_commands']) == self.all_commands:
187 session['new_achieved'].append("trying_is_key")
188 if 'did_you_say_please' not in session['achieved'] and "ask" in hedy.all_commands(code, level, session['lang']):
189 session['new_achieved'].append("did_you_say_please")
190 if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count("ask") >= 5:
191 session['new_achieved'].append("talk-talk-talk")
192 if 'hedy_honor' not in session['achieved'] and "Hedy" in code:
193 session['new_achieved'].append("hedy_honor")
194 if 'hedy-ious' not in session['achieved']:
195 all_print_arguments = hedy.all_print_arguments(code, level, session['lang'])
196 for argument in all_print_arguments:
197 if all_print_arguments.count(argument) >= 10:
198 session['new_achieved'].append("hedy-ious")
199 break
200
201 def check_response_achievements(self, code, response):
202 self.initialize_user_data_if_necessary()
203 if 'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']:
204 session['new_achieved'].append("ninja_turtle")
205 if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']:
206 session['new_achieved'].append("watch_out")
207 if 'Error' in response and response['Error']:
208 session['consecutive_errors'] += 1
209 if session['previous_code'] == code:
210 if session['identical_consecutive_errors'] == 0:
211 session['identical_consecutive_errors'] += 2 #We have to count the first one too!
212 else:
213 session['identical_consecutive_errors'] += 1
214 if session['identical_consecutive_errors'] >= 3:
215 if 'programming_panic' not in session['achieved']:
216 session['new_achieved'].append("programming_panic")
217 session['previous_code'] = code
218 else:
219 if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1:
220 session['new_achieved'].append("programming_protagonist")
221 session['consecutive_errors'] = 0
222 session['identical_consecutive_errors'] = 0
223
224
225
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/achievements.py b/website/achievements.py
--- a/website/achievements.py
+++ b/website/achievements.py
@@ -183,8 +183,8 @@
for command in set(commands_in_code):
if command not in session['commands'] and command not in session['new_commands']:
session['new_commands'].append(command)
- if set(session['commands']).union(session['new_commands']) == self.all_commands:
- session['new_achieved'].append("trying_is_key")
+ if set(session['commands']).union(session['new_commands']) == self.all_commands:
+ session['new_achieved'].append("trying_is_key")
if 'did_you_say_please' not in session['achieved'] and "ask" in hedy.all_commands(code, level, session['lang']):
session['new_achieved'].append("did_you_say_please")
if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count("ask") >= 5:
|
{"golden_diff": "diff --git a/website/achievements.py b/website/achievements.py\n--- a/website/achievements.py\n+++ b/website/achievements.py\n@@ -183,8 +183,8 @@\n for command in set(commands_in_code):\n if command not in session['commands'] and command not in session['new_commands']:\n session['new_commands'].append(command)\n- if set(session['commands']).union(session['new_commands']) == self.all_commands:\n- session['new_achieved'].append(\"trying_is_key\")\n+ if set(session['commands']).union(session['new_commands']) == self.all_commands:\n+ session['new_achieved'].append(\"trying_is_key\")\n if 'did_you_say_please' not in session['achieved'] and \"ask\" in hedy.all_commands(code, level, session['lang']):\n session['new_achieved'].append(\"did_you_say_please\")\n if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count(\"ask\") >= 5:\n", "issue": "No or incorrect output [BUG]\nSometimes when my students run a program they don't get any or incorrect output. It is hard to pinpoint when exactly this happens. I think it has to do with pressing \"Run Code\" while a program is still running. Student do this a lot and I see this issue multiple times per lesson. Running the program keeps on giving incorrect results once this has happened. My workaround is to save the program, refresh the webpage in the browser and try again. This usually works.\r\n\r\nOne example of this issue arises when I run the code below and press the \"Run Code\" button multiple times during execution. At one point it will create two figures and raise an error (see screenshot). However the issue is certainly not limited to drawings, it also happens when using the `repeat` and `ask` commands.\r\n\r\n```\r\nhoek = 90\r\nrepeat 10 times\r\n turn hoek\r\n forward 50\r\nforward 100\r\n```\r\n\r\n\r\n\n", "before_files": [{"content": "from website import database\nfrom hedyweb import AchievementTranslations\nfrom website.auth import requires_login, current_user\nfrom flask import request, jsonify, session\nimport hedy\n\n\nclass Achievements:\n\n def __init__(self):\n self.DATABASE = database.Database()\n self.TRANSLATIONS = AchievementTranslations()\n self.all_commands = self.get_all_commands()\n\n def get_all_commands(self):\n commands = []\n for i in range(1, hedy.HEDY_MAX_LEVEL+1):\n for command in hedy.commands_per_level.get(i):\n commands.append(command)\n return set(commands)\n\n def initialize_user_data_if_necessary(self):\n if 'achieved' not in session:\n achievements_data = self.DATABASE.progress_by_username(current_user()['username'])\n session['new_achieved'] = []\n session['new_commands'] = []\n session['previous_code'] = None\n session['identical_consecutive_errors'] = 0\n session['consecutive_errors'] = 0\n if not achievements_data:\n achievements_data = {}\n if 'achieved' in achievements_data:\n session['achieved'] = achievements_data['achieved']\n else:\n session['achieved'] = []\n if 'commands' in achievements_data:\n session['commands'] = achievements_data['commands']\n else:\n session['commands'] = []\n if 'run_programs' in achievements_data:\n session['run_programs'] = achievements_data['run_programs']\n else:\n session['run_programs'] = 0\n if 'saved_programs' in achievements_data:\n session['saved_programs'] = achievements_data['saved_programs']\n else:\n session['saved_programs'] = 0\n if 'submitted_programs' in achievements_data:\n session['submitted_programs'] = achievements_data['submitted_programs']\n else:\n session['submitted_programs'] = 0\n\n def 
routes(self, app, database):\n global DATABASE\n DATABASE = database\n\n @app.route('/achievements', methods=['POST'])\n @requires_login\n def push_new_achievement(user):\n body = request.json\n if \"achievement\" in body:\n self.initialize_user_data_if_necessary()\n if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']):\n return jsonify({\"achievements\": self.verify_pushed_achievement(user.get('username'), body['achievement'])})\n return jsonify({})\n\n def increase_count(self, category):\n self.initialize_user_data_if_necessary()\n if category == \"run\":\n session['run_programs'] += 1\n elif category == \"saved\":\n session['saved_programs'] += 1\n elif category == \"submitted\":\n session['submitted_programs'] += 1\n\n def add_single_achievement(self, username, achievement):\n self.initialize_user_data_if_necessary()\n if achievement not in session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']):\n return self.verify_pushed_achievement(username, achievement)\n else:\n return None\n\n def verify_run_achievements(self, username, code=None, level=None, response=None):\n self.initialize_user_data_if_necessary()\n self.check_programs_run()\n if code and level:\n self.check_code_achievements(code, level)\n if code and response:\n self.check_response_achievements(code, response)\n\n if len(session['new_commands']) > 0:\n for command in session['new_commands']:\n session['commands'].append(command)\n session['new_commands'] = []\n self.DATABASE.add_commands_to_username(username, session['commands'])\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_save_achievements(self, username, adventure=None):\n self.initialize_user_data_if_necessary()\n self.check_programs_saved()\n if adventure and 'adventure_is_worthwhile' not in session['achieved']:\n session['new_achieved'].append(\"adventure_is_worthwhile\")\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_submit_achievements(self, username):\n self.initialize_user_data_if_necessary()\n self.check_programs_submitted()\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_pushed_achievement(self, username, achievement):\n self.initialize_user_data_if_necessary()\n session['new_achieved'] = [achievement]\n self.DATABASE.add_achievement_to_username(username, achievement)\n session['achieved'].append(achievement)\n return self.get_earned_achievements()\n\n def get_earned_achievements(self):\n self.initialize_user_data_if_necessary()\n translations = self.TRANSLATIONS.get_translations(session['lang'])\n translated_achievements = []\n for achievement in session['new_achieved']:\n translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']])\n session['new_achieved'] = [] #Once we get earned achievements -> empty the array with \"waiting\" ones\n session['new_commands'] = []\n 
return translated_achievements\n\n def check_programs_run(self):\n self.initialize_user_data_if_necessary()\n if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1:\n session['new_achieved'].append(\"getting_started_I\")\n if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10:\n session['new_achieved'].append(\"getting_started_II\")\n if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50:\n session['new_achieved'].append(\"getting_started_III\")\n if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200:\n session['new_achieved'].append(\"getting_started_IV\")\n if 'getting_started_V' not in session['achieved'] and session['run_programs'] >= 500:\n session['new_achieved'].append(\"getting_started_V\")\n\n def check_programs_saved(self):\n self.initialize_user_data_if_necessary()\n if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1:\n session['new_achieved'].append(\"one_to_remember_I\")\n if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5:\n session['new_achieved'].append(\"one_to_remember_II\")\n if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10:\n session['new_achieved'].append(\"one_to_remember_III\")\n if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25:\n session['new_achieved'].append(\"one_to_remember_IV\")\n if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50:\n session['new_achieved'].append(\"one_to_remember_V\")\n\n def check_programs_submitted(self):\n self.initialize_user_data_if_necessary()\n if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1:\n session['new_achieved'].append(\"deadline_daredevil_I\")\n if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3:\n session['new_achieved'].append(\"deadline_daredevil_II\")\n if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10:\n session['new_achieved'].append(\"deadline_daredevil_III\")\n\n def check_code_achievements(self, code, level):\n self.initialize_user_data_if_necessary()\n commands_in_code = hedy.all_commands(code, level, session['lang'])\n if 'trying_is_key' not in session['achieved']:\n for command in set(commands_in_code):\n if command not in session['commands'] and command not in session['new_commands']:\n session['new_commands'].append(command)\n if set(session['commands']).union(session['new_commands']) == self.all_commands:\n session['new_achieved'].append(\"trying_is_key\")\n if 'did_you_say_please' not in session['achieved'] and \"ask\" in hedy.all_commands(code, level, session['lang']):\n session['new_achieved'].append(\"did_you_say_please\")\n if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count(\"ask\") >= 5:\n session['new_achieved'].append(\"talk-talk-talk\")\n if 'hedy_honor' not in session['achieved'] and \"Hedy\" in code:\n session['new_achieved'].append(\"hedy_honor\")\n if 'hedy-ious' not in session['achieved']:\n all_print_arguments = hedy.all_print_arguments(code, level, session['lang'])\n for argument in all_print_arguments:\n if all_print_arguments.count(argument) >= 10:\n session['new_achieved'].append(\"hedy-ious\")\n break\n\n def check_response_achievements(self, code, response):\n self.initialize_user_data_if_necessary()\n if 
'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']:\n session['new_achieved'].append(\"ninja_turtle\")\n if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']:\n session['new_achieved'].append(\"watch_out\")\n if 'Error' in response and response['Error']:\n session['consecutive_errors'] += 1\n if session['previous_code'] == code:\n if session['identical_consecutive_errors'] == 0:\n session['identical_consecutive_errors'] += 2 #We have to count the first one too!\n else:\n session['identical_consecutive_errors'] += 1\n if session['identical_consecutive_errors'] >= 3:\n if 'programming_panic' not in session['achieved']:\n session['new_achieved'].append(\"programming_panic\")\n session['previous_code'] = code\n else:\n if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1:\n session['new_achieved'].append(\"programming_protagonist\")\n session['consecutive_errors'] = 0\n session['identical_consecutive_errors'] = 0\n\n\n\n", "path": "website/achievements.py"}], "after_files": [{"content": "from website import database\nfrom hedyweb import AchievementTranslations\nfrom website.auth import requires_login, current_user\nfrom flask import request, jsonify, session\nimport hedy\n\n\nclass Achievements:\n\n def __init__(self):\n self.DATABASE = database.Database()\n self.TRANSLATIONS = AchievementTranslations()\n self.all_commands = self.get_all_commands()\n\n def get_all_commands(self):\n commands = []\n for i in range(1, hedy.HEDY_MAX_LEVEL+1):\n for command in hedy.commands_per_level.get(i):\n commands.append(command)\n return set(commands)\n\n def initialize_user_data_if_necessary(self):\n if 'achieved' not in session:\n achievements_data = self.DATABASE.progress_by_username(current_user()['username'])\n session['new_achieved'] = []\n session['new_commands'] = []\n session['previous_code'] = None\n session['identical_consecutive_errors'] = 0\n session['consecutive_errors'] = 0\n if not achievements_data:\n achievements_data = {}\n if 'achieved' in achievements_data:\n session['achieved'] = achievements_data['achieved']\n else:\n session['achieved'] = []\n if 'commands' in achievements_data:\n session['commands'] = achievements_data['commands']\n else:\n session['commands'] = []\n if 'run_programs' in achievements_data:\n session['run_programs'] = achievements_data['run_programs']\n else:\n session['run_programs'] = 0\n if 'saved_programs' in achievements_data:\n session['saved_programs'] = achievements_data['saved_programs']\n else:\n session['saved_programs'] = 0\n if 'submitted_programs' in achievements_data:\n session['submitted_programs'] = achievements_data['submitted_programs']\n else:\n session['submitted_programs'] = 0\n\n def routes(self, app, database):\n global DATABASE\n DATABASE = database\n\n @app.route('/achievements', methods=['POST'])\n @requires_login\n def push_new_achievement(user):\n body = request.json\n if \"achievement\" in body:\n self.initialize_user_data_if_necessary()\n if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']):\n return jsonify({\"achievements\": self.verify_pushed_achievement(user.get('username'), body['achievement'])})\n return jsonify({})\n\n def increase_count(self, category):\n self.initialize_user_data_if_necessary()\n if category == \"run\":\n session['run_programs'] += 1\n elif category == \"saved\":\n session['saved_programs'] += 1\n elif category 
== \"submitted\":\n session['submitted_programs'] += 1\n\n def add_single_achievement(self, username, achievement):\n self.initialize_user_data_if_necessary()\n if achievement not in session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']):\n return self.verify_pushed_achievement(username, achievement)\n else:\n return None\n\n def verify_run_achievements(self, username, code=None, level=None, response=None):\n self.initialize_user_data_if_necessary()\n self.check_programs_run()\n if code and level:\n self.check_code_achievements(code, level)\n if code and response:\n self.check_response_achievements(code, response)\n\n if len(session['new_commands']) > 0:\n for command in session['new_commands']:\n session['commands'].append(command)\n session['new_commands'] = []\n self.DATABASE.add_commands_to_username(username, session['commands'])\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_save_achievements(self, username, adventure=None):\n self.initialize_user_data_if_necessary()\n self.check_programs_saved()\n if adventure and 'adventure_is_worthwhile' not in session['achieved']:\n session['new_achieved'].append(\"adventure_is_worthwhile\")\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_submit_achievements(self, username):\n self.initialize_user_data_if_necessary()\n self.check_programs_submitted()\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_pushed_achievement(self, username, achievement):\n self.initialize_user_data_if_necessary()\n session['new_achieved'] = [achievement]\n self.DATABASE.add_achievement_to_username(username, achievement)\n session['achieved'].append(achievement)\n return self.get_earned_achievements()\n\n def get_earned_achievements(self):\n self.initialize_user_data_if_necessary()\n translations = self.TRANSLATIONS.get_translations(session['lang'])\n translated_achievements = []\n for achievement in session['new_achieved']:\n translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']])\n session['new_achieved'] = [] #Once we get earned achievements -> empty the array with \"waiting\" ones\n session['new_commands'] = []\n return translated_achievements\n\n def check_programs_run(self):\n self.initialize_user_data_if_necessary()\n if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1:\n session['new_achieved'].append(\"getting_started_I\")\n if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10:\n session['new_achieved'].append(\"getting_started_II\")\n if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50:\n session['new_achieved'].append(\"getting_started_III\")\n if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200:\n session['new_achieved'].append(\"getting_started_IV\")\n if 'getting_started_V' not in session['achieved'] and 
session['run_programs'] >= 500:\n session['new_achieved'].append(\"getting_started_V\")\n\n def check_programs_saved(self):\n self.initialize_user_data_if_necessary()\n if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1:\n session['new_achieved'].append(\"one_to_remember_I\")\n if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5:\n session['new_achieved'].append(\"one_to_remember_II\")\n if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10:\n session['new_achieved'].append(\"one_to_remember_III\")\n if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25:\n session['new_achieved'].append(\"one_to_remember_IV\")\n if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50:\n session['new_achieved'].append(\"one_to_remember_V\")\n\n def check_programs_submitted(self):\n self.initialize_user_data_if_necessary()\n if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1:\n session['new_achieved'].append(\"deadline_daredevil_I\")\n if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3:\n session['new_achieved'].append(\"deadline_daredevil_II\")\n if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10:\n session['new_achieved'].append(\"deadline_daredevil_III\")\n\n def check_code_achievements(self, code, level):\n self.initialize_user_data_if_necessary()\n commands_in_code = hedy.all_commands(code, level, session['lang'])\n if 'trying_is_key' not in session['achieved']:\n for command in set(commands_in_code):\n if command not in session['commands'] and command not in session['new_commands']:\n session['new_commands'].append(command)\n if set(session['commands']).union(session['new_commands']) == self.all_commands:\n session['new_achieved'].append(\"trying_is_key\")\n if 'did_you_say_please' not in session['achieved'] and \"ask\" in hedy.all_commands(code, level, session['lang']):\n session['new_achieved'].append(\"did_you_say_please\")\n if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count(\"ask\") >= 5:\n session['new_achieved'].append(\"talk-talk-talk\")\n if 'hedy_honor' not in session['achieved'] and \"Hedy\" in code:\n session['new_achieved'].append(\"hedy_honor\")\n if 'hedy-ious' not in session['achieved']:\n all_print_arguments = hedy.all_print_arguments(code, level, session['lang'])\n for argument in all_print_arguments:\n if all_print_arguments.count(argument) >= 10:\n session['new_achieved'].append(\"hedy-ious\")\n break\n\n def check_response_achievements(self, code, response):\n self.initialize_user_data_if_necessary()\n if 'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']:\n session['new_achieved'].append(\"ninja_turtle\")\n if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']:\n session['new_achieved'].append(\"watch_out\")\n if 'Error' in response and response['Error']:\n session['consecutive_errors'] += 1\n if session['previous_code'] == code:\n if session['identical_consecutive_errors'] == 0:\n session['identical_consecutive_errors'] += 2 #We have to count the first one too!\n else:\n session['identical_consecutive_errors'] += 1\n if session['identical_consecutive_errors'] >= 3:\n if 'programming_panic' not in session['achieved']:\n 
session['new_achieved'].append(\"programming_panic\")\n session['previous_code'] = code\n else:\n if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1:\n session['new_achieved'].append(\"programming_protagonist\")\n session['consecutive_errors'] = 0\n session['identical_consecutive_errors'] = 0\n\n\n\n", "path": "website/achievements.py"}]}
| 3,619 | 240 |
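A note on the record above: the accepted patch changes nothing but indentation. The "all commands used" comparison moves inside the per-command loop, so it is evaluated as each new command is recorded rather than once after the loop finishes. A minimal, runnable sketch of that corrected scoping (plain local names stand in for the Flask `session` fields; they are illustrative, not Hedy's actual API):

```python
# Toy reproduction of the re-scoped "trying_is_key" check from the diff above.
all_commands = {"print", "ask", "echo", "forward", "turn"}   # assumed full command set
known_commands = {"print", "ask"}                            # commands already used before
new_commands = []                                            # commands first seen in this program
achieved = []

commands_in_code = ["echo", "forward", "turn", "print"]      # commands parsed from user code

if "trying_is_key" not in achieved:
    for command in set(commands_in_code):
        if command not in known_commands and command not in new_commands:
            new_commands.append(command)
        # Moved inside the loop: the completeness test now runs on every iteration.
        # The extra guard keeps this standalone sketch from appending duplicates.
        if set(known_commands).union(new_commands) == all_commands and "trying_is_key" not in achieved:
            achieved.append("trying_is_key")

print(achieved)  # -> ['trying_is_key'] once every command has been seen at least once
```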
gh_patches_debug_17840
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleSpeech-1354
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The version between the setup.py and __init__.py is not synchronized.
The version in setup.py is 0.1.1, but the version in __init.py is 0.1.0.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import contextlib
15 import inspect
16 import io
17 import os
18 import subprocess as sp
19 import sys
20 from pathlib import Path
21
22 from setuptools import Command
23 from setuptools import find_packages
24 from setuptools import setup
25 from setuptools.command.develop import develop
26 from setuptools.command.install import install
27
28 HERE = Path(os.path.abspath(os.path.dirname(__file__)))
29
30 requirements = {
31 "install": [
32 "editdistance",
33 "g2p_en",
34 "g2pM",
35 "h5py",
36 "inflect",
37 "jieba",
38 "jsonlines",
39 "kaldiio",
40 "librosa",
41 "loguru",
42 "matplotlib",
43 "nara_wpe",
44 "pandas",
45 "paddleaudio",
46 "paddlenlp",
47 "paddlespeech_feat",
48 "praatio==5.0.0",
49 "pypinyin",
50 "python-dateutil",
51 "pyworld",
52 "resampy==0.2.2",
53 "sacrebleu",
54 "scipy",
55 "sentencepiece~=0.1.96",
56 "soundfile~=0.10",
57 "textgrid",
58 "timer",
59 "tqdm",
60 "typeguard",
61 "visualdl",
62 "webrtcvad",
63 "yacs~=0.1.8",
64 ],
65 "develop": [
66 "ConfigArgParse",
67 "coverage",
68 "gpustat",
69 "paddlespeech_ctcdecoders",
70 "phkit",
71 "Pillow",
72 "pybind11",
73 "pypi-kenlm",
74 "snakeviz",
75 "sox",
76 "soxbindings",
77 "unidecode",
78 "yq",
79 "pre-commit",
80 "zhon",
81 ]
82 }
83
84
85 @contextlib.contextmanager
86 def pushd(new_dir):
87 old_dir = os.getcwd()
88 os.chdir(new_dir)
89 print(new_dir)
90 yield
91 os.chdir(old_dir)
92 print(old_dir)
93
94
95 def read(*names, **kwargs):
96 with io.open(
97 os.path.join(os.path.dirname(__file__), *names),
98 encoding=kwargs.get("encoding", "utf8")) as fp:
99 return fp.read()
100
101
102 def check_call(cmd: str, shell=False, executable=None):
103 try:
104 sp.check_call(
105 cmd.split(),
106 shell=shell,
107 executable="/bin/bash" if shell else executable)
108 except sp.CalledProcessError as e:
109 print(
110 f"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:",
111 e.output,
112 file=sys.stderr)
113 raise e
114
115
116 def _remove(files: str):
117 for f in files:
118 f.unlink()
119
120
121 def _post_install(install_lib_dir):
122 # tools/make
123 tool_dir = HERE / "tools"
124 _remove(tool_dir.glob("*.done"))
125 with pushd(tool_dir):
126 check_call("make")
127 print("tools install.")
128
129 # ctcdecoder
130 ctcdecoder_dir = HERE / 'third_party/ctc_decoders'
131 with pushd(ctcdecoder_dir):
132 check_call("bash -e setup.sh")
133 print("ctcdecoder install.")
134
135
136 class DevelopCommand(develop):
137 def run(self):
138 develop.run(self)
139 # must after develop.run, or pkg install by shell will not see
140 self.execute(_post_install, (self.install_lib, ), msg="Post Install...")
141
142
143 class InstallCommand(install):
144 def run(self):
145 install.run(self)
146
147
148 # cmd: python setup.py upload
149 class UploadCommand(Command):
150 description = "Build and publish the package."
151 user_options = []
152
153 def initialize_options(self):
154 pass
155
156 def finalize_options(self):
157 pass
158
159 def run(self):
160 try:
161 print("Removing previous dist/ ...")
162 shutil.rmtree(str(HERE / "dist"))
163 except OSError:
164 pass
165 print("Building source distribution...")
166 sp.check_call([sys.executable, "setup.py", "sdist"])
167 print("Uploading package to PyPi...")
168 sp.check_call(["twine", "upload", "dist/*"])
169 sys.exit()
170
171
172 setup_info = dict(
173 # Metadata
174 name='paddlespeech',
175 version='0.1.1',
176 author='PaddlePaddle Speech and Language Team',
177 author_email='[email protected]',
178 url='https://github.com/PaddlePaddle/PaddleSpeech',
179 license='Apache 2.0',
180 description='Speech tools and models based on Paddlepaddle',
181 long_description=read("README.md"),
182 long_description_content_type="text/markdown",
183 keywords=[
184 "speech",
185 "asr",
186 "tts",
187 "speaker verfication",
188 "speech classfication",
189 "text frontend",
190 "MFA",
191 "paddlepaddle",
192 "beam search",
193 "ctcdecoder",
194 "deepspeech2",
195 "transformer",
196 "conformer",
197 "fastspeech",
198 "vocoder",
199 "pwgan",
200 "gan",
201 ],
202 python_requires='>=3.7',
203 install_requires=requirements["install"],
204 extras_require={
205 'develop':
206 requirements["develop"],
207 'doc': [
208 "sphinx", "sphinx-rtd-theme", "numpydoc", "myst_parser",
209 "recommonmark>=0.5.0", "sphinx-markdown-tables", "sphinx-autobuild"
210 ],
211 },
212 cmdclass={
213 'develop': DevelopCommand,
214 'install': InstallCommand,
215 'upload': UploadCommand,
216 },
217
218 # Package info
219 packages=find_packages(include=('paddlespeech*')),
220 zip_safe=True,
221 classifiers=[
222 'Development Status :: 5 - Production/Stable',
223 'Intended Audience :: Developers',
224 'Intended Audience :: Science/Research',
225 'Topic :: Scientific/Engineering :: Artificial Intelligence',
226 'License :: OSI Approved :: Apache Software License',
227 'Programming Language :: Python',
228 'Programming Language :: Python :: 3',
229 'Programming Language :: Python :: 3.7',
230 'Programming Language :: Python :: 3.8',
231 'Programming Language :: Python :: 3.9',
232 ],
233 entry_points={
234 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']
235 })
236
237 setup(**setup_info)
238
```
Path: `paddlespeech/__init__.py`
Content:
```
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 __version__ = '0.1.0'
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/paddlespeech/__init__.py b/paddlespeech/__init__.py
--- a/paddlespeech/__init__.py
+++ b/paddlespeech/__init__.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-__version__ = '0.1.0'
+__version__ = '0.1.1'
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,7 @@
import os
import subprocess as sp
import sys
+import paddlespeech
from pathlib import Path
from setuptools import Command
@@ -172,7 +173,7 @@
setup_info = dict(
# Metadata
name='paddlespeech',
- version='0.1.1',
+ version=paddlespeech.__version__,
author='PaddlePaddle Speech and Language Team',
author_email='[email protected]',
url='https://github.com/PaddlePaddle/PaddleSpeech',
|
{"golden_diff": "diff --git a/paddlespeech/__init__.py b/paddlespeech/__init__.py\n--- a/paddlespeech/__init__.py\n+++ b/paddlespeech/__init__.py\n@@ -12,4 +12,4 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-__version__ = '0.1.0'\n+__version__ = '0.1.1'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,6 +17,7 @@\n import os\n import subprocess as sp\n import sys\n+import paddlespeech\n from pathlib import Path\n \n from setuptools import Command\n@@ -172,7 +173,7 @@\n setup_info = dict(\n # Metadata\n name='paddlespeech',\n- version='0.1.1',\n+ version=paddlespeech.__version__,\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n", "issue": "The version between the setup.py and __init__.py is not synchronized.\nThe version in setup.py is 0.1.1, but the version in __init.py is 0.1.0.\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n \"zhon\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n 
for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'third_party/ctc_decoders'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version='0.1.1',\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n", "path": "setup.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version__ = '0.1.0'\n", "path": "paddlespeech/__init__.py"}], "after_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nimport paddlespeech\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n \"zhon\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'third_party/ctc_decoders'\n with 
pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version=paddlespeech.__version__,\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n", "path": "setup.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version__ = '0.1.1'\n", "path": "paddlespeech/__init__.py"}]}
| 2,632 | 249 |
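On the record above: the accepted fix has `setup.py` import the package and reuse `paddlespeech.__version__`, so the two version strings cannot drift apart again. A common variant of the same single-sourcing idea, sketched below, reads the string out of `__init__.py` without importing the package, which avoids pulling runtime dependencies in at build time. The file path and error message here are illustrative assumptions, not PaddleSpeech's actual build code:

```python
# Sketch: single-source the package version by parsing __init__.py at build time.
import re
from pathlib import Path


def read_version(init_file: str = "paddlespeech/__init__.py") -> str:
    """Return the __version__ string declared in the given module file."""
    text = Path(init_file).read_text(encoding="utf8")
    match = re.search(r"""^__version__\s*=\s*['"]([^'"]+)['"]""", text, re.MULTILINE)
    if match is None:
        raise RuntimeError(f"__version__ not found in {init_file}")
    return match.group(1)


# In setup(), the hard-coded literal would then be replaced with:
#   setup(name="paddlespeech", version=read_version(), ...)
```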
gh_patches_debug_8367
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-928
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use 500 page for true application errors
A clear and concise description of the task.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [ ] Use 500 / service is down for true application errors, EV server offline, Auth provider offline, etc.
## Additional context
<!-- Add any other context about the task here -->
Related to https://github.com/cal-itp/benefits/issues/913 https://github.com/cal-itp/benefits/issues/757 #914
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/viewmodels.py`
Content:
```
1 """
2 The core application: view model definitions for the root of the webapp.
3 """
4 from django.utils.translation import pgettext, gettext_lazy as _
5 from django.urls import reverse
6
7 from benefits.core import models
8
9 from . import session
10
11
12 class Button:
13 """
14 Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):
15 * classes: str, str[]
16 * id: str
17 * fallback_text: str
18 * label: str
19 * text: str
20 * url: str
21 * target: str
22 * rel: str
23 """
24
25 def __init__(self, **kwargs):
26 classes = kwargs.get("classes", [])
27 if isinstance(classes, str):
28 classes = classes.split()
29
30 self.classes = ["btn", "btn-lg"]
31 self.classes.extend(classes)
32 self.id = kwargs.get("id")
33 self.fallback_text = kwargs.get("fallback_text")
34 self.label = kwargs.get("label")
35 self.text = kwargs.get("text", "Button")
36 self.url = kwargs.get("url")
37 self.target = kwargs.get("target")
38 self.rel = kwargs.get("rel")
39
40 @staticmethod
41 def agency_contact_links(agency):
42 """Create link buttons for agency contact information."""
43 return [
44 Button.link(classes="agency", label=agency.long_name, text=agency.phone, url=f"tel:{agency.phone}"),
45 Button.link(
46 classes="agency", text=agency.info_url, url=agency.info_url, target="_blank", rel="noopener noreferrer"
47 ),
48 ]
49
50 @staticmethod
51 def home(request, text=None):
52 """Create a button back to this session's origin."""
53 if text is None:
54 text = _("core.buttons.return_home")
55
56 return Button.primary(text=text, url=session.origin(request))
57
58 @staticmethod
59 def link(**kwargs):
60 classes = kwargs.pop("classes", [])
61 if isinstance(classes, str):
62 classes = classes.split(" ")
63 classes.insert(0, "btn-link")
64 return Button(classes=classes, **kwargs)
65
66 @staticmethod
67 def primary(**kwargs):
68 classes = kwargs.pop("classes", [])
69 if isinstance(classes, str):
70 classes = classes.split(" ")
71 classes.insert(0, "btn-primary")
72 return Button(classes=classes, **kwargs)
73
74 @staticmethod
75 def outline_primary(**kwargs):
76 classes = kwargs.pop("classes", [])
77 if isinstance(classes, str):
78 classes = classes.split(" ")
79 classes.insert(0, "btn-outline-primary")
80 return Button(classes=classes, **kwargs)
81
82 @staticmethod
83 def login(**kwargs):
84 """Create a login.gov button, with a login.gov logo and fallback text"""
85 btn = Button.primary(fallback_text="Login.gov", id="login", **kwargs)
86 return btn
87
88 @staticmethod
89 def logout(**kwargs):
90 """Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text"""
91 btn = Button.primary(fallback_text="Login.gov", id="login", url=reverse("oauth:logout"), text="", **kwargs)
92 return btn
93
94
95 class Icon:
96 """Represents an icon."""
97
98 def __init__(self, icon, alt):
99 self.src = f"img/icon/{icon}.svg"
100 self.alt = alt
101
102
103 class Page:
104 """
105 Represents a page of content:
106 * title: str
107 * noimage: bool
108 * icon: core.viewmodels.Icon
109 * content_title: str
110 * paragraphs: str[]
111 * form: django.forms.Form
112 * forms: django.forms.Form[]
113 * button: core.viewmodels.Button
114 * buttons: core.viewmodels.Button[]
115 * classes: str[]
116 """
117
118 def __init__(self, **kwargs):
119 self.title = kwargs.get("title")
120 if self.title is None:
121 self.title = _("core.pages.index.prefix")
122 else:
123 self.title = f"{_('core.pages.index.prefix')}: {self.title}"
124
125 self.noimage = kwargs.get("noimage", False)
126 self.icon = kwargs.get("icon")
127 self.content_title = kwargs.get("content_title")
128 self.paragraphs = kwargs.get("paragraphs", [])
129 self.steps = kwargs.get("steps")
130
131 self.forms = kwargs.get("forms", [])
132 if not isinstance(self.forms, list):
133 self.forms = [self.forms]
134 if "form" in kwargs:
135 self.forms.append(kwargs.get("form"))
136
137 self.buttons = kwargs.get("buttons", [])
138 if not isinstance(self.buttons, list):
139 self.buttons = [self.buttons]
140 if "button" in kwargs:
141 self.buttons.append(kwargs.get("button"))
142
143 self.classes = kwargs.get("classes", [])
144 if not isinstance(self.classes, list):
145 self.classes = self.classes.split(" ")
146 if not self.noimage:
147 self.classes.append("with-image")
148
149 def context_dict(self):
150 """Return a context dict for a Page."""
151 return {"page": self}
152
153
154 class ErrorPage(Page):
155 """
156 Represents an error page:
157 * title: str
158 * icon: core.viewmodels.Icon
159 * content_title: str
160 * paragraphs: str[]
161 * button: core.viewmodels.Button
162 """
163
164 def __init__(self, **kwargs):
165 super().__init__(
166 title=kwargs.get("title", _("core.pages.error.title")),
167 icon=kwargs.get("icon", Icon("sadbus", pgettext("image alt text", "core.icons.sadbus"))),
168 content_title=kwargs.get("content_title", _("core.pages.error.title")),
169 paragraphs=kwargs.get("paragraphs", [_("core.pages.server_error.content_title")]),
170 button=kwargs.get("button"),
171 noimage=True,
172 )
173
174 @staticmethod
175 def user_error(
176 title=_("core.pages.user_error.title"),
177 content_title=_("core.pages.user_error.content_title"),
178 paragraphs=[_("core.pages.user_error.p[0]")],
179 **kwargs,
180 ):
181 """Create a new core.viewmodels.ErrorPage instance with defaults for a user error."""
182 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)
183
184 @staticmethod
185 def server_error(
186 title=_("core.pages.server_error.title"),
187 content_title=_("core.pages.server_error.title"),
188 paragraphs=[_("core.pages.server_error.p[0]"), _("core.pages.server_error.p[1]")],
189 **kwargs,
190 ):
191 """Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error."""
192 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)
193
194 @staticmethod
195 def not_found(
196 title=_("core.pages.not_found.title"),
197 content_title=_("core.pages.not_found.content_title"),
198 paragraphs=[_("core.pages.not_found.p[0]")],
199 **kwargs,
200 ):
201 """Create a new core.viewmodels.ErrorPage with defaults for a 404."""
202 path = kwargs.pop("path", None)
203 if path and title:
204 title = f"{title}: {path}"
205 elif path and not title:
206 title = path
207 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)
208
209
210 class PaymentProcessor:
211 """
212 Represents a core.models.PaymentProcessor:
213 * model: core.models.PaymentProcessor
214 * access_token_url: str
215 * element_id: str
216 * color: str
217 * [name: str]
218 * [loading_text: str]
219 """
220
221 def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_("core.buttons.wait")):
222 if isinstance(model, models.PaymentProcessor):
223 self.access_token_url = access_token_url
224 self.element_id = element_id
225 self.color = color
226 self.name = name or model.name
227 self.loading_text = loading_text
228 self.card_tokenize_url = model.card_tokenize_url
229 self.card_tokenize_func = model.card_tokenize_func
230 self.card_tokenize_env = model.card_tokenize_env
231
232 def context_dict(self):
233 """Return a context dict for a PaymentProcessor."""
234 return {"payment_processor": self}
235
236
237 class TransitAgency:
238 """
239 Represents a core.models.TransitAgency:
240 * model: core.models.TransitAgency
241 """
242
243 def __init__(self, model):
244 if isinstance(model, models.TransitAgency):
245 self.slug = model.slug
246 self.short_name = model.short_name
247 self.long_name = model.long_name
248 self.agency_id = model.agency_id
249 self.merchant_id = model.merchant_id
250 self.info_url = model.info_url
251 self.phone = model.phone
252
253 def context_dict(self):
254 """Return a context dict for a TransitAgency."""
255 return {"agency": self}
256
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py
--- a/benefits/core/viewmodels.py
+++ b/benefits/core/viewmodels.py
@@ -185,7 +185,7 @@
def server_error(
title=_("core.pages.server_error.title"),
content_title=_("core.pages.server_error.title"),
- paragraphs=[_("core.pages.server_error.p[0]"), _("core.pages.server_error.p[1]")],
+ paragraphs=[_("core.pages.server_error.p[0]")],
**kwargs,
):
"""Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error."""
|
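For the record above: the accepted diff only trims the generic server-error copy to a single paragraph. The broader acceptance criterion, routing true application failures (eligibility server offline, auth provider offline, and so on) to the 500 page, is ordinarily handled through Django's 500 hook. A minimal sketch of that wiring follows; the module path and template name are assumptions, not necessarily the benefits app's real ones:

```python
# urls.py (root URLconf) -- Django resolves this dotted path whenever an
# unhandled exception escapes a view and DEBUG is False.
handler500 = "benefits.core.views.server_error"

# benefits/core/views.py -- render the "service is down" page with a 500 status.
from django.template.response import TemplateResponse


def server_error(request, template_name="500.html"):
    """Generic handler for true application errors such as backend or auth outages."""
    return TemplateResponse(request, template_name, status=500)
```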
{"golden_diff": "diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py\n--- a/benefits/core/viewmodels.py\n+++ b/benefits/core/viewmodels.py\n@@ -185,7 +185,7 @@\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n- paragraphs=[_(\"core.pages.server_error.p[0]\"), _(\"core.pages.server_error.p[1]\")],\n+ paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n", "issue": "Use 500 page for true application errors\nA clear and concise description of the task.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] Use 500 / service is down for true application errors, EV server offline, Auth provider offline, etc.\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the task here -->\r\nRelated to https://github.com/cal-itp/benefits/issues/913 https://github.com/cal-itp/benefits/issues/757 #914 \n", "before_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = 
Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * noimage: bool\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.noimage = kwargs.get(\"noimage\", False)\n self.icon = kwargs.get(\"icon\")\n self.content_title = kwargs.get(\"content_title\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n if not self.noimage:\n self.classes.append(\"with-image\")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n content_title=kwargs.get(\"content_title\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.content_title\")]),\n button=kwargs.get(\"button\"),\n noimage=True,\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n content_title=_(\"core.pages.user_error.content_title\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\"), _(\"core.pages.server_error.p[1]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n content_title=_(\"core.pages.not_found.content_title\"),\n 
paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . 
import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * noimage: bool\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.noimage = kwargs.get(\"noimage\", False)\n self.icon = kwargs.get(\"icon\")\n self.content_title = kwargs.get(\"content_title\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not 
isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n if not self.noimage:\n self.classes.append(\"with-image\")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n content_title=kwargs.get(\"content_title\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.content_title\")]),\n button=kwargs.get(\"button\"),\n noimage=True,\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n content_title=_(\"core.pages.user_error.content_title\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n content_title=_(\"core.pages.not_found.content_title\"),\n paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n 
if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}]}
| 2,922 | 141 |
gh_patches_debug_36150
|
rasdani/github-patches
|
git_diff
|
prowler-cloud__prowler-2736
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ecr_repositories_scan_vulnerabilities_in_latest_image: Configure level
### New feature motivation
Hi, is it possible to configure the severity level from which the check should fail?
AWS tags some findings as medium, which I might want to ignore, but of course I don't want to mute critical findings for the image.
### Solution Proposed
none
### Describe alternatives you've considered
none
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py`
Content:
```
1 from prowler.lib.check.models import Check, Check_Report_AWS
2 from prowler.providers.aws.services.ecr.ecr_client import ecr_client
3
4
5 class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
6 def execute(self):
7 findings = []
8 for registry in ecr_client.registries.values():
9 for repository in registry.repositories:
10 # First check if the repository has images
11 if len(repository.images_details) > 0:
12 # We only want to check the latest image pushed
13 image = repository.images_details[-1]
14
15 report = Check_Report_AWS(self.metadata())
16 report.region = repository.region
17 report.resource_id = repository.name
18 report.resource_arn = repository.arn
19 report.resource_tags = repository.tags
20 report.status = "PASS"
21 report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings."
22 if not image.scan_findings_status:
23 report.status = "FAIL"
24 report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan."
25 elif image.scan_findings_status == "FAILED":
26 report.status = "FAIL"
27 report.status_extended = (
28 f"ECR repository {repository.name} with scan status FAILED."
29 )
30 elif image.scan_findings_status != "FAILED":
31 if image.scan_findings_severity_count and (
32 image.scan_findings_severity_count.critical
33 or image.scan_findings_severity_count.high
34 or image.scan_findings_severity_count.medium
35 ):
36 report.status = "FAIL"
37 report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}."
38
39 findings.append(report)
40
41 return findings
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py
--- a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py
+++ b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py
@@ -5,6 +5,12 @@
class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):
def execute(self):
findings = []
+
+ # Get minimun severity to report
+ minimum_severity = ecr_client.audit_config.get(
+ "ecr_repository_vulnerability_minimum_severity", "MEDIUM"
+ )
+
for registry in ecr_client.registries.values():
for repository in registry.repositories:
# First check if the repository has images
@@ -27,8 +33,23 @@
report.status_extended = (
f"ECR repository {repository.name} with scan status FAILED."
)
- elif image.scan_findings_status != "FAILED":
- if image.scan_findings_severity_count and (
+ elif (
+ image.scan_findings_status != "FAILED"
+ and image.scan_findings_severity_count
+ ):
+ if (
+ minimum_severity == "CRITICAL"
+ and image.scan_findings_severity_count.critical
+ ):
+ report.status = "FAIL"
+ report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}."
+ elif minimum_severity == "HIGH" and (
+ image.scan_findings_severity_count.critical
+ or image.scan_findings_severity_count.high
+ ):
+ report.status = "FAIL"
+ report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}."
+ elif minimum_severity == "MEDIUM" and (
image.scan_findings_severity_count.critical
or image.scan_findings_severity_count.high
or image.scan_findings_severity_count.medium
|
{"golden_diff": "diff --git a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py\n--- a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py\n+++ b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py\n@@ -5,6 +5,12 @@\n class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):\n def execute(self):\n findings = []\n+\n+ # Get minimun severity to report\n+ minimum_severity = ecr_client.audit_config.get(\n+ \"ecr_repository_vulnerability_minimum_severity\", \"MEDIUM\"\n+ )\n+\n for registry in ecr_client.registries.values():\n for repository in registry.repositories:\n # First check if the repository has images\n@@ -27,8 +33,23 @@\n report.status_extended = (\n f\"ECR repository {repository.name} with scan status FAILED.\"\n )\n- elif image.scan_findings_status != \"FAILED\":\n- if image.scan_findings_severity_count and (\n+ elif (\n+ image.scan_findings_status != \"FAILED\"\n+ and image.scan_findings_severity_count\n+ ):\n+ if (\n+ minimum_severity == \"CRITICAL\"\n+ and image.scan_findings_severity_count.critical\n+ ):\n+ report.status = \"FAIL\"\n+ report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}.\"\n+ elif minimum_severity == \"HIGH\" and (\n+ image.scan_findings_severity_count.critical\n+ or image.scan_findings_severity_count.high\n+ ):\n+ report.status = \"FAIL\"\n+ report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}.\"\n+ elif minimum_severity == \"MEDIUM\" and (\n image.scan_findings_severity_count.critical\n or image.scan_findings_severity_count.high\n or image.scan_findings_severity_count.medium\n", "issue": "ecr_repositories_scan_vulnerabilities_in_latest_image: Configure level\n### New feature motivation\n\nHi, is it possible to configure the level from which the test shall fail?\r\nAWS tags some findings as medium which I might want to ignore, but of course I don't want to mute critical findings for the image.\n\n### Solution Proposed\n\nnone\n\n### Describe alternatives you've considered\n\nnone\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.ecr.ecr_client import ecr_client\n\n\nclass ecr_repositories_scan_vulnerabilities_in_latest_image(Check):\n def execute(self):\n findings = []\n for registry in ecr_client.registries.values():\n for repository in registry.repositories:\n # First check if the repository has images\n if len(repository.images_details) > 0:\n # We only want to check the latest image pushed\n image = repository.images_details[-1]\n\n report = Check_Report_AWS(self.metadata())\n report.region = repository.region\n report.resource_id = repository.name\n report.resource_arn = repository.arn\n report.resource_tags = repository.tags\n report.status = \"PASS\"\n report.status_extended = f\"ECR repository {repository.name} has 
imageTag {image.latest_tag} scanned without findings.\"\n if not image.scan_findings_status:\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan.\"\n elif image.scan_findings_status == \"FAILED\":\n report.status = \"FAIL\"\n report.status_extended = (\n f\"ECR repository {repository.name} with scan status FAILED.\"\n )\n elif image.scan_findings_status != \"FAILED\":\n if image.scan_findings_severity_count and (\n image.scan_findings_severity_count.critical\n or image.scan_findings_severity_count.high\n or image.scan_findings_severity_count.medium\n ):\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}.\"\n\n findings.append(report)\n\n return findings\n", "path": "prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py"}], "after_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.ecr.ecr_client import ecr_client\n\n\nclass ecr_repositories_scan_vulnerabilities_in_latest_image(Check):\n def execute(self):\n findings = []\n\n # Get minimun severity to report\n minimum_severity = ecr_client.audit_config.get(\n \"ecr_repository_vulnerability_minimum_severity\", \"MEDIUM\"\n )\n\n for registry in ecr_client.registries.values():\n for repository in registry.repositories:\n # First check if the repository has images\n if len(repository.images_details) > 0:\n # We only want to check the latest image pushed\n image = repository.images_details[-1]\n\n report = Check_Report_AWS(self.metadata())\n report.region = repository.region\n report.resource_id = repository.name\n report.resource_arn = repository.arn\n report.resource_tags = repository.tags\n report.status = \"PASS\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings.\"\n if not image.scan_findings_status:\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan.\"\n elif image.scan_findings_status == \"FAILED\":\n report.status = \"FAIL\"\n report.status_extended = (\n f\"ECR repository {repository.name} with scan status FAILED.\"\n )\n elif (\n image.scan_findings_status != \"FAILED\"\n and image.scan_findings_severity_count\n ):\n if (\n minimum_severity == \"CRITICAL\"\n and image.scan_findings_severity_count.critical\n ):\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}.\"\n elif minimum_severity == \"HIGH\" and (\n image.scan_findings_severity_count.critical\n or image.scan_findings_severity_count.high\n ):\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}.\"\n elif minimum_severity == \"MEDIUM\" and (\n image.scan_findings_severity_count.critical\n or image.scan_findings_severity_count.high\n or image.scan_findings_severity_count.medium\n ):\n report.status 
= \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}.\"\n\n findings.append(report)\n\n return findings\n", "path": "prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py"}]}
| 861 | 559 |
gh_patches_debug_19484
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-contrib-310
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Resource must be immutable
The boto instrumentation changes a span's resource to set attributes, which goes against the spec.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Instrument `Boto`_ to trace service requests.
16
17 There are two options for instrumenting code. The first option is to use the
18 ``opentelemetry-instrument`` executable which will automatically
19 instrument your Boto client. The second is to programmatically enable
20 instrumentation via the following code:
21
22 .. _boto: https://pypi.org/project/boto/
23
24 Usage
25 -----
26
27 .. code:: python
28
29 from opentelemetry.instrumentation.boto import BotoInstrumentor
30 import boto
31
32
33 # Instrument Boto
34 BotoInstrumentor().instrument()
35
36 # This will create a span with Boto-specific attributes
37 ec2 = boto.ec2.connect_to_region("us-west-2")
38 ec2.get_all_instances()
39
40 API
41 ---
42 """
43
44 import logging
45 from inspect import currentframe
46
47 from boto.connection import AWSAuthConnection, AWSQueryConnection
48 from wrapt import wrap_function_wrapper
49
50 from opentelemetry.instrumentation.boto.version import __version__
51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
52 from opentelemetry.instrumentation.utils import unwrap
53 from opentelemetry.sdk.trace import Resource
54 from opentelemetry.trace import SpanKind, get_tracer
55
56 logger = logging.getLogger(__name__)
57
58 SERVICE_PARAMS_BLOCK_LIST = {"s3": ["params.Body"]}
59
60
61 def _get_instance_region_name(instance):
62 region = getattr(instance, "region", None)
63
64 if not region:
65 return None
66 if isinstance(region, str):
67 return region.split(":")[1]
68 return region.name
69
70
71 class BotoInstrumentor(BaseInstrumentor):
72 """A instrumentor for Boto
73
74 See `BaseInstrumentor`
75 """
76
77 def __init__(self):
78 super().__init__()
79 self._original_boto = None
80
81 def _instrument(self, **kwargs):
82 # AWSQueryConnection and AWSAuthConnection are two different classes
83 # called by different services for connection.
84 # For exemple EC2 uses AWSQueryConnection and S3 uses
85 # AWSAuthConnection
86
87 # pylint: disable=attribute-defined-outside-init
88 self._tracer = get_tracer(
89 __name__, __version__, kwargs.get("tracer_provider")
90 )
91
92 wrap_function_wrapper(
93 "boto.connection",
94 "AWSQueryConnection.make_request",
95 self._patched_query_request,
96 )
97 wrap_function_wrapper(
98 "boto.connection",
99 "AWSAuthConnection.make_request",
100 self._patched_auth_request,
101 )
102
103 def _uninstrument(self, **kwargs):
104 unwrap(AWSQueryConnection, "make_request")
105 unwrap(AWSAuthConnection, "make_request")
106
107 def _common_request( # pylint: disable=too-many-locals
108 self,
109 args_name,
110 traced_args,
111 operation_name,
112 original_func,
113 instance,
114 args,
115 kwargs,
116 ):
117
118 endpoint_name = getattr(instance, "host").split(".")[0]
119
120 with self._tracer.start_as_current_span(
121 "{}.command".format(endpoint_name), kind=SpanKind.CONSUMER,
122 ) as span:
123 if args:
124 http_method = args[0]
125 span.resource = Resource(
126 attributes={
127 "endpoint": endpoint_name,
128 "http_method": http_method.lower(),
129 }
130 )
131 else:
132 span.resource = Resource(
133 attributes={"endpoint": endpoint_name}
134 )
135
136 # Original func returns a boto.connection.HTTPResponse object
137 result = original_func(*args, **kwargs)
138
139 if span.is_recording():
140 add_span_arg_tags(
141 span, endpoint_name, args, args_name, traced_args,
142 )
143
144 # Obtaining region name
145 region_name = _get_instance_region_name(instance)
146
147 meta = {
148 "aws.agent": "boto",
149 "aws.operation": operation_name,
150 }
151 if region_name:
152 meta["aws.region"] = region_name
153
154 for key, value in meta.items():
155 span.set_attribute(key, value)
156
157 span.set_attribute(
158 "http.status_code", getattr(result, "status")
159 )
160 span.set_attribute("http.method", getattr(result, "_method"))
161
162 return result
163
164 def _patched_query_request(self, original_func, instance, args, kwargs):
165
166 return self._common_request(
167 ("operation_name", "params", "path", "verb"),
168 ["operation_name", "params", "path"],
169 args[0] if args else None,
170 original_func,
171 instance,
172 args,
173 kwargs,
174 )
175
176 def _patched_auth_request(self, original_func, instance, args, kwargs):
177 operation_name = None
178
179 frame = currentframe().f_back
180 operation_name = None
181 while frame:
182 if frame.f_code.co_name == "make_request":
183 operation_name = frame.f_back.f_code.co_name
184 break
185 frame = frame.f_back
186
187 return self._common_request(
188 (
189 "method",
190 "path",
191 "headers",
192 "data",
193 "host",
194 "auth_path",
195 "sender",
196 ),
197 ["path", "data", "host"],
198 operation_name,
199 original_func,
200 instance,
201 args,
202 kwargs,
203 )
204
205
206 def flatten_dict(dict_, sep=".", prefix=""):
207 """
208 Returns a normalized dict of depth 1 with keys in order of embedding
209 """
210 # NOTE: This should probably be in `opentelemetry.instrumentation.utils`.
211 # adapted from https://stackoverflow.com/a/19647596
212 return (
213 {
214 prefix + sep + k if prefix else k: v
215 for kk, vv in dict_.items()
216 for k, v in flatten_dict(vv, sep, kk).items()
217 }
218 if isinstance(dict_, dict)
219 else {prefix: dict_}
220 )
221
222
223 def add_span_arg_tags(span, aws_service, args, args_names, args_traced):
224 def truncate_arg_value(value, max_len=1024):
225 """Truncate values which are bytes and greater than `max_len`.
226 Useful for parameters like "Body" in `put_object` operations.
227 """
228 if isinstance(value, bytes) and len(value) > max_len:
229 return b"..."
230
231 return value
232
233 if not span.is_recording():
234 return
235
236 # Do not trace `Key Management Service` or `Secure Token Service` API calls
237 # over concerns of security leaks.
238 if aws_service not in {"kms", "sts"}:
239 tags = dict(
240 (name, value)
241 for (name, value) in zip(args_names, args)
242 if name in args_traced
243 )
244 tags = flatten_dict(tags)
245
246 for param_key, value in tags.items():
247 if param_key in SERVICE_PARAMS_BLOCK_LIST.get(aws_service, {}):
248 continue
249
250 span.set_attribute(param_key, truncate_arg_value(value))
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py
@@ -120,18 +120,10 @@
with self._tracer.start_as_current_span(
"{}.command".format(endpoint_name), kind=SpanKind.CONSUMER,
) as span:
+ span.set_attribute("endpoint", endpoint_name)
if args:
http_method = args[0]
- span.resource = Resource(
- attributes={
- "endpoint": endpoint_name,
- "http_method": http_method.lower(),
- }
- )
- else:
- span.resource = Resource(
- attributes={"endpoint": endpoint_name}
- )
+ span.set_attribute("http_method", http_method.lower())
# Original func returns a boto.connection.HTTPResponse object
result = original_func(*args, **kwargs)
|
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py\n@@ -120,18 +120,10 @@\n with self._tracer.start_as_current_span(\n \"{}.command\".format(endpoint_name), kind=SpanKind.CONSUMER,\n ) as span:\n+ span.set_attribute(\"endpoint\", endpoint_name)\n if args:\n http_method = args[0]\n- span.resource = Resource(\n- attributes={\n- \"endpoint\": endpoint_name,\n- \"http_method\": http_method.lower(),\n- }\n- )\n- else:\n- span.resource = Resource(\n- attributes={\"endpoint\": endpoint_name}\n- )\n+ span.set_attribute(\"http_method\", http_method.lower())\n \n # Original func returns a boto.connection.HTTPResponse object\n result = original_func(*args, **kwargs)\n", "issue": "Resource must be immutable\nThe boto instrumentation changes a span's resource to set attributes, this goes against the spec.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInstrument `Boto`_ to trace service requests.\n\nThere are two options for instrumenting code. The first option is to use the\n``opentelemetry-instrument`` executable which will automatically\ninstrument your Boto client. The second is to programmatically enable\ninstrumentation via the following code:\n\n.. _boto: https://pypi.org/project/boto/\n\nUsage\n-----\n\n.. 
code:: python\n\n from opentelemetry.instrumentation.boto import BotoInstrumentor\n import boto\n\n\n # Instrument Boto\n BotoInstrumentor().instrument()\n\n # This will create a span with Boto-specific attributes\n ec2 = boto.ec2.connect_to_region(\"us-west-2\")\n ec2.get_all_instances()\n\nAPI\n---\n\"\"\"\n\nimport logging\nfrom inspect import currentframe\n\nfrom boto.connection import AWSAuthConnection, AWSQueryConnection\nfrom wrapt import wrap_function_wrapper\n\nfrom opentelemetry.instrumentation.boto.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.sdk.trace import Resource\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nSERVICE_PARAMS_BLOCK_LIST = {\"s3\": [\"params.Body\"]}\n\n\ndef _get_instance_region_name(instance):\n region = getattr(instance, \"region\", None)\n\n if not region:\n return None\n if isinstance(region, str):\n return region.split(\":\")[1]\n return region.name\n\n\nclass BotoInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for Boto\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_boto = None\n\n def _instrument(self, **kwargs):\n # AWSQueryConnection and AWSAuthConnection are two different classes\n # called by different services for connection.\n # For exemple EC2 uses AWSQueryConnection and S3 uses\n # AWSAuthConnection\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = get_tracer(\n __name__, __version__, kwargs.get(\"tracer_provider\")\n )\n\n wrap_function_wrapper(\n \"boto.connection\",\n \"AWSQueryConnection.make_request\",\n self._patched_query_request,\n )\n wrap_function_wrapper(\n \"boto.connection\",\n \"AWSAuthConnection.make_request\",\n self._patched_auth_request,\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(AWSQueryConnection, \"make_request\")\n unwrap(AWSAuthConnection, \"make_request\")\n\n def _common_request( # pylint: disable=too-many-locals\n self,\n args_name,\n traced_args,\n operation_name,\n original_func,\n instance,\n args,\n kwargs,\n ):\n\n endpoint_name = getattr(instance, \"host\").split(\".\")[0]\n\n with self._tracer.start_as_current_span(\n \"{}.command\".format(endpoint_name), kind=SpanKind.CONSUMER,\n ) as span:\n if args:\n http_method = args[0]\n span.resource = Resource(\n attributes={\n \"endpoint\": endpoint_name,\n \"http_method\": http_method.lower(),\n }\n )\n else:\n span.resource = Resource(\n attributes={\"endpoint\": endpoint_name}\n )\n\n # Original func returns a boto.connection.HTTPResponse object\n result = original_func(*args, **kwargs)\n\n if span.is_recording():\n add_span_arg_tags(\n span, endpoint_name, args, args_name, traced_args,\n )\n\n # Obtaining region name\n region_name = _get_instance_region_name(instance)\n\n meta = {\n \"aws.agent\": \"boto\",\n \"aws.operation\": operation_name,\n }\n if region_name:\n meta[\"aws.region\"] = region_name\n\n for key, value in meta.items():\n span.set_attribute(key, value)\n\n span.set_attribute(\n \"http.status_code\", getattr(result, \"status\")\n )\n span.set_attribute(\"http.method\", getattr(result, \"_method\"))\n\n return result\n\n def _patched_query_request(self, original_func, instance, args, kwargs):\n\n return self._common_request(\n (\"operation_name\", \"params\", \"path\", \"verb\"),\n [\"operation_name\", \"params\", \"path\"],\n args[0] if args else None,\n original_func,\n instance,\n 
args,\n kwargs,\n )\n\n def _patched_auth_request(self, original_func, instance, args, kwargs):\n operation_name = None\n\n frame = currentframe().f_back\n operation_name = None\n while frame:\n if frame.f_code.co_name == \"make_request\":\n operation_name = frame.f_back.f_code.co_name\n break\n frame = frame.f_back\n\n return self._common_request(\n (\n \"method\",\n \"path\",\n \"headers\",\n \"data\",\n \"host\",\n \"auth_path\",\n \"sender\",\n ),\n [\"path\", \"data\", \"host\"],\n operation_name,\n original_func,\n instance,\n args,\n kwargs,\n )\n\n\ndef flatten_dict(dict_, sep=\".\", prefix=\"\"):\n \"\"\"\n Returns a normalized dict of depth 1 with keys in order of embedding\n \"\"\"\n # NOTE: This should probably be in `opentelemetry.instrumentation.utils`.\n # adapted from https://stackoverflow.com/a/19647596\n return (\n {\n prefix + sep + k if prefix else k: v\n for kk, vv in dict_.items()\n for k, v in flatten_dict(vv, sep, kk).items()\n }\n if isinstance(dict_, dict)\n else {prefix: dict_}\n )\n\n\ndef add_span_arg_tags(span, aws_service, args, args_names, args_traced):\n def truncate_arg_value(value, max_len=1024):\n \"\"\"Truncate values which are bytes and greater than `max_len`.\n Useful for parameters like \"Body\" in `put_object` operations.\n \"\"\"\n if isinstance(value, bytes) and len(value) > max_len:\n return b\"...\"\n\n return value\n\n if not span.is_recording():\n return\n\n # Do not trace `Key Management Service` or `Secure Token Service` API calls\n # over concerns of security leaks.\n if aws_service not in {\"kms\", \"sts\"}:\n tags = dict(\n (name, value)\n for (name, value) in zip(args_names, args)\n if name in args_traced\n )\n tags = flatten_dict(tags)\n\n for param_key, value in tags.items():\n if param_key in SERVICE_PARAMS_BLOCK_LIST.get(aws_service, {}):\n continue\n\n span.set_attribute(param_key, truncate_arg_value(value))\n", "path": "instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInstrument `Boto`_ to trace service requests.\n\nThere are two options for instrumenting code. The first option is to use the\n``opentelemetry-instrument`` executable which will automatically\ninstrument your Boto client. The second is to programmatically enable\ninstrumentation via the following code:\n\n.. _boto: https://pypi.org/project/boto/\n\nUsage\n-----\n\n.. 
code:: python\n\n from opentelemetry.instrumentation.boto import BotoInstrumentor\n import boto\n\n\n # Instrument Boto\n BotoInstrumentor().instrument()\n\n # This will create a span with Boto-specific attributes\n ec2 = boto.ec2.connect_to_region(\"us-west-2\")\n ec2.get_all_instances()\n\nAPI\n---\n\"\"\"\n\nimport logging\nfrom inspect import currentframe\n\nfrom boto.connection import AWSAuthConnection, AWSQueryConnection\nfrom wrapt import wrap_function_wrapper\n\nfrom opentelemetry.instrumentation.boto.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.sdk.trace import Resource\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nSERVICE_PARAMS_BLOCK_LIST = {\"s3\": [\"params.Body\"]}\n\n\ndef _get_instance_region_name(instance):\n region = getattr(instance, \"region\", None)\n\n if not region:\n return None\n if isinstance(region, str):\n return region.split(\":\")[1]\n return region.name\n\n\nclass BotoInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for Boto\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_boto = None\n\n def _instrument(self, **kwargs):\n # AWSQueryConnection and AWSAuthConnection are two different classes\n # called by different services for connection.\n # For exemple EC2 uses AWSQueryConnection and S3 uses\n # AWSAuthConnection\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = get_tracer(\n __name__, __version__, kwargs.get(\"tracer_provider\")\n )\n\n wrap_function_wrapper(\n \"boto.connection\",\n \"AWSQueryConnection.make_request\",\n self._patched_query_request,\n )\n wrap_function_wrapper(\n \"boto.connection\",\n \"AWSAuthConnection.make_request\",\n self._patched_auth_request,\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(AWSQueryConnection, \"make_request\")\n unwrap(AWSAuthConnection, \"make_request\")\n\n def _common_request( # pylint: disable=too-many-locals\n self,\n args_name,\n traced_args,\n operation_name,\n original_func,\n instance,\n args,\n kwargs,\n ):\n\n endpoint_name = getattr(instance, \"host\").split(\".\")[0]\n\n with self._tracer.start_as_current_span(\n \"{}.command\".format(endpoint_name), kind=SpanKind.CONSUMER,\n ) as span:\n span.set_attribute(\"endpoint\", endpoint_name)\n if args:\n http_method = args[0]\n span.set_attribute(\"http_method\", http_method.lower())\n\n # Original func returns a boto.connection.HTTPResponse object\n result = original_func(*args, **kwargs)\n\n if span.is_recording():\n add_span_arg_tags(\n span, endpoint_name, args, args_name, traced_args,\n )\n\n # Obtaining region name\n region_name = _get_instance_region_name(instance)\n\n meta = {\n \"aws.agent\": \"boto\",\n \"aws.operation\": operation_name,\n }\n if region_name:\n meta[\"aws.region\"] = region_name\n\n for key, value in meta.items():\n span.set_attribute(key, value)\n\n span.set_attribute(\n \"http.status_code\", getattr(result, \"status\")\n )\n span.set_attribute(\"http.method\", getattr(result, \"_method\"))\n\n return result\n\n def _patched_query_request(self, original_func, instance, args, kwargs):\n\n return self._common_request(\n (\"operation_name\", \"params\", \"path\", \"verb\"),\n [\"operation_name\", \"params\", \"path\"],\n args[0] if args else None,\n original_func,\n instance,\n args,\n kwargs,\n )\n\n def _patched_auth_request(self, original_func, instance, args, kwargs):\n 
operation_name = None\n\n frame = currentframe().f_back\n operation_name = None\n while frame:\n if frame.f_code.co_name == \"make_request\":\n operation_name = frame.f_back.f_code.co_name\n break\n frame = frame.f_back\n\n return self._common_request(\n (\n \"method\",\n \"path\",\n \"headers\",\n \"data\",\n \"host\",\n \"auth_path\",\n \"sender\",\n ),\n [\"path\", \"data\", \"host\"],\n operation_name,\n original_func,\n instance,\n args,\n kwargs,\n )\n\n\ndef flatten_dict(dict_, sep=\".\", prefix=\"\"):\n \"\"\"\n Returns a normalized dict of depth 1 with keys in order of embedding\n \"\"\"\n # NOTE: This should probably be in `opentelemetry.instrumentation.utils`.\n # adapted from https://stackoverflow.com/a/19647596\n return (\n {\n prefix + sep + k if prefix else k: v\n for kk, vv in dict_.items()\n for k, v in flatten_dict(vv, sep, kk).items()\n }\n if isinstance(dict_, dict)\n else {prefix: dict_}\n )\n\n\ndef add_span_arg_tags(span, aws_service, args, args_names, args_traced):\n def truncate_arg_value(value, max_len=1024):\n \"\"\"Truncate values which are bytes and greater than `max_len`.\n Useful for parameters like \"Body\" in `put_object` operations.\n \"\"\"\n if isinstance(value, bytes) and len(value) > max_len:\n return b\"...\"\n\n return value\n\n if not span.is_recording():\n return\n\n # Do not trace `Key Management Service` or `Secure Token Service` API calls\n # over concerns of security leaks.\n if aws_service not in {\"kms\", \"sts\"}:\n tags = dict(\n (name, value)\n for (name, value) in zip(args_names, args)\n if name in args_traced\n )\n tags = flatten_dict(tags)\n\n for param_key, value in tags.items():\n if param_key in SERVICE_PARAMS_BLOCK_LIST.get(aws_service, {}):\n continue\n\n span.set_attribute(param_key, truncate_arg_value(value))\n", "path": "instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py"}]}
| 2,552 | 273 |
gh_patches_debug_13964
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-701
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Predict zero for nodata pixels on semantic segmentation
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/task/semantic_segmentation.py`
Content:
```
1 from typing import List
2 import logging
3
4 import numpy as np
5
6 from .task import Task
7 from rastervision.core.box import Box
8 from rastervision.data.scene import Scene
9 from rastervision.data.label import SemanticSegmentationLabels
10
11 log = logging.getLogger(__name__)
12
13
14 def get_random_sample_train_windows(label_store, chip_size, class_map, extent,
15 chip_options, filter_windows):
16 prob = chip_options.negative_survival_probability
17 target_count_threshold = chip_options.target_count_threshold
18 target_classes = chip_options.target_classes
19 chips_per_scene = chip_options.chips_per_scene
20
21 if not target_classes:
22 all_class_ids = [item.id for item in class_map.get_items()]
23 target_classes = all_class_ids
24
25 windows = []
26 attempts = 0
27 while (attempts < chips_per_scene):
28 candidate_window = extent.make_random_square(chip_size)
29 if not filter_windows([candidate_window]):
30 continue
31 attempts = attempts + 1
32
33 if (prob >= 1.0):
34 windows.append(candidate_window)
35 elif attempts == chips_per_scene and len(windows) == 0:
36 windows.append(candidate_window)
37 else:
38 good = label_store.enough_target_pixels(
39 candidate_window, target_count_threshold, target_classes)
40 if good or (np.random.rand() < prob):
41 windows.append(candidate_window)
42
43 return windows
44
45
46 class SemanticSegmentation(Task):
47 """Task-derived type that implements the semantic segmentation task."""
48
49 def get_train_windows(self, scene: Scene) -> List[Box]:
50 """Get training windows covering a scene.
51
52 Args:
53 scene: The scene over-which windows are to be generated.
54
55 Returns:
56 A list of windows, list(Box)
57
58 """
59
60 def filter_windows(windows):
61 if scene.aoi_polygons:
62 windows = Box.filter_by_aoi(windows, scene.aoi_polygons)
63 return windows
64
65 raster_source = scene.raster_source
66 extent = raster_source.get_extent()
67 label_store = scene.ground_truth_label_source
68 chip_size = self.config.chip_size
69
70 chip_options = self.config.chip_options
71
72 if chip_options.window_method == 'random_sample':
73 return get_random_sample_train_windows(
74 label_store, chip_size, self.config.class_map, extent,
75 chip_options, filter_windows)
76 elif chip_options.window_method == 'sliding':
77 stride = chip_options.stride
78 if stride is None:
79 stride = chip_size / 2
80
81 return list(
82 filter_windows((extent.get_windows(chip_size, stride))))
83
84 def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:
85 """Get the training labels for the given window in the given scene.
86
87 Args:
88 window: The window over-which the labels are to be
89 retrieved.
90 scene: The scene from-which the window of labels is to be
91 extracted.
92
93 Returns:
94 An appropriately-shaped 2d np.ndarray with the labels
95 encoded as packed pixels.
96
97 """
98 label_store = scene.ground_truth_label_source
99 return label_store.get_labels(window)
100
101 def get_predict_windows(self, extent: Box) -> List[Box]:
102 """Get windows over-which predictions will be calculated.
103
104 Args:
105 extent: The overall extent of the area.
106
107 Returns:
108 An sequence of windows.
109
110 """
111 chip_size = self.config.chip_size
112 return extent.get_windows(chip_size, chip_size)
113
114 def post_process_predictions(self, labels, scene):
115 return labels
116
117 def save_debug_predict_image(self, scene, debug_dir_uri):
118 # TODO implement this
119 pass
120
121 def predict_scene(self, scene, tmp_dir):
122 """Predict on a single scene, and return the labels."""
123 log.info('Making predictions for scene')
124 raster_source = scene.raster_source
125 windows = self.get_predict_windows(raster_source.get_extent())
126
127 def label_fn(window):
128 chip = raster_source.get_chip(window)
129 if np.any(chip):
130 chip = raster_source.get_chip(window)
131 labels = self.backend.predict([chip], [window], tmp_dir)
132 label_arr = labels.get_label_arr(window)
133 else:
134 label_arr = np.zeros((window.get_height(), window.get_width()))
135 print('.', end='', flush=True)
136 return label_arr
137
138 return SemanticSegmentationLabels(windows, label_fn)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rastervision/task/semantic_segmentation.py b/rastervision/task/semantic_segmentation.py
--- a/rastervision/task/semantic_segmentation.py
+++ b/rastervision/task/semantic_segmentation.py
@@ -126,12 +126,12 @@
def label_fn(window):
chip = raster_source.get_chip(window)
- if np.any(chip):
- chip = raster_source.get_chip(window)
- labels = self.backend.predict([chip], [window], tmp_dir)
- label_arr = labels.get_label_arr(window)
- else:
- label_arr = np.zeros((window.get_height(), window.get_width()))
+ labels = self.backend.predict([chip], [window], tmp_dir)
+ label_arr = labels.get_label_arr(window)
+
+ # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)
+ label_arr[np.sum(chip, axis=2) == 0] = 0
+
print('.', end='', flush=True)
return label_arr
|
{"golden_diff": "diff --git a/rastervision/task/semantic_segmentation.py b/rastervision/task/semantic_segmentation.py\n--- a/rastervision/task/semantic_segmentation.py\n+++ b/rastervision/task/semantic_segmentation.py\n@@ -126,12 +126,12 @@\n \n def label_fn(window):\n chip = raster_source.get_chip(window)\n- if np.any(chip):\n- chip = raster_source.get_chip(window)\n- labels = self.backend.predict([chip], [window], tmp_dir)\n- label_arr = labels.get_label_arr(window)\n- else:\n- label_arr = np.zeros((window.get_height(), window.get_width()))\n+ labels = self.backend.predict([chip], [window], tmp_dir)\n+ label_arr = labels.get_label_arr(window)\n+\n+ # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)\n+ label_arr[np.sum(chip, axis=2) == 0] = 0\n+\n print('.', end='', flush=True)\n return label_arr\n", "issue": "Predict zero for nodata pixels on semantic segmentation\n\n", "before_files": [{"content": "from typing import List\nimport logging\n\nimport numpy as np\n\nfrom .task import Task\nfrom rastervision.core.box import Box\nfrom rastervision.data.scene import Scene\nfrom rastervision.data.label import SemanticSegmentationLabels\n\nlog = logging.getLogger(__name__)\n\n\ndef get_random_sample_train_windows(label_store, chip_size, class_map, extent,\n chip_options, filter_windows):\n prob = chip_options.negative_survival_probability\n target_count_threshold = chip_options.target_count_threshold\n target_classes = chip_options.target_classes\n chips_per_scene = chip_options.chips_per_scene\n\n if not target_classes:\n all_class_ids = [item.id for item in class_map.get_items()]\n target_classes = all_class_ids\n\n windows = []\n attempts = 0\n while (attempts < chips_per_scene):\n candidate_window = extent.make_random_square(chip_size)\n if not filter_windows([candidate_window]):\n continue\n attempts = attempts + 1\n\n if (prob >= 1.0):\n windows.append(candidate_window)\n elif attempts == chips_per_scene and len(windows) == 0:\n windows.append(candidate_window)\n else:\n good = label_store.enough_target_pixels(\n candidate_window, target_count_threshold, target_classes)\n if good or (np.random.rand() < prob):\n windows.append(candidate_window)\n\n return windows\n\n\nclass SemanticSegmentation(Task):\n \"\"\"Task-derived type that implements the semantic segmentation task.\"\"\"\n\n def get_train_windows(self, scene: Scene) -> List[Box]:\n \"\"\"Get training windows covering a scene.\n\n Args:\n scene: The scene over-which windows are to be generated.\n\n Returns:\n A list of windows, list(Box)\n\n \"\"\"\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\n\n def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:\n \"\"\"Get the training labels for the given window in the given scene.\n\n Args:\n window: The window over-which the labels are to be\n retrieved.\n scene: The scene from-which the 
window of labels is to be\n extracted.\n\n Returns:\n An appropriately-shaped 2d np.ndarray with the labels\n encoded as packed pixels.\n\n \"\"\"\n label_store = scene.ground_truth_label_source\n return label_store.get_labels(window)\n\n def get_predict_windows(self, extent: Box) -> List[Box]:\n \"\"\"Get windows over-which predictions will be calculated.\n\n Args:\n extent: The overall extent of the area.\n\n Returns:\n An sequence of windows.\n\n \"\"\"\n chip_size = self.config.chip_size\n return extent.get_windows(chip_size, chip_size)\n\n def post_process_predictions(self, labels, scene):\n return labels\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n\n def predict_scene(self, scene, tmp_dir):\n \"\"\"Predict on a single scene, and return the labels.\"\"\"\n log.info('Making predictions for scene')\n raster_source = scene.raster_source\n windows = self.get_predict_windows(raster_source.get_extent())\n\n def label_fn(window):\n chip = raster_source.get_chip(window)\n if np.any(chip):\n chip = raster_source.get_chip(window)\n labels = self.backend.predict([chip], [window], tmp_dir)\n label_arr = labels.get_label_arr(window)\n else:\n label_arr = np.zeros((window.get_height(), window.get_width()))\n print('.', end='', flush=True)\n return label_arr\n\n return SemanticSegmentationLabels(windows, label_fn)\n", "path": "rastervision/task/semantic_segmentation.py"}], "after_files": [{"content": "from typing import List\nimport logging\n\nimport numpy as np\n\nfrom .task import Task\nfrom rastervision.core.box import Box\nfrom rastervision.data.scene import Scene\nfrom rastervision.data.label import SemanticSegmentationLabels\n\nlog = logging.getLogger(__name__)\n\n\ndef get_random_sample_train_windows(label_store, chip_size, class_map, extent,\n chip_options, filter_windows):\n prob = chip_options.negative_survival_probability\n target_count_threshold = chip_options.target_count_threshold\n target_classes = chip_options.target_classes\n chips_per_scene = chip_options.chips_per_scene\n\n if not target_classes:\n all_class_ids = [item.id for item in class_map.get_items()]\n target_classes = all_class_ids\n\n windows = []\n attempts = 0\n while (attempts < chips_per_scene):\n candidate_window = extent.make_random_square(chip_size)\n if not filter_windows([candidate_window]):\n continue\n attempts = attempts + 1\n\n if (prob >= 1.0):\n windows.append(candidate_window)\n elif attempts == chips_per_scene and len(windows) == 0:\n windows.append(candidate_window)\n else:\n good = label_store.enough_target_pixels(\n candidate_window, target_count_threshold, target_classes)\n if good or (np.random.rand() < prob):\n windows.append(candidate_window)\n\n return windows\n\n\nclass SemanticSegmentation(Task):\n \"\"\"Task-derived type that implements the semantic segmentation task.\"\"\"\n\n def get_train_windows(self, scene: Scene) -> List[Box]:\n \"\"\"Get training windows covering a scene.\n\n Args:\n scene: The scene over-which windows are to be generated.\n\n Returns:\n A list of windows, list(Box)\n\n \"\"\"\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, 
chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\n\n def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:\n \"\"\"Get the training labels for the given window in the given scene.\n\n Args:\n window: The window over-which the labels are to be\n retrieved.\n scene: The scene from-which the window of labels is to be\n extracted.\n\n Returns:\n An appropriately-shaped 2d np.ndarray with the labels\n encoded as packed pixels.\n\n \"\"\"\n label_store = scene.ground_truth_label_source\n return label_store.get_labels(window)\n\n def get_predict_windows(self, extent: Box) -> List[Box]:\n \"\"\"Get windows over-which predictions will be calculated.\n\n Args:\n extent: The overall extent of the area.\n\n Returns:\n An sequence of windows.\n\n \"\"\"\n chip_size = self.config.chip_size\n return extent.get_windows(chip_size, chip_size)\n\n def post_process_predictions(self, labels, scene):\n return labels\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n\n def predict_scene(self, scene, tmp_dir):\n \"\"\"Predict on a single scene, and return the labels.\"\"\"\n log.info('Making predictions for scene')\n raster_source = scene.raster_source\n windows = self.get_predict_windows(raster_source.get_extent())\n\n def label_fn(window):\n chip = raster_source.get_chip(window)\n labels = self.backend.predict([chip], [window], tmp_dir)\n label_arr = labels.get_label_arr(window)\n\n # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)\n label_arr[np.sum(chip, axis=2) == 0] = 0\n\n print('.', end='', flush=True)\n return label_arr\n\n return SemanticSegmentationLabels(windows, label_fn)\n", "path": "rastervision/task/semantic_segmentation.py"}]}
| 1,538 | 231 |
gh_patches_debug_38244
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-7820
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
genpy hook breaks win32com.client.gencache.EnsureDispatch()
## Description of the issue
The [runtime hook for genpy](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py) creates a temporary directory for the genpy cache and overwrites `win32com.__gen_path__` accordingly: https://github.com/pyinstaller/pyinstaller/blob/14c53a9d9f7b9322cfc8e18ae1c6e415230fba22/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py#L42
This creates a new cache such as:
C:\\Users\\<USER>\\AppData\\Local\\Temp\\**tmpytt5_e6s**\\gen_py (let's call this **hook cache**)
If a genpy cache already exists in:
C:\\Users\\<USER>\\AppData\\Local\\Temp\\gen_py (let's call this one **global cache**)
(as caused by running the python script directly, instead of the pyinstaller version)
the global cache is found by `EnsureDispatch()` such that no new cache will be generated in the hook cache.
This should be fine as long as the global cache contains all required python modules. However, if it does not, the win32com package tries to generate the missing modules:
This leads to the first exception shown below: `ModuleNotFoundError`, which is expected behaviour as far as I understand this mechanism.
But now, when handling this exception, since the hook has overwritten `win32com.__gen_path__`, generating the missing modules is attempted in the hook cache although the global cache is being used currently.
This leads to the second exception shown below: `No such file or directory`.
I'm not sure about the correct way to fix this. I can think of at least the following two ways:
1. Prevent the win32com package from discovering the global cache such that an entirely new cache is generated each time.
2. Modify the runtime hook such that the missing modules are generated in the currently used cache (i.e. the global cache if one exists and the hook cache if not global cache was found).
### Context information (for bug reports)
* Output of `pyinstaller --version`: ```5.0.dev0```
* Version of Python: 3.9.2
* Platform: Windows
* How you installed Python: python.org/downloads
* Did you also try this on another platform? Does it work there? No, since this is only applicable to Windows.
### A minimal example program which shows the error
```
from win32com.client import gencache
excel = gencache.EnsureDispatch("Excel.Application")
if not input(">"):
# this will generate a cache that is complete for this "if" branch, but incomplete for the "else" branch.
print(excel.Selection)
else:
print(excel.Selection)
print(excel.Selection.Font)
```
To reproduce the error:
1. Open any excel sheet
2. Make sure that no global cache exists (simply delete it, if it exists)
3. Create the executable using: `pyinstaller bug_demo.py`
4. Run `bug_demo.exe`. It will prompt for an input. This should work no matter if you take the "if" branch (simply press Enter) or the "else" branch (provide some actual input, e.g. "a").
5. Run `bug_demo.py` directly using the python interpreter and just press Enter when it asks for input. This will create a global cache, which is incomplete for the "else" branch.
6. Run `bug_demo.exe` again. This time, it will only work if you take the "if" branch (simply press Enter). If you take the "else" branch (provide some actual input, e.g. "a"), the error shown below should occur.
### Stacktrace / full error message
```
(venv) PS C:\Users\<SNIP>> .\dist\bug_demo\bug_demo.exe
a
None
Traceback (most recent call last):
File "win32com\client\gencache.py", line 233, in GetModuleForCLSID
ModuleNotFoundError: No module named 'win32com.gen_py.00020813-0000-0000-C000-000000000046x0x1x9.Font'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "bug_demo.py", line 9, in <module>
print(excel.Selection.Font)
File "win32com\client\__init__.py", line 474, in __getattr__
File "win32com\client\__init__.py", line 466, in _ApplyTypes_
File "win32com\client\__init__.py", line 486, in _get_good_object_
File "win32com\client\__init__.py", line 502, in _get_good_object_
File "win32com\client\__init__.py", line 491, in _get_good_single_object_
File "win32com\client\__init__.py", line 96, in Dispatch
File "win32com\client\__init__.py", line 37, in __WrapDispatch
File "win32com\client\gencache.py", line 180, in GetClassForCLSID
File "win32com\client\gencache.py", line 241, in GetModuleForCLSID
File "win32com\client\makepy.py", line 319, in GenerateChildFromTypeLibSpec
File "win32com\client\genpy.py", line 1042, in generate_child
File "win32com\client\genpy.py", line 779, in open_writer
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\<USER>\\AppData\\Local\\Temp\\tmpytt5_e6s\\gen_py\\00020813-0000-0000-C000-000000000046x0x1x9\\Font.py.18792.temp'
[18792] Failed to execute script bug_demo
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2023, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12 # The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the
13 # gencache code to use a suitable directory other than the default in lib\site-packages\win32com\client\gen_py.
14 # PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable
15 # directory like 'C:\Program Files. That's the reason for %temp% directory.
16 #
17 # http://www.py2exe.org/index.cgi/UsingEnsureDispatch
18
19
20 def _pyi_rthook():
21 import atexit
22 import os
23 import shutil
24 import tempfile
25
26 # Put gen_py cache in temp directory.
27 supportdir = tempfile.mkdtemp()
28 # gen_py has to be put into directory 'gen_py'.
29 genpydir = os.path.join(supportdir, 'gen_py')
30
31 # Create 'gen_py' directory. This directory does not need to contain '__init__.py' file.
32 try:
33 # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory
34 # called 'gen_py'. This is the reason why to create this directory in supportdir'.
35 os.makedirs(genpydir)
36 # Remove temp directory at application exit and ignore any errors.
37 atexit.register(shutil.rmtree, supportdir, ignore_errors=True)
38 except OSError:
39 pass
40
41 # Override the default path to gen_py cache.
42 import win32com # noqa: E402
43
44 win32com.__gen_path__ = genpydir
45
46 # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test
47 # 'basic/test_pyttsx' will fail. Just removing that attribute for win32com fixes that and gencache is created
48 # properly.
49 if hasattr(win32com, '__loader__'):
50 del win32com.__loader__
51
52 # Ensure genpydir is in 'gen_py' module paths.
53 import win32com.gen_py # noqa: E402
54
55 win32com.gen_py.__path__.insert(0, genpydir)
56
57
58 _pyi_rthook()
59 del _pyi_rthook
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py
--- a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py
+++ b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py
@@ -9,12 +9,12 @@
# SPDX-License-Identifier: Apache-2.0
#-----------------------------------------------------------------------------
-# The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the
-# gencache code to use a suitable directory other than the default in lib\site-packages\win32com\client\gen_py.
-# PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable
-# directory like 'C:\Program Files. That's the reason for %temp% directory.
-#
-# http://www.py2exe.org/index.cgi/UsingEnsureDispatch
+# Put the cache generated by `win32com.client.gencache` into isolated temporary directory. Historically, this was
+# required due to earlier versions of `pywin32` using the `site-packages\win32com\client\gen_py` directory for
+# the cache by default. Nowadays, the default location for the cache seems to be in the configured temporary directory
+# (pointed to by TEMP or TMP, for example %LOCALAPPDATA%\Temp), so strictly speaking, the relocation is not necessary
+# anymore. But for the time being, we are keeping it around to isolate the frozen application from the rest of the
+# system.
def _pyi_rthook():
@@ -23,36 +23,22 @@
import shutil
import tempfile
- # Put gen_py cache in temp directory.
+ import win32com
+
+ # Create temporary directory. The actual cache directory needs to be named `gen_py`, so create a sub-directory.
supportdir = tempfile.mkdtemp()
- # gen_py has to be put into directory 'gen_py'.
+
genpydir = os.path.join(supportdir, 'gen_py')
+ os.makedirs(genpydir, exist_ok=True)
- # Create 'gen_py' directory. This directory does not need to contain '__init__.py' file.
- try:
- # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory
- # called 'gen_py'. This is the reason why to create this directory in supportdir'.
- os.makedirs(genpydir)
- # Remove temp directory at application exit and ignore any errors.
- atexit.register(shutil.rmtree, supportdir, ignore_errors=True)
- except OSError:
- pass
+ # Remove the teporary directory at application exit, ignoring errors.
+ atexit.register(shutil.rmtree, supportdir, ignore_errors=True)
# Override the default path to gen_py cache.
- import win32com # noqa: E402
-
win32com.__gen_path__ = genpydir
- # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test
- # 'basic/test_pyttsx' will fail. Just removing that attribute for win32com fixes that and gencache is created
- # properly.
- if hasattr(win32com, '__loader__'):
- del win32com.__loader__
-
- # Ensure genpydir is in 'gen_py' module paths.
- import win32com.gen_py # noqa: E402
-
- win32com.gen_py.__path__.insert(0, genpydir)
+ # Override the sub-module paths for win32com.gen_py run-time sub-package.
+ win32com.gen_py.__path__ = [genpydir]
_pyi_rthook()
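
For context, a quick way to confirm the isolation behaviour the reworked hook aims for is to inspect `win32com.__gen_path__` and `win32com.gen_py.__path__` from inside the frozen application. The snippet below is a minimal sanity check, not part of the patch; it only relies on the attributes the hook itself sets and on the default `tempfile` temp directory.

```python
# Hypothetical sanity check (not part of the patch): run inside the frozen app
# to confirm the gen_py cache now lives in an isolated temporary directory.
import tempfile

import win32com
import win32com.gen_py

# The hook points __gen_path__ at <temp dir>/gen_py and rebinds the package path.
assert win32com.__gen_path__.startswith(tempfile.gettempdir())
assert win32com.gen_py.__path__ == [win32com.__gen_path__]
print("gen_py cache isolated at:", win32com.__gen_path__)
```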
|
{"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py\n@@ -9,12 +9,12 @@\n # SPDX-License-Identifier: Apache-2.0\n #-----------------------------------------------------------------------------\n \n-# The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the\n-# gencache code to use a suitable directory other than the default in lib\\site-packages\\win32com\\client\\gen_py.\n-# PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable\n-# directory like 'C:\\Program Files. That's the reason for %temp% directory.\n-#\n-# http://www.py2exe.org/index.cgi/UsingEnsureDispatch\n+# Put the cache generated by `win32com.client.gencache` into isolated temporary directory. Historically, this was\n+# required due to earlier versions of `pywin32` using the `site-packages\\win32com\\client\\gen_py` directory for\n+# the cache by default. Nowadays, the default location for the cache seems to be in the configured temporary directory\n+# (pointed to by TEMP or TMP, for example %LOCALAPPDATA%\\Temp), so strictly speaking, the relocation is not necessary\n+# anymore. But for the time being, we are keeping it around to isolate the frozen application from the rest of the\n+# system.\n \n \n def _pyi_rthook():\n@@ -23,36 +23,22 @@\n import shutil\n import tempfile\n \n- # Put gen_py cache in temp directory.\n+ import win32com\n+\n+ # Create temporary directory. The actual cache directory needs to be named `gen_py`, so create a sub-directory.\n supportdir = tempfile.mkdtemp()\n- # gen_py has to be put into directory 'gen_py'.\n+\n genpydir = os.path.join(supportdir, 'gen_py')\n+ os.makedirs(genpydir, exist_ok=True)\n \n- # Create 'gen_py' directory. This directory does not need to contain '__init__.py' file.\n- try:\n- # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory\n- # called 'gen_py'. This is the reason why to create this directory in supportdir'.\n- os.makedirs(genpydir)\n- # Remove temp directory at application exit and ignore any errors.\n- atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n- except OSError:\n- pass\n+ # Remove the teporary directory at application exit, ignoring errors.\n+ atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n \n # Override the default path to gen_py cache.\n- import win32com # noqa: E402\n-\n win32com.__gen_path__ = genpydir\n \n- # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test\n- # 'basic/test_pyttsx' will fail. 
Just removing that attribute for win32com fixes that and gencache is created\n- # properly.\n- if hasattr(win32com, '__loader__'):\n- del win32com.__loader__\n-\n- # Ensure genpydir is in 'gen_py' module paths.\n- import win32com.gen_py # noqa: E402\n-\n- win32com.gen_py.__path__.insert(0, genpydir)\n+ # Override the sub-module paths for win32com.gen_py run-time sub-package.\n+ win32com.gen_py.__path__ = [genpydir]\n \n \n _pyi_rthook()\n", "issue": "genpy hook breaks win32com.client.gencache.EnsureDispatch()\n## Description of the issue\r\nThe [runtime hook for genpy](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py) creates a temporary directory for the genpy cache and overwrites `win32com.__gen_path__` accordingly: https://github.com/pyinstaller/pyinstaller/blob/14c53a9d9f7b9322cfc8e18ae1c6e415230fba22/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py#L42\r\nThis creates a new cache such as:\r\nC:\\\\Users\\\\<USER>\\\\AppData\\\\Local\\\\Temp\\\\**tmpytt5_e6s**\\\\gen_py (let's call this **hook cache**)\r\n\r\nIf a genpy cache already exists in:\r\nC:\\\\Users\\\\<USER>\\\\AppData\\\\Local\\\\Temp\\\\gen_py (let's call this one **global cache**)\r\n(as caused by running the python script directly, instead of the pyinstaller version)\r\nthe global cache is found by `EnsureDispatch()` such that no new cache will be generated in the hook cache.\r\n\r\nThis should be fine as long as the global cache contains all required python modules. However, if it does not, the win32com package tries to generate the missing modules:\r\nThis leads to the first exception shown below: `ModuleNotFoundError`, which is expected behaviour as far as I understand this mechanism.\r\n\r\nBut now, when handling this exception, since the hook has overwritten `win32com.__gen_path__`, generating the missing modules is attempted in the hook cache although the global cache is being used currently.\r\nThis leads to the second exception shown below: `No such file or directory`.\r\n\r\nI'm not sure about the correct way to fix this. I can think of at least the following two ways:\r\n1. Prevent the win32com package from discovering the global cache such that an entirely new cache is generated each time.\r\n2. Modify the runtime hook such that the missing modules are generated in the currently used cache (i.e. the global cache if one exists and the hook cache if not global cache was found).\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.0.dev0```\r\n* Version of Python: 3.9.2\r\n* Platform: Windows\r\n* How you installed Python: python.org/downloads\r\n* Did you also try this on another platform? Does it work there? No, since this is only applicable to Windows.\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\nfrom win32com.client import gencache\r\n\r\nexcel = gencache.EnsureDispatch(\"Excel.Application\")\r\n\r\nif not input(\">\"):\r\n # this will generate a cache that is complete for this \"if\" branch, but incomplete for the \"else\" branch.\r\n print(excel.Selection)\r\nelse:\r\n print(excel.Selection) \r\n print(excel.Selection.Font)\r\n\r\n```\r\n\r\nTo reproduce the error:\r\n1. Open any excel sheet\r\n2. Make sure that no global cache exists (simply delete it, if it exists)\r\n3. Create the executable using: `pyinstaller bug_demo.py`\r\n4. Run `bug_demo.exe`. It will prompt for an input. 
This should work no matter if you take the \"if\" branch (simply press Enter) or the \"else\" branch (provide some actual input, e.g. \"a\").\r\n5. Run `bug_demo.py` directly using the python interpreter and just press Enter when it asks for input. This will create a global cache, which is incomplete for the \"else\" branch.\r\n6. Run `bug_demo.exe` again. This time, it will only work if you take the \"if\" branch (simply press Enter). If you take the \"else\" branch (provide some actual input, e.g. \"a\"), the error shown below should occur.\r\n\r\n### Stacktrace / full error message\r\n\r\n\r\n```\r\n(venv) PS C:\\Users\\<SNIP>> .\\dist\\bug_demo\\bug_demo.exe\r\na\r\nNone\r\nTraceback (most recent call last):\r\n File \"win32com\\client\\gencache.py\", line 233, in GetModuleForCLSID\r\nModuleNotFoundError: No module named 'win32com.gen_py.00020813-0000-0000-C000-000000000046x0x1x9.Font'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"bug_demo.py\", line 9, in <module>\r\n print(excel.Selection.Font)\r\n File \"win32com\\client\\__init__.py\", line 474, in __getattr__\r\n File \"win32com\\client\\__init__.py\", line 466, in _ApplyTypes_\r\n File \"win32com\\client\\__init__.py\", line 486, in _get_good_object_\r\n File \"win32com\\client\\__init__.py\", line 502, in _get_good_object_\r\n File \"win32com\\client\\__init__.py\", line 491, in _get_good_single_object_\r\n File \"win32com\\client\\__init__.py\", line 96, in Dispatch\r\n File \"win32com\\client\\__init__.py\", line 37, in __WrapDispatch\r\n File \"win32com\\client\\gencache.py\", line 180, in GetClassForCLSID\r\n File \"win32com\\client\\gencache.py\", line 241, in GetModuleForCLSID\r\n File \"win32com\\client\\makepy.py\", line 319, in GenerateChildFromTypeLibSpec\r\n File \"win32com\\client\\genpy.py\", line 1042, in generate_child\r\n File \"win32com\\client\\genpy.py\", line 779, in open_writer\r\nFileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\<USER>\\\\AppData\\\\Local\\\\Temp\\\\tmpytt5_e6s\\\\gen_py\\\\00020813-0000-0000-C000-000000000046x0x1x9\\\\Font.py.18792.temp'\r\n[18792] Failed to execute script bug_demo\r\n\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\n# The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the\n# gencache code to use a suitable directory other than the default in lib\\site-packages\\win32com\\client\\gen_py.\n# PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable\n# directory like 'C:\\Program Files. That's the reason for %temp% directory.\n#\n# http://www.py2exe.org/index.cgi/UsingEnsureDispatch\n\n\ndef _pyi_rthook():\n import atexit\n import os\n import shutil\n import tempfile\n\n # Put gen_py cache in temp directory.\n supportdir = tempfile.mkdtemp()\n # gen_py has to be put into directory 'gen_py'.\n genpydir = os.path.join(supportdir, 'gen_py')\n\n # Create 'gen_py' directory. 
This directory does not need to contain '__init__.py' file.\n try:\n # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory\n # called 'gen_py'. This is the reason why to create this directory in supportdir'.\n os.makedirs(genpydir)\n # Remove temp directory at application exit and ignore any errors.\n atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n except OSError:\n pass\n\n # Override the default path to gen_py cache.\n import win32com # noqa: E402\n\n win32com.__gen_path__ = genpydir\n\n # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test\n # 'basic/test_pyttsx' will fail. Just removing that attribute for win32com fixes that and gencache is created\n # properly.\n if hasattr(win32com, '__loader__'):\n del win32com.__loader__\n\n # Ensure genpydir is in 'gen_py' module paths.\n import win32com.gen_py # noqa: E402\n\n win32com.gen_py.__path__.insert(0, genpydir)\n\n\n_pyi_rthook()\ndel _pyi_rthook\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\n# Put the cache generated by `win32com.client.gencache` into isolated temporary directory. Historically, this was\n# required due to earlier versions of `pywin32` using the `site-packages\\win32com\\client\\gen_py` directory for\n# the cache by default. Nowadays, the default location for the cache seems to be in the configured temporary directory\n# (pointed to by TEMP or TMP, for example %LOCALAPPDATA%\\Temp), so strictly speaking, the relocation is not necessary\n# anymore. But for the time being, we are keeping it around to isolate the frozen application from the rest of the\n# system.\n\n\ndef _pyi_rthook():\n import atexit\n import os\n import shutil\n import tempfile\n\n import win32com\n\n # Create temporary directory. The actual cache directory needs to be named `gen_py`, so create a sub-directory.\n supportdir = tempfile.mkdtemp()\n\n genpydir = os.path.join(supportdir, 'gen_py')\n os.makedirs(genpydir, exist_ok=True)\n\n # Remove the teporary directory at application exit, ignoring errors.\n atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n\n # Override the default path to gen_py cache.\n win32com.__gen_path__ = genpydir\n\n # Override the sub-module paths for win32com.gen_py run-time sub-package.\n win32com.gen_py.__path__ = [genpydir]\n\n\n_pyi_rthook()\ndel _pyi_rthook\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py"}]}
| 2,429 | 902 |
gh_patches_debug_41415
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-540
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create Collection event notification not fired for auto-create on default bucket
I was hacking around with Redis event notifications today. I setup two separate listeners with filters:
```
kinto.event_listeners = redis1 redis2
kinto.event_listeners.redis1.use = cliquet.listeners.redis
kinto.event_listeners.redis1.url = <url>
kinto.event_listeners.redis1.pool_size = 5
kinto.event_listeners.redis1.listname = kinto:collection:create
kinto.event_listeners.redis1.actions = create
kinto.event_listeners.redis1.resources = collection
kinto.event_listeners.redis2.use = cliquet.listeners.redis
kinto.event_listeners.redis2.url = <url>
kinto.event_listeners.redis2.pool_size = 5
kinto.event_listeners.redis2.listname = kinto:record:create
kinto.event_listeners.redis2.actions = create
kinto.event_listeners.redis2.resources = record
```
To test, I was using the default bucket, and then inserting a sample record into a collection that did not yet exist. Although the collection was auto-created, and the record inserted, only the create-record event was logged to Redis. If I create the collection manually, then the create-collection event is logged.
Summary: Auto-created collections on the default bucket are not firing event notifications.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/default_bucket/__init__.py`
Content:
```
1 import uuid
2
3 import six
4 from pyramid import httpexceptions
5 from pyramid.settings import asbool
6 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
7
8 from cliquet.errors import raise_invalid
9 from cliquet.utils import build_request, reapply_cors, hmac_digest
10 from cliquet.storage import exceptions as storage_exceptions
11
12 from kinto.authorization import RouteFactory
13 from kinto.views.buckets import Bucket
14 from kinto.views.collections import Collection
15
16
17 def create_bucket(request, bucket_id):
18 """Create a bucket if it doesn't exists."""
19 bucket_put = (request.method.lower() == 'put' and
20 request.path.endswith('buckets/default'))
21 # Do nothing if current request will already create the bucket.
22 if bucket_put:
23 return
24
25 # Do not intent to create multiple times per request (e.g. in batch).
26 already_created = request.bound_data.setdefault('buckets', {})
27 if bucket_id in already_created:
28 return
29
30 # Fake context to instantiate a Bucket resource.
31 context = RouteFactory(request)
32 context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id
33 resource = Bucket(request, context)
34 try:
35 bucket = resource.model.create_record({'id': bucket_id})
36 except storage_exceptions.UnicityError as e:
37 bucket = e.record
38 already_created[bucket_id] = bucket
39
40
41 def create_collection(request, bucket_id):
42 # Do nothing if current request does not involve a collection.
43 subpath = request.matchdict.get('subpath')
44 if not (subpath and subpath.startswith('collections/')):
45 return
46
47 collection_id = subpath.split('/')[1]
48 collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)
49
50 # Do not intent to create multiple times per request (e.g. in batch).
51 already_created = request.bound_data.setdefault('collections', {})
52 if collection_uri in already_created:
53 return
54
55 # Do nothing if current request will already create the collection.
56 collection_put = (request.method.lower() == 'put' and
57 request.path.endswith(collection_id))
58 if collection_put:
59 return
60
61 # Fake context to instantiate a Collection resource.
62 context = RouteFactory(request)
63 context.get_permission_object_id = lambda r, i: collection_uri
64
65 backup = request.matchdict
66 request.matchdict = dict(bucket_id=bucket_id,
67 id=collection_id,
68 **request.matchdict)
69 resource = Collection(request, context)
70 if not resource.model.id_generator.match(collection_id):
71 error_details = {
72 'location': 'path',
73 'description': "Invalid collection_id id"
74 }
75 raise_invalid(request, **error_details)
76 try:
77 collection = resource.model.create_record({'id': collection_id})
78 except storage_exceptions.UnicityError as e:
79 collection = e.record
80 already_created[collection_uri] = collection
81 request.matchdict = backup
82
83
84 def default_bucket(request):
85 if request.method.lower() == 'options':
86 path = request.path.replace('default', 'unknown')
87 subrequest = build_request(request, {
88 'method': 'OPTIONS',
89 'path': path
90 })
91 return request.invoke_subrequest(subrequest)
92
93 if Authenticated not in request.effective_principals:
94 # Pass through the forbidden_view_config
95 raise httpexceptions.HTTPForbidden()
96
97 settings = request.registry.settings
98
99 if asbool(settings['readonly']):
100 raise httpexceptions.HTTPMethodNotAllowed()
101
102 bucket_id = request.default_bucket_id
103 path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)
104 querystring = request.url[(request.url.index(request.path) +
105 len(request.path)):]
106
107 # Make sure bucket exists
108 create_bucket(request, bucket_id)
109
110 # Make sure the collection exists
111 create_collection(request, bucket_id)
112
113 subrequest = build_request(request, {
114 'method': request.method,
115 'path': path + querystring,
116 'body': request.body
117 })
118 subrequest.bound_data = request.bound_data
119
120 try:
121 response = request.invoke_subrequest(subrequest)
122 except httpexceptions.HTTPException as error:
123 is_redirect = error.status_code < 400
124 if error.content_type == 'application/json' or is_redirect:
125 response = reapply_cors(subrequest, error)
126 else:
127 # Ask the upper level to format the error.
128 raise error
129 return response
130
131
132 def default_bucket_id(request):
133 settings = request.registry.settings
134 secret = settings['userid_hmac_secret']
135 # Build the user unguessable bucket_id UUID from its user_id
136 digest = hmac_digest(secret, request.prefixed_userid)
137 return six.text_type(uuid.UUID(digest[:32]))
138
139
140 def get_user_info(request):
141 user_info = {
142 'id': request.prefixed_userid,
143 'bucket': request.default_bucket_id
144 }
145 return user_info
146
147
148 def includeme(config):
149 # Redirect default to the right endpoint
150 config.add_view(default_bucket,
151 route_name='default_bucket',
152 permission=NO_PERMISSION_REQUIRED)
153 config.add_view(default_bucket,
154 route_name='default_bucket_collection',
155 permission=NO_PERMISSION_REQUIRED)
156
157 config.add_route('default_bucket_collection',
158 '/buckets/default/{subpath:.*}')
159 config.add_route('default_bucket', '/buckets/default')
160
161 # Provide helpers
162 config.add_request_method(default_bucket_id, reify=True)
163 # Override Cliquet default user info
164 config.add_request_method(get_user_info)
165
166 config.add_api_capability(
167 "default_bucket",
168 description="The default bucket is an alias for a personal"
169 " bucket where collections are created implicitly.",
170 url="http://kinto.readthedocs.org/en/latest/api/1.x/"
171 "buckets.html#personal-bucket-default")
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py
--- a/kinto/plugins/default_bucket/__init__.py
+++ b/kinto/plugins/default_bucket/__init__.py
@@ -6,6 +6,7 @@
from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated
from cliquet.errors import raise_invalid
+from cliquet.events import ACTIONS
from cliquet.utils import build_request, reapply_cors, hmac_digest
from cliquet.storage import exceptions as storage_exceptions
@@ -27,14 +28,11 @@
if bucket_id in already_created:
return
- # Fake context to instantiate a Bucket resource.
- context = RouteFactory(request)
- context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id
- resource = Bucket(request, context)
- try:
- bucket = resource.model.create_record({'id': bucket_id})
- except storage_exceptions.UnicityError as e:
- bucket = e.record
+ bucket = resource_create_object(request=request,
+ resource_cls=Bucket,
+ uri='/buckets/%s' % bucket_id,
+ resource_name='bucket',
+ obj_id=bucket_id)
already_created[bucket_id] = bucket
@@ -58,27 +56,51 @@
if collection_put:
return
- # Fake context to instantiate a Collection resource.
- context = RouteFactory(request)
- context.get_permission_object_id = lambda r, i: collection_uri
-
- backup = request.matchdict
+ backup_matchdict = request.matchdict
request.matchdict = dict(bucket_id=bucket_id,
id=collection_id,
**request.matchdict)
- resource = Collection(request, context)
- if not resource.model.id_generator.match(collection_id):
+ collection = resource_create_object(request=request,
+ resource_cls=Collection,
+ uri=collection_uri,
+ resource_name='collection',
+ obj_id=collection_id)
+ already_created[collection_uri] = collection
+ request.matchdict = backup_matchdict
+
+
+def resource_create_object(request, resource_cls, uri, resource_name, obj_id):
+ """In the default bucket, the bucket and collection are implicitly
+ created. This helper instantiate the resource and simulate a request
+ with its RootFactory on the instantiated resource.
+ :returns: the created object
+ :rtype: dict
+ """
+ # Fake context to instantiate a resource.
+ context = RouteFactory(request)
+ context.get_permission_object_id = lambda r, i: uri
+
+ resource = resource_cls(request, context)
+
+ # Check that provided id is valid for this resource.
+ if not resource.model.id_generator.match(obj_id):
error_details = {
'location': 'path',
- 'description': "Invalid collection_id id"
+ 'description': "Invalid %s id" % resource_name
}
- raise_invalid(request, **error_details)
+ raise_invalid(resource.request, **error_details)
+
+ data = {'id': obj_id}
try:
- collection = resource.model.create_record({'id': collection_id})
+ obj = resource.model.create_record(data)
+ # Since the current request is not a resource (but a straight Service),
+ # we simulate a request on a resource.
+ # This will be used in the resource event payload.
+ resource.request.current_resource_name = resource_name
+ resource.postprocess(data, action=ACTIONS.CREATE)
except storage_exceptions.UnicityError as e:
- collection = e.record
- already_created[collection_uri] = collection
- request.matchdict = backup
+ obj = e.record
+ return obj
def default_bucket(request):
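
For context, one way to observe the effect of this patch is to register a subscriber for cliquet's `ResourceChanged` event: after the change, the implicit collection creation in the default bucket should surface with `resource_name == 'collection'` and `action == 'create'`. The snippet below is a minimal sketch, assuming cliquet's event class and a Pyramid configurator are available; it is not part of the patch.

```python
# Minimal sketch (assumes cliquet's ResourceChanged event and a Pyramid
# configurator): logs implicit collection creations in the default bucket.
from cliquet.events import ResourceChanged


def log_collection_creations(event):
    payload = event.payload
    if payload.get("resource_name") == "collection" and payload.get("action") == "create":
        print("collection created:", payload)


def includeme(config):
    # Register the subscriber for all resource-change events.
    config.add_subscriber(log_collection_creations, ResourceChanged)
```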
|
{"golden_diff": "diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py\n--- a/kinto/plugins/default_bucket/__init__.py\n+++ b/kinto/plugins/default_bucket/__init__.py\n@@ -6,6 +6,7 @@\n from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n \n from cliquet.errors import raise_invalid\n+from cliquet.events import ACTIONS\n from cliquet.utils import build_request, reapply_cors, hmac_digest\n from cliquet.storage import exceptions as storage_exceptions\n \n@@ -27,14 +28,11 @@\n if bucket_id in already_created:\n return\n \n- # Fake context to instantiate a Bucket resource.\n- context = RouteFactory(request)\n- context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id\n- resource = Bucket(request, context)\n- try:\n- bucket = resource.model.create_record({'id': bucket_id})\n- except storage_exceptions.UnicityError as e:\n- bucket = e.record\n+ bucket = resource_create_object(request=request,\n+ resource_cls=Bucket,\n+ uri='/buckets/%s' % bucket_id,\n+ resource_name='bucket',\n+ obj_id=bucket_id)\n already_created[bucket_id] = bucket\n \n \n@@ -58,27 +56,51 @@\n if collection_put:\n return\n \n- # Fake context to instantiate a Collection resource.\n- context = RouteFactory(request)\n- context.get_permission_object_id = lambda r, i: collection_uri\n-\n- backup = request.matchdict\n+ backup_matchdict = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n- resource = Collection(request, context)\n- if not resource.model.id_generator.match(collection_id):\n+ collection = resource_create_object(request=request,\n+ resource_cls=Collection,\n+ uri=collection_uri,\n+ resource_name='collection',\n+ obj_id=collection_id)\n+ already_created[collection_uri] = collection\n+ request.matchdict = backup_matchdict\n+\n+\n+def resource_create_object(request, resource_cls, uri, resource_name, obj_id):\n+ \"\"\"In the default bucket, the bucket and collection are implicitly\n+ created. This helper instantiate the resource and simulate a request\n+ with its RootFactory on the instantiated resource.\n+ :returns: the created object\n+ :rtype: dict\n+ \"\"\"\n+ # Fake context to instantiate a resource.\n+ context = RouteFactory(request)\n+ context.get_permission_object_id = lambda r, i: uri\n+\n+ resource = resource_cls(request, context)\n+\n+ # Check that provided id is valid for this resource.\n+ if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n- 'description': \"Invalid collection_id id\"\n+ 'description': \"Invalid %s id\" % resource_name\n }\n- raise_invalid(request, **error_details)\n+ raise_invalid(resource.request, **error_details)\n+\n+ data = {'id': obj_id}\n try:\n- collection = resource.model.create_record({'id': collection_id})\n+ obj = resource.model.create_record(data)\n+ # Since the current request is not a resource (but a straight Service),\n+ # we simulate a request on a resource.\n+ # This will be used in the resource event payload.\n+ resource.request.current_resource_name = resource_name\n+ resource.postprocess(data, action=ACTIONS.CREATE)\n except storage_exceptions.UnicityError as e:\n- collection = e.record\n- already_created[collection_uri] = collection\n- request.matchdict = backup\n+ obj = e.record\n+ return obj\n \n \n def default_bucket(request):\n", "issue": "Create Collection event notification not fired for auto-create on default bucket\nI was hacking around with Redis event notifications today. 
I setup two separate listeners with filters:\n\n```\nkinto.event_listeners = redis1 redis2\n\nkinto.event_listeners.redis1.use = cliquet.listeners.redis\nkinto.event_listeners.redis1.url = <url>\nkinto.event_listeners.redis1.pool_size = 5\nkinto.event_listeners.redis1.listname = kinto:collection:create\nkinto.event_listeners.redis1.actions = create\nkinto.event_listeners.redis1.resources = collection\n\nkinto.event_listeners.redis2.use = cliquet.listeners.redis\nkinto.event_listeners.redis2.url = <url>\nkinto.event_listeners.redis2.pool_size = 5\nkinto.event_listeners.redis2.listname = kinto:record:create\nkinto.event_listeners.redis2.actions = create\nkinto.event_listeners.redis2.resources = record\n```\n\nTo test, I was using the default bucket, and the inserting a sample record to a collection that did not yet exist. Although the collection was auto-created, and the record inserted, only the event for create record was logged to Redis. If I create the collection manually, then the create collection event is logged.\n\nSummary: Auto-created collections on the default bucket are not firing event notifications.\n\n", "before_files": [{"content": "import uuid\n\nimport six\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom cliquet.errors import raise_invalid\nfrom cliquet.utils import build_request, reapply_cors, hmac_digest\nfrom cliquet.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n # Fake context to instantiate a Bucket resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id\n resource = Bucket(request, context)\n try:\n bucket = resource.model.create_record({'id': bucket_id})\n except storage_exceptions.UnicityError as e:\n bucket = e.record\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n # Do not intent to create multiple times per request (e.g. 
in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n # Fake context to instantiate a Collection resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: collection_uri\n\n backup = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n resource = Collection(request, context)\n if not resource.model.id_generator.match(collection_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid collection_id id\"\n }\n raise_invalid(request, **error_details)\n try:\n collection = resource.model.create_record({'id': collection_id})\n except storage_exceptions.UnicityError as e:\n collection = e.record\n already_created[collection_uri] = collection\n request.matchdict = backup\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return six.text_type(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override Cliquet default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is 
an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"http://kinto.readthedocs.org/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}], "after_files": [{"content": "import uuid\n\nimport six\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom cliquet.errors import raise_invalid\nfrom cliquet.events import ACTIONS\nfrom cliquet.utils import build_request, reapply_cors, hmac_digest\nfrom cliquet.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n bucket = resource_create_object(request=request,\n resource_cls=Bucket,\n uri='/buckets/%s' % bucket_id,\n resource_name='bucket',\n obj_id=bucket_id)\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n backup_matchdict = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n collection = resource_create_object(request=request,\n resource_cls=Collection,\n uri=collection_uri,\n resource_name='collection',\n obj_id=collection_id)\n already_created[collection_uri] = collection\n request.matchdict = backup_matchdict\n\n\ndef resource_create_object(request, resource_cls, uri, resource_name, obj_id):\n \"\"\"In the default bucket, the bucket and collection are implicitly\n created. 
This helper instantiate the resource and simulate a request\n with its RootFactory on the instantiated resource.\n :returns: the created object\n :rtype: dict\n \"\"\"\n # Fake context to instantiate a resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: uri\n\n resource = resource_cls(request, context)\n\n # Check that provided id is valid for this resource.\n if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid %s id\" % resource_name\n }\n raise_invalid(resource.request, **error_details)\n\n data = {'id': obj_id}\n try:\n obj = resource.model.create_record(data)\n # Since the current request is not a resource (but a straight Service),\n # we simulate a request on a resource.\n # This will be used in the resource event payload.\n resource.request.current_resource_name = resource_name\n resource.postprocess(data, action=ACTIONS.CREATE)\n except storage_exceptions.UnicityError as e:\n obj = e.record\n return obj\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return six.text_type(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override Cliquet default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is an alias for a personal\"\n 
\" bucket where collections are created implicitly.\",\n url=\"http://kinto.readthedocs.org/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}]}
| 2,199 | 824 |
gh_patches_debug_36
|
rasdani/github-patches
|
git_diff
|
sonic-net__sonic-utilities-2090
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sonic-installer install fails in armhf
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
For more information about reporting issues, see
https://github.com/Azure/SONiC/wiki#report-issues
---------------------------------------------------
GENERAL SUPPORT INFORMATION
---------------------------------------------------
The GitHub issue tracker is for bug reports and feature requests.
General support can be found at the following locations:
- SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
#### Description
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
Sonic-installer failure log using install operation.
File “/usr/local/lib/python3.9/dist-packages/click/core.py”, line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File “/usr/local/lib/python3.9/dist-packages/click/core.py”, line 555, in invoke
return callback(*args, **kwargs)
File “/usr/local/lib/python3.9/dist-packages/sonic_installer/main.py”, line 543, in install
if not skip_platform_check and not bootloader.verify_image_platform(image_path):
File “/usr/local/lib/python3.9/dist-packages/sonic_installer/bootloader/uboot.py”, line 81, in verify_image_platform
return os.path.isfile(image_path)
NameError: name ‘os’ is not defined
## Steps to reproduce the issue
1. sonic-installer install <image>
#### Describe the results you received
#### Describe the results you expected
sonic-installer to work seamlessly.
#### Additional information you deem important (e.g. issue happens only occasionally)
#### Output of `show version`
```
# show version
SONiC Software Version: SONiC.HEAD.0-dirty-20220302.124544
Distribution: Debian 11.2
Kernel: 5.10.0-8-2-armmp
Build commit: 94b778c39
Build date: Wed Mar 2 08:25:34 UTC 2022
Built by: marvell@cpss-build1
Platform: armhf-nokia_ixs7215_52x-r0
HwSKU: Nokia-7215
ASIC: marvell
ASIC Count: 1
Serial Number: NK203110011
Model Number: 3HE16794AARA01
Hardware Revision: N/A
Uptime: 16:49:12 up 33 min, 1 user, load average: 0.21, 0.69, 0.82
Docker images:
REPOSITORY TAG IMAGE ID SIZE
docker-dhcp-relay latest 39c289f394ba 484MB
docker-teamd HEAD.0-dirty-20220302.124544 897670943f24 483MB
docker-teamd latest 897670943f24 483MB
docker-syncd-mrvl HEAD.0-dirty-20220302.124544 ded0f3c5116c 607MB
docker-syncd-mrvl latest ded0f3c5116c 607MB
docker-snmp HEAD.0-dirty-20220302.124544 50b2af07aa43 514MB
docker-snmp latest 50b2af07aa43 514MB
docker-sflow HEAD.0-dirty-20220302.124544 80d19598c760 484MB
docker-sflow latest 80d19598c760 484MB
docker-router-advertiser HEAD.0-dirty-20220302.124544 ff951f4fa02e 474MB
docker-router-advertiser latest ff951f4fa02e 474MB
docker-platform-monitor HEAD.0-dirty-20220302.124544 87406f9f212c 695MB
docker-platform-monitor latest 87406f9f212c 695MB
docker-orchagent HEAD.0-dirty-20220302.124544 fafd5ae1c574 571MB
docker-orchagent latest fafd5ae1c574 571MB
docker-nat HEAD.0-dirty-20220302.124544 c6a2c3a9d794 485MB
docker-nat latest c6a2c3a9d794 485MB
docker-mux HEAD.0-dirty-20220302.124544 e1246be5c510 492MB
docker-mux latest e1246be5c510 492MB
docker-macsec HEAD.0-dirty-20220302.124544 87fbc786165c 485MB
docker-macsec latest 87fbc786165c 485MB
docker-lldp HEAD.0-dirty-20220302.124544 498091e0d9f6 478MB
docker-lldp latest 498091e0d9f6 478MB
docker-fpm-frr HEAD.0-dirty-20220302.124544 95d1d947a343 497MB
docker-fpm-frr latest 95d1d947a343 497MB
docker-database HEAD.0-dirty-20220302.124544 3e5047261b1c 471MB
docker-database latest 3e5047261b1c 471MB
```
<!--
Also attach debug file produced by `sudo generate_dump`
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sonic_installer/bootloader/uboot.py`
Content:
```
1 """
2 Bootloader implementation for uboot based platforms
3 """
4
5 import platform
6 import subprocess
7
8 import click
9
10 from ..common import (
11 HOST_PATH,
12 IMAGE_DIR_PREFIX,
13 IMAGE_PREFIX,
14 run_command,
15 )
16 from .onie import OnieInstallerBootloader
17
18 class UbootBootloader(OnieInstallerBootloader):
19
20 NAME = 'uboot'
21
22 def get_installed_images(self):
23 images = []
24 proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, text=True, stdout=subprocess.PIPE)
25 (out, _) = proc.communicate()
26 image = out.rstrip()
27 if IMAGE_PREFIX in image:
28 images.append(image)
29 proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, text=True, stdout=subprocess.PIPE)
30 (out, _) = proc.communicate()
31 image = out.rstrip()
32 if IMAGE_PREFIX in image:
33 images.append(image)
34 return images
35
36 def get_next_image(self):
37 images = self.get_installed_images()
38 proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, text=True, stdout=subprocess.PIPE)
39 (out, _) = proc.communicate()
40 image = out.rstrip()
41 if "sonic_image_2" in image:
42 next_image_index = 1
43 else:
44 next_image_index = 0
45 return images[next_image_index]
46
47 def set_default_image(self, image):
48 images = self.get_installed_images()
49 if image in images[0]:
50 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"')
51 elif image in images[1]:
52 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"')
53 return True
54
55 def set_next_image(self, image):
56 images = self.get_installed_images()
57 if image in images[0]:
58 run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"')
59 elif image in images[1]:
60 run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"')
61 return True
62
63 def install_image(self, image_path):
64 run_command("bash " + image_path)
65
66 def remove_image(self, image):
67 click.echo('Updating next boot ...')
68 images = self.get_installed_images()
69 if image in images[0]:
70 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"')
71 run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"')
72 elif image in images[1]:
73 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"')
74 run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"')
75 image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)
76 click.echo('Removing image root filesystem...')
77 subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])
78 click.echo('Done')
79
80 def verify_image_platform(self, image_path):
81 return os.path.isfile(image_path)
82
83 @classmethod
84 def detect(cls):
85 arch = platform.machine()
86 return ("arm" in arch) or ("aarch64" in arch)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py
--- a/sonic_installer/bootloader/uboot.py
+++ b/sonic_installer/bootloader/uboot.py
@@ -4,6 +4,7 @@
import platform
import subprocess
+import os
import click
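The fix is the single `import os` line; everything else in `verify_image_platform` already worked. A standalone reduction of that call path (the method pulled out of its class and `self` dropped purely for brevity; the path below is illustrative):

```python
import os  # the module-level import the patch adds


def verify_image_platform(image_path: str) -> bool:
    # Without the import above, this line raises
    # "NameError: name 'os' is not defined", which is the traceback in the issue.
    return os.path.isfile(image_path)


print(verify_image_platform("/host/sonic-installer.bin"))  # False unless the file exists
```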
|
{"golden_diff": "diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py\n--- a/sonic_installer/bootloader/uboot.py\n+++ b/sonic_installer/bootloader/uboot.py\n@@ -4,6 +4,7 @@\n \n import platform\n import subprocess\n+import os\n \n import click\n", "issue": "sonic-installer install fails in armhf\n<!--\r\n If you are reporting a new issue, make sure that we do not have any duplicates\r\n already open. You can ensure this by searching the issue list for this\r\n repository. If there is a duplicate, please close your issue and add a comment\r\n to the existing issue instead.\r\n\r\n If you suspect your issue is a bug, please edit your issue description to\r\n include the BUG REPORT INFORMATION shown below. If you fail to provide this\r\n information within 7 days, we cannot debug your issue and will close it. We\r\n will, however, reopen it if you later provide the information.\r\n\r\n For more information about reporting issues, see\r\n https://github.com/Azure/SONiC/wiki#report-issues\r\n\r\n ---------------------------------------------------\r\n GENERAL SUPPORT INFORMATION\r\n ---------------------------------------------------\r\n\r\n The GitHub issue tracker is for bug reports and feature requests.\r\n General support can be found at the following locations:\r\n\r\n - SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject\r\n\r\n ---------------------------------------------------\r\n BUG REPORT INFORMATION\r\n ---------------------------------------------------\r\n Use the commands below to provide key information from your environment:\r\n You do NOT have to include this information if this is a FEATURE REQUEST\r\n-->\r\n\r\n#### Description\r\n\r\n<!--\r\n Briefly describe the problem you are having in a few paragraphs.\r\n-->\r\nSonic-installer failure log using install operation.\r\n\r\nFile \u201c/usr/local/lib/python3.9/dist-packages/click/core.py\u201d, line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \u201c/usr/local/lib/python3.9/dist-packages/click/core.py\u201d, line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \u201c/usr/local/lib/python3.9/dist-packages/sonic_installer/main.py\u201d, line 543, in install\r\n if not skip_platform_check and not bootloader.verify_image_platform(image_path):\r\n File \u201c/usr/local/lib/python3.9/dist-packages/sonic_installer/bootloader/uboot.py\u201d, line 81, in verify_image_platform\r\n return os.path.isfile(image_path)\r\nNameError: name \u2018os\u2019 is not defined\r\n\r\n## Steps to reproduce the issue\r\n1. sonic-installer install <image>\r\n\r\n\r\n#### Describe the results you received\r\n\r\n\r\n#### Describe the results you expected\r\nsonic-installer to work seamlessly.\r\n\r\n#### Additional information you deem important (e.g. 
issue happens only occasionally)\r\n\r\n\r\n#### Output of `show version`\r\n\r\n```\r\n# show version\r\n\r\nSONiC Software Version: SONiC.HEAD.0-dirty-20220302.124544\r\nDistribution: Debian 11.2\r\nKernel: 5.10.0-8-2-armmp\r\nBuild commit: 94b778c39\r\nBuild date: Wed Mar 2 08:25:34 UTC 2022\r\nBuilt by: marvell@cpss-build1\r\n\r\nPlatform: armhf-nokia_ixs7215_52x-r0\r\nHwSKU: Nokia-7215\r\nASIC: marvell\r\nASIC Count: 1\r\nSerial Number: NK203110011\r\nModel Number: 3HE16794AARA01\r\nHardware Revision: N/A\r\nUptime: 16:49:12 up 33 min, 1 user, load average: 0.21, 0.69, 0.82\r\n\r\nDocker images:\r\nREPOSITORY TAG IMAGE ID SIZE\r\ndocker-dhcp-relay latest 39c289f394ba 484MB\r\ndocker-teamd HEAD.0-dirty-20220302.124544 897670943f24 483MB\r\ndocker-teamd latest 897670943f24 483MB\r\ndocker-syncd-mrvl HEAD.0-dirty-20220302.124544 ded0f3c5116c 607MB\r\ndocker-syncd-mrvl latest ded0f3c5116c 607MB\r\ndocker-snmp HEAD.0-dirty-20220302.124544 50b2af07aa43 514MB\r\ndocker-snmp latest 50b2af07aa43 514MB\r\ndocker-sflow HEAD.0-dirty-20220302.124544 80d19598c760 484MB\r\ndocker-sflow latest 80d19598c760 484MB\r\ndocker-router-advertiser HEAD.0-dirty-20220302.124544 ff951f4fa02e 474MB\r\ndocker-router-advertiser latest ff951f4fa02e 474MB\r\ndocker-platform-monitor HEAD.0-dirty-20220302.124544 87406f9f212c 695MB\r\ndocker-platform-monitor latest 87406f9f212c 695MB\r\ndocker-orchagent HEAD.0-dirty-20220302.124544 fafd5ae1c574 571MB\r\ndocker-orchagent latest fafd5ae1c574 571MB\r\ndocker-nat HEAD.0-dirty-20220302.124544 c6a2c3a9d794 485MB\r\ndocker-nat latest c6a2c3a9d794 485MB\r\ndocker-mux HEAD.0-dirty-20220302.124544 e1246be5c510 492MB\r\ndocker-mux latest e1246be5c510 492MB\r\ndocker-macsec HEAD.0-dirty-20220302.124544 87fbc786165c 485MB\r\ndocker-macsec latest 87fbc786165c 485MB\r\ndocker-lldp HEAD.0-dirty-20220302.124544 498091e0d9f6 478MB\r\ndocker-lldp latest 498091e0d9f6 478MB\r\ndocker-fpm-frr HEAD.0-dirty-20220302.124544 95d1d947a343 497MB\r\ndocker-fpm-frr latest 95d1d947a343 497MB\r\ndocker-database HEAD.0-dirty-20220302.124544 3e5047261b1c 471MB\r\ndocker-database latest 3e5047261b1c 471MB\r\n\r\n```\r\n\r\n<!--\r\n Also attach debug file produced by `sudo generate_dump`\r\n-->\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nBootloader implementation for uboot based platforms\n\"\"\"\n\nimport platform\nimport subprocess\n\nimport click\n\nfrom ..common import (\n HOST_PATH,\n IMAGE_DIR_PREFIX,\n IMAGE_PREFIX,\n run_command,\n)\nfrom .onie import OnieInstallerBootloader\n\nclass UbootBootloader(OnieInstallerBootloader):\n\n NAME = 'uboot'\n\n def get_installed_images(self):\n images = []\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_1\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_2\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n return images\n\n def get_next_image(self):\n images = self.get_installed_images()\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n boot_next\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if \"sonic_image_2\" in image:\n next_image_index = 1\n else:\n next_image_index = 0\n return images[next_image_index]\n\n def set_default_image(self, image):\n images = self.get_installed_images()\n if image in 
images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n return True\n\n def set_next_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_2\"')\n return True\n\n def install_image(self, image_path):\n run_command(\"bash \" + image_path)\n\n def remove_image(self, image):\n click.echo('Updating next boot ...')\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n run_command('/usr/bin/fw_setenv sonic_version_1 \"NONE\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n run_command('/usr/bin/fw_setenv sonic_version_2 \"NONE\"')\n image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)\n click.echo('Removing image root filesystem...')\n subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])\n click.echo('Done')\n\n def verify_image_platform(self, image_path):\n return os.path.isfile(image_path)\n\n @classmethod\n def detect(cls):\n arch = platform.machine()\n return (\"arm\" in arch) or (\"aarch64\" in arch)\n", "path": "sonic_installer/bootloader/uboot.py"}], "after_files": [{"content": "\"\"\"\nBootloader implementation for uboot based platforms\n\"\"\"\n\nimport platform\nimport subprocess\nimport os\n\nimport click\n\nfrom ..common import (\n HOST_PATH,\n IMAGE_DIR_PREFIX,\n IMAGE_PREFIX,\n run_command,\n)\nfrom .onie import OnieInstallerBootloader\n\nclass UbootBootloader(OnieInstallerBootloader):\n\n NAME = 'uboot'\n\n def get_installed_images(self):\n images = []\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_1\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_2\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n return images\n\n def get_next_image(self):\n images = self.get_installed_images()\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n boot_next\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if \"sonic_image_2\" in image:\n next_image_index = 1\n else:\n next_image_index = 0\n return images[next_image_index]\n\n def set_default_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n return True\n\n def set_next_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_2\"')\n return True\n\n def install_image(self, image_path):\n run_command(\"bash \" + image_path)\n\n def remove_image(self, image):\n click.echo('Updating next boot ...')\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n run_command('/usr/bin/fw_setenv sonic_version_1 
\"NONE\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n run_command('/usr/bin/fw_setenv sonic_version_2 \"NONE\"')\n image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)\n click.echo('Removing image root filesystem...')\n subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])\n click.echo('Done')\n\n def verify_image_platform(self, image_path):\n return os.path.isfile(image_path)\n\n @classmethod\n def detect(cls):\n arch = platform.machine()\n return (\"arm\" in arch) or (\"aarch64\" in arch)\n", "path": "sonic_installer/bootloader/uboot.py"}]}
| 2,859 | 80 |
gh_patches_debug_24535
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-14588
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
in tabbed notepad when switching between tabs nvda should announce some way to differentiate between tabs
### Steps to reproduce:
download the new tabbed notepad.
now using the menu create a new tab
now switch between tabs with ctrl+tab
### Actual behavior:
nvda announces blank edition text editor
### Expected behavior:
Before writing what I want, I would like to talk about my discoveries, sorry if it doesn't make sense.
I typed a different word into the first line of text on each tab.
guide example 1
Fernando
guide 2
silva
using object navigation I found the list of tabs and within this list there was each tab named with what was written in the first line of text.
Now I left the first line of text empty in tab 1
in the list of tabs tab 1 appears with the name of untitled
from what i understand if the first line of text is characters this text will be the title of the tab.
If the first line of text is empty, the tab will have an untitled title.
so my suggestion is:
when switching between tabs in notepad in this example by pressing ctrl+tab nvda should announce the title of the tab which will be what is typed in the first line.
But this doesn't work if the first line of the tabs is empty, so I suggest that nvda also announce the position of the tab within the list.
example
guide 1
first line
Fernando
guide 2
first line
empty
guide 3
first line
silva
when switching between tabs nvda would announce:
guide 1 of 3 fernando
guide 2 of 3 untitled
guide 3 of 3 silva
Tab name and tab count could also be announced by command nvda + t to read window name.
### NVDA logs, crash dumps and other attachments:
### System configuration
#### NVDA installed/portable/running from source:
instaled
#### NVDA version:
nvda.exe, NVDA alpha-27590,180c9f2b
#### Windows version:
11 22.623.1095
#### Name and version of other software in use when reproducing the issue:
Notepad.exe, Microsoft.WindowsNotepad 11.2212.33.0
#### Other information about your system:
### Other questions
#### Does the issue still occur after restarting your computer?
yes
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
no
#### If NVDA add-ons are disabled, is your problem still occurring?
yes
#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?
yes
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/appModules/notepad.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2022-2023 NV Access Limited, Joseph Lee
3 # This file is covered by the GNU General Public License.
4 # See the file COPYING for more details.
5
6 """App module for Windows Notepad.
7 While this app module also covers older Notepad releases,
8 this module provides workarounds for Windows 11 Notepad."""
9
10 from comtypes import COMError
11 import appModuleHandler
12 import api
13 import UIAHandler
14 from NVDAObjects.UIA import UIA
15 from NVDAObjects import NVDAObject
16
17
18 class AppModule(appModuleHandler.AppModule):
19
20 def _get_statusBar(self) -> NVDAObject:
21 """Retrieves Windows 11 Notepad status bar.
22 In Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.
23 Windows 11 Notepad uses Windows 11 UI design (top-level window is labeled "DesktopWindowXamlSource",
24 therefore status bar cannot be obtained by position alone.
25 If visible, a child of the foreground window hosts the status bar elements.
26 Status bar child position must be checked whenever Notepad is updated on stable Windows 11 releases
27 as Notepad is updated through Microsoft Store as opposed to tied to specific Windows releases.
28 L{api.getStatusBar} will resort to position lookup if C{NotImplementedError} is raised.
29 """
30 # #13688: Notepad 11 uses Windows 11 user interface, therefore status bar is harder to obtain.
31 # This does not affect earlier versions.
32 notepadVersion = int(self.productVersion.split(".")[0])
33 if notepadVersion < 11:
34 raise NotImplementedError()
35 # And no, status bar is shown when editing documents.
36 # Thankfully, of all the UIA objects encountered, document window has a unique window class name.
37 if api.getFocusObject().windowClassName != "RichEditD2DPT":
38 raise NotImplementedError()
39 # Obtain status bar text across Notepad 11 releases.
40 clientObject = UIAHandler.handler.clientObject
41 condition = clientObject.createPropertyCondition(UIAHandler.UIA_AutomationIdPropertyId, "ContentTextBlock")
42 walker = clientObject.createTreeWalker(condition)
43 notepadWindow = clientObject.elementFromHandle(api.getForegroundObject().windowHandle)
44 try:
45 element = walker.getFirstChildElement(notepadWindow)
46 # Is status bar even showing?
47 element = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
48 except (ValueError, COMError):
49 raise NotImplementedError
50 statusBar = UIA(UIAElement=element).parent
51 return statusBar
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/appModules/notepad.py b/source/appModules/notepad.py
--- a/source/appModules/notepad.py
+++ b/source/appModules/notepad.py
@@ -10,13 +10,38 @@
from comtypes import COMError
import appModuleHandler
import api
+import braille
+import controlTypes
+import eventHandler
+import speech
import UIAHandler
from NVDAObjects.UIA import UIA
from NVDAObjects import NVDAObject
+from typing import Callable
class AppModule(appModuleHandler.AppModule):
+ def event_UIA_elementSelected(self, obj: NVDAObject, nextHandler: Callable[[], None]):
+ # Announce currently selected tab when it changes.
+ if (
+ obj.role == controlTypes.Role.TAB
+ # this is done because 2 selection events are sent for the same object, so to prevent double speaking.
+ and not eventHandler.isPendingEvents("UIA_elementSelected")
+ and controlTypes.State.SELECTED in obj.states
+ ):
+ speech.cancelSpeech()
+ speech.speakObject(obj, reason=controlTypes.OutputReason.FOCUS)
+ braille.handler.message(
+ braille.getPropertiesBraille(
+ name=obj.name,
+ role=obj.role,
+ states=obj.states,
+ positionInfo=obj.positionInfo
+ )
+ )
+ nextHandler()
+
def _get_statusBar(self) -> NVDAObject:
"""Retrieves Windows 11 Notepad status bar.
In Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.
|
{"golden_diff": "diff --git a/source/appModules/notepad.py b/source/appModules/notepad.py\n--- a/source/appModules/notepad.py\n+++ b/source/appModules/notepad.py\n@@ -10,13 +10,38 @@\n from comtypes import COMError\n import appModuleHandler\n import api\n+import braille\n+import controlTypes\n+import eventHandler\n+import speech\n import UIAHandler\n from NVDAObjects.UIA import UIA\n from NVDAObjects import NVDAObject\n+from typing import Callable\n \n \n class AppModule(appModuleHandler.AppModule):\n \n+\tdef event_UIA_elementSelected(self, obj: NVDAObject, nextHandler: Callable[[], None]):\n+\t\t# Announce currently selected tab when it changes.\n+\t\tif (\n+\t\t\tobj.role == controlTypes.Role.TAB\n+\t\t\t# this is done because 2 selection events are sent for the same object, so to prevent double speaking.\n+\t\t\tand not eventHandler.isPendingEvents(\"UIA_elementSelected\")\n+\t\t\tand controlTypes.State.SELECTED in obj.states\n+\t\t):\n+\t\t\tspeech.cancelSpeech()\n+\t\t\tspeech.speakObject(obj, reason=controlTypes.OutputReason.FOCUS)\n+\t\t\tbraille.handler.message(\n+\t\t\t\tbraille.getPropertiesBraille(\n+\t\t\t\t\tname=obj.name,\n+\t\t\t\t\trole=obj.role,\n+\t\t\t\t\tstates=obj.states,\n+\t\t\t\t\tpositionInfo=obj.positionInfo\n+\t\t\t\t)\n+\t\t\t)\n+\t\tnextHandler()\n+\n \tdef _get_statusBar(self) -> NVDAObject:\n \t\t\"\"\"Retrieves Windows 11 Notepad status bar.\n \t\tIn Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.\n", "issue": "in tabbed notepad when switching between tabs nvda should announce some way to differentiate between tabs\n\r\n### Steps to reproduce:\r\ndownload the new tabbed notepad.\r\nnow using the menu create a new tab\r\nnow switch between tabs with ctrl+tabe\r\n### Actual behavior:\r\nnvda announces blank edition text editor\r\n### Expected behavior:\r\nBefore writing what I want, I would like to talk about my discoveries, sorry if it doesn't make sense.\r\nI typed a different word into the first line of text on each tab.\r\nguide example 1\r\nFernando\r\nguide 2\r\nsilva\r\nusing object navigation I found the list of tabs and within this list there was each tab named with what was written in the first line of text.\r\nNow I left the first line of text empty in tab 1\r\nin the list of tabs tab 1 appears with the name of untitled\r\nfrom what i understand if the first line of text is characters this text will be the title of the tab.\r\nIf the first line of text is empty, the tab will have an untitled title.\r\nso my suggestion is:\r\nwhen switching between tabs in notepad in this example by pressing ctrl+tab nvda should announce the title of the tab which will be what is typed in the first line.\r\nBut this doesn't work if the first line of the tabs is empty, so I suggest that nvda also announce the position of the tab within the list.\r\nexample\r\nguide 1\r\nfirst line\r\nFernando\r\nguide 2\r\nfirst line\r\nempty\r\nguide 3\r\nfirst line\r\nsilva\r\nwhen switching between tabs nvda would announce:\r\nguide 1 of 3 fernando\r\nguide 2 of 3 untitled\r\nguide 3 of 3 silva\r\nTab name and tab count could also be announced by command nvda + t to read window name.\r\n### NVDA logs, crash dumps and other attachments:\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\ninstaled\r\n#### NVDA version:\r\nnvda.exe, NVDA alpha-27590,180c9f2b\r\n#### Windows version:\r\n11 22.623.1095\r\n#### Name and version of other software in use when reproducing the issue:\r\nNotepad.exe, 
Microsoft.WindowsNotepad 11.2212.33.0\r\n\r\n#### Other information about your system:\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nyes\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\nno\r\n#### If NVDA add-ons are disabled, is your problem still occurring?\r\nyes\r\n#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?\r\nyes\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\n# Copyright (C) 2022-2023 NV Access Limited, Joseph Lee\n# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n\n\"\"\"App module for Windows Notepad.\nWhile this app module also covers older Notepad releases,\nthis module provides workarounds for Windows 11 Notepad.\"\"\"\n\nfrom comtypes import COMError\nimport appModuleHandler\nimport api\nimport UIAHandler\nfrom NVDAObjects.UIA import UIA\nfrom NVDAObjects import NVDAObject\n\n\nclass AppModule(appModuleHandler.AppModule):\n\n\tdef _get_statusBar(self) -> NVDAObject:\n\t\t\"\"\"Retrieves Windows 11 Notepad status bar.\n\t\tIn Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.\n\t\tWindows 11 Notepad uses Windows 11 UI design (top-level window is labeled \"DesktopWindowXamlSource\",\n\t\ttherefore status bar cannot be obtained by position alone.\n\t\tIf visible, a child of the foreground window hosts the status bar elements.\n\t\tStatus bar child position must be checked whenever Notepad is updated on stable Windows 11 releases\n\t\tas Notepad is updated through Microsoft Store as opposed to tied to specific Windows releases.\n\t\tL{api.getStatusBar} will resort to position lookup if C{NotImplementedError} is raised.\n\t\t\"\"\"\n\t\t# #13688: Notepad 11 uses Windows 11 user interface, therefore status bar is harder to obtain.\n\t\t# This does not affect earlier versions.\n\t\tnotepadVersion = int(self.productVersion.split(\".\")[0])\n\t\tif notepadVersion < 11:\n\t\t\traise NotImplementedError()\n\t\t# And no, status bar is shown when editing documents.\n\t\t# Thankfully, of all the UIA objects encountered, document window has a unique window class name.\n\t\tif api.getFocusObject().windowClassName != \"RichEditD2DPT\":\n\t\t\traise NotImplementedError()\n\t\t# Obtain status bar text across Notepad 11 releases.\n\t\tclientObject = UIAHandler.handler.clientObject\n\t\tcondition = clientObject.createPropertyCondition(UIAHandler.UIA_AutomationIdPropertyId, \"ContentTextBlock\")\n\t\twalker = clientObject.createTreeWalker(condition)\n\t\tnotepadWindow = clientObject.elementFromHandle(api.getForegroundObject().windowHandle)\n\t\ttry:\n\t\t\telement = walker.getFirstChildElement(notepadWindow)\n\t\t\t# Is status bar even showing?\n\t\t\telement = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)\n\t\texcept (ValueError, COMError):\n\t\t\traise NotImplementedError\n\t\tstatusBar = UIA(UIAElement=element).parent\n\t\treturn statusBar\n", "path": "source/appModules/notepad.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\n# Copyright (C) 2022-2023 NV Access Limited, Joseph Lee\n# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n\n\"\"\"App module for Windows Notepad.\nWhile this app module also covers older Notepad releases,\nthis module provides workarounds for Windows 11 Notepad.\"\"\"\n\nfrom comtypes import 
COMError\nimport appModuleHandler\nimport api\nimport braille\nimport controlTypes\nimport eventHandler\nimport speech\nimport UIAHandler\nfrom NVDAObjects.UIA import UIA\nfrom NVDAObjects import NVDAObject\nfrom typing import Callable\n\n\nclass AppModule(appModuleHandler.AppModule):\n\n\tdef event_UIA_elementSelected(self, obj: NVDAObject, nextHandler: Callable[[], None]):\n\t\t# Announce currently selected tab when it changes.\n\t\tif (\n\t\t\tobj.role == controlTypes.Role.TAB\n\t\t\t# this is done because 2 selection events are sent for the same object, so to prevent double speaking.\n\t\t\tand not eventHandler.isPendingEvents(\"UIA_elementSelected\")\n\t\t\tand controlTypes.State.SELECTED in obj.states\n\t\t):\n\t\t\tspeech.cancelSpeech()\n\t\t\tspeech.speakObject(obj, reason=controlTypes.OutputReason.FOCUS)\n\t\t\tbraille.handler.message(\n\t\t\t\tbraille.getPropertiesBraille(\n\t\t\t\t\tname=obj.name,\n\t\t\t\t\trole=obj.role,\n\t\t\t\t\tstates=obj.states,\n\t\t\t\t\tpositionInfo=obj.positionInfo\n\t\t\t\t)\n\t\t\t)\n\t\tnextHandler()\n\n\tdef _get_statusBar(self) -> NVDAObject:\n\t\t\"\"\"Retrieves Windows 11 Notepad status bar.\n\t\tIn Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.\n\t\tWindows 11 Notepad uses Windows 11 UI design (top-level window is labeled \"DesktopWindowXamlSource\",\n\t\ttherefore status bar cannot be obtained by position alone.\n\t\tIf visible, a child of the foreground window hosts the status bar elements.\n\t\tStatus bar child position must be checked whenever Notepad is updated on stable Windows 11 releases\n\t\tas Notepad is updated through Microsoft Store as opposed to tied to specific Windows releases.\n\t\tL{api.getStatusBar} will resort to position lookup if C{NotImplementedError} is raised.\n\t\t\"\"\"\n\t\t# #13688: Notepad 11 uses Windows 11 user interface, therefore status bar is harder to obtain.\n\t\t# This does not affect earlier versions.\n\t\tnotepadVersion = int(self.productVersion.split(\".\")[0])\n\t\tif notepadVersion < 11:\n\t\t\traise NotImplementedError()\n\t\t# And no, status bar is shown when editing documents.\n\t\t# Thankfully, of all the UIA objects encountered, document window has a unique window class name.\n\t\tif api.getFocusObject().windowClassName != \"RichEditD2DPT\":\n\t\t\traise NotImplementedError()\n\t\t# Obtain status bar text across Notepad 11 releases.\n\t\tclientObject = UIAHandler.handler.clientObject\n\t\tcondition = clientObject.createPropertyCondition(UIAHandler.UIA_AutomationIdPropertyId, \"ContentTextBlock\")\n\t\twalker = clientObject.createTreeWalker(condition)\n\t\tnotepadWindow = clientObject.elementFromHandle(api.getForegroundObject().windowHandle)\n\t\ttry:\n\t\t\telement = walker.getFirstChildElement(notepadWindow)\n\t\t\t# Is status bar even showing?\n\t\t\telement = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)\n\t\texcept (ValueError, COMError):\n\t\t\traise NotImplementedError\n\t\tstatusBar = UIA(UIAElement=element).parent\n\t\treturn statusBar\n", "path": "source/appModules/notepad.py"}]}
| 1,524 | 364 |
gh_patches_debug_11669
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-960
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation: meaning of value for return_fitted_val=True
# Description
In this code snippet from the documentation
```python
>>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)
(array([1. , 0.97224597, 0.87553894]), array([28.92218013]))
```
it isn't clear what the meaning of `array([28.92218013])` is. Is it likelihood, log likelihood, -log likelihood, -2 log likelihood?
It is the latter, but that is not clear.
Applies to
https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html
or
https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fit.html
## Is your feature request related to a problem? Please describe.
I wasn't sure, so I had to try a few things to figure it out.
### Describe the solution you'd like
Add a note to the documentation for the convention.
### Describe alternatives you've considered
banging my head against the wall.
# Relevant Issues and Pull Requests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/infer/mle.py`
Content:
```
1 """Module for Maximum Likelihood Estimation."""
2 from .. import get_backend
3 from ..exceptions import UnspecifiedPOI
4
5
6 def twice_nll(pars, data, pdf):
7 """
8 Twice the negative Log-Likelihood.
9
10 Args:
11 data (`tensor`): The data
12 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
13
14 Returns:
15 Twice the negative log likelihood.
16
17 """
18 return -2 * pdf.logpdf(pars, data)
19
20
21 def fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):
22 """
23 Run a unconstrained maximum likelihood fit.
24
25 Example:
26 >>> import pyhf
27 >>> pyhf.set_backend("numpy")
28 >>> model = pyhf.simplemodels.hepdata_like(
29 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
30 ... )
31 >>> observations = [51, 48]
32 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
33 >>> pyhf.infer.mle.fit(data, model, return_fitted_val=True)
34 (array([0. , 1.0030512 , 0.96266961]), array([24.98393521]))
35
36 Args:
37 data (`tensor`): The data
38 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
39 init_pars (`list`): Values to initialize the model parameters at for the fit
40 par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
41 kwargs: Keyword arguments passed through to the optimizer API
42
43 Returns:
44 See optimizer API
45
46 """
47 _, opt = get_backend()
48 init_pars = init_pars or pdf.config.suggested_init()
49 par_bounds = par_bounds or pdf.config.suggested_bounds()
50 return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)
51
52
53 def fixed_poi_fit(poi_val, data, pdf, init_pars=None, par_bounds=None, **kwargs):
54 """
55 Run a maximum likelihood fit with the POI value fixed.
56
57 Example:
58 >>> import pyhf
59 >>> pyhf.set_backend("numpy")
60 >>> model = pyhf.simplemodels.hepdata_like(
61 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
62 ... )
63 >>> observations = [51, 48]
64 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
65 >>> test_poi = 1.0
66 >>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)
67 (array([1. , 0.97224597, 0.87553894]), array([28.92218013]))
68
69 Args:
70 data: The data
71 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
72 init_pars (`list`): Values to initialize the model parameters at for the fit
73 par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
74 kwargs: Keyword arguments passed through to the optimizer API
75
76 Returns:
77 See optimizer API
78
79 """
80 if pdf.config.poi_index is None:
81 raise UnspecifiedPOI(
82 'No POI is defined. A POI is required to fit with a fixed POI.'
83 )
84 _, opt = get_backend()
85 init_pars = init_pars or pdf.config.suggested_init()
86 par_bounds = par_bounds or pdf.config.suggested_bounds()
87 return opt.minimize(
88 twice_nll,
89 data,
90 pdf,
91 init_pars,
92 par_bounds,
93 [(pdf.config.poi_index, poi_val)],
94 **kwargs,
95 )
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py
--- a/src/pyhf/infer/mle.py
+++ b/src/pyhf/infer/mle.py
@@ -22,6 +22,10 @@
"""
Run a unconstrained maximum likelihood fit.
+ .. note::
+
+ :func:`twice_nll` is the objective function.
+
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
@@ -54,6 +58,10 @@
"""
Run a maximum likelihood fit with the POI value fixed.
+ .. note::
+
+ :func:`twice_nll` is the objective function.
+
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
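To pin down the convention the patch documents: `twice_nll` in the listing returns `-2 * pdf.logpdf(pars, data)`, and that objective is what the optimizer reports, so the second element of the tuple returned with `return_fitted_val=True` is twice the negative log-likelihood at the best-fit parameters. A quick check built from the docstring example (exact digits can vary slightly across pyhf/scipy versions):

```python
import pyhf

pyhf.set_backend("numpy")
model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
)
data = pyhf.tensorlib.astensor([51, 48] + model.config.auxdata)

pars, fitted_val = pyhf.infer.mle.fit(data, model, return_fitted_val=True)
print(fitted_val)                     # roughly [24.98393521]
print(-2 * model.logpdf(pars, data))  # the same quantity evaluated directly
```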
|
{"golden_diff": "diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py\n--- a/src/pyhf/infer/mle.py\n+++ b/src/pyhf/infer/mle.py\n@@ -22,6 +22,10 @@\n \"\"\"\n Run a unconstrained maximum likelihood fit.\n \n+ .. note::\n+\n+ :func:`twice_nll` is the objective function.\n+\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n@@ -54,6 +58,10 @@\n \"\"\"\n Run a maximum likelihood fit with the POI value fixed.\n \n+ .. note::\n+\n+ :func:`twice_nll` is the objective function.\n+\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n", "issue": "Documentation: meaning of value for return_fitted_val=True\n# Description\r\n\r\nIn this code snippet from the documentation \r\n\r\n```python\r\n>>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)\r\n(array([1. , 0.97224597, 0.87553894]), array([28.92218013]))\r\n```\r\n\r\nit isn't clear what the meaning of `array([28.92218013])` is. Is it likelihood, log likelihood, -log likelihood, -2 log likelihood?\r\nIt is the latter, but that is not clear.\r\n\r\nApplies to \r\n\r\nhttps://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html\r\nor\r\nhttps://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fit.html\r\n\r\n## Is your feature request related to a problem? Please describe.\r\n\r\nI wasn't sure, so I had to try a few things to figure it out.\r\n\r\n### Describe the solution you'd like\r\n\r\nAdd a note to the documentation for the convention.\r\n\r\n### Describe alternatives you've considered\r\n\r\nbanging my head against the wall.\r\n\r\n# Relevant Issues and Pull Requests\r\n\r\n\n", "before_files": [{"content": "\"\"\"Module for Maximum Likelihood Estimation.\"\"\"\nfrom .. import get_backend\nfrom ..exceptions import UnspecifiedPOI\n\n\ndef twice_nll(pars, data, pdf):\n \"\"\"\n Twice the negative Log-Likelihood.\n\n Args:\n data (`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n\n Returns:\n Twice the negative log likelihood.\n\n \"\"\"\n return -2 * pdf.logpdf(pars, data)\n\n\ndef fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):\n \"\"\"\n Run a unconstrained maximum likelihood fit.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> pyhf.infer.mle.fit(data, model, return_fitted_val=True)\n (array([0. , 1.0030512 , 0.96266961]), array([24.98393521]))\n\n Args:\n data (`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (`list`): Values to initialize the model parameters at for the fit\n par_bounds (`list` of `list`\\s or `tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)\n\n\ndef fixed_poi_fit(poi_val, data, pdf, init_pars=None, par_bounds=None, **kwargs):\n \"\"\"\n Run a maximum likelihood fit with the POI value fixed.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... 
signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> test_poi = 1.0\n >>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)\n (array([1. , 0.97224597, 0.87553894]), array([28.92218013]))\n\n Args:\n data: The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (`list`): Values to initialize the model parameters at for the fit\n par_bounds (`list` of `list`\\s or `tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n if pdf.config.poi_index is None:\n raise UnspecifiedPOI(\n 'No POI is defined. A POI is required to fit with a fixed POI.'\n )\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(\n twice_nll,\n data,\n pdf,\n init_pars,\n par_bounds,\n [(pdf.config.poi_index, poi_val)],\n **kwargs,\n )\n", "path": "src/pyhf/infer/mle.py"}], "after_files": [{"content": "\"\"\"Module for Maximum Likelihood Estimation.\"\"\"\nfrom .. import get_backend\nfrom ..exceptions import UnspecifiedPOI\n\n\ndef twice_nll(pars, data, pdf):\n \"\"\"\n Twice the negative Log-Likelihood.\n\n Args:\n data (`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n\n Returns:\n Twice the negative log likelihood.\n\n \"\"\"\n return -2 * pdf.logpdf(pars, data)\n\n\ndef fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):\n \"\"\"\n Run a unconstrained maximum likelihood fit.\n\n .. note::\n\n :func:`twice_nll` is the objective function.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> pyhf.infer.mle.fit(data, model, return_fitted_val=True)\n (array([0. , 1.0030512 , 0.96266961]), array([24.98393521]))\n\n Args:\n data (`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (`list`): Values to initialize the model parameters at for the fit\n par_bounds (`list` of `list`\\s or `tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)\n\n\ndef fixed_poi_fit(poi_val, data, pdf, init_pars=None, par_bounds=None, **kwargs):\n \"\"\"\n Run a maximum likelihood fit with the POI value fixed.\n\n .. note::\n\n :func:`twice_nll` is the objective function.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> test_poi = 1.0\n >>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)\n (array([1. 
, 0.97224597, 0.87553894]), array([28.92218013]))\n\n Args:\n data: The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (`list`): Values to initialize the model parameters at for the fit\n par_bounds (`list` of `list`\\s or `tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n if pdf.config.poi_index is None:\n raise UnspecifiedPOI(\n 'No POI is defined. A POI is required to fit with a fixed POI.'\n )\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(\n twice_nll,\n data,\n pdf,\n init_pars,\n par_bounds,\n [(pdf.config.poi_index, poi_val)],\n **kwargs,\n )\n", "path": "src/pyhf/infer/mle.py"}]}
| 1,645 | 178 |
gh_patches_debug_13489
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-22795
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cron server script runs twice
<!--
Welcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:
1. This tracker should only be used to report bugs and request features / enhancements to Frappe
- For questions and general support, use https://stackoverflow.com/questions/tagged/frappe
- For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet
2. Use the search function before creating a new issue. Duplicates will be closed and directed to
the original discussion.
3. When making a bug report, make sure you provide all required information. The easier it is for
maintainers to reproduce, the faster it'll be fixed.
4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR 😉
-->
## Description of the issue
Created a cron server script with "13 11 17 10 tue" cron format.
So it should run at 11:13 17/10/2023.
But it runs once after save and runs again at the correct time.
## Context information (for bug reports)


**Output of `bench version`**
```
Bench 5.17.2
Frappe 14.52.0
```
## Steps to reproduce the issue
1.Create a server script as "Scheduler Event", "Event Frequency" as "Cron"
2.Make cron format as ```59 23 * * *```
3.Set script as
```frappe.log_error("Cron Test", "Test")```
### Observed result
Watch the Error Log.
It creates a "Cron Test" log once right away and again at midnight.
### Expected result
It should only create the error log at the midnight.
### Stacktrace / full error message
```
None
```
## Additional information
OS version / distribution, `Frappe` install method, etc.
Manual install
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `frappe/core/doctype/scheduled_job_type/scheduled_job_type.py`
Content:
```
1 # Copyright (c) 2021, Frappe Technologies and contributors
2 # License: MIT. See LICENSE
3
4 import json
5 from datetime import datetime
6
7 import click
8 from croniter import croniter
9
10 import frappe
11 from frappe.model.document import Document
12 from frappe.utils import get_datetime, now_datetime
13 from frappe.utils.background_jobs import enqueue, is_job_enqueued
14
15
16 class ScheduledJobType(Document):
17 # begin: auto-generated types
18 # This code is auto-generated. Do not modify anything in this block.
19
20 from typing import TYPE_CHECKING
21
22 if TYPE_CHECKING:
23 from frappe.types import DF
24
25 create_log: DF.Check
26 cron_format: DF.Data | None
27 frequency: DF.Literal[
28 "All",
29 "Hourly",
30 "Hourly Long",
31 "Daily",
32 "Daily Long",
33 "Weekly",
34 "Weekly Long",
35 "Monthly",
36 "Monthly Long",
37 "Cron",
38 "Yearly",
39 "Annual",
40 ]
41 last_execution: DF.Datetime | None
42 method: DF.Data
43 next_execution: DF.Datetime | None
44 server_script: DF.Link | None
45 stopped: DF.Check
46 # end: auto-generated types
47 def autoname(self):
48 self.name = ".".join(self.method.split(".")[-2:])
49
50 def validate(self):
51 if self.frequency != "All":
52 # force logging for all events other than continuous ones (ALL)
53 self.create_log = 1
54
55 def enqueue(self, force=False) -> bool:
56 # enqueue event if last execution is done
57 if self.is_event_due() or force:
58 if not self.is_job_in_queue():
59 enqueue(
60 "frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job",
61 queue=self.get_queue_name(),
62 job_type=self.method,
63 job_id=self.rq_job_id,
64 )
65 return True
66 else:
67 frappe.logger("scheduler").error(
68 f"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}"
69 )
70
71 return False
72
73 def is_event_due(self, current_time=None):
74 """Return true if event is due based on time lapsed since last execution"""
75 # if the next scheduled event is before NOW, then its due!
76 return self.get_next_execution() <= (current_time or now_datetime())
77
78 def is_job_in_queue(self) -> bool:
79 return is_job_enqueued(self.rq_job_id)
80
81 @property
82 def rq_job_id(self):
83 """Unique ID created to deduplicate jobs with single RQ call."""
84 return f"scheduled_job::{self.method}"
85
86 @property
87 def next_execution(self):
88 return self.get_next_execution()
89
90 def get_next_execution(self):
91 CRON_MAP = {
92 "Yearly": "0 0 1 1 *",
93 "Annual": "0 0 1 1 *",
94 "Monthly": "0 0 1 * *",
95 "Monthly Long": "0 0 1 * *",
96 "Weekly": "0 0 * * 0",
97 "Weekly Long": "0 0 * * 0",
98 "Daily": "0 0 * * *",
99 "Daily Long": "0 0 * * *",
100 "Hourly": "0 * * * *",
101 "Hourly Long": "0 * * * *",
102 "All": f"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *",
103 }
104
105 if not self.cron_format:
106 self.cron_format = CRON_MAP[self.frequency]
107
108 return croniter(
109 self.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1))
110 ).get_next(datetime)
111
112 def execute(self):
113 self.scheduler_log = None
114 try:
115 self.log_status("Start")
116 if self.server_script:
117 script_name = frappe.db.get_value("Server Script", self.server_script)
118 if script_name:
119 frappe.get_doc("Server Script", script_name).execute_scheduled_method()
120 else:
121 frappe.get_attr(self.method)()
122 frappe.db.commit()
123 self.log_status("Complete")
124 except Exception:
125 frappe.db.rollback()
126 self.log_status("Failed")
127
128 def log_status(self, status):
129 # log file
130 frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}")
131 self.update_scheduler_log(status)
132
133 def update_scheduler_log(self, status):
134 if not self.create_log:
135 # self.get_next_execution will work properly iff self.last_execution is properly set
136 if self.frequency == "All" and status == "Start":
137 self.db_set("last_execution", now_datetime(), update_modified=False)
138 frappe.db.commit()
139 return
140 if not self.scheduler_log:
141 self.scheduler_log = frappe.get_doc(
142 dict(doctype="Scheduled Job Log", scheduled_job_type=self.name)
143 ).insert(ignore_permissions=True)
144 self.scheduler_log.db_set("status", status)
145 if status == "Failed":
146 self.scheduler_log.db_set("details", frappe.get_traceback())
147 if status == "Start":
148 self.db_set("last_execution", now_datetime(), update_modified=False)
149 frappe.db.commit()
150
151 def get_queue_name(self):
152 return "long" if ("Long" in self.frequency) else "default"
153
154 def on_trash(self):
155 frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name})
156
157
158 @frappe.whitelist()
159 def execute_event(doc: str):
160 frappe.only_for("System Manager")
161 doc = json.loads(doc)
162 frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True)
163 return doc
164
165
166 def run_scheduled_job(job_type: str):
167 """This is a wrapper function that runs a hooks.scheduler_events method"""
168 try:
169 frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute()
170 except Exception:
171 print(frappe.get_traceback())
172
173
174 def sync_jobs(hooks: dict = None):
175 frappe.reload_doc("core", "doctype", "scheduled_job_type")
176 scheduler_events = hooks or frappe.get_hooks("scheduler_events")
177 all_events = insert_events(scheduler_events)
178 clear_events(all_events)
179
180
181 def insert_events(scheduler_events: dict) -> list:
182 cron_jobs, event_jobs = [], []
183 for event_type in scheduler_events:
184 events = scheduler_events.get(event_type)
185 if isinstance(events, dict):
186 cron_jobs += insert_cron_jobs(events)
187 else:
188 # hourly, daily etc
189 event_jobs += insert_event_jobs(events, event_type)
190 return cron_jobs + event_jobs
191
192
193 def insert_cron_jobs(events: dict) -> list:
194 cron_jobs = []
195 for cron_format in events:
196 for event in events.get(cron_format):
197 cron_jobs.append(event)
198 insert_single_event("Cron", event, cron_format)
199 return cron_jobs
200
201
202 def insert_event_jobs(events: list, event_type: str) -> list:
203 event_jobs = []
204 for event in events:
205 event_jobs.append(event)
206 frequency = event_type.replace("_", " ").title()
207 insert_single_event(frequency, event)
208 return event_jobs
209
210
211 def insert_single_event(frequency: str, event: str, cron_format: str = None):
212 cron_expr = {"cron_format": cron_format} if cron_format else {}
213
214 try:
215 frappe.get_attr(event)
216 except Exception as e:
217 click.secho(f"{event} is not a valid method: {e}", fg="yellow")
218
219 doc = frappe.get_doc(
220 {
221 "doctype": "Scheduled Job Type",
222 "method": event,
223 "cron_format": cron_format,
224 "frequency": frequency,
225 }
226 )
227
228 if not frappe.db.exists(
229 "Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr}
230 ):
231 savepoint = "scheduled_job_type_creation"
232 try:
233 frappe.db.savepoint(savepoint)
234 doc.insert()
235 except frappe.DuplicateEntryError:
236 frappe.db.rollback(save_point=savepoint)
237 doc.delete()
238 doc.insert()
239
240
241 def clear_events(all_events: list):
242 for event in frappe.get_all("Scheduled Job Type", fields=["name", "method", "server_script"]):
243 is_server_script = event.server_script
244 is_defined_in_hooks = event.method in all_events
245
246 if not (is_defined_in_hooks or is_server_script):
247 frappe.delete_doc("Scheduled Job Type", event.name)
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py
@@ -105,9 +105,12 @@
if not self.cron_format:
self.cron_format = CRON_MAP[self.frequency]
- return croniter(
- self.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1))
- ).get_next(datetime)
+ # If this is a cold start then last_execution will not be set.
+ # Creation is set as fallback because if very old fallback is set job might trigger
+ # immediately, even when it's meant to be daily.
+ # A dynamic fallback like current time might miss the scheduler interval and job will never start.
+ last_execution = get_datetime(self.last_execution or self.creation)
+ return croniter(self.cron_format, last_execution).get_next(datetime)
def execute(self):
self.scheduler_log = None
|
{"golden_diff": "diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n@@ -105,9 +105,12 @@\n \t\tif not self.cron_format:\n \t\t\tself.cron_format = CRON_MAP[self.frequency]\n \n-\t\treturn croniter(\n-\t\t\tself.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1))\n-\t\t).get_next(datetime)\n+\t\t# If this is a cold start then last_execution will not be set.\n+\t\t# Creation is set as fallback because if very old fallback is set job might trigger\n+\t\t# immediately, even when it's meant to be daily.\n+\t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n+\t\tlast_execution = get_datetime(self.last_execution or self.creation)\n+\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n \n \tdef execute(self):\n \t\tself.scheduler_log = None\n", "issue": "Cron server script runs twice\n<!--\r\nWelcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to Frappe\r\n - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe\r\n - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR \ud83d\ude09\r\n-->\r\n\r\n## Description of the issue\r\nCreated a cron server script with \"13 11 17 10 tue\" cron format.\r\nSo it should run at 11:13 17/10/2023.\r\nBut it runs once after save and runs again at correct time.\r\n\r\n## Context information (for bug reports)\r\n\r\n\r\n\r\n\r\n\r\n**Output of `bench version`**\r\n```\r\nBench 5.17.2\r\nFrappe 14.52.0\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1.Create a server script as \"Scheduler Event\", \"Event Frequency\" as \"Cron\"\r\n2.Make cron format as ```59 23 * * *```\r\n3.Set script as\r\n```frappe.log_error(\"Cron Test\", \"Test\")```\r\n\r\n### Observed result\r\nWatch the Error Log. \r\nIt creates a \"Cron Test\" log once and at the midnight.\r\n\r\n### Expected result\r\nIt should only create the error log at the midnight.\r\n### Stacktrace / full error message\r\n\r\n```\r\nNone\r\n```\r\n\r\n## Additional information\r\n\r\nOS version / distribution, `Frappe` install method, etc.\r\nManual install\r\n\n", "before_files": [{"content": "# Copyright (c) 2021, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport json\nfrom datetime import datetime\n\nimport click\nfrom croniter import croniter\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.utils import get_datetime, now_datetime\nfrom frappe.utils.background_jobs import enqueue, is_job_enqueued\n\n\nclass ScheduledJobType(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. 
Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tcreate_log: DF.Check\n\t\tcron_format: DF.Data | None\n\t\tfrequency: DF.Literal[\n\t\t\t\"All\",\n\t\t\t\"Hourly\",\n\t\t\t\"Hourly Long\",\n\t\t\t\"Daily\",\n\t\t\t\"Daily Long\",\n\t\t\t\"Weekly\",\n\t\t\t\"Weekly Long\",\n\t\t\t\"Monthly\",\n\t\t\t\"Monthly Long\",\n\t\t\t\"Cron\",\n\t\t\t\"Yearly\",\n\t\t\t\"Annual\",\n\t\t]\n\t\tlast_execution: DF.Datetime | None\n\t\tmethod: DF.Data\n\t\tnext_execution: DF.Datetime | None\n\t\tserver_script: DF.Link | None\n\t\tstopped: DF.Check\n\t# end: auto-generated types\n\tdef autoname(self):\n\t\tself.name = \".\".join(self.method.split(\".\")[-2:])\n\n\tdef validate(self):\n\t\tif self.frequency != \"All\":\n\t\t\t# force logging for all events other than continuous ones (ALL)\n\t\t\tself.create_log = 1\n\n\tdef enqueue(self, force=False) -> bool:\n\t\t# enqueue event if last execution is done\n\t\tif self.is_event_due() or force:\n\t\t\tif not self.is_job_in_queue():\n\t\t\t\tenqueue(\n\t\t\t\t\t\"frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job\",\n\t\t\t\t\tqueue=self.get_queue_name(),\n\t\t\t\t\tjob_type=self.method,\n\t\t\t\t\tjob_id=self.rq_job_id,\n\t\t\t\t)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tfrappe.logger(\"scheduler\").error(\n\t\t\t\t\tf\"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}\"\n\t\t\t\t)\n\n\t\treturn False\n\n\tdef is_event_due(self, current_time=None):\n\t\t\"\"\"Return true if event is due based on time lapsed since last execution\"\"\"\n\t\t# if the next scheduled event is before NOW, then its due!\n\t\treturn self.get_next_execution() <= (current_time or now_datetime())\n\n\tdef is_job_in_queue(self) -> bool:\n\t\treturn is_job_enqueued(self.rq_job_id)\n\n\t@property\n\tdef rq_job_id(self):\n\t\t\"\"\"Unique ID created to deduplicate jobs with single RQ call.\"\"\"\n\t\treturn f\"scheduled_job::{self.method}\"\n\n\t@property\n\tdef next_execution(self):\n\t\treturn self.get_next_execution()\n\n\tdef get_next_execution(self):\n\t\tCRON_MAP = {\n\t\t\t\"Yearly\": \"0 0 1 1 *\",\n\t\t\t\"Annual\": \"0 0 1 1 *\",\n\t\t\t\"Monthly\": \"0 0 1 * *\",\n\t\t\t\"Monthly Long\": \"0 0 1 * *\",\n\t\t\t\"Weekly\": \"0 0 * * 0\",\n\t\t\t\"Weekly Long\": \"0 0 * * 0\",\n\t\t\t\"Daily\": \"0 0 * * *\",\n\t\t\t\"Daily Long\": \"0 0 * * *\",\n\t\t\t\"Hourly\": \"0 * * * *\",\n\t\t\t\"Hourly Long\": \"0 * * * *\",\n\t\t\t\"All\": f\"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *\",\n\t\t}\n\n\t\tif not self.cron_format:\n\t\t\tself.cron_format = CRON_MAP[self.frequency]\n\n\t\treturn croniter(\n\t\t\tself.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1))\n\t\t).get_next(datetime)\n\n\tdef execute(self):\n\t\tself.scheduler_log = None\n\t\ttry:\n\t\t\tself.log_status(\"Start\")\n\t\t\tif self.server_script:\n\t\t\t\tscript_name = frappe.db.get_value(\"Server Script\", self.server_script)\n\t\t\t\tif script_name:\n\t\t\t\t\tfrappe.get_doc(\"Server Script\", script_name).execute_scheduled_method()\n\t\t\telse:\n\t\t\t\tfrappe.get_attr(self.method)()\n\t\t\tfrappe.db.commit()\n\t\t\tself.log_status(\"Complete\")\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback()\n\t\t\tself.log_status(\"Failed\")\n\n\tdef log_status(self, status):\n\t\t# log file\n\t\tfrappe.logger(\"scheduler\").info(f\"Scheduled Job {status}: {self.method} for 
{frappe.local.site}\")\n\t\tself.update_scheduler_log(status)\n\n\tdef update_scheduler_log(self, status):\n\t\tif not self.create_log:\n\t\t\t# self.get_next_execution will work properly iff self.last_execution is properly set\n\t\t\tif self.frequency == \"All\" and status == \"Start\":\n\t\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\t\t\tfrappe.db.commit()\n\t\t\treturn\n\t\tif not self.scheduler_log:\n\t\t\tself.scheduler_log = frappe.get_doc(\n\t\t\t\tdict(doctype=\"Scheduled Job Log\", scheduled_job_type=self.name)\n\t\t\t).insert(ignore_permissions=True)\n\t\tself.scheduler_log.db_set(\"status\", status)\n\t\tif status == \"Failed\":\n\t\t\tself.scheduler_log.db_set(\"details\", frappe.get_traceback())\n\t\tif status == \"Start\":\n\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\tfrappe.db.commit()\n\n\tdef get_queue_name(self):\n\t\treturn \"long\" if (\"Long\" in self.frequency) else \"default\"\n\n\tdef on_trash(self):\n\t\tfrappe.db.delete(\"Scheduled Job Log\", {\"scheduled_job_type\": self.name})\n\n\[email protected]()\ndef execute_event(doc: str):\n\tfrappe.only_for(\"System Manager\")\n\tdoc = json.loads(doc)\n\tfrappe.get_doc(\"Scheduled Job Type\", doc.get(\"name\")).enqueue(force=True)\n\treturn doc\n\n\ndef run_scheduled_job(job_type: str):\n\t\"\"\"This is a wrapper function that runs a hooks.scheduler_events method\"\"\"\n\ttry:\n\t\tfrappe.get_doc(\"Scheduled Job Type\", dict(method=job_type)).execute()\n\texcept Exception:\n\t\tprint(frappe.get_traceback())\n\n\ndef sync_jobs(hooks: dict = None):\n\tfrappe.reload_doc(\"core\", \"doctype\", \"scheduled_job_type\")\n\tscheduler_events = hooks or frappe.get_hooks(\"scheduler_events\")\n\tall_events = insert_events(scheduler_events)\n\tclear_events(all_events)\n\n\ndef insert_events(scheduler_events: dict) -> list:\n\tcron_jobs, event_jobs = [], []\n\tfor event_type in scheduler_events:\n\t\tevents = scheduler_events.get(event_type)\n\t\tif isinstance(events, dict):\n\t\t\tcron_jobs += insert_cron_jobs(events)\n\t\telse:\n\t\t\t# hourly, daily etc\n\t\t\tevent_jobs += insert_event_jobs(events, event_type)\n\treturn cron_jobs + event_jobs\n\n\ndef insert_cron_jobs(events: dict) -> list:\n\tcron_jobs = []\n\tfor cron_format in events:\n\t\tfor event in events.get(cron_format):\n\t\t\tcron_jobs.append(event)\n\t\t\tinsert_single_event(\"Cron\", event, cron_format)\n\treturn cron_jobs\n\n\ndef insert_event_jobs(events: list, event_type: str) -> list:\n\tevent_jobs = []\n\tfor event in events:\n\t\tevent_jobs.append(event)\n\t\tfrequency = event_type.replace(\"_\", \" \").title()\n\t\tinsert_single_event(frequency, event)\n\treturn event_jobs\n\n\ndef insert_single_event(frequency: str, event: str, cron_format: str = None):\n\tcron_expr = {\"cron_format\": cron_format} if cron_format else {}\n\n\ttry:\n\t\tfrappe.get_attr(event)\n\texcept Exception as e:\n\t\tclick.secho(f\"{event} is not a valid method: {e}\", fg=\"yellow\")\n\n\tdoc = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Scheduled Job Type\",\n\t\t\t\"method\": event,\n\t\t\t\"cron_format\": cron_format,\n\t\t\t\"frequency\": frequency,\n\t\t}\n\t)\n\n\tif not frappe.db.exists(\n\t\t\"Scheduled Job Type\", {\"method\": event, \"frequency\": frequency, **cron_expr}\n\t):\n\t\tsavepoint = \"scheduled_job_type_creation\"\n\t\ttry:\n\t\t\tfrappe.db.savepoint(savepoint)\n\t\t\tdoc.insert()\n\t\texcept 
frappe.DuplicateEntryError:\n\t\t\tfrappe.db.rollback(save_point=savepoint)\n\t\t\tdoc.delete()\n\t\t\tdoc.insert()\n\n\ndef clear_events(all_events: list):\n\tfor event in frappe.get_all(\"Scheduled Job Type\", fields=[\"name\", \"method\", \"server_script\"]):\n\t\tis_server_script = event.server_script\n\t\tis_defined_in_hooks = event.method in all_events\n\n\t\tif not (is_defined_in_hooks or is_server_script):\n\t\t\tfrappe.delete_doc(\"Scheduled Job Type\", event.name)\n", "path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py"}], "after_files": [{"content": "# Copyright (c) 2021, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport json\nfrom datetime import datetime\n\nimport click\nfrom croniter import croniter\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.utils import get_datetime, now_datetime\nfrom frappe.utils.background_jobs import enqueue, is_job_enqueued\n\n\nclass ScheduledJobType(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tcreate_log: DF.Check\n\t\tcron_format: DF.Data | None\n\t\tfrequency: DF.Literal[\n\t\t\t\"All\",\n\t\t\t\"Hourly\",\n\t\t\t\"Hourly Long\",\n\t\t\t\"Daily\",\n\t\t\t\"Daily Long\",\n\t\t\t\"Weekly\",\n\t\t\t\"Weekly Long\",\n\t\t\t\"Monthly\",\n\t\t\t\"Monthly Long\",\n\t\t\t\"Cron\",\n\t\t\t\"Yearly\",\n\t\t\t\"Annual\",\n\t\t]\n\t\tlast_execution: DF.Datetime | None\n\t\tmethod: DF.Data\n\t\tnext_execution: DF.Datetime | None\n\t\tserver_script: DF.Link | None\n\t\tstopped: DF.Check\n\t# end: auto-generated types\n\tdef autoname(self):\n\t\tself.name = \".\".join(self.method.split(\".\")[-2:])\n\n\tdef validate(self):\n\t\tif self.frequency != \"All\":\n\t\t\t# force logging for all events other than continuous ones (ALL)\n\t\t\tself.create_log = 1\n\n\tdef enqueue(self, force=False) -> bool:\n\t\t# enqueue event if last execution is done\n\t\tif self.is_event_due() or force:\n\t\t\tif not self.is_job_in_queue():\n\t\t\t\tenqueue(\n\t\t\t\t\t\"frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job\",\n\t\t\t\t\tqueue=self.get_queue_name(),\n\t\t\t\t\tjob_type=self.method,\n\t\t\t\t\tjob_id=self.rq_job_id,\n\t\t\t\t)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tfrappe.logger(\"scheduler\").error(\n\t\t\t\t\tf\"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}\"\n\t\t\t\t)\n\n\t\treturn False\n\n\tdef is_event_due(self, current_time=None):\n\t\t\"\"\"Return true if event is due based on time lapsed since last execution\"\"\"\n\t\t# if the next scheduled event is before NOW, then its due!\n\t\treturn self.get_next_execution() <= (current_time or now_datetime())\n\n\tdef is_job_in_queue(self) -> bool:\n\t\treturn is_job_enqueued(self.rq_job_id)\n\n\t@property\n\tdef rq_job_id(self):\n\t\t\"\"\"Unique ID created to deduplicate jobs with single RQ call.\"\"\"\n\t\treturn f\"scheduled_job::{self.method}\"\n\n\t@property\n\tdef next_execution(self):\n\t\treturn self.get_next_execution()\n\n\tdef get_next_execution(self):\n\t\tCRON_MAP = {\n\t\t\t\"Yearly\": \"0 0 1 1 *\",\n\t\t\t\"Annual\": \"0 0 1 1 *\",\n\t\t\t\"Monthly\": \"0 0 1 * *\",\n\t\t\t\"Monthly Long\": \"0 0 1 * *\",\n\t\t\t\"Weekly\": \"0 0 * * 0\",\n\t\t\t\"Weekly Long\": \"0 0 * * 0\",\n\t\t\t\"Daily\": \"0 0 * * *\",\n\t\t\t\"Daily Long\": \"0 0 * * *\",\n\t\t\t\"Hourly\": \"0 * * * *\",\n\t\t\t\"Hourly 
Long\": \"0 * * * *\",\n\t\t\t\"All\": f\"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *\",\n\t\t}\n\n\t\tif not self.cron_format:\n\t\t\tself.cron_format = CRON_MAP[self.frequency]\n\n\t\t# If this is a cold start then last_execution will not be set.\n\t\t# Creation is set as fallback because if very old fallback is set job might trigger\n\t\t# immediately, even when it's meant to be daily.\n\t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n\t\tlast_execution = get_datetime(self.last_execution or self.creation)\n\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n\n\tdef execute(self):\n\t\tself.scheduler_log = None\n\t\ttry:\n\t\t\tself.log_status(\"Start\")\n\t\t\tif self.server_script:\n\t\t\t\tscript_name = frappe.db.get_value(\"Server Script\", self.server_script)\n\t\t\t\tif script_name:\n\t\t\t\t\tfrappe.get_doc(\"Server Script\", script_name).execute_scheduled_method()\n\t\t\telse:\n\t\t\t\tfrappe.get_attr(self.method)()\n\t\t\tfrappe.db.commit()\n\t\t\tself.log_status(\"Complete\")\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback()\n\t\t\tself.log_status(\"Failed\")\n\n\tdef log_status(self, status):\n\t\t# log file\n\t\tfrappe.logger(\"scheduler\").info(f\"Scheduled Job {status}: {self.method} for {frappe.local.site}\")\n\t\tself.update_scheduler_log(status)\n\n\tdef update_scheduler_log(self, status):\n\t\tif not self.create_log:\n\t\t\t# self.get_next_execution will work properly iff self.last_execution is properly set\n\t\t\tif self.frequency == \"All\" and status == \"Start\":\n\t\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\t\t\tfrappe.db.commit()\n\t\t\treturn\n\t\tif not self.scheduler_log:\n\t\t\tself.scheduler_log = frappe.get_doc(\n\t\t\t\tdict(doctype=\"Scheduled Job Log\", scheduled_job_type=self.name)\n\t\t\t).insert(ignore_permissions=True)\n\t\tself.scheduler_log.db_set(\"status\", status)\n\t\tif status == \"Failed\":\n\t\t\tself.scheduler_log.db_set(\"details\", frappe.get_traceback())\n\t\tif status == \"Start\":\n\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\tfrappe.db.commit()\n\n\tdef get_queue_name(self):\n\t\treturn \"long\" if (\"Long\" in self.frequency) else \"default\"\n\n\tdef on_trash(self):\n\t\tfrappe.db.delete(\"Scheduled Job Log\", {\"scheduled_job_type\": self.name})\n\n\[email protected]()\ndef execute_event(doc: str):\n\tfrappe.only_for(\"System Manager\")\n\tdoc = json.loads(doc)\n\tfrappe.get_doc(\"Scheduled Job Type\", doc.get(\"name\")).enqueue(force=True)\n\treturn doc\n\n\ndef run_scheduled_job(job_type: str):\n\t\"\"\"This is a wrapper function that runs a hooks.scheduler_events method\"\"\"\n\ttry:\n\t\tfrappe.get_doc(\"Scheduled Job Type\", dict(method=job_type)).execute()\n\texcept Exception:\n\t\tprint(frappe.get_traceback())\n\n\ndef sync_jobs(hooks: dict = None):\n\tfrappe.reload_doc(\"core\", \"doctype\", \"scheduled_job_type\")\n\tscheduler_events = hooks or frappe.get_hooks(\"scheduler_events\")\n\tall_events = insert_events(scheduler_events)\n\tclear_events(all_events)\n\n\ndef insert_events(scheduler_events: dict) -> list:\n\tcron_jobs, event_jobs = [], []\n\tfor event_type in scheduler_events:\n\t\tevents = scheduler_events.get(event_type)\n\t\tif isinstance(events, dict):\n\t\t\tcron_jobs += insert_cron_jobs(events)\n\t\telse:\n\t\t\t# hourly, daily etc\n\t\t\tevent_jobs += insert_event_jobs(events, event_type)\n\treturn cron_jobs + event_jobs\n\n\ndef 
insert_cron_jobs(events: dict) -> list:\n\tcron_jobs = []\n\tfor cron_format in events:\n\t\tfor event in events.get(cron_format):\n\t\t\tcron_jobs.append(event)\n\t\t\tinsert_single_event(\"Cron\", event, cron_format)\n\treturn cron_jobs\n\n\ndef insert_event_jobs(events: list, event_type: str) -> list:\n\tevent_jobs = []\n\tfor event in events:\n\t\tevent_jobs.append(event)\n\t\tfrequency = event_type.replace(\"_\", \" \").title()\n\t\tinsert_single_event(frequency, event)\n\treturn event_jobs\n\n\ndef insert_single_event(frequency: str, event: str, cron_format: str = None):\n\tcron_expr = {\"cron_format\": cron_format} if cron_format else {}\n\n\ttry:\n\t\tfrappe.get_attr(event)\n\texcept Exception as e:\n\t\tclick.secho(f\"{event} is not a valid method: {e}\", fg=\"yellow\")\n\n\tdoc = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Scheduled Job Type\",\n\t\t\t\"method\": event,\n\t\t\t\"cron_format\": cron_format,\n\t\t\t\"frequency\": frequency,\n\t\t}\n\t)\n\n\tif not frappe.db.exists(\n\t\t\"Scheduled Job Type\", {\"method\": event, \"frequency\": frequency, **cron_expr}\n\t):\n\t\tsavepoint = \"scheduled_job_type_creation\"\n\t\ttry:\n\t\t\tfrappe.db.savepoint(savepoint)\n\t\t\tdoc.insert()\n\t\texcept frappe.DuplicateEntryError:\n\t\t\tfrappe.db.rollback(save_point=savepoint)\n\t\t\tdoc.delete()\n\t\t\tdoc.insert()\n\n\ndef clear_events(all_events: list):\n\tfor event in frappe.get_all(\"Scheduled Job Type\", fields=[\"name\", \"method\", \"server_script\"]):\n\t\tis_server_script = event.server_script\n\t\tis_defined_in_hooks = event.method in all_events\n\n\t\tif not (is_defined_in_hooks or is_server_script):\n\t\t\tfrappe.delete_doc(\"Scheduled Job Type\", event.name)\n", "path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py"}]}
| 3,422 | 262 |
gh_patches_debug_12627
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-1465
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom webhook fails action depending on payload.
Hi there, I've faced a weird issue.
Say it we have a webhook action like this: https://gist.github.com/dennybaa/00d1936af2f441cfff15
If I issue the following commands:
1. `curl -X POST --data "{\"message\": \"6fd781b849bb\"}" http://172.17.0.8:9101/v1/webhooks/sample`
2. `curl -X POST --data "{\"hits.rate_15m\": 0.4765927732670934, \"hits.count\": 30, \"tags\": [\"metric\"], \"@timestamp\": \"2015-04-23T13:58:28.319Z\", \"hits.rate_5m\": 0.31123263029226506, \"@version\": \"1\", \"message\": \"6fd781b849bb\", \"hits.rate_1m\": 0.08765952620175455}"`
In both cases trigger is dispatched. But the action takes place only in the #1 case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2actions/st2actions/resultstracker.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import eventlet
17 import importlib
18 import six
19
20 from collections import defaultdict
21 from kombu import Connection
22 from oslo.config import cfg
23
24 from st2actions.query.base import QueryContext
25 from st2common import log as logging
26 from st2common.models.db import action as action_models
27 from st2common.persistence.action import ActionExecutionState
28 from st2common.transport import actionexecutionstate, consumers, publishers
29
30
31 LOG = logging.getLogger(__name__)
32
33 ACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work',
34 routing_key=publishers.CREATE_RK)
35
36
37 class ResultsTracker(consumers.MessageHandler):
38 message_type = action_models.ActionExecutionStateDB
39
40 def __init__(self, connection, queues):
41 super(ResultsTracker, self).__init__(connection, queues)
42 self._queriers = {}
43 self._query_threads = []
44 self._failed_imports = set()
45
46 def start(self, wait=False):
47 self._bootstrap()
48 super(ResultsTracker, self).start(wait=wait)
49
50 def wait(self):
51 super(ResultsTracker, self).wait()
52 for thread in self._query_threads():
53 thread.wait()
54
55 def shutdown(self):
56 super(ResultsTracker, self).shutdown()
57 LOG.info('Stats from queriers:')
58 self._print_stats()
59
60 def _print_stats(self):
61 for name, querier in six.iteritems(self._queriers):
62 if querier:
63 querier.print_stats()
64
65 def _bootstrap(self):
66 all_states = ActionExecutionState.get_all()
67 LOG.info('Found %d pending states in db.' % len(all_states))
68
69 query_contexts_dict = defaultdict(list)
70 for state_db in all_states:
71 try:
72 context = QueryContext.from_model(state_db)
73 except:
74 LOG.exception('Invalid state object: %s', state_db)
75 continue
76 query_module_name = state_db.query_module
77 querier = self.get_querier(query_module_name)
78
79 if querier is not None:
80 query_contexts_dict[querier].append(context)
81
82 for querier, contexts in six.iteritems(query_contexts_dict):
83 LOG.info('Found %d pending actions for query module %s', len(contexts), querier)
84 querier.add_queries(query_contexts=contexts)
85
86 def process(self, query_context):
87 querier = self.get_querier(query_context.query_module)
88 context = QueryContext.from_model(query_context)
89 querier.add_queries(query_contexts=[context])
90 return
91
92 def get_querier(self, query_module_name):
93 if (query_module_name not in self._queriers and
94 query_module_name not in self._failed_imports):
95 try:
96 query_module = self._import_query_module(query_module_name)
97 except:
98 LOG.exception('Failed importing query module: %s', query_module_name)
99 self._failed_imports.add(query_module_name)
100 self._queriers[query_module_name] = None
101 else:
102 querier = query_module.get_instance()
103 self._queriers[query_module_name] = querier
104 self._query_threads.append(eventlet.spawn(querier.start))
105
106 return self._queriers[query_module_name]
107
108 def _import_query_module(self, module_name):
109 return importlib.import_module(module_name, package=None)
110
111
112 def get_tracker():
113 with Connection(cfg.CONF.messaging.url) as conn:
114 return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])
115
```
Path: `st2common/st2common/models/db/reactor.py`
Content:
```
1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more
2 # contributor license agreements. See the NOTICE file distributed with
3 # this work for additional information regarding copyright ownership.
4 # The ASF licenses this file to You under the Apache License, Version 2.0
5 # (the "License"); you may not use this file except in compliance with
6 # the License. You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import mongoengine as me
17 from st2common.models.db import MongoDBAccess
18 from st2common.models.db import stormbase
19
20 __all__ = [
21 'SensorTypeDB',
22 'TriggerTypeDB',
23 'TriggerDB',
24 'TriggerInstanceDB',
25 'ActionExecutionSpecDB',
26 'RuleDB'
27 ]
28
29
30 class SensorTypeDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):
31 """
32 Description of a specific type of a sensor (think of it as a sensor
33 template).
34
35 Attribute:
36 pack - Name of the content pack this sensor belongs to.
37 artifact_uri - URI to the artifact file.
38 entry_point - Full path to the sensor entry point (e.g. module.foo.ClassSensor).
39 trigger_type - A list of references to the TriggerTypeDB objects exposed by this sensor.
40 poll_interval - Poll interval for this sensor.
41 """
42 name = me.StringField(required=True)
43 pack = me.StringField(required=True, unique_with='name')
44 artifact_uri = me.StringField()
45 entry_point = me.StringField()
46 trigger_types = me.ListField(field=me.StringField())
47 poll_interval = me.IntField()
48 enabled = me.BooleanField(default=True,
49 help_text=u'Flag indicating whether the sensor is enabled.')
50
51
52 class TriggerTypeDB(stormbase.StormBaseDB,
53 stormbase.ContentPackResourceMixin,
54 stormbase.TagsMixin):
55 """Description of a specific kind/type of a trigger. The
56 (pack, name) tuple is expected uniquely identify a trigger in
57 the namespace of all triggers provided by a specific trigger_source.
58 Attribute:
59 pack - Name of the content pack this trigger belongs to.
60 trigger_source: Source that owns this trigger type.
61 payload_info: Meta information of the expected payload.
62 """
63 name = me.StringField(required=True)
64 pack = me.StringField(required=True, unique_with='name')
65 payload_schema = me.DictField()
66 parameters_schema = me.DictField(default={})
67
68 meta = {
69 'indexes': stormbase.TagsMixin.get_indices()
70 }
71
72
73 class TriggerDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):
74 """
75 Attribute:
76 pack - Name of the content pack this trigger belongs to.
77 type - Reference to the TriggerType object.
78 parameters - Trigger parameters.
79 """
80 name = me.StringField(required=True)
81 pack = me.StringField(required=True, unique_with='name')
82 type = me.StringField()
83 parameters = me.DictField()
84
85
86 class TriggerInstanceDB(stormbase.StormFoundationDB):
87 """An instance or occurrence of a type of Trigger.
88 Attribute:
89 trigger: Reference to the Trigger object.
90 payload (dict): payload specific to the occurrence.
91 occurrence_time (datetime): time of occurrence of the trigger.
92 """
93 trigger = me.StringField()
94 payload = me.DictField()
95 occurrence_time = me.DateTimeField()
96
97
98 class ActionExecutionSpecDB(me.EmbeddedDocument):
99 ref = me.StringField(required=True, unique=False)
100 parameters = me.DictField()
101
102 def __str__(self):
103 result = []
104 result.append('ActionExecutionSpecDB@')
105 result.append(str(id(self)))
106 result.append('(ref="%s", ' % self.ref)
107 result.append('parameters="%s")' % self.parameters)
108 return ''.join(result)
109
110
111 class RuleDB(stormbase.StormBaseDB, stormbase.TagsMixin):
112 """Specifies the action to invoke on the occurrence of a Trigger. It
113 also includes the transformation to perform to match the impedance
114 between the payload of a TriggerInstance and input of a action.
115 Attribute:
116 trigger: Trigger that trips this rule.
117 criteria:
118 action: Action to execute when the rule is tripped.
119 status: enabled or disabled. If disabled occurrence of the trigger
120 does not lead to execution of a action and vice-versa.
121 """
122 trigger = me.StringField()
123 criteria = stormbase.EscapedDictField()
124 action = me.EmbeddedDocumentField(ActionExecutionSpecDB)
125 enabled = me.BooleanField(required=True, default=True,
126 help_text=u'Flag indicating whether the rule is enabled.')
127
128 meta = {
129 'indexes': stormbase.TagsMixin.get_indices()
130 }
131
132 # specialized access objects
133 sensor_type_access = MongoDBAccess(SensorTypeDB)
134 triggertype_access = MongoDBAccess(TriggerTypeDB)
135 trigger_access = MongoDBAccess(TriggerDB)
136 triggerinstance_access = MongoDBAccess(TriggerInstanceDB)
137 rule_access = MongoDBAccess(RuleDB)
138
139 MODELS = [SensorTypeDB, TriggerTypeDB, TriggerDB, TriggerInstanceDB, RuleDB]
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/st2actions/st2actions/resultstracker.py b/st2actions/st2actions/resultstracker.py
--- a/st2actions/st2actions/resultstracker.py
+++ b/st2actions/st2actions/resultstracker.py
@@ -49,7 +49,7 @@
def wait(self):
super(ResultsTracker, self).wait()
- for thread in self._query_threads():
+ for thread in self._query_threads:
thread.wait()
def shutdown(self):
diff --git a/st2common/st2common/models/db/reactor.py b/st2common/st2common/models/db/reactor.py
--- a/st2common/st2common/models/db/reactor.py
+++ b/st2common/st2common/models/db/reactor.py
@@ -91,7 +91,7 @@
occurrence_time (datetime): time of occurrence of the trigger.
"""
trigger = me.StringField()
- payload = me.DictField()
+ payload = stormbase.EscapedDictField()
occurrence_time = me.DateTimeField()
|
{"golden_diff": "diff --git a/st2actions/st2actions/resultstracker.py b/st2actions/st2actions/resultstracker.py\n--- a/st2actions/st2actions/resultstracker.py\n+++ b/st2actions/st2actions/resultstracker.py\n@@ -49,7 +49,7 @@\n \n def wait(self):\n super(ResultsTracker, self).wait()\n- for thread in self._query_threads():\n+ for thread in self._query_threads:\n thread.wait()\n \n def shutdown(self):\ndiff --git a/st2common/st2common/models/db/reactor.py b/st2common/st2common/models/db/reactor.py\n--- a/st2common/st2common/models/db/reactor.py\n+++ b/st2common/st2common/models/db/reactor.py\n@@ -91,7 +91,7 @@\n occurrence_time (datetime): time of occurrence of the trigger.\n \"\"\"\n trigger = me.StringField()\n- payload = me.DictField()\n+ payload = stormbase.EscapedDictField()\n occurrence_time = me.DateTimeField()\n", "issue": "Custom webhook fails action depending on payload.\nHi there, I've faced a weird issue.\n\nSay it we have a webhook action like this: https://gist.github.com/dennybaa/00d1936af2f441cfff15\nIf I issue the following commands:\n1. `curl -X POST --data \"{\\\"message\\\": \\\"6fd781b849bb\\\"}\" http://172.17.0.8:9101/v1/webhooks/sample`\n2. `curl -X POST --data \"{\\\"hits.rate_15m\\\": 0.4765927732670934, \\\"hits.count\\\": 30, \\\"tags\\\": [\\\"metric\\\"], \\\"@timestamp\\\": \\\"2015-04-23T13:58:28.319Z\\\", \\\"hits.rate_5m\\\": 0.31123263029226506, \\\"@version\\\": \\\"1\\\", \\\"message\\\": \\\"6fd781b849bb\\\", \\\"hits.rate_1m\\\": 0.08765952620175455}\"`\n\nIn both cases trigger is dispatched. But the action takes place only in the #1 case.\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport eventlet\nimport importlib\nimport six\n\nfrom collections import defaultdict\nfrom kombu import Connection\nfrom oslo.config import cfg\n\nfrom st2actions.query.base import QueryContext\nfrom st2common import log as logging\nfrom st2common.models.db import action as action_models\nfrom st2common.persistence.action import ActionExecutionState\nfrom st2common.transport import actionexecutionstate, consumers, publishers\n\n\nLOG = logging.getLogger(__name__)\n\nACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work',\n routing_key=publishers.CREATE_RK)\n\n\nclass ResultsTracker(consumers.MessageHandler):\n message_type = action_models.ActionExecutionStateDB\n\n def __init__(self, connection, queues):\n super(ResultsTracker, self).__init__(connection, queues)\n self._queriers = {}\n self._query_threads = []\n self._failed_imports = set()\n\n def start(self, wait=False):\n self._bootstrap()\n super(ResultsTracker, self).start(wait=wait)\n\n def wait(self):\n super(ResultsTracker, self).wait()\n for thread in self._query_threads():\n thread.wait()\n\n def shutdown(self):\n super(ResultsTracker, self).shutdown()\n LOG.info('Stats from queriers:')\n self._print_stats()\n\n def _print_stats(self):\n for name, querier in six.iteritems(self._queriers):\n if querier:\n querier.print_stats()\n\n def _bootstrap(self):\n all_states = ActionExecutionState.get_all()\n LOG.info('Found %d pending states in db.' 
% len(all_states))\n\n query_contexts_dict = defaultdict(list)\n for state_db in all_states:\n try:\n context = QueryContext.from_model(state_db)\n except:\n LOG.exception('Invalid state object: %s', state_db)\n continue\n query_module_name = state_db.query_module\n querier = self.get_querier(query_module_name)\n\n if querier is not None:\n query_contexts_dict[querier].append(context)\n\n for querier, contexts in six.iteritems(query_contexts_dict):\n LOG.info('Found %d pending actions for query module %s', len(contexts), querier)\n querier.add_queries(query_contexts=contexts)\n\n def process(self, query_context):\n querier = self.get_querier(query_context.query_module)\n context = QueryContext.from_model(query_context)\n querier.add_queries(query_contexts=[context])\n return\n\n def get_querier(self, query_module_name):\n if (query_module_name not in self._queriers and\n query_module_name not in self._failed_imports):\n try:\n query_module = self._import_query_module(query_module_name)\n except:\n LOG.exception('Failed importing query module: %s', query_module_name)\n self._failed_imports.add(query_module_name)\n self._queriers[query_module_name] = None\n else:\n querier = query_module.get_instance()\n self._queriers[query_module_name] = querier\n self._query_threads.append(eventlet.spawn(querier.start))\n\n return self._queriers[query_module_name]\n\n def _import_query_module(self, module_name):\n return importlib.import_module(module_name, package=None)\n\n\ndef get_tracker():\n with Connection(cfg.CONF.messaging.url) as conn:\n return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])\n", "path": "st2actions/st2actions/resultstracker.py"}, {"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mongoengine as me\nfrom st2common.models.db import MongoDBAccess\nfrom st2common.models.db import stormbase\n\n__all__ = [\n 'SensorTypeDB',\n 'TriggerTypeDB',\n 'TriggerDB',\n 'TriggerInstanceDB',\n 'ActionExecutionSpecDB',\n 'RuleDB'\n]\n\n\nclass SensorTypeDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):\n \"\"\"\n Description of a specific type of a sensor (think of it as a sensor\n template).\n\n Attribute:\n pack - Name of the content pack this sensor belongs to.\n artifact_uri - URI to the artifact file.\n entry_point - Full path to the sensor entry point (e.g. 
module.foo.ClassSensor).\n trigger_type - A list of references to the TriggerTypeDB objects exposed by this sensor.\n poll_interval - Poll interval for this sensor.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n artifact_uri = me.StringField()\n entry_point = me.StringField()\n trigger_types = me.ListField(field=me.StringField())\n poll_interval = me.IntField()\n enabled = me.BooleanField(default=True,\n help_text=u'Flag indicating whether the sensor is enabled.')\n\n\nclass TriggerTypeDB(stormbase.StormBaseDB,\n stormbase.ContentPackResourceMixin,\n stormbase.TagsMixin):\n \"\"\"Description of a specific kind/type of a trigger. The\n (pack, name) tuple is expected uniquely identify a trigger in\n the namespace of all triggers provided by a specific trigger_source.\n Attribute:\n pack - Name of the content pack this trigger belongs to.\n trigger_source: Source that owns this trigger type.\n payload_info: Meta information of the expected payload.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n payload_schema = me.DictField()\n parameters_schema = me.DictField(default={})\n\n meta = {\n 'indexes': stormbase.TagsMixin.get_indices()\n }\n\n\nclass TriggerDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):\n \"\"\"\n Attribute:\n pack - Name of the content pack this trigger belongs to.\n type - Reference to the TriggerType object.\n parameters - Trigger parameters.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n type = me.StringField()\n parameters = me.DictField()\n\n\nclass TriggerInstanceDB(stormbase.StormFoundationDB):\n \"\"\"An instance or occurrence of a type of Trigger.\n Attribute:\n trigger: Reference to the Trigger object.\n payload (dict): payload specific to the occurrence.\n occurrence_time (datetime): time of occurrence of the trigger.\n \"\"\"\n trigger = me.StringField()\n payload = me.DictField()\n occurrence_time = me.DateTimeField()\n\n\nclass ActionExecutionSpecDB(me.EmbeddedDocument):\n ref = me.StringField(required=True, unique=False)\n parameters = me.DictField()\n\n def __str__(self):\n result = []\n result.append('ActionExecutionSpecDB@')\n result.append(str(id(self)))\n result.append('(ref=\"%s\", ' % self.ref)\n result.append('parameters=\"%s\")' % self.parameters)\n return ''.join(result)\n\n\nclass RuleDB(stormbase.StormBaseDB, stormbase.TagsMixin):\n \"\"\"Specifies the action to invoke on the occurrence of a Trigger. It\n also includes the transformation to perform to match the impedance\n between the payload of a TriggerInstance and input of a action.\n Attribute:\n trigger: Trigger that trips this rule.\n criteria:\n action: Action to execute when the rule is tripped.\n status: enabled or disabled. 
If disabled occurrence of the trigger\n does not lead to execution of a action and vice-versa.\n \"\"\"\n trigger = me.StringField()\n criteria = stormbase.EscapedDictField()\n action = me.EmbeddedDocumentField(ActionExecutionSpecDB)\n enabled = me.BooleanField(required=True, default=True,\n help_text=u'Flag indicating whether the rule is enabled.')\n\n meta = {\n 'indexes': stormbase.TagsMixin.get_indices()\n }\n\n# specialized access objects\nsensor_type_access = MongoDBAccess(SensorTypeDB)\ntriggertype_access = MongoDBAccess(TriggerTypeDB)\ntrigger_access = MongoDBAccess(TriggerDB)\ntriggerinstance_access = MongoDBAccess(TriggerInstanceDB)\nrule_access = MongoDBAccess(RuleDB)\n\nMODELS = [SensorTypeDB, TriggerTypeDB, TriggerDB, TriggerInstanceDB, RuleDB]\n", "path": "st2common/st2common/models/db/reactor.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport eventlet\nimport importlib\nimport six\n\nfrom collections import defaultdict\nfrom kombu import Connection\nfrom oslo.config import cfg\n\nfrom st2actions.query.base import QueryContext\nfrom st2common import log as logging\nfrom st2common.models.db import action as action_models\nfrom st2common.persistence.action import ActionExecutionState\nfrom st2common.transport import actionexecutionstate, consumers, publishers\n\n\nLOG = logging.getLogger(__name__)\n\nACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work',\n routing_key=publishers.CREATE_RK)\n\n\nclass ResultsTracker(consumers.MessageHandler):\n message_type = action_models.ActionExecutionStateDB\n\n def __init__(self, connection, queues):\n super(ResultsTracker, self).__init__(connection, queues)\n self._queriers = {}\n self._query_threads = []\n self._failed_imports = set()\n\n def start(self, wait=False):\n self._bootstrap()\n super(ResultsTracker, self).start(wait=wait)\n\n def wait(self):\n super(ResultsTracker, self).wait()\n for thread in self._query_threads:\n thread.wait()\n\n def shutdown(self):\n super(ResultsTracker, self).shutdown()\n LOG.info('Stats from queriers:')\n self._print_stats()\n\n def _print_stats(self):\n for name, querier in six.iteritems(self._queriers):\n if querier:\n querier.print_stats()\n\n def _bootstrap(self):\n all_states = ActionExecutionState.get_all()\n LOG.info('Found %d pending states in db.' 
% len(all_states))\n\n query_contexts_dict = defaultdict(list)\n for state_db in all_states:\n try:\n context = QueryContext.from_model(state_db)\n except:\n LOG.exception('Invalid state object: %s', state_db)\n continue\n query_module_name = state_db.query_module\n querier = self.get_querier(query_module_name)\n\n if querier is not None:\n query_contexts_dict[querier].append(context)\n\n for querier, contexts in six.iteritems(query_contexts_dict):\n LOG.info('Found %d pending actions for query module %s', len(contexts), querier)\n querier.add_queries(query_contexts=contexts)\n\n def process(self, query_context):\n querier = self.get_querier(query_context.query_module)\n context = QueryContext.from_model(query_context)\n querier.add_queries(query_contexts=[context])\n return\n\n def get_querier(self, query_module_name):\n if (query_module_name not in self._queriers and\n query_module_name not in self._failed_imports):\n try:\n query_module = self._import_query_module(query_module_name)\n except:\n LOG.exception('Failed importing query module: %s', query_module_name)\n self._failed_imports.add(query_module_name)\n self._queriers[query_module_name] = None\n else:\n querier = query_module.get_instance()\n self._queriers[query_module_name] = querier\n self._query_threads.append(eventlet.spawn(querier.start))\n\n return self._queriers[query_module_name]\n\n def _import_query_module(self, module_name):\n return importlib.import_module(module_name, package=None)\n\n\ndef get_tracker():\n with Connection(cfg.CONF.messaging.url) as conn:\n return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])\n", "path": "st2actions/st2actions/resultstracker.py"}, {"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mongoengine as me\nfrom st2common.models.db import MongoDBAccess\nfrom st2common.models.db import stormbase\n\n__all__ = [\n 'SensorTypeDB',\n 'TriggerTypeDB',\n 'TriggerDB',\n 'TriggerInstanceDB',\n 'ActionExecutionSpecDB',\n 'RuleDB'\n]\n\n\nclass SensorTypeDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):\n \"\"\"\n Description of a specific type of a sensor (think of it as a sensor\n template).\n\n Attribute:\n pack - Name of the content pack this sensor belongs to.\n artifact_uri - URI to the artifact file.\n entry_point - Full path to the sensor entry point (e.g. 
module.foo.ClassSensor).\n trigger_type - A list of references to the TriggerTypeDB objects exposed by this sensor.\n poll_interval - Poll interval for this sensor.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n artifact_uri = me.StringField()\n entry_point = me.StringField()\n trigger_types = me.ListField(field=me.StringField())\n poll_interval = me.IntField()\n enabled = me.BooleanField(default=True,\n help_text=u'Flag indicating whether the sensor is enabled.')\n\n\nclass TriggerTypeDB(stormbase.StormBaseDB,\n stormbase.ContentPackResourceMixin,\n stormbase.TagsMixin):\n \"\"\"Description of a specific kind/type of a trigger. The\n (pack, name) tuple is expected uniquely identify a trigger in\n the namespace of all triggers provided by a specific trigger_source.\n Attribute:\n pack - Name of the content pack this trigger belongs to.\n trigger_source: Source that owns this trigger type.\n payload_info: Meta information of the expected payload.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n payload_schema = me.DictField()\n parameters_schema = me.DictField(default={})\n\n meta = {\n 'indexes': stormbase.TagsMixin.get_indices()\n }\n\n\nclass TriggerDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):\n \"\"\"\n Attribute:\n pack - Name of the content pack this trigger belongs to.\n type - Reference to the TriggerType object.\n parameters - Trigger parameters.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n type = me.StringField()\n parameters = me.DictField()\n\n\nclass TriggerInstanceDB(stormbase.StormFoundationDB):\n \"\"\"An instance or occurrence of a type of Trigger.\n Attribute:\n trigger: Reference to the Trigger object.\n payload (dict): payload specific to the occurrence.\n occurrence_time (datetime): time of occurrence of the trigger.\n \"\"\"\n trigger = me.StringField()\n payload = stormbase.EscapedDictField()\n occurrence_time = me.DateTimeField()\n\n\nclass ActionExecutionSpecDB(me.EmbeddedDocument):\n ref = me.StringField(required=True, unique=False)\n parameters = me.DictField()\n\n def __str__(self):\n result = []\n result.append('ActionExecutionSpecDB@')\n result.append(str(id(self)))\n result.append('(ref=\"%s\", ' % self.ref)\n result.append('parameters=\"%s\")' % self.parameters)\n return ''.join(result)\n\n\nclass RuleDB(stormbase.StormBaseDB, stormbase.TagsMixin):\n \"\"\"Specifies the action to invoke on the occurrence of a Trigger. It\n also includes the transformation to perform to match the impedance\n between the payload of a TriggerInstance and input of a action.\n Attribute:\n trigger: Trigger that trips this rule.\n criteria:\n action: Action to execute when the rule is tripped.\n status: enabled or disabled. 
If disabled occurrence of the trigger\n does not lead to execution of a action and vice-versa.\n \"\"\"\n trigger = me.StringField()\n criteria = stormbase.EscapedDictField()\n action = me.EmbeddedDocumentField(ActionExecutionSpecDB)\n enabled = me.BooleanField(required=True, default=True,\n help_text=u'Flag indicating whether the rule is enabled.')\n\n meta = {\n 'indexes': stormbase.TagsMixin.get_indices()\n }\n\n# specialized access objects\nsensor_type_access = MongoDBAccess(SensorTypeDB)\ntriggertype_access = MongoDBAccess(TriggerTypeDB)\ntrigger_access = MongoDBAccess(TriggerDB)\ntriggerinstance_access = MongoDBAccess(TriggerInstanceDB)\nrule_access = MongoDBAccess(RuleDB)\n\nMODELS = [SensorTypeDB, TriggerTypeDB, TriggerDB, TriggerInstanceDB, RuleDB]\n", "path": "st2common/st2common/models/db/reactor.py"}]}
| 3,189 | 227 |
gh_patches_debug_33534
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-1673
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature]: paper dates are not in the complete date-file
### I propose a feature for:
Sources
### Describe your wanted feature
Hi,
right now I saw that the homepage "https://www.geoport-nwm.de/de/abfuhrtermine-geoportal.html" describes 3 ics files for paper-dates:
Please can you add them to the integration, because I need to add them manually now.
Kalenderdatei Altpapiertonne GER Umweltschutz GmbH | downloaden (ICS)
Kalenderdatei Altpapiertonne Gollan Recycling GmbH | downloaden (ICS)
Kalenderdatei Altpapiertonne Veolia Umweltservice Nord GmbH | downloaden (ICS)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py`
Content:
```
1 import datetime
2 import urllib
3
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6 from waste_collection_schedule.service.ICS import ICS
7
8 TITLE = "Landkreis Nordwestmecklenburg"
9 DESCRIPTION = "Source for Landkreis Nordwestmecklenburg"
10 URL = "https://www.geoport-nwm.de"
11 TEST_CASES = {
12 "Rüting": {"district": "Rüting"},
13 "Grevenstein u. ...": {"district": "Grevenstein u. Ausbau"},
14 "Seefeld": {"district": "Seefeld/ Testorf- Steinfort"},
15 "1100l": {"district": "Groß Stieten (1.100 l Behälter)"},
16 "kl. Bünsdorf": {"district": "Klein Bünsdorf"},
17 }
18
19
20 class Source:
21 def __init__(self, district):
22 self._district = district
23 self._ics = ICS()
24
25 def fetch(self):
26 today = datetime.date.today()
27 dates = []
28 if today.month == 12:
29 # On Dec 27 2022, the 2022 schedule was no longer available for test case "Seefeld", all others worked
30 try:
31 dates = self.fetch_year(today.year)
32 except Exception:
33 pass
34 try:
35 dates.extend(self.fetch_year(today.year + 1))
36 except Exception:
37 pass
38 else:
39 dates = self.fetch_year(today.year)
40
41 entries = []
42 for d in dates:
43 entries.append(Collection(d[0], d[1]))
44 return entries
45
46 def fetch_year(self, year):
47 arg = convert_to_arg(self._district)
48 r = requests.get(
49 f"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics"
50 )
51 r.raise_for_status()
52 return self._ics.convert(r.text)
53
54
55 def convert_to_arg(district):
56 district = district.replace("(1.100 l Behälter)", "1100_l")
57 district = district.replace("ü", "ue")
58 district = district.replace("ö", "oe")
59 district = district.replace("ä", "ae")
60 district = district.replace("ß", "ss")
61 district = district.replace("/", "")
62 district = district.replace("- ", "-")
63 district = district.replace(".", "")
64 district = district.replace(" ", "_")
65 arg = urllib.parse.quote("Ortsteil_" + district)
66 return arg
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py
@@ -16,6 +16,8 @@
"kl. Bünsdorf": {"district": "Klein Bünsdorf"},
}
+API_URL = "https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics"
+
class Source:
def __init__(self, district):
@@ -45,22 +47,35 @@
def fetch_year(self, year):
arg = convert_to_arg(self._district)
- r = requests.get(
- f"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics"
- )
+ r = requests.get(API_URL.format(year=year, arg=arg))
r.raise_for_status()
- return self._ics.convert(r.text)
+ entries = self._ics.convert(r.text)
+ for prefix in (
+ "Schadstoffmobil",
+ "Papiertonne_GER",
+ "Papiertonne_Gollan",
+ "Papiertonne_Veolia",
+ ):
+ try:
+ r = requests.get(API_URL.format(year=year, arg=f"{prefix}_{arg}"))
+ r.raise_for_status()
+ new_entries = self._ics.convert(r.text)
+ entries.extend(new_entries)
+ except (ValueError, requests.exceptions.HTTPError):
+ pass
+ return entries
-def convert_to_arg(district):
+def convert_to_arg(district, prefix=""):
district = district.replace("(1.100 l Behälter)", "1100_l")
district = district.replace("ü", "ue")
district = district.replace("ö", "oe")
district = district.replace("ä", "ae")
district = district.replace("ß", "ss")
district = district.replace("/", "")
- district = district.replace("- ", "-")
+ # district = district.replace("- ", "-") failed with Seefeld/ Testorf- Steinfort
district = district.replace(".", "")
district = district.replace(" ", "_")
- arg = urllib.parse.quote("Ortsteil_" + district)
+ prefix = prefix + "_" if prefix else ""
+ arg = urllib.parse.quote(f"{prefix}Ortsteil_{district}")
return arg
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py\n@@ -16,6 +16,8 @@\n \"kl. B\u00fcnsdorf\": {\"district\": \"Klein B\u00fcnsdorf\"},\n }\n \n+API_URL = \"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n+\n \n class Source:\n def __init__(self, district):\n@@ -45,22 +47,35 @@\n \n def fetch_year(self, year):\n arg = convert_to_arg(self._district)\n- r = requests.get(\n- f\"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n- )\n+ r = requests.get(API_URL.format(year=year, arg=arg))\n r.raise_for_status()\n- return self._ics.convert(r.text)\n+ entries = self._ics.convert(r.text)\n+ for prefix in (\n+ \"Schadstoffmobil\",\n+ \"Papiertonne_GER\",\n+ \"Papiertonne_Gollan\",\n+ \"Papiertonne_Veolia\",\n+ ):\n+ try:\n+ r = requests.get(API_URL.format(year=year, arg=f\"{prefix}_{arg}\"))\n+ r.raise_for_status()\n+ new_entries = self._ics.convert(r.text)\n+ entries.extend(new_entries)\n+ except (ValueError, requests.exceptions.HTTPError):\n+ pass\n+ return entries\n \n \n-def convert_to_arg(district):\n+def convert_to_arg(district, prefix=\"\"):\n district = district.replace(\"(1.100 l Beh\u00e4lter)\", \"1100_l\")\n district = district.replace(\"\u00fc\", \"ue\")\n district = district.replace(\"\u00f6\", \"oe\")\n district = district.replace(\"\u00e4\", \"ae\")\n district = district.replace(\"\u00df\", \"ss\")\n district = district.replace(\"/\", \"\")\n- district = district.replace(\"- \", \"-\")\n+ # district = district.replace(\"- \", \"-\") failed with Seefeld/ Testorf- Steinfort\n district = district.replace(\".\", \"\")\n district = district.replace(\" \", \"_\")\n- arg = urllib.parse.quote(\"Ortsteil_\" + district)\n+ prefix = prefix + \"_\" if prefix else \"\"\n+ arg = urllib.parse.quote(f\"{prefix}Ortsteil_{district}\")\n return arg\n", "issue": "[Feature]: paper dates are not in the complete date-file\n### I propose a feature for:\r\n\r\nSources\r\n\r\n### Describe your wanted feature\r\n\r\nHi,\r\nright now I saw that the homepage \"https://www.geoport-nwm.de/de/abfuhrtermine-geoportal.html\" describes 3 ics files for paper-dates: \r\nPlease can you add them to the integrsation, because I need to add them manually now.\r\n\r\nKalenderdatei AltpapiertonneGER Umweltschutz GmbH | downloaden (ICS)\r\nKalenderdatei AltpapiertonneGollan Recycling GmbH | downloaden (ICS)\r\nKalenderdatei AltpapiertonneVeolia Umweltservice Nord GmbH | downloaden (ICS)\r\n\n", "before_files": [{"content": "import datetime\nimport urllib\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Landkreis Nordwestmecklenburg\"\nDESCRIPTION = \"Source for Landkreis Nordwestmecklenburg\"\nURL = \"https://www.geoport-nwm.de\"\nTEST_CASES = {\n \"R\u00fcting\": {\"district\": \"R\u00fcting\"},\n \"Grevenstein u. ...\": {\"district\": \"Grevenstein u. Ausbau\"},\n \"Seefeld\": {\"district\": \"Seefeld/ Testorf- Steinfort\"},\n \"1100l\": {\"district\": \"Gro\u00df Stieten (1.100 l Beh\u00e4lter)\"},\n \"kl. 
B\u00fcnsdorf\": {\"district\": \"Klein B\u00fcnsdorf\"},\n}\n\n\nclass Source:\n def __init__(self, district):\n self._district = district\n self._ics = ICS()\n\n def fetch(self):\n today = datetime.date.today()\n dates = []\n if today.month == 12:\n # On Dec 27 2022, the 2022 schedule was no longer available for test case \"Seefeld\", all others worked\n try:\n dates = self.fetch_year(today.year)\n except Exception:\n pass\n try:\n dates.extend(self.fetch_year(today.year + 1))\n except Exception:\n pass\n else:\n dates = self.fetch_year(today.year)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n\n def fetch_year(self, year):\n arg = convert_to_arg(self._district)\n r = requests.get(\n f\"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n )\n r.raise_for_status()\n return self._ics.convert(r.text)\n\n\ndef convert_to_arg(district):\n district = district.replace(\"(1.100 l Beh\u00e4lter)\", \"1100_l\")\n district = district.replace(\"\u00fc\", \"ue\")\n district = district.replace(\"\u00f6\", \"oe\")\n district = district.replace(\"\u00e4\", \"ae\")\n district = district.replace(\"\u00df\", \"ss\")\n district = district.replace(\"/\", \"\")\n district = district.replace(\"- \", \"-\")\n district = district.replace(\".\", \"\")\n district = district.replace(\" \", \"_\")\n arg = urllib.parse.quote(\"Ortsteil_\" + district)\n return arg\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py"}], "after_files": [{"content": "import datetime\nimport urllib\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Landkreis Nordwestmecklenburg\"\nDESCRIPTION = \"Source for Landkreis Nordwestmecklenburg\"\nURL = \"https://www.geoport-nwm.de\"\nTEST_CASES = {\n \"R\u00fcting\": {\"district\": \"R\u00fcting\"},\n \"Grevenstein u. ...\": {\"district\": \"Grevenstein u. Ausbau\"},\n \"Seefeld\": {\"district\": \"Seefeld/ Testorf- Steinfort\"},\n \"1100l\": {\"district\": \"Gro\u00df Stieten (1.100 l Beh\u00e4lter)\"},\n \"kl. 
B\u00fcnsdorf\": {\"district\": \"Klein B\u00fcnsdorf\"},\n}\n\nAPI_URL = \"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n\n\nclass Source:\n def __init__(self, district):\n self._district = district\n self._ics = ICS()\n\n def fetch(self):\n today = datetime.date.today()\n dates = []\n if today.month == 12:\n # On Dec 27 2022, the 2022 schedule was no longer available for test case \"Seefeld\", all others worked\n try:\n dates = self.fetch_year(today.year)\n except Exception:\n pass\n try:\n dates.extend(self.fetch_year(today.year + 1))\n except Exception:\n pass\n else:\n dates = self.fetch_year(today.year)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n\n def fetch_year(self, year):\n arg = convert_to_arg(self._district)\n r = requests.get(API_URL.format(year=year, arg=arg))\n r.raise_for_status()\n entries = self._ics.convert(r.text)\n for prefix in (\n \"Schadstoffmobil\",\n \"Papiertonne_GER\",\n \"Papiertonne_Gollan\",\n \"Papiertonne_Veolia\",\n ):\n try:\n r = requests.get(API_URL.format(year=year, arg=f\"{prefix}_{arg}\"))\n r.raise_for_status()\n new_entries = self._ics.convert(r.text)\n entries.extend(new_entries)\n except (ValueError, requests.exceptions.HTTPError):\n pass\n return entries\n\n\ndef convert_to_arg(district, prefix=\"\"):\n district = district.replace(\"(1.100 l Beh\u00e4lter)\", \"1100_l\")\n district = district.replace(\"\u00fc\", \"ue\")\n district = district.replace(\"\u00f6\", \"oe\")\n district = district.replace(\"\u00e4\", \"ae\")\n district = district.replace(\"\u00df\", \"ss\")\n district = district.replace(\"/\", \"\")\n # district = district.replace(\"- \", \"-\") failed with Seefeld/ Testorf- Steinfort\n district = district.replace(\".\", \"\")\n district = district.replace(\" \", \"_\")\n prefix = prefix + \"_\" if prefix else \"\"\n arg = urllib.parse.quote(f\"{prefix}Ortsteil_{district}\")\n return arg\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py"}]}
| 1,104 | 590 |
gh_patches_debug_19205
|
rasdani/github-patches
|
git_diff
|
strawberry-graphql__strawberry-2205
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move non-core dependencies to dedicated groups
@la4de has made a very useful playground for Strawberry, available (for now) here -> https://la4de.github.io/strawberry-playground/
Unfortunately some of the default dependencies aren't uploaded as wheels (see https://github.com/la4de/strawberry-playground/issues/1).
Maybe it could be time to move some of these deps to specific groups; we definitely don't need python-multipart installed by default :)
Here's a list of proposed groups based on dependencies installed when doing `pip install strawberry-graphql`:
**Default**:
- cached-property
- sentinel
- typing-extensions
- graphql-core
- python-dateutil (I think we need this because of compatibility with python 3.7)
**CLI**:
- click
- pygments
**All web frameworks**:
- python-multipart
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/utils/debug.py`
Content:
```
1 import datetime
2 import json
3 from json import JSONEncoder
4 from typing import Any, Dict, Optional
5
6 from pygments import highlight, lexers
7 from pygments.formatters import Terminal256Formatter
8
9 from .graphql_lexer import GraphQLLexer
10
11
12 class StrawberryJSONEncoder(JSONEncoder):
13 def default(self, o: Any) -> Any:
14 return repr(o)
15
16
17 def pretty_print_graphql_operation(
18 operation_name: Optional[str], query: str, variables: Optional[Dict["str", Any]]
19 ):
20 """Pretty print a GraphQL operation using pygments.
21
22 Won't print introspection operation to prevent noise in the output."""
23
24 if operation_name == "IntrospectionQuery":
25 return
26
27 now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
28
29 print(f"[{now}]: {operation_name or 'No operation name'}")
30 print(highlight(query, GraphQLLexer(), Terminal256Formatter()))
31
32 if variables:
33 variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)
34
35 print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py
--- a/strawberry/utils/debug.py
+++ b/strawberry/utils/debug.py
@@ -3,11 +3,6 @@
from json import JSONEncoder
from typing import Any, Dict, Optional
-from pygments import highlight, lexers
-from pygments.formatters import Terminal256Formatter
-
-from .graphql_lexer import GraphQLLexer
-
class StrawberryJSONEncoder(JSONEncoder):
def default(self, o: Any) -> Any:
@@ -21,6 +16,17 @@
Won't print introspection operation to prevent noise in the output."""
+ try:
+ from pygments import highlight, lexers
+ from pygments.formatters import Terminal256Formatter
+ except ImportError as e:
+ raise ImportError(
+ "pygments is not installed but is required for debug output, install it "
+ "directly or run `pip install strawberry-graphql[debug-server]`"
+ ) from e
+
+ from .graphql_lexer import GraphQLLexer
+
if operation_name == "IntrospectionQuery":
return
|
{"golden_diff": "diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py\n--- a/strawberry/utils/debug.py\n+++ b/strawberry/utils/debug.py\n@@ -3,11 +3,6 @@\n from json import JSONEncoder\n from typing import Any, Dict, Optional\n \n-from pygments import highlight, lexers\n-from pygments.formatters import Terminal256Formatter\n-\n-from .graphql_lexer import GraphQLLexer\n-\n \n class StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n@@ -21,6 +16,17 @@\n \n Won't print introspection operation to prevent noise in the output.\"\"\"\n \n+ try:\n+ from pygments import highlight, lexers\n+ from pygments.formatters import Terminal256Formatter\n+ except ImportError as e:\n+ raise ImportError(\n+ \"pygments is not installed but is required for debug output, install it \"\n+ \"directly or run `pip install strawberry-graphql[debug-server]`\"\n+ ) from e\n+\n+ from .graphql_lexer import GraphQLLexer\n+\n if operation_name == \"IntrospectionQuery\":\n return\n", "issue": "Move non-core dependencies to dedicated groups\n@la4de has made a very useful playground for Strawberry, available (for now) here -> https://la4de.github.io/strawberry-playground/\r\n\r\nUnfortunately some of the default dependencies aren't uploaded as wheels (see https://github.com/la4de/strawberry-playground/issues/1).\r\n\r\nMaybe it could time to move some of these deps to specific groups, we definitely don't need python-multipart installed by default :)\r\n\r\nHere's a list of proposed groups based on dependencies installed when doing `pip install strawberry-graphql`:\r\n\r\n**Default**:\r\n \r\n- cached-property\r\n- sentinel\r\n- typing-extensions\r\n- graphql-core\r\n- python-dateutil (I think we need this because of compatibility with python 3.7)\r\n\r\n**CLI**:\r\n\r\n- click\r\n- pygments\r\n\r\n**All web frameworks**:\r\n\r\n- python-multipart\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\nimport json\nfrom json import JSONEncoder\nfrom typing import Any, Dict, Optional\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import Terminal256Formatter\n\nfrom .graphql_lexer import GraphQLLexer\n\n\nclass StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n return repr(o)\n\n\ndef pretty_print_graphql_operation(\n operation_name: Optional[str], query: str, variables: Optional[Dict[\"str\", Any]]\n):\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}], "after_files": [{"content": "import datetime\nimport json\nfrom json import JSONEncoder\nfrom typing import Any, Dict, Optional\n\n\nclass StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n return repr(o)\n\n\ndef pretty_print_graphql_operation(\n operation_name: Optional[str], query: str, variables: Optional[Dict[\"str\", Any]]\n):\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n try:\n from pygments import highlight, lexers\n from 
pygments.formatters import Terminal256Formatter\n except ImportError as e:\n raise ImportError(\n \"pygments is not installed but is required for debug output, install it \"\n \"directly or run `pip install strawberry-graphql[debug-server]`\"\n ) from e\n\n from .graphql_lexer import GraphQLLexer\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}]}
| 763 | 258 |
gh_patches_debug_7588
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.general-5687
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gconftool2 throws an IndexError exception when setting configuration
### Summary
When setting a key using gconftool2, an IndexError exception is thrown because the wrong index is used when formatting the command string. It appears to have been broken since version 4.8.1 of this collection. It should be a simple one-character fix, and it would be great if we could have it backported as a patch for the older releases.
Effectively
`cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value])`
Should be
`cmd.extend(["--type", self.value_type, "--{0}".format(call_type), self.key, self.value])`
### Issue Type
Bug Report
### Component Name
gconftool2
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.13.5]
config file = /Users/***/work/ansible/ansible.cfg
configured module search path = ['/Users/***/work/ansible/library']
ansible python module location = /Users/***/Library/Python/3.9/lib/python/site-packages/ansible
ansible collection location = /Users/***/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/***/Library/Python/3.9/bin/ansible
python version = 3.9.6 (default, Sep 26 2022, 11:37:49) [Clang 14.0.0 (clang-1400.0.29.202)]
jinja version = 3.1.2
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
Collection Version
----------------- -------
community.general 5.7.0
```
### Configuration
```yaml (paste below)
- community.general.gconftool2:
key: '/desktop/gnome/remote_access/enabled'
value_type: 'bool'
value: 'true'
state: 'present'
```
### OS / Environment
Targeting CentOS 7.9
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Expected Results
I expect gconftool2 to set the key and not throw an exception
### Actual Results
```console (paste below)
Traceback (most recent call last):
File \"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\", line 107, in <module>
_ansiballz_main()
File \"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\", line 99, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File \"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\", line 48, in invoke_module
run_name='__main__', alter_sys=True)
File \"/usr/lib64/python2.7/runpy.py\", line 176, in run_module
fname, loader, pkg_name)
File \"/usr/lib64/python2.7/runpy.py\", line 82, in _run_module_code
mod_name, mod_fname, mod_loader, pkg_name)
File \"/usr/lib64/python2.7/runpy.py\", line 72, in _run_code
exec code in run_globals
File \"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\", line 230, in <module>
File \"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\", line 213, in main
File \"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\", line 128, in call
IndexError: tuple index out of range
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/system/gconftool2.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright (c) 2016, Kenneth D. Evensen <[email protected]>
5 # Copyright (c) 2017, Abhijeet Kasurde <[email protected]>
6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
7 # SPDX-License-Identifier: GPL-3.0-or-later
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12 DOCUMENTATION = '''
13 module: gconftool2
14 author:
15 - Kenneth D. Evensen (@kevensen)
16 short_description: Edit GNOME Configurations
17 description:
18 - This module allows for the manipulation of GNOME 2 Configuration via
19 gconftool-2. Please see the gconftool-2(1) man pages for more details.
20 options:
21 key:
22 type: str
23 description:
24 - A GConf preference key is an element in the GConf repository
25 that corresponds to an application preference. See man gconftool-2(1)
26 required: true
27 value:
28 type: str
29 description:
30 - Preference keys typically have simple values such as strings,
31 integers, or lists of strings and integers. This is ignored if the state
32 is "get". See man gconftool-2(1)
33 value_type:
34 type: str
35 description:
36 - The type of value being set. This is ignored if the state is "get".
37 choices: [ bool, float, int, string ]
38 state:
39 type: str
40 description:
41 - The action to take upon the key/value.
42 required: true
43 choices: [ absent, get, present ]
44 config_source:
45 type: str
46 description:
47 - Specify a configuration source to use rather than the default path.
48 See man gconftool-2(1)
49 direct:
50 description:
51 - Access the config database directly, bypassing server. If direct is
52 specified then the config_source must be specified as well.
53 See man gconftool-2(1)
54 type: bool
55 default: false
56 '''
57
58 EXAMPLES = """
59 - name: Change the widget font to "Serif 12"
60 community.general.gconftool2:
61 key: "/desktop/gnome/interface/font_name"
62 value_type: "string"
63 value: "Serif 12"
64 """
65
66 RETURN = '''
67 key:
68 description: The key specified in the module parameters
69 returned: success
70 type: str
71 sample: /desktop/gnome/interface/font_name
72 value_type:
73 description: The type of the value that was changed
74 returned: success
75 type: str
76 sample: string
77 value:
78 description: The value of the preference key after executing the module
79 returned: success
80 type: str
81 sample: "Serif 12"
82 ...
83 '''
84
85 from ansible.module_utils.basic import AnsibleModule
86
87
88 class GConf2Preference(object):
89 def __init__(self, ansible, key, value_type, value,
90 direct=False, config_source=""):
91 self.ansible = ansible
92 self.key = key
93 self.value_type = value_type
94 self.value = value
95 self.config_source = config_source
96 self.direct = direct
97
98 def value_already_set(self):
99 return False
100
101 def call(self, call_type, fail_onerr=True):
102 """ Helper function to perform gconftool-2 operations """
103 config_source = []
104 direct = []
105 changed = False
106 out = ''
107
108 # If the configuration source is different from the default, create
109 # the argument
110 if self.config_source is not None and len(self.config_source) > 0:
111 config_source = ["--config-source", self.config_source]
112
113 # If direct is true, create the argument
114 if self.direct:
115 direct = ["--direct"]
116
117 # Execute the call
118 cmd = ["gconftool-2"]
119 try:
120 # If the call is "get", then we don't need as many parameters and
121 # we can ignore some
122 if call_type == 'get':
123 cmd.extend(["--get", self.key])
124 # Otherwise, we will use all relevant parameters
125 elif call_type == 'set':
126 cmd.extend(direct)
127 cmd.extend(config_source)
128 cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value])
129 elif call_type == 'unset':
130 cmd.extend(["--unset", self.key])
131
132 # Start external command
133 rc, out, err = self.ansible.run_command(cmd)
134
135 if err and fail_onerr:
136 self.ansible.fail_json(msg='gconftool-2 failed with '
137 'error: %s' % (str(err)))
138 else:
139 changed = True
140
141 except OSError as exception:
142 self.ansible.fail_json(msg='gconftool-2 failed with exception: '
143 '%s' % exception)
144 return changed, out.rstrip()
145
146
147 def main():
148 # Setup the Ansible module
149 module = AnsibleModule(
150 argument_spec=dict(
151 key=dict(type='str', required=True, no_log=False),
152 value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),
153 value=dict(type='str'),
154 state=dict(type='str', required=True, choices=['absent', 'get', 'present']),
155 direct=dict(type='bool', default=False),
156 config_source=dict(type='str'),
157 ),
158 supports_check_mode=True
159 )
160
161 state_values = {"present": "set", "absent": "unset", "get": "get"}
162
163 # Assign module values to dictionary values
164 key = module.params['key']
165 value_type = module.params['value_type']
166 if module.params['value'].lower() == "true":
167 value = "true"
168 elif module.params['value'] == "false":
169 value = "false"
170 else:
171 value = module.params['value']
172
173 state = state_values[module.params['state']]
174 direct = module.params['direct']
175 config_source = module.params['config_source']
176
177 # Initialize some variables for later
178 change = False
179 new_value = ''
180
181 if state != "get":
182 if value is None or value == "":
183 module.fail_json(msg='State %s requires "value" to be set'
184 % str(state))
185 elif value_type is None or value_type == "":
186 module.fail_json(msg='State %s requires "value_type" to be set'
187 % str(state))
188
189 if direct and config_source is None:
190 module.fail_json(msg='If "direct" is "true" then the ' +
191 '"config_source" must be specified')
192 elif not direct and config_source is not None:
193 module.fail_json(msg='If the "config_source" is specified ' +
194 'then "direct" must be "true"')
195
196 # Create a gconf2 preference
197 gconf_pref = GConf2Preference(module, key, value_type,
198 value, direct, config_source)
199 # Now we get the current value, if not found don't fail
200 dummy, current_value = gconf_pref.call("get", fail_onerr=False)
201
202 # Check if the current value equals the value we want to set. If not, make
203 # a change
204 if current_value != value:
205 # If check mode, we know a change would have occurred.
206 if module.check_mode:
207 # So we will set the change to True
208 change = True
209 # And set the new_value to the value that would have been set
210 new_value = value
211 # If not check mode make the change.
212 else:
213 change, new_value = gconf_pref.call(state)
214 # If the value we want to set is the same as the current_value, we will
215 # set the new_value to the current_value for reporting
216 else:
217 new_value = current_value
218
219 facts = dict(gconftool2={'changed': change,
220 'key': key,
221 'value_type': value_type,
222 'new_value': new_value,
223 'previous_value': current_value,
224 'playbook_value': module.params['value']})
225
226 module.exit_json(changed=change, ansible_facts=facts)
227
228
229 if __name__ == '__main__':
230 main()
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py
--- a/plugins/modules/system/gconftool2.py
+++ b/plugins/modules/system/gconftool2.py
@@ -125,7 +125,7 @@
elif call_type == 'set':
cmd.extend(direct)
cmd.extend(config_source)
- cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value])
+ cmd.extend(["--type", self.value_type, "--{0}".format(call_type), self.key, self.value])
elif call_type == 'unset':
cmd.extend(["--unset", self.key])
|
{"golden_diff": "diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py\n--- a/plugins/modules/system/gconftool2.py\n+++ b/plugins/modules/system/gconftool2.py\n@@ -125,7 +125,7 @@\n elif call_type == 'set':\n cmd.extend(direct)\n cmd.extend(config_source)\n- cmd.extend([\"--type\", self.value_type, \"--{3}\".format(call_type), self.key, self.value])\n+ cmd.extend([\"--type\", self.value_type, \"--{0}\".format(call_type), self.key, self.value])\n elif call_type == 'unset':\n cmd.extend([\"--unset\", self.key])\n", "issue": "gconftool2 throws an IndexError exception when setting configuration\n### Summary\n\nWhen setting a key using gconftool2, an IndexError exception is thrown because the wrong index is used when formatting the command string. It appears to have been broken since version 4.8.1 of this collection. It should be a simple one character fix and would be great if we could have it back ported as a patch for the older releases.\r\n\r\nEffectively \r\n`cmd.extend([\"--type\", self.value_type, \"--{3}\".format(call_type), self.key, self.value])`\r\nShould be\r\n`cmd.extend([\"--type\", self.value_type, \"--{0}\".format(call_type), self.key, self.value])`\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\ngconftool2\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.13.5]\r\n config file = /Users/***/work/ansible/ansible.cfg\r\n configured module search path = ['/Users/***/work/ansible/library']\r\n ansible python module location = /Users/***/Library/Python/3.9/lib/python/site-packages/ansible\r\n ansible collection location = /Users/***/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /Users/***/Library/Python/3.9/bin/ansible\r\n python version = 3.9.6 (default, Sep 26 2022, 11:37:49) [Clang 14.0.0 (clang-1400.0.29.202)]\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 5.7.0 \r\n```\r\n\n\n### Configuration\n\n```yaml (paste below)\r\n- community.general.gconftool2:\r\n key: '/desktop/gnome/remote_access/enabled'\r\n value_type: 'bool'\r\n value: 'true'\r\n state: 'present'\r\n```\r\n\n\n### OS / Environment\n\nTargeting CentOS 7.9\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n\r\n```\r\n\n\n### Expected Results\n\nI expect gconftool2 to set the key and not throw an exception\n\n### Actual Results\n\n```console (paste below)\r\nTraceback (most recent call last):\r\n File \\\"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\\\", line 107, in <module>\r\n _ansiballz_main()\r\n File \\\"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\\\", line 99, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \\\"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\\\", line 48, in invoke_module\r\n run_name='__main__', alter_sys=True)\r\n File \\\"/usr/lib64/python2.7/runpy.py\\\", line 176, in run_module\r\n fname, loader, pkg_name)\r\n File \\\"/usr/lib64/python2.7/runpy.py\\\", line 82, in _run_module_code\r\n mod_name, mod_fname, mod_loader, pkg_name)\r\n File \\\"/usr/lib64/python2.7/runpy.py\\\", line 72, in _run_code\r\n exec code in 
run_globals\r\n File \\\"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\\\", line 230, in <module>\r\n File \\\"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\\\", line 213, in main\r\n File \\\"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\\\", line 128, in call\r\n IndexError: tuple index out of range\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016, Kenneth D. Evensen <[email protected]>\n# Copyright (c) 2017, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\nmodule: gconftool2\nauthor:\n - Kenneth D. Evensen (@kevensen)\nshort_description: Edit GNOME Configurations\ndescription:\n - This module allows for the manipulation of GNOME 2 Configuration via\n gconftool-2. Please see the gconftool-2(1) man pages for more details.\noptions:\n key:\n type: str\n description:\n - A GConf preference key is an element in the GConf repository\n that corresponds to an application preference. See man gconftool-2(1)\n required: true\n value:\n type: str\n description:\n - Preference keys typically have simple values such as strings,\n integers, or lists of strings and integers. This is ignored if the state\n is \"get\". See man gconftool-2(1)\n value_type:\n type: str\n description:\n - The type of value being set. This is ignored if the state is \"get\".\n choices: [ bool, float, int, string ]\n state:\n type: str\n description:\n - The action to take upon the key/value.\n required: true\n choices: [ absent, get, present ]\n config_source:\n type: str\n description:\n - Specify a configuration source to use rather than the default path.\n See man gconftool-2(1)\n direct:\n description:\n - Access the config database directly, bypassing server. 
If direct is\n specified then the config_source must be specified as well.\n See man gconftool-2(1)\n type: bool\n default: false\n'''\n\nEXAMPLES = \"\"\"\n- name: Change the widget font to \"Serif 12\"\n community.general.gconftool2:\n key: \"/desktop/gnome/interface/font_name\"\n value_type: \"string\"\n value: \"Serif 12\"\n\"\"\"\n\nRETURN = '''\n key:\n description: The key specified in the module parameters\n returned: success\n type: str\n sample: /desktop/gnome/interface/font_name\n value_type:\n description: The type of the value that was changed\n returned: success\n type: str\n sample: string\n value:\n description: The value of the preference key after executing the module\n returned: success\n type: str\n sample: \"Serif 12\"\n...\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass GConf2Preference(object):\n def __init__(self, ansible, key, value_type, value,\n direct=False, config_source=\"\"):\n self.ansible = ansible\n self.key = key\n self.value_type = value_type\n self.value = value\n self.config_source = config_source\n self.direct = direct\n\n def value_already_set(self):\n return False\n\n def call(self, call_type, fail_onerr=True):\n \"\"\" Helper function to perform gconftool-2 operations \"\"\"\n config_source = []\n direct = []\n changed = False\n out = ''\n\n # If the configuration source is different from the default, create\n # the argument\n if self.config_source is not None and len(self.config_source) > 0:\n config_source = [\"--config-source\", self.config_source]\n\n # If direct is true, create the argument\n if self.direct:\n direct = [\"--direct\"]\n\n # Execute the call\n cmd = [\"gconftool-2\"]\n try:\n # If the call is \"get\", then we don't need as many parameters and\n # we can ignore some\n if call_type == 'get':\n cmd.extend([\"--get\", self.key])\n # Otherwise, we will use all relevant parameters\n elif call_type == 'set':\n cmd.extend(direct)\n cmd.extend(config_source)\n cmd.extend([\"--type\", self.value_type, \"--{3}\".format(call_type), self.key, self.value])\n elif call_type == 'unset':\n cmd.extend([\"--unset\", self.key])\n\n # Start external command\n rc, out, err = self.ansible.run_command(cmd)\n\n if err and fail_onerr:\n self.ansible.fail_json(msg='gconftool-2 failed with '\n 'error: %s' % (str(err)))\n else:\n changed = True\n\n except OSError as exception:\n self.ansible.fail_json(msg='gconftool-2 failed with exception: '\n '%s' % exception)\n return changed, out.rstrip()\n\n\ndef main():\n # Setup the Ansible module\n module = AnsibleModule(\n argument_spec=dict(\n key=dict(type='str', required=True, no_log=False),\n value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),\n value=dict(type='str'),\n state=dict(type='str', required=True, choices=['absent', 'get', 'present']),\n direct=dict(type='bool', default=False),\n config_source=dict(type='str'),\n ),\n supports_check_mode=True\n )\n\n state_values = {\"present\": \"set\", \"absent\": \"unset\", \"get\": \"get\"}\n\n # Assign module values to dictionary values\n key = module.params['key']\n value_type = module.params['value_type']\n if module.params['value'].lower() == \"true\":\n value = \"true\"\n elif module.params['value'] == \"false\":\n value = \"false\"\n else:\n value = module.params['value']\n\n state = state_values[module.params['state']]\n direct = module.params['direct']\n config_source = module.params['config_source']\n\n # Initialize some variables for later\n change = False\n new_value = ''\n\n if state != \"get\":\n if value 
is None or value == \"\":\n module.fail_json(msg='State %s requires \"value\" to be set'\n % str(state))\n elif value_type is None or value_type == \"\":\n module.fail_json(msg='State %s requires \"value_type\" to be set'\n % str(state))\n\n if direct and config_source is None:\n module.fail_json(msg='If \"direct\" is \"true\" then the ' +\n '\"config_source\" must be specified')\n elif not direct and config_source is not None:\n module.fail_json(msg='If the \"config_source\" is specified ' +\n 'then \"direct\" must be \"true\"')\n\n # Create a gconf2 preference\n gconf_pref = GConf2Preference(module, key, value_type,\n value, direct, config_source)\n # Now we get the current value, if not found don't fail\n dummy, current_value = gconf_pref.call(\"get\", fail_onerr=False)\n\n # Check if the current value equals the value we want to set. If not, make\n # a change\n if current_value != value:\n # If check mode, we know a change would have occurred.\n if module.check_mode:\n # So we will set the change to True\n change = True\n # And set the new_value to the value that would have been set\n new_value = value\n # If not check mode make the change.\n else:\n change, new_value = gconf_pref.call(state)\n # If the value we want to set is the same as the current_value, we will\n # set the new_value to the current_value for reporting\n else:\n new_value = current_value\n\n facts = dict(gconftool2={'changed': change,\n 'key': key,\n 'value_type': value_type,\n 'new_value': new_value,\n 'previous_value': current_value,\n 'playbook_value': module.params['value']})\n\n module.exit_json(changed=change, ansible_facts=facts)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/gconftool2.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016, Kenneth D. Evensen <[email protected]>\n# Copyright (c) 2017, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\nmodule: gconftool2\nauthor:\n - Kenneth D. Evensen (@kevensen)\nshort_description: Edit GNOME Configurations\ndescription:\n - This module allows for the manipulation of GNOME 2 Configuration via\n gconftool-2. Please see the gconftool-2(1) man pages for more details.\noptions:\n key:\n type: str\n description:\n - A GConf preference key is an element in the GConf repository\n that corresponds to an application preference. See man gconftool-2(1)\n required: true\n value:\n type: str\n description:\n - Preference keys typically have simple values such as strings,\n integers, or lists of strings and integers. This is ignored if the state\n is \"get\". See man gconftool-2(1)\n value_type:\n type: str\n description:\n - The type of value being set. This is ignored if the state is \"get\".\n choices: [ bool, float, int, string ]\n state:\n type: str\n description:\n - The action to take upon the key/value.\n required: true\n choices: [ absent, get, present ]\n config_source:\n type: str\n description:\n - Specify a configuration source to use rather than the default path.\n See man gconftool-2(1)\n direct:\n description:\n - Access the config database directly, bypassing server. 
If direct is\n specified then the config_source must be specified as well.\n See man gconftool-2(1)\n type: bool\n default: false\n'''\n\nEXAMPLES = \"\"\"\n- name: Change the widget font to \"Serif 12\"\n community.general.gconftool2:\n key: \"/desktop/gnome/interface/font_name\"\n value_type: \"string\"\n value: \"Serif 12\"\n\"\"\"\n\nRETURN = '''\n key:\n description: The key specified in the module parameters\n returned: success\n type: str\n sample: /desktop/gnome/interface/font_name\n value_type:\n description: The type of the value that was changed\n returned: success\n type: str\n sample: string\n value:\n description: The value of the preference key after executing the module\n returned: success\n type: str\n sample: \"Serif 12\"\n...\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass GConf2Preference(object):\n def __init__(self, ansible, key, value_type, value,\n direct=False, config_source=\"\"):\n self.ansible = ansible\n self.key = key\n self.value_type = value_type\n self.value = value\n self.config_source = config_source\n self.direct = direct\n\n def value_already_set(self):\n return False\n\n def call(self, call_type, fail_onerr=True):\n \"\"\" Helper function to perform gconftool-2 operations \"\"\"\n config_source = []\n direct = []\n changed = False\n out = ''\n\n # If the configuration source is different from the default, create\n # the argument\n if self.config_source is not None and len(self.config_source) > 0:\n config_source = [\"--config-source\", self.config_source]\n\n # If direct is true, create the argument\n if self.direct:\n direct = [\"--direct\"]\n\n # Execute the call\n cmd = [\"gconftool-2\"]\n try:\n # If the call is \"get\", then we don't need as many parameters and\n # we can ignore some\n if call_type == 'get':\n cmd.extend([\"--get\", self.key])\n # Otherwise, we will use all relevant parameters\n elif call_type == 'set':\n cmd.extend(direct)\n cmd.extend(config_source)\n cmd.extend([\"--type\", self.value_type, \"--{0}\".format(call_type), self.key, self.value])\n elif call_type == 'unset':\n cmd.extend([\"--unset\", self.key])\n\n # Start external command\n rc, out, err = self.ansible.run_command(cmd)\n\n if err and fail_onerr:\n self.ansible.fail_json(msg='gconftool-2 failed with '\n 'error: %s' % (str(err)))\n else:\n changed = True\n\n except OSError as exception:\n self.ansible.fail_json(msg='gconftool-2 failed with exception: '\n '%s' % exception)\n return changed, out.rstrip()\n\n\ndef main():\n # Setup the Ansible module\n module = AnsibleModule(\n argument_spec=dict(\n key=dict(type='str', required=True, no_log=False),\n value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),\n value=dict(type='str'),\n state=dict(type='str', required=True, choices=['absent', 'get', 'present']),\n direct=dict(type='bool', default=False),\n config_source=dict(type='str'),\n ),\n supports_check_mode=True\n )\n\n state_values = {\"present\": \"set\", \"absent\": \"unset\", \"get\": \"get\"}\n\n # Assign module values to dictionary values\n key = module.params['key']\n value_type = module.params['value_type']\n if module.params['value'].lower() == \"true\":\n value = \"true\"\n elif module.params['value'] == \"false\":\n value = \"false\"\n else:\n value = module.params['value']\n\n state = state_values[module.params['state']]\n direct = module.params['direct']\n config_source = module.params['config_source']\n\n # Initialize some variables for later\n change = False\n new_value = ''\n\n if state != \"get\":\n if value 
is None or value == \"\":\n module.fail_json(msg='State %s requires \"value\" to be set'\n % str(state))\n elif value_type is None or value_type == \"\":\n module.fail_json(msg='State %s requires \"value_type\" to be set'\n % str(state))\n\n if direct and config_source is None:\n module.fail_json(msg='If \"direct\" is \"true\" then the ' +\n '\"config_source\" must be specified')\n elif not direct and config_source is not None:\n module.fail_json(msg='If the \"config_source\" is specified ' +\n 'then \"direct\" must be \"true\"')\n\n # Create a gconf2 preference\n gconf_pref = GConf2Preference(module, key, value_type,\n value, direct, config_source)\n # Now we get the current value, if not found don't fail\n dummy, current_value = gconf_pref.call(\"get\", fail_onerr=False)\n\n # Check if the current value equals the value we want to set. If not, make\n # a change\n if current_value != value:\n # If check mode, we know a change would have occurred.\n if module.check_mode:\n # So we will set the change to True\n change = True\n # And set the new_value to the value that would have been set\n new_value = value\n # If not check mode make the change.\n else:\n change, new_value = gconf_pref.call(state)\n # If the value we want to set is the same as the current_value, we will\n # set the new_value to the current_value for reporting\n else:\n new_value = current_value\n\n facts = dict(gconftool2={'changed': change,\n 'key': key,\n 'value_type': value_type,\n 'new_value': new_value,\n 'previous_value': current_value,\n 'playbook_value': module.params['value']})\n\n module.exit_json(changed=change, ansible_facts=facts)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/gconftool2.py"}]}
| 3,808 | 152 |
gh_patches_debug_40376
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-849
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Garage does not run without display (pyglet)
Not all use cases require environment rendering. Can we get rid of this default import?
```
Traceback (most recent call last):
from garage.tf.envs import TfEnv
File "/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/__init__.py", line 1, in <module>
from garage.tf.envs.base import TfEnv
File "/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/base.py", line 4, in <module>
from garage.envs import GarageEnv
File "/home//garage/env/lib/python3.6/site-packages/garage/envs/__init__.py", line 1, in <module>
from garage.envs.base import GarageEnv
File "/home//garage/env/lib/python3.6/site-packages/garage/envs/base.py", line 7, in <module>
from gym.envs.classic_control.rendering import SimpleImageViewer
File "/home//garage/env/lib/python3.6/site-packages/gym/envs/classic_control/rendering.py", line 27, in <module>
from pyglet.gl import *
File "/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py", line 239, in <module>
import pyglet.window
File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py", line 1896, in <module>
gl._create_shadow_window()
File "/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py", line 208, in _create_shadow_window
_shadow_window = Window(width=1, height=1, visible=False)
File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/xlib/__init__.py", line 166, in __init__
super(XlibWindow, self).__init__(*args, **kwargs)
File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py", line 501, in __init__
display = get_platform().get_default_display()
File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py", line 1845, in get_default_display
return pyglet.canvas.get_display()
File "/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/__init__.py", line 82, in get_display
return Display()
File "/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/xlib.py", line 86, in __init__
raise NoSuchDisplayException('Cannot connect to "%s"' % name)
pyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to "None"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/garage/envs/base.py`
Content:
```
1 """Wrapper class that converts gym.Env into GarageEnv."""
2 import collections
3
4 import akro
5 import glfw
6 import gym
7 from gym.envs.classic_control.rendering import SimpleImageViewer
8 from gym.envs.classic_control.rendering import Viewer
9
10 from garage.core import Serializable
11 from garage.envs.env_spec import EnvSpec
12
13 # The gym environments using one of the packages in the following lists as
14 # entry points don't close their viewer windows.
15 KNOWN_GYM_NOT_CLOSE_VIEWER = [
16 # Please keep alphabetized
17 'gym.envs.atari',
18 'gym.envs.box2d',
19 'gym.envs.classic_control'
20 ]
21
22 KNOWN_GYM_NOT_CLOSE_MJ_VIEWER = [
23 # Please keep alphabetized
24 'gym.envs.mujoco',
25 'gym.envs.robotics'
26 ]
27
28
29 class GarageEnv(gym.Wrapper, Serializable):
30 """
31 Returns an abstract Garage wrapper class for gym.Env.
32
33 In order to provide pickling (serialization) and parameterization
34 for gym.Envs, they must be wrapped with a GarageEnv. This ensures
35 compatibility with existing samplers and checkpointing when the
36 envs are passed internally around garage.
37
38 Furthermore, classes inheriting from GarageEnv should silently
39 convert action_space and observation_space from gym.Spaces to
40 akro.spaces.
41
42 Args: env (gym.Env): the env that will be wrapped
43 """
44
45 def __init__(self, env=None, env_name=''):
46 if env_name:
47 super().__init__(gym.make(env_name))
48 else:
49 super().__init__(env)
50
51 self.action_space = akro.from_gym(self.env.action_space)
52 self.observation_space = akro.from_gym(self.env.observation_space)
53 if self.spec:
54 self.spec.action_space = self.action_space
55 self.spec.observation_space = self.observation_space
56 else:
57 self.spec = EnvSpec(
58 action_space=self.action_space,
59 observation_space=self.observation_space)
60
61 Serializable.quick_init(self, locals())
62
63 def close(self):
64 """
65 Close the wrapped env.
66
67 Returns:
68 None
69 """
70 self._close_viewer_window()
71 self.env.close()
72
73 def _close_viewer_window(self):
74 """
75 Close viewer window.
76
77 Unfortunately, some gym environments don't close the viewer windows
78 properly, which leads to "out of memory" issues when several of
79 these environments are tested one after the other.
80 This method searches for the viewer object of type MjViewer, Viewer
81 or SimpleImageViewer, based on environment, and if the environment
82 is wrapped in other environment classes, it performs depth search
83 in those as well.
84 This method can be removed once OpenAI solves the issue.
85 """
86 if self.env.spec:
87 if any(package in self.env.spec._entry_point
88 for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER):
89 # This import is not in the header to avoid a MuJoCo dependency
90 # with non-MuJoCo environments that use this base class.
91 from mujoco_py.mjviewer import MjViewer
92 if (hasattr(self.env, 'viewer')
93 and isinstance(self.env.viewer, MjViewer)):
94 glfw.destroy_window(self.env.viewer.window)
95 elif any(package in self.env.spec._entry_point
96 for package in KNOWN_GYM_NOT_CLOSE_VIEWER):
97 if (hasattr(self.env, 'viewer') and
98 (isinstance(self.env.viewer, Viewer)
99 or isinstance(self.env.viewer, SimpleImageViewer))):
100 self.env.viewer.close()
101
102 def reset(self, **kwargs):
103 """
104 This method is necessary to suppress a deprecated warning
105 thrown by gym.Wrapper.
106
107 Calls reset on wrapped env.
108 """
109 return self.env.reset(**kwargs)
110
111 def step(self, action):
112 """
113 This method is necessary to suppress a deprecated warning
114 thrown by gym.Wrapper.
115
116 Calls step on wrapped env.
117 """
118 return self.env.step(action)
119
120
121 def Step(observation, reward, done, **kwargs): # noqa: N802
122 """
123 Convenience method for creating a namedtuple from the results of
124 environment.step(action). Provides the option to put extra
125 diagnostic info in the kwargs (if it exists) without demanding
126 an explicit positional argument.
127 """
128 return _Step(observation, reward, done, kwargs)
129
130
131 _Step = collections.namedtuple('Step',
132 ['observation', 'reward', 'done', 'info'])
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/garage/envs/base.py b/src/garage/envs/base.py
--- a/src/garage/envs/base.py
+++ b/src/garage/envs/base.py
@@ -4,8 +4,6 @@
import akro
import glfw
import gym
-from gym.envs.classic_control.rendering import SimpleImageViewer
-from gym.envs.classic_control.rendering import Viewer
from garage.core import Serializable
from garage.envs.env_spec import EnvSpec
@@ -54,9 +52,8 @@
self.spec.action_space = self.action_space
self.spec.observation_space = self.observation_space
else:
- self.spec = EnvSpec(
- action_space=self.action_space,
- observation_space=self.observation_space)
+ self.spec = EnvSpec(action_space=self.action_space,
+ observation_space=self.observation_space)
Serializable.quick_init(self, locals())
@@ -66,6 +63,7 @@
Returns:
None
+
"""
self._close_viewer_window()
self.env.close()
@@ -94,36 +92,35 @@
glfw.destroy_window(self.env.viewer.window)
elif any(package in self.env.spec._entry_point
for package in KNOWN_GYM_NOT_CLOSE_VIEWER):
- if (hasattr(self.env, 'viewer') and
- (isinstance(self.env.viewer, Viewer)
- or isinstance(self.env.viewer, SimpleImageViewer))):
- self.env.viewer.close()
+ if hasattr(self.env, 'viewer'):
+ from gym.envs.classic_control.rendering import (
+ Viewer, SimpleImageViewer)
+ if (isinstance(self.env.viewer, Viewer)
+ or isinstance(self.env.viewer, SimpleImageViewer)):
+ self.env.viewer.close()
def reset(self, **kwargs):
- """
+ """Call reset on wrapped env.
+
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
-
- Calls reset on wrapped env.
"""
return self.env.reset(**kwargs)
def step(self, action):
- """
+ """Call step on wrapped env.
+
This method is necessary to suppress a deprecated warning
thrown by gym.Wrapper.
-
- Calls step on wrapped env.
"""
return self.env.step(action)
def Step(observation, reward, done, **kwargs): # noqa: N802
- """
- Convenience method for creating a namedtuple from the results of
- environment.step(action). Provides the option to put extra
- diagnostic info in the kwargs (if it exists) without demanding
- an explicit positional argument.
+ """Create a namedtuple from the results of environment.step(action).
+
+ Provides the option to put extra diagnostic info in the kwargs (if it
+ exists) without demanding an explicit positional argument.
"""
return _Step(observation, reward, done, kwargs)
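
A condensed, standalone illustration of the deferred-import pattern the diff above applies (this is a sketch, not the actual module; `gym` must be installed for the inner import to succeed, and the point is that pyglet — which needs a display — is no longer pulled in at module import time):

```python
def close_viewer_window(env):
    """Close a leaked viewer window without importing pyglet at module import time."""
    if not hasattr(env, "viewer") or env.viewer is None:
        return
    # The rendering module (and therefore pyglet, which requires a display) is only
    # imported here, once a viewer actually exists.
    from gym.envs.classic_control.rendering import SimpleImageViewer, Viewer
    if isinstance(env.viewer, (Viewer, SimpleImageViewer)):
        env.viewer.close()
```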
|
{"golden_diff": "diff --git a/src/garage/envs/base.py b/src/garage/envs/base.py\n--- a/src/garage/envs/base.py\n+++ b/src/garage/envs/base.py\n@@ -4,8 +4,6 @@\n import akro\n import glfw\n import gym\n-from gym.envs.classic_control.rendering import SimpleImageViewer\n-from gym.envs.classic_control.rendering import Viewer\n \n from garage.core import Serializable\n from garage.envs.env_spec import EnvSpec\n@@ -54,9 +52,8 @@\n self.spec.action_space = self.action_space\n self.spec.observation_space = self.observation_space\n else:\n- self.spec = EnvSpec(\n- action_space=self.action_space,\n- observation_space=self.observation_space)\n+ self.spec = EnvSpec(action_space=self.action_space,\n+ observation_space=self.observation_space)\n \n Serializable.quick_init(self, locals())\n \n@@ -66,6 +63,7 @@\n \n Returns:\n None\n+\n \"\"\"\n self._close_viewer_window()\n self.env.close()\n@@ -94,36 +92,35 @@\n glfw.destroy_window(self.env.viewer.window)\n elif any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_VIEWER):\n- if (hasattr(self.env, 'viewer') and\n- (isinstance(self.env.viewer, Viewer)\n- or isinstance(self.env.viewer, SimpleImageViewer))):\n- self.env.viewer.close()\n+ if hasattr(self.env, 'viewer'):\n+ from gym.envs.classic_control.rendering import (\n+ Viewer, SimpleImageViewer)\n+ if (isinstance(self.env.viewer, Viewer)\n+ or isinstance(self.env.viewer, SimpleImageViewer)):\n+ self.env.viewer.close()\n \n def reset(self, **kwargs):\n- \"\"\"\n+ \"\"\"Call reset on wrapped env.\n+\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n-\n- Calls reset on wrapped env.\n \"\"\"\n return self.env.reset(**kwargs)\n \n def step(self, action):\n- \"\"\"\n+ \"\"\"Call step on wrapped env.\n+\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n-\n- Calls step on wrapped env.\n \"\"\"\n return self.env.step(action)\n \n \n def Step(observation, reward, done, **kwargs): # noqa: N802\n- \"\"\"\n- Convenience method for creating a namedtuple from the results of\n- environment.step(action). Provides the option to put extra\n- diagnostic info in the kwargs (if it exists) without demanding\n- an explicit positional argument.\n+ \"\"\"Create a namedtuple from the results of environment.step(action).\n+\n+ Provides the option to put extra diagnostic info in the kwargs (if it\n+ exists) without demanding an explicit positional argument.\n \"\"\"\n return _Step(observation, reward, done, kwargs)\n", "issue": "Garage does not run without display (pyglet)\nNot all use cases require environment rendering. 
Can we get rid of this default import?\r\n\r\n```\r\nTraceback (most recent call last):\r\n from garage.tf.envs import TfEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/__init__.py\", line 1, in <module>\r\n from garage.tf.envs.base import TfEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/base.py\", line 4, in <module>\r\n from garage.envs import GarageEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/envs/__init__.py\", line 1, in <module>\r\n from garage.envs.base import GarageEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/envs/base.py\", line 7, in <module>\r\n from gym.envs.classic_control.rendering import SimpleImageViewer\r\n File \"/home//garage/env/lib/python3.6/site-packages/gym/envs/classic_control/rendering.py\", line 27, in <module>\r\n from pyglet.gl import *\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py\", line 239, in <module>\r\n import pyglet.window\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py\", line 1896, in <module>\r\n gl._create_shadow_window()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py\", line 208, in _create_shadow_window\r\n _shadow_window = Window(width=1, height=1, visible=False)\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/xlib/__init__.py\", line 166, in __init__\r\n super(XlibWindow, self).__init__(*args, **kwargs)\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py\", line 501, in __init__\r\n display = get_platform().get_default_display()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py\", line 1845, in get_default_display\r\n return pyglet.canvas.get_display()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/__init__.py\", line 82, in get_display\r\n return Display()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/xlib.py\", line 86, in __init__\r\n raise NoSuchDisplayException('Cannot connect to \"%s\"' % name)\r\npyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to \"None\"\r\n```\n", "before_files": [{"content": "\"\"\"Wrapper class that converts gym.Env into GarageEnv.\"\"\"\nimport collections\n\nimport akro\nimport glfw\nimport gym\nfrom gym.envs.classic_control.rendering import SimpleImageViewer\nfrom gym.envs.classic_control.rendering import Viewer\n\nfrom garage.core import Serializable\nfrom garage.envs.env_spec import EnvSpec\n\n# The gym environments using one of the packages in the following lists as\n# entry points don't close their viewer windows.\nKNOWN_GYM_NOT_CLOSE_VIEWER = [\n # Please keep alphabetized\n 'gym.envs.atari',\n 'gym.envs.box2d',\n 'gym.envs.classic_control'\n]\n\nKNOWN_GYM_NOT_CLOSE_MJ_VIEWER = [\n # Please keep alphabetized\n 'gym.envs.mujoco',\n 'gym.envs.robotics'\n]\n\n\nclass GarageEnv(gym.Wrapper, Serializable):\n \"\"\"\n Returns an abstract Garage wrapper class for gym.Env.\n\n In order to provide pickling (serialization) and parameterization\n for gym.Envs, they must be wrapped with a GarageEnv. 
This ensures\n compatibility with existing samplers and checkpointing when the\n envs are passed internally around garage.\n\n Furthermore, classes inheriting from GarageEnv should silently\n convert action_space and observation_space from gym.Spaces to\n akro.spaces.\n\n Args: env (gym.Env): the env that will be wrapped\n \"\"\"\n\n def __init__(self, env=None, env_name=''):\n if env_name:\n super().__init__(gym.make(env_name))\n else:\n super().__init__(env)\n\n self.action_space = akro.from_gym(self.env.action_space)\n self.observation_space = akro.from_gym(self.env.observation_space)\n if self.spec:\n self.spec.action_space = self.action_space\n self.spec.observation_space = self.observation_space\n else:\n self.spec = EnvSpec(\n action_space=self.action_space,\n observation_space=self.observation_space)\n\n Serializable.quick_init(self, locals())\n\n def close(self):\n \"\"\"\n Close the wrapped env.\n\n Returns:\n None\n \"\"\"\n self._close_viewer_window()\n self.env.close()\n\n def _close_viewer_window(self):\n \"\"\"\n Close viewer window.\n\n Unfortunately, some gym environments don't close the viewer windows\n properly, which leads to \"out of memory\" issues when several of\n these environments are tested one after the other.\n This method searches for the viewer object of type MjViewer, Viewer\n or SimpleImageViewer, based on environment, and if the environment\n is wrapped in other environment classes, it performs depth search\n in those as well.\n This method can be removed once OpenAI solves the issue.\n \"\"\"\n if self.env.spec:\n if any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER):\n # This import is not in the header to avoid a MuJoCo dependency\n # with non-MuJoCo environments that use this base class.\n from mujoco_py.mjviewer import MjViewer\n if (hasattr(self.env, 'viewer')\n and isinstance(self.env.viewer, MjViewer)):\n glfw.destroy_window(self.env.viewer.window)\n elif any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_VIEWER):\n if (hasattr(self.env, 'viewer') and\n (isinstance(self.env.viewer, Viewer)\n or isinstance(self.env.viewer, SimpleImageViewer))):\n self.env.viewer.close()\n\n def reset(self, **kwargs):\n \"\"\"\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n\n Calls reset on wrapped env.\n \"\"\"\n return self.env.reset(**kwargs)\n\n def step(self, action):\n \"\"\"\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n\n Calls step on wrapped env.\n \"\"\"\n return self.env.step(action)\n\n\ndef Step(observation, reward, done, **kwargs): # noqa: N802\n \"\"\"\n Convenience method for creating a namedtuple from the results of\n environment.step(action). 
Provides the option to put extra\n diagnostic info in the kwargs (if it exists) without demanding\n an explicit positional argument.\n \"\"\"\n return _Step(observation, reward, done, kwargs)\n\n\n_Step = collections.namedtuple('Step',\n ['observation', 'reward', 'done', 'info'])\n", "path": "src/garage/envs/base.py"}], "after_files": [{"content": "\"\"\"Wrapper class that converts gym.Env into GarageEnv.\"\"\"\nimport collections\n\nimport akro\nimport glfw\nimport gym\n\nfrom garage.core import Serializable\nfrom garage.envs.env_spec import EnvSpec\n\n# The gym environments using one of the packages in the following lists as\n# entry points don't close their viewer windows.\nKNOWN_GYM_NOT_CLOSE_VIEWER = [\n # Please keep alphabetized\n 'gym.envs.atari',\n 'gym.envs.box2d',\n 'gym.envs.classic_control'\n]\n\nKNOWN_GYM_NOT_CLOSE_MJ_VIEWER = [\n # Please keep alphabetized\n 'gym.envs.mujoco',\n 'gym.envs.robotics'\n]\n\n\nclass GarageEnv(gym.Wrapper, Serializable):\n \"\"\"\n Returns an abstract Garage wrapper class for gym.Env.\n\n In order to provide pickling (serialization) and parameterization\n for gym.Envs, they must be wrapped with a GarageEnv. This ensures\n compatibility with existing samplers and checkpointing when the\n envs are passed internally around garage.\n\n Furthermore, classes inheriting from GarageEnv should silently\n convert action_space and observation_space from gym.Spaces to\n akro.spaces.\n\n Args: env (gym.Env): the env that will be wrapped\n \"\"\"\n\n def __init__(self, env=None, env_name=''):\n if env_name:\n super().__init__(gym.make(env_name))\n else:\n super().__init__(env)\n\n self.action_space = akro.from_gym(self.env.action_space)\n self.observation_space = akro.from_gym(self.env.observation_space)\n if self.spec:\n self.spec.action_space = self.action_space\n self.spec.observation_space = self.observation_space\n else:\n self.spec = EnvSpec(action_space=self.action_space,\n observation_space=self.observation_space)\n\n Serializable.quick_init(self, locals())\n\n def close(self):\n \"\"\"\n Close the wrapped env.\n\n Returns:\n None\n\n \"\"\"\n self._close_viewer_window()\n self.env.close()\n\n def _close_viewer_window(self):\n \"\"\"\n Close viewer window.\n\n Unfortunately, some gym environments don't close the viewer windows\n properly, which leads to \"out of memory\" issues when several of\n these environments are tested one after the other.\n This method searches for the viewer object of type MjViewer, Viewer\n or SimpleImageViewer, based on environment, and if the environment\n is wrapped in other environment classes, it performs depth search\n in those as well.\n This method can be removed once OpenAI solves the issue.\n \"\"\"\n if self.env.spec:\n if any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER):\n # This import is not in the header to avoid a MuJoCo dependency\n # with non-MuJoCo environments that use this base class.\n from mujoco_py.mjviewer import MjViewer\n if (hasattr(self.env, 'viewer')\n and isinstance(self.env.viewer, MjViewer)):\n glfw.destroy_window(self.env.viewer.window)\n elif any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_VIEWER):\n if hasattr(self.env, 'viewer'):\n from gym.envs.classic_control.rendering import (\n Viewer, SimpleImageViewer)\n if (isinstance(self.env.viewer, Viewer)\n or isinstance(self.env.viewer, SimpleImageViewer)):\n self.env.viewer.close()\n\n def reset(self, **kwargs):\n \"\"\"Call reset on wrapped env.\n\n This method is 
necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n \"\"\"\n return self.env.reset(**kwargs)\n\n def step(self, action):\n \"\"\"Call step on wrapped env.\n\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n \"\"\"\n return self.env.step(action)\n\n\ndef Step(observation, reward, done, **kwargs): # noqa: N802\n \"\"\"Create a namedtuple from the results of environment.step(action).\n\n Provides the option to put extra diagnostic info in the kwargs (if it\n exists) without demanding an explicit positional argument.\n \"\"\"\n return _Step(observation, reward, done, kwargs)\n\n\n_Step = collections.namedtuple('Step',\n ['observation', 'reward', 'done', 'info'])\n", "path": "src/garage/envs/base.py"}]}
| 2,177 | 635 |
gh_patches_debug_478
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-7579
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_multiple_heads is not raising the expected error
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
On having multiple heads, the travis build fails with error -
```
error: Hooks handler process 'dredd-hooks-python ./tests/hook_main.py' exited with status: 1
warn: Hook handling timed out.
error: Hooks handler process 'dredd-hooks-python ./tests/hook_main.py' exited with status: 1
info: Backend server process exited
The command "dredd" failed and exited with 1 during .
```
It should raise an error as expected in - https://github.com/fossasia/open-event-server/blob/development/scripts/test_multiple_heads.sh

**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
Expected error should be raised - `Error: Multiple Migration Heads`
<!-- If applicable, add stacktrace to help explain your problem. -->
**Additional context**
<!-- Add any other context about the problem here. -->
On it
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py`
Content:
```
1 """empty message
2
3 Revision ID: 3b29ea38f0cb
4 Revises: 2d0760003a8a
5 Create Date: 2021-01-07 05:19:49.749923
6
7 """
8
9 from alembic import op
10 import sqlalchemy as sa
11 import sqlalchemy_utils
12
13
14 # revision identifiers, used by Alembic.
15 revision = '3b29ea38f0cb'
16 down_revision = '2d0760003a8a'
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.add_column('speaker', sa.Column('rank', sa.Integer(), nullable=False, server_default='0'))
22 # ### end Alembic commands ###
23
24
25 def downgrade():
26 # ### commands auto generated by Alembic - please adjust! ###
27 op.drop_column('speaker', 'rank')
28 # ### end Alembic commands ###
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py b/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py
--- a/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py
+++ b/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py
@@ -13,7 +13,7 @@
# revision identifiers, used by Alembic.
revision = '3b29ea38f0cb'
-down_revision = '2d0760003a8a'
+down_revision = '4e61d4df3516'
def upgrade():
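
The patch above re-points `down_revision` so the two migration branches form a single chain again. A minimal, standalone way to check for multiple heads programmatically is sketched below (the `"migrations"` path is an assumption about the project layout; the project's own check lives in `scripts/test_multiple_heads.sh`, as referenced in the issue):

```python
from alembic.script import ScriptDirectory

# Point at the directory that contains env.py and versions/ (path is an assumption).
script = ScriptDirectory("migrations")
heads = script.get_heads()
if len(heads) > 1:
    raise SystemExit(f"Error: Multiple Migration Heads: {heads}")
print(f"Single head: {heads[0]}")
```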
|
{"golden_diff": "diff --git a/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py b/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py\n--- a/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py\n+++ b/migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py\n@@ -13,7 +13,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '3b29ea38f0cb'\n-down_revision = '2d0760003a8a'\n+down_revision = '4e61d4df3516'\n \n \n def upgrade():\n", "issue": "test_multiple_heads is not raising the expected error\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nOn having multiple heads, the travis build fails with error -\r\n```\r\nerror: Hooks handler process 'dredd-hooks-python ./tests/hook_main.py' exited with status: 1\r\nwarn: Hook handling timed out.\r\nerror: Hooks handler process 'dredd-hooks-python ./tests/hook_main.py' exited with status: 1\r\ninfo: Backend server process exited\r\nThe command \"dredd\" failed and exited with 1 during .\r\n```\r\nIt should raise error as expected in - https://github.com/fossasia/open-event-server/blob/development/scripts/test_multiple_heads.sh\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nExpected error should be raised - `Error: Multiple Migration Heads`\r\n\r\n<!-- If applicable, add stacktrace to help explain your problem. -->\r\n\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\nOn it\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: 3b29ea38f0cb\nRevises: 2d0760003a8a\nCreate Date: 2021-01-07 05:19:49.749923\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '3b29ea38f0cb'\ndown_revision = '2d0760003a8a'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('speaker', sa.Column('rank', sa.Integer(), nullable=False, server_default='0'))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('speaker', 'rank')\n # ### end Alembic commands ###\n", "path": "migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py"}], "after_files": [{"content": "\"\"\"empty message\n\nRevision ID: 3b29ea38f0cb\nRevises: 2d0760003a8a\nCreate Date: 2021-01-07 05:19:49.749923\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = '3b29ea38f0cb'\ndown_revision = '4e61d4df3516'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('speaker', sa.Column('rank', sa.Integer(), nullable=False, server_default='0'))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('speaker', 'rank')\n # ### end Alembic commands ###\n", "path": "migrations/versions/rev-2021-01-07-05:19:49-3b29ea38f0cb_.py"}]}
| 778 | 243 |
gh_patches_debug_2213
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-5630
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SMS Authenticator Setup Stage with generic provider does not work without mapping
**Describe the bug**
Setting up the stage and flow works, but trying to set up an MFA device results in an error after entering the phone number.
**To Reproduce**
Create a SMS Authenticator Setup Stage with a generic provider and without mapping
**Expected behavior**
SMS should be sent
**Version and Deployment (please complete the following information):**
- current main
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/stages/authenticator_sms/models.py`
Content:
```
1 """SMS Authenticator models"""
2 from hashlib import sha256
3 from typing import Optional
4
5 from django.contrib.auth import get_user_model
6 from django.db import models
7 from django.utils.translation import gettext_lazy as _
8 from django.views import View
9 from django_otp.models import SideChannelDevice
10 from requests.exceptions import RequestException
11 from rest_framework.exceptions import ValidationError
12 from rest_framework.serializers import BaseSerializer
13 from structlog.stdlib import get_logger
14 from twilio.base.exceptions import TwilioRestException
15 from twilio.rest import Client
16
17 from authentik.core.types import UserSettingSerializer
18 from authentik.events.models import Event, EventAction, NotificationWebhookMapping
19 from authentik.events.utils import sanitize_item
20 from authentik.flows.models import ConfigurableStage, FriendlyNamedStage, Stage
21 from authentik.lib.models import SerializerModel
22 from authentik.lib.utils.errors import exception_to_string
23 from authentik.lib.utils.http import get_http_session
24
25 LOGGER = get_logger()
26
27
28 class SMSProviders(models.TextChoices):
29 """Supported SMS Providers"""
30
31 TWILIO = "twilio"
32 GENERIC = "generic"
33
34
35 class SMSAuthTypes(models.TextChoices):
36 """Supported SMS Auth Types"""
37
38 BASIC = "basic"
39 BEARER = "bearer"
40
41
42 class AuthenticatorSMSStage(ConfigurableStage, FriendlyNamedStage, Stage):
43 """Use SMS-based TOTP instead of authenticator-based."""
44
45 provider = models.TextField(choices=SMSProviders.choices)
46
47 from_number = models.TextField()
48
49 account_sid = models.TextField()
50 auth = models.TextField()
51 auth_password = models.TextField(default="", blank=True)
52 auth_type = models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)
53
54 verify_only = models.BooleanField(
55 default=False,
56 help_text=_(
57 "When enabled, the Phone number is only used during enrollment to verify the "
58 "users authenticity. Only a hash of the phone number is saved to ensure it is "
59 "not re-used in the future."
60 ),
61 )
62
63 mapping = models.ForeignKey(
64 NotificationWebhookMapping,
65 null=True,
66 default=None,
67 on_delete=models.SET_NULL,
68 help_text=_("Optionally modify the payload being sent to custom providers."),
69 )
70
71 def send(self, token: str, device: "SMSDevice"):
72 """Send message via selected provider"""
73 if self.provider == SMSProviders.TWILIO:
74 return self.send_twilio(token, device)
75 if self.provider == SMSProviders.GENERIC:
76 return self.send_generic(token, device)
77 raise ValueError(f"invalid provider {self.provider}")
78
79 def get_message(self, token: str) -> str:
80 """Get SMS message"""
81 return _("Use this code to authenticate in authentik: %(token)s" % {"token": token})
82
83 def send_twilio(self, token: str, device: "SMSDevice"):
84 """send sms via twilio provider"""
85 client = Client(self.account_sid, self.auth)
86
87 try:
88 message = client.messages.create(
89 to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))
90 )
91 LOGGER.debug("Sent SMS", to=device, message=message.sid)
92 except TwilioRestException as exc:
93 LOGGER.warning("Error sending token by Twilio SMS", exc=exc, msg=exc.msg)
94 raise ValidationError(exc.msg)
95
96 def send_generic(self, token: str, device: "SMSDevice"):
97 """Send SMS via outside API"""
98 payload = {
99 "From": self.from_number,
100 "To": device.phone_number,
101 "Body": token,
102 "Message": self.get_message(token),
103 }
104
105 if self.mapping:
106 payload = sanitize_item(
107 self.mapping.evaluate(
108 user=device.user,
109 request=None,
110 device=device,
111 token=token,
112 stage=self,
113 )
114 )
115
116 if self.auth_type == SMSAuthTypes.BEARER:
117 response = get_http_session().post(
118 self.account_sid,
119 json=payload,
120 headers={"Authorization": f"Bearer {self.auth}"},
121 )
122 elif self.auth_type == SMSAuthTypes.BASIC:
123 response = get_http_session().post(
124 self.account_sid,
125 json=payload,
126 auth=(self.auth, self.auth_password),
127 )
128 else:
129 raise ValueError(f"Invalid Auth type '{self.auth_type}'")
130
131 LOGGER.debug("Sent SMS", to=device.phone_number)
132 try:
133 response.raise_for_status()
134 except RequestException as exc:
135 LOGGER.warning(
136 "Error sending token by generic SMS",
137 exc=exc,
138 status=response.status_code,
139 body=response.text[:100],
140 )
141 Event.new(
142 EventAction.CONFIGURATION_ERROR,
143 message="Error sending SMS",
144 exc=exception_to_string(exc),
145 status_code=response.status_code,
146 body=response.text,
147 ).set_user(device.user).save()
148 if response.status_code >= 400:
149 raise ValidationError(response.text)
150 raise
151
152 @property
153 def serializer(self) -> type[BaseSerializer]:
154 from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer
155
156 return AuthenticatorSMSStageSerializer
157
158 @property
159 def type(self) -> type[View]:
160 from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView
161
162 return AuthenticatorSMSStageView
163
164 @property
165 def component(self) -> str:
166 return "ak-stage-authenticator-sms-form"
167
168 def ui_user_settings(self) -> Optional[UserSettingSerializer]:
169 return UserSettingSerializer(
170 data={
171 "title": self.friendly_name or str(self._meta.verbose_name),
172 "component": "ak-user-settings-authenticator-sms",
173 }
174 )
175
176 def __str__(self) -> str:
177 return f"SMS Authenticator Setup Stage {self.name}"
178
179 class Meta:
180 verbose_name = _("SMS Authenticator Setup Stage")
181 verbose_name_plural = _("SMS Authenticator Setup Stages")
182
183
184 def hash_phone_number(phone_number: str) -> str:
185 """Hash phone number with prefix"""
186 return "hash:" + sha256(phone_number.encode()).hexdigest()
187
188
189 class SMSDevice(SerializerModel, SideChannelDevice):
190 """SMS Device"""
191
192 user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
193
194 # Connect to the stage to when validating access we know the API Credentials
195 stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)
196
197 phone_number = models.TextField()
198
199 last_t = models.DateTimeField(auto_now=True)
200
201 def set_hashed_number(self):
202 """Set phone_number to hashed number"""
203 self.phone_number = hash_phone_number(self.phone_number)
204
205 @property
206 def is_hashed(self) -> bool:
207 """Check if the phone number is hashed"""
208 return self.phone_number.startswith("hash:")
209
210 @property
211 def serializer(self) -> type[BaseSerializer]:
212 from authentik.stages.authenticator_sms.api import SMSDeviceSerializer
213
214 return SMSDeviceSerializer
215
216 def verify_token(self, token):
217 valid = super().verify_token(token)
218 if valid:
219 self.save()
220 return valid
221
222 def __str__(self):
223 return str(self.name) or str(self.user)
224
225 class Meta:
226 verbose_name = _("SMS Device")
227 verbose_name_plural = _("SMS Devices")
228 unique_together = (("stage", "phone_number"),)
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/stages/authenticator_sms/models.py b/authentik/stages/authenticator_sms/models.py
--- a/authentik/stages/authenticator_sms/models.py
+++ b/authentik/stages/authenticator_sms/models.py
@@ -99,7 +99,7 @@
"From": self.from_number,
"To": device.phone_number,
"Body": token,
- "Message": self.get_message(token),
+ "Message": str(self.get_message(token)),
}
if self.mapping:
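
The one-line fix above matters because `get_message()` returns a lazy translation proxy (the message is built with `gettext_lazy`), and `json.dumps` — which the requests session uses for the `json=` payload — cannot serialize such proxies; with a mapping configured the payload goes through `sanitize_item` first, which is why the bug only appears without a mapping. A standalone sketch of the failure and the fix, outside the repository:

```python
import json

from django.conf import settings
from django.utils.translation import gettext_lazy as _

settings.configure(USE_I18N=False)  # minimal setup so the lazy string can be evaluated standalone

token = "123456"
message = _("Use this code to authenticate in authentik: %(token)s" % {"token": token})

try:
    json.dumps({"Message": message})          # the pre-patch payload
except TypeError as exc:
    print(exc)                                # lazy __proxy__ objects are not JSON serializable

print(json.dumps({"Message": str(message)}))  # the patched payload serializes fine
```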
|
{"golden_diff": "diff --git a/authentik/stages/authenticator_sms/models.py b/authentik/stages/authenticator_sms/models.py\n--- a/authentik/stages/authenticator_sms/models.py\n+++ b/authentik/stages/authenticator_sms/models.py\n@@ -99,7 +99,7 @@\n \"From\": self.from_number,\n \"To\": device.phone_number,\n \"Body\": token,\n- \"Message\": self.get_message(token),\n+ \"Message\": str(self.get_message(token)),\n }\n \n if self.mapping:\n", "issue": "SMS Authenticator Setup Stage with generic provider does not work without mapping \n**Describe the bug**\r\nSetting up the stage and flow works but trying to set up an mfa device results in an error after entering the phone number\r\n\r\n**To Reproduce**\r\nCreate a SMS Authenticator Setup Stage with a generic provider and without mapping\r\n\r\n**Expected behavior**\r\nsms should be send\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- current main\r\n\n", "before_files": [{"content": "\"\"\"SMS Authenticator models\"\"\"\nfrom hashlib import sha256\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django_otp.models import SideChannelDevice\nfrom requests.exceptions import RequestException\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\nfrom twilio.base.exceptions import TwilioRestException\nfrom twilio.rest import Client\n\nfrom authentik.core.types import UserSettingSerializer\nfrom authentik.events.models import Event, EventAction, NotificationWebhookMapping\nfrom authentik.events.utils import sanitize_item\nfrom authentik.flows.models import ConfigurableStage, FriendlyNamedStage, Stage\nfrom authentik.lib.models import SerializerModel\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.lib.utils.http import get_http_session\n\nLOGGER = get_logger()\n\n\nclass SMSProviders(models.TextChoices):\n \"\"\"Supported SMS Providers\"\"\"\n\n TWILIO = \"twilio\"\n GENERIC = \"generic\"\n\n\nclass SMSAuthTypes(models.TextChoices):\n \"\"\"Supported SMS Auth Types\"\"\"\n\n BASIC = \"basic\"\n BEARER = \"bearer\"\n\n\nclass AuthenticatorSMSStage(ConfigurableStage, FriendlyNamedStage, Stage):\n \"\"\"Use SMS-based TOTP instead of authenticator-based.\"\"\"\n\n provider = models.TextField(choices=SMSProviders.choices)\n\n from_number = models.TextField()\n\n account_sid = models.TextField()\n auth = models.TextField()\n auth_password = models.TextField(default=\"\", blank=True)\n auth_type = models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)\n\n verify_only = models.BooleanField(\n default=False,\n help_text=_(\n \"When enabled, the Phone number is only used during enrollment to verify the \"\n \"users authenticity. 
Only a hash of the phone number is saved to ensure it is \"\n \"not re-used in the future.\"\n ),\n )\n\n mapping = models.ForeignKey(\n NotificationWebhookMapping,\n null=True,\n default=None,\n on_delete=models.SET_NULL,\n help_text=_(\"Optionally modify the payload being sent to custom providers.\"),\n )\n\n def send(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send message via selected provider\"\"\"\n if self.provider == SMSProviders.TWILIO:\n return self.send_twilio(token, device)\n if self.provider == SMSProviders.GENERIC:\n return self.send_generic(token, device)\n raise ValueError(f\"invalid provider {self.provider}\")\n\n def get_message(self, token: str) -> str:\n \"\"\"Get SMS message\"\"\"\n return _(\"Use this code to authenticate in authentik: %(token)s\" % {\"token\": token})\n\n def send_twilio(self, token: str, device: \"SMSDevice\"):\n \"\"\"send sms via twilio provider\"\"\"\n client = Client(self.account_sid, self.auth)\n\n try:\n message = client.messages.create(\n to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))\n )\n LOGGER.debug(\"Sent SMS\", to=device, message=message.sid)\n except TwilioRestException as exc:\n LOGGER.warning(\"Error sending token by Twilio SMS\", exc=exc, msg=exc.msg)\n raise ValidationError(exc.msg)\n\n def send_generic(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send SMS via outside API\"\"\"\n payload = {\n \"From\": self.from_number,\n \"To\": device.phone_number,\n \"Body\": token,\n \"Message\": self.get_message(token),\n }\n\n if self.mapping:\n payload = sanitize_item(\n self.mapping.evaluate(\n user=device.user,\n request=None,\n device=device,\n token=token,\n stage=self,\n )\n )\n\n if self.auth_type == SMSAuthTypes.BEARER:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n headers={\"Authorization\": f\"Bearer {self.auth}\"},\n )\n elif self.auth_type == SMSAuthTypes.BASIC:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n auth=(self.auth, self.auth_password),\n )\n else:\n raise ValueError(f\"Invalid Auth type '{self.auth_type}'\")\n\n LOGGER.debug(\"Sent SMS\", to=device.phone_number)\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Error sending token by generic SMS\",\n exc=exc,\n status=response.status_code,\n body=response.text[:100],\n )\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=\"Error sending SMS\",\n exc=exception_to_string(exc),\n status_code=response.status_code,\n body=response.text,\n ).set_user(device.user).save()\n if response.status_code >= 400:\n raise ValidationError(response.text)\n raise\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer\n\n return AuthenticatorSMSStageSerializer\n\n @property\n def type(self) -> type[View]:\n from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView\n\n return AuthenticatorSMSStageView\n\n @property\n def component(self) -> str:\n return \"ak-stage-authenticator-sms-form\"\n\n def ui_user_settings(self) -> Optional[UserSettingSerializer]:\n return UserSettingSerializer(\n data={\n \"title\": self.friendly_name or str(self._meta.verbose_name),\n \"component\": \"ak-user-settings-authenticator-sms\",\n }\n )\n\n def __str__(self) -> str:\n return f\"SMS Authenticator Setup Stage {self.name}\"\n\n class Meta:\n verbose_name = _(\"SMS Authenticator Setup Stage\")\n verbose_name_plural = _(\"SMS Authenticator Setup 
Stages\")\n\n\ndef hash_phone_number(phone_number: str) -> str:\n \"\"\"Hash phone number with prefix\"\"\"\n return \"hash:\" + sha256(phone_number.encode()).hexdigest()\n\n\nclass SMSDevice(SerializerModel, SideChannelDevice):\n \"\"\"SMS Device\"\"\"\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n\n # Connect to the stage to when validating access we know the API Credentials\n stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)\n\n phone_number = models.TextField()\n\n last_t = models.DateTimeField(auto_now=True)\n\n def set_hashed_number(self):\n \"\"\"Set phone_number to hashed number\"\"\"\n self.phone_number = hash_phone_number(self.phone_number)\n\n @property\n def is_hashed(self) -> bool:\n \"\"\"Check if the phone number is hashed\"\"\"\n return self.phone_number.startswith(\"hash:\")\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import SMSDeviceSerializer\n\n return SMSDeviceSerializer\n\n def verify_token(self, token):\n valid = super().verify_token(token)\n if valid:\n self.save()\n return valid\n\n def __str__(self):\n return str(self.name) or str(self.user)\n\n class Meta:\n verbose_name = _(\"SMS Device\")\n verbose_name_plural = _(\"SMS Devices\")\n unique_together = ((\"stage\", \"phone_number\"),)\n", "path": "authentik/stages/authenticator_sms/models.py"}], "after_files": [{"content": "\"\"\"SMS Authenticator models\"\"\"\nfrom hashlib import sha256\nfrom typing import Optional\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django_otp.models import SideChannelDevice\nfrom requests.exceptions import RequestException\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.serializers import BaseSerializer\nfrom structlog.stdlib import get_logger\nfrom twilio.base.exceptions import TwilioRestException\nfrom twilio.rest import Client\n\nfrom authentik.core.types import UserSettingSerializer\nfrom authentik.events.models import Event, EventAction, NotificationWebhookMapping\nfrom authentik.events.utils import sanitize_item\nfrom authentik.flows.models import ConfigurableStage, FriendlyNamedStage, Stage\nfrom authentik.lib.models import SerializerModel\nfrom authentik.lib.utils.errors import exception_to_string\nfrom authentik.lib.utils.http import get_http_session\n\nLOGGER = get_logger()\n\n\nclass SMSProviders(models.TextChoices):\n \"\"\"Supported SMS Providers\"\"\"\n\n TWILIO = \"twilio\"\n GENERIC = \"generic\"\n\n\nclass SMSAuthTypes(models.TextChoices):\n \"\"\"Supported SMS Auth Types\"\"\"\n\n BASIC = \"basic\"\n BEARER = \"bearer\"\n\n\nclass AuthenticatorSMSStage(ConfigurableStage, FriendlyNamedStage, Stage):\n \"\"\"Use SMS-based TOTP instead of authenticator-based.\"\"\"\n\n provider = models.TextField(choices=SMSProviders.choices)\n\n from_number = models.TextField()\n\n account_sid = models.TextField()\n auth = models.TextField()\n auth_password = models.TextField(default=\"\", blank=True)\n auth_type = models.TextField(choices=SMSAuthTypes.choices, default=SMSAuthTypes.BASIC)\n\n verify_only = models.BooleanField(\n default=False,\n help_text=_(\n \"When enabled, the Phone number is only used during enrollment to verify the \"\n \"users authenticity. 
Only a hash of the phone number is saved to ensure it is \"\n \"not re-used in the future.\"\n ),\n )\n\n mapping = models.ForeignKey(\n NotificationWebhookMapping,\n null=True,\n default=None,\n on_delete=models.SET_NULL,\n help_text=_(\"Optionally modify the payload being sent to custom providers.\"),\n )\n\n def send(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send message via selected provider\"\"\"\n if self.provider == SMSProviders.TWILIO:\n return self.send_twilio(token, device)\n if self.provider == SMSProviders.GENERIC:\n return self.send_generic(token, device)\n raise ValueError(f\"invalid provider {self.provider}\")\n\n def get_message(self, token: str) -> str:\n \"\"\"Get SMS message\"\"\"\n return _(\"Use this code to authenticate in authentik: %(token)s\" % {\"token\": token})\n\n def send_twilio(self, token: str, device: \"SMSDevice\"):\n \"\"\"send sms via twilio provider\"\"\"\n client = Client(self.account_sid, self.auth)\n\n try:\n message = client.messages.create(\n to=device.phone_number, from_=self.from_number, body=str(self.get_message(token))\n )\n LOGGER.debug(\"Sent SMS\", to=device, message=message.sid)\n except TwilioRestException as exc:\n LOGGER.warning(\"Error sending token by Twilio SMS\", exc=exc, msg=exc.msg)\n raise ValidationError(exc.msg)\n\n def send_generic(self, token: str, device: \"SMSDevice\"):\n \"\"\"Send SMS via outside API\"\"\"\n payload = {\n \"From\": self.from_number,\n \"To\": device.phone_number,\n \"Body\": token,\n \"Message\": str(self.get_message(token)),\n }\n\n if self.mapping:\n payload = sanitize_item(\n self.mapping.evaluate(\n user=device.user,\n request=None,\n device=device,\n token=token,\n stage=self,\n )\n )\n\n if self.auth_type == SMSAuthTypes.BEARER:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n headers={\"Authorization\": f\"Bearer {self.auth}\"},\n )\n elif self.auth_type == SMSAuthTypes.BASIC:\n response = get_http_session().post(\n self.account_sid,\n json=payload,\n auth=(self.auth, self.auth_password),\n )\n else:\n raise ValueError(f\"Invalid Auth type '{self.auth_type}'\")\n\n LOGGER.debug(\"Sent SMS\", to=device.phone_number)\n try:\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\n \"Error sending token by generic SMS\",\n exc=exc,\n status=response.status_code,\n body=response.text[:100],\n )\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=\"Error sending SMS\",\n exc=exception_to_string(exc),\n status_code=response.status_code,\n body=response.text,\n ).set_user(device.user).save()\n if response.status_code >= 400:\n raise ValidationError(response.text)\n raise\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import AuthenticatorSMSStageSerializer\n\n return AuthenticatorSMSStageSerializer\n\n @property\n def type(self) -> type[View]:\n from authentik.stages.authenticator_sms.stage import AuthenticatorSMSStageView\n\n return AuthenticatorSMSStageView\n\n @property\n def component(self) -> str:\n return \"ak-stage-authenticator-sms-form\"\n\n def ui_user_settings(self) -> Optional[UserSettingSerializer]:\n return UserSettingSerializer(\n data={\n \"title\": self.friendly_name or str(self._meta.verbose_name),\n \"component\": \"ak-user-settings-authenticator-sms\",\n }\n )\n\n def __str__(self) -> str:\n return f\"SMS Authenticator Setup Stage {self.name}\"\n\n class Meta:\n verbose_name = _(\"SMS Authenticator Setup Stage\")\n verbose_name_plural = _(\"SMS Authenticator 
Setup Stages\")\n\n\ndef hash_phone_number(phone_number: str) -> str:\n \"\"\"Hash phone number with prefix\"\"\"\n return \"hash:\" + sha256(phone_number.encode()).hexdigest()\n\n\nclass SMSDevice(SerializerModel, SideChannelDevice):\n \"\"\"SMS Device\"\"\"\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n\n # Connect to the stage to when validating access we know the API Credentials\n stage = models.ForeignKey(AuthenticatorSMSStage, on_delete=models.CASCADE)\n\n phone_number = models.TextField()\n\n last_t = models.DateTimeField(auto_now=True)\n\n def set_hashed_number(self):\n \"\"\"Set phone_number to hashed number\"\"\"\n self.phone_number = hash_phone_number(self.phone_number)\n\n @property\n def is_hashed(self) -> bool:\n \"\"\"Check if the phone number is hashed\"\"\"\n return self.phone_number.startswith(\"hash:\")\n\n @property\n def serializer(self) -> type[BaseSerializer]:\n from authentik.stages.authenticator_sms.api import SMSDeviceSerializer\n\n return SMSDeviceSerializer\n\n def verify_token(self, token):\n valid = super().verify_token(token)\n if valid:\n self.save()\n return valid\n\n def __str__(self):\n return str(self.name) or str(self.user)\n\n class Meta:\n verbose_name = _(\"SMS Device\")\n verbose_name_plural = _(\"SMS Devices\")\n unique_together = ((\"stage\", \"phone_number\"),)\n", "path": "authentik/stages/authenticator_sms/models.py"}]}
| 2,523 | 115 |
gh_patches_debug_29664
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-1542
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
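
For illustration, a minimal sketch of the kind of package-data access the issue describes (the package name is a placeholder, not taken from the repository):

```python
from importlib.resources import files

# "some_package" is a placeholder. For a package installed as a zip/wheel, the returned
# Traversable is not a directory on the local filesystem, so validating it with
# pydantic's DirectoryPath rejects it even though the files are perfectly readable.
static_root = files("some_package") / "static"
html = static_root.joinpath("index.html").read_text()
print(html[:80])
```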
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 import importlib.metadata
2 import os
3 import re
4 from functools import partial
5 from typing import Any
6
7 from sphinx.addnodes import document
8 from sphinx.application import Sphinx
9
10 __all__ = ["setup", "update_html_context"]
11
12
13 project = "Litestar"
14 copyright = "2023, Litestar-Org"
15 author = "Litestar-Org"
16 release = os.getenv("_LITESTAR_DOCS_BUILD_VERSION", importlib.metadata.version("litestar").rsplit(".")[0])
17
18 extensions = [
19 "sphinx.ext.intersphinx",
20 "sphinx.ext.autosectionlabel",
21 "sphinx.ext.autodoc",
22 "sphinx.ext.napoleon",
23 "sphinx_design",
24 "auto_pytabs.sphinx_ext",
25 "tools.sphinx_ext",
26 "sphinx_copybutton",
27 "sphinxcontrib.mermaid",
28 ]
29
30 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
31
32
33 intersphinx_mapping = {
34 "python": ("https://docs.python.org/3", None),
35 "msgspec": ("https://jcristharif.com/msgspec/", None),
36 "anyio": ("https://anyio.readthedocs.io/en/stable/", None),
37 "multidict": ("https://multidict.aio-libs.org/en/stable/", None),
38 "sqlalchemy": ("https://docs.sqlalchemy.org/en/20/", None),
39 "click": ("https://click.palletsprojects.com/en/8.1.x/", None),
40 "redis": ("https://redis-py.readthedocs.io/en/stable/", None),
41 "picologging": ("https://microsoft.github.io/picologging", None),
42 "structlog": ("https://www.structlog.org/en/stable/", None),
43 "tortoise": ("https://tortoise.github.io/", None),
44 "piccolo": ("https://piccolo-orm.readthedocs.io/en/latest", None),
45 "opentelemetry": ("https://opentelemetry-python.readthedocs.io/en/latest/", None),
46 }
47
48
49 napoleon_google_docstring = True
50 napoleon_include_special_with_doc = True
51 napoleon_use_admonition_for_examples = True
52 napoleon_use_admonition_for_notes = True
53 napoleon_use_admonition_for_references = False
54 napoleon_attr_annotations = True
55
56 autoclass_content = "class"
57 autodoc_class_signature = "separated"
58 autodoc_default_options = {"special-members": "__init__", "show-inheritance": True, "members": True}
59 autodoc_member_order = "bysource"
60 autodoc_typehints_format = "short"
61
62
63 nitpicky = True
64 nitpick_ignore = [
65 # external library / undocumented external
66 ("py:class", "BaseModel"),
67 ("py:class", "pydantic.main.BaseModel"),
68 ("py:class", "pydantic.generics.GenericModel"),
69 ("py:class", "redis.asyncio.Redis"),
70 ("py:class", "sqlalchemy.orm.decl_api.DeclarativeMeta"),
71 ("py:class", "sqlalchemy.sql.sqltypes.TupleType"),
72 ("py:class", "sqlalchemy.dialects.postgresql.named_types.ENUM"),
73 # type vars and aliases / intentionally undocumented
74 ("py:class", "RouteHandlerType"),
75 ("py:obj", "litestar.security.base.AuthType"),
76 ("py:class", "ControllerRouterHandler"),
77 ("py:class", "PathParameterDefinition"),
78 ("py:class", "BaseSessionBackendT"),
79 ("py:class", "AnyIOBackend"),
80 ("py:class", "T"),
81 ("py:class", "C"),
82 ("py:class", "EmptyType"),
83 # intentionally undocumented
84 ("py:class", "NoneType"),
85 ("py:class", "litestar._signature.field.SignatureField"),
86 ("py:class", "litestar.utils.signature.ParsedType"),
87 ("py:class", "litestar.utils.signature.ParsedSignature"),
88 ("py:class", "litestar.utils.signature.ParsedParameter"),
89 ("py:class", "litestar.utils.sync.AsyncCallable"),
90 ]
91 nitpick_ignore_regex = [
92 (r"py:.*", r"litestar\.types.*"),
93 (r"py:.*", r"litestar.*\.T"),
94 (r"py:.*", r".*R_co"),
95 (r"py:.*", r".*UserType"),
96 (r"py:.*", r"litestar\.middleware\.session\.base\.BaseSessionBackendT"),
97 (r"py:obj", r"typing\..*"),
98 (r"py:.*", r"httpx.*"),
99 # type vars
100 ("py:.*", r"litestar\.pagination\.C"),
101 ("py:.*", r"litestar.middleware.session.base.ConfigT"),
102 ("py:.*", r"multidict\..*"),
103 (r"py:.*", r"litestar\.connection\.base\.UserT"),
104 (r"py:.*", r"litestar\.connection\.base\.AuthT"),
105 (r"py:.*", r"litestar\.connection\.base\.StateT"),
106 (r"py:.*", r"litestar\.connection\.base\.HandlerT"),
107 ]
108
109 # Warnings about missing references to those targets in the specified location will be ignored.
110 # The source of the references is taken 1:1 from the warnings as reported by Sphinx, e.g
111 # **/litestar/testing/client/async_client.py:docstring of litestar.testing.AsyncTestClient.exit_stack:1: WARNING: py:class reference target not found: AsyncExitStack
112 # would be added as: "litestar.testing.AsyncTestClient.exit_stack": {"AsyncExitStack"},
113 ignore_missing_refs = {
114 # No idea what autodoc is doing here. Possibly unfixable on our end
115 "litestar.template.base.TemplateEngineProtocol.get_template": {"litestar.template.base.T_co"},
116 "litestar.template": {"litestar.template.base.T_co"},
117 "litestar.openapi.OpenAPIController.security": {"SecurityRequirement"},
118 "litestar.contrib.sqlalchemy_1.plugin.SQLAlchemyPlugin.handle_string_type": {"BINARY", "VARBINARY", "LargeBinary"},
119 "litestar.contrib.sqlalchemy_1.plugin.SQLAlchemyPlugin.is_plugin_supported_type": {"DeclarativeMeta"},
120 re.compile(r"litestar\.plugins.*"): re.compile(".*(ModelT|DataContainerT)"),
121 re.compile(r"litestar\.contrib\.sqlalchemy\.init_plugin\.config.*"): re.compile(
122 ".*(ConnectionT|EngineT|SessionT|SessionMakerT)"
123 ),
124 }
125
126
127 auto_pytabs_min_version = (3, 8)
128 auto_pytabs_max_version = (3, 11)
129 auto_pytabs_compat_mode = True
130
131 autosectionlabel_prefix_document = True
132
133 suppress_warnings = [
134 "autosectionlabel.*",
135 "ref.python", # TODO: remove when https://github.com/sphinx-doc/sphinx/issues/4961 is fixed
136 ]
137
138 html_theme = "litestar_sphinx_theme"
139 html_static_path = ["_static"]
140 html_js_files = ["versioning.js"]
141 html_css_files = ["style.css"]
142 html_show_sourcelink = False
143 html_title = "Litestar Framework"
144
145
146 html_theme_options = {
147 "use_page_nav": False,
148 "github_repo_name": "litestar",
149 "logo": {
150 "link": "https://litestar.dev",
151 },
152 "extra_navbar_items": {
153 "Documentation": "index",
154 "Community": {
155 "Contribution Guide": "contribution-guide",
156 "Code of Conduct": "https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md",
157 },
158 "About": {
159 "Organization": "https://litestar.dev/about/organization",
160 "Releases": "https://litestar.dev/about/litestar-releases",
161 },
162 "Release notes": {
163 "2.0 migration guide": "release-notes/migration_guide_2",
164 "2.x Changelog": "https://docs.litestar.dev/2/release-notes/changelog.html",
165 "1.x Changelog": "https://docs.litestar.dev/1/release-notes/changelog.html",
166 },
167 },
168 }
169
170
171 def update_html_context(
172 app: Sphinx, pagename: str, templatename: str, context: dict[str, Any], doctree: document
173 ) -> None:
174 context["generate_toctree_html"] = partial(context["generate_toctree_html"], startdepth=0)
175
176
177 def setup(app: Sphinx) -> dict[str, bool]:
178 app.setup_extension("litestar_sphinx_theme")
179 app.setup_extension("pydata_sphinx_theme")
180 app.connect("html-page-context", update_html_context)
181
182 return {"parallel_read_safe": True, "parallel_write_safe": True}
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -152,18 +152,35 @@
"extra_navbar_items": {
"Documentation": "index",
"Community": {
- "Contribution Guide": "contribution-guide",
- "Code of Conduct": "https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md",
+ "Contributing": {
+ "description": "Learn how to contribute to the Litestar project",
+ "link": "https://docs.litestar.dev/2/contribution-guide.html",
+ "icon": "contributing",
+ },
+ "Code of Conduct": {
+ "description": "Review the etiquette for interacting with the Litestar community",
+ "link": "https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md",
+ "icon": "coc",
+ },
},
"About": {
- "Organization": "https://litestar.dev/about/organization",
- "Releases": "https://litestar.dev/about/litestar-releases",
+ "Litestar Organization": {
+ "description": "Details about the Litestar organization",
+ "link": "about/organization",
+ "icon": "org",
+ },
+ "Releases": {
+ "description": "Details about the Litestar release process",
+ "link": "about/litestar-releases",
+ "icon": "releases",
+ },
},
"Release notes": {
"2.0 migration guide": "release-notes/migration_guide_2",
"2.x Changelog": "https://docs.litestar.dev/2/release-notes/changelog.html",
"1.x Changelog": "https://docs.litestar.dev/1/release-notes/changelog.html",
},
+ "Help": "https://github.com/orgs/litestar-org/discussions",
},
}
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -152,18 +152,35 @@\n \"extra_navbar_items\": {\n \"Documentation\": \"index\",\n \"Community\": {\n- \"Contribution Guide\": \"contribution-guide\",\n- \"Code of Conduct\": \"https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md\",\n+ \"Contributing\": {\n+ \"description\": \"Learn how to contribute to the Litestar project\",\n+ \"link\": \"https://docs.litestar.dev/2/contribution-guide.html\",\n+ \"icon\": \"contributing\",\n+ },\n+ \"Code of Conduct\": {\n+ \"description\": \"Review the etiquette for interacting with the Litestar community\",\n+ \"link\": \"https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md\",\n+ \"icon\": \"coc\",\n+ },\n },\n \"About\": {\n- \"Organization\": \"https://litestar.dev/about/organization\",\n- \"Releases\": \"https://litestar.dev/about/litestar-releases\",\n+ \"Litestar Organization\": {\n+ \"description\": \"Details about the Litestar organization\",\n+ \"link\": \"about/organization\",\n+ \"icon\": \"org\",\n+ },\n+ \"Releases\": {\n+ \"description\": \"Details about the Litestar release process\",\n+ \"link\": \"about/litestar-releases\",\n+ \"icon\": \"releases\",\n+ },\n },\n \"Release notes\": {\n \"2.0 migration guide\": \"release-notes/migration_guide_2\",\n \"2.x Changelog\": \"https://docs.litestar.dev/2/release-notes/changelog.html\",\n \"1.x Changelog\": \"https://docs.litestar.dev/1/release-notes/changelog.html\",\n },\n+ \"Help\": \"https://github.com/orgs/litestar-org/discussions\",\n },\n }\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "import importlib.metadata\nimport os\nimport re\nfrom functools import partial\nfrom typing import Any\n\nfrom sphinx.addnodes import document\nfrom sphinx.application import Sphinx\n\n__all__ = [\"setup\", \"update_html_context\"]\n\n\nproject = \"Litestar\"\ncopyright = \"2023, Litestar-Org\"\nauthor = \"Litestar-Org\"\nrelease = os.getenv(\"_LITESTAR_DOCS_BUILD_VERSION\", importlib.metadata.version(\"litestar\").rsplit(\".\")[0])\n\nextensions = [\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_design\",\n \"auto_pytabs.sphinx_ext\",\n \"tools.sphinx_ext\",\n \"sphinx_copybutton\",\n \"sphinxcontrib.mermaid\",\n]\n\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"msgspec\": (\"https://jcristharif.com/msgspec/\", None),\n \"anyio\": (\"https://anyio.readthedocs.io/en/stable/\", None),\n \"multidict\": (\"https://multidict.aio-libs.org/en/stable/\", None),\n \"sqlalchemy\": (\"https://docs.sqlalchemy.org/en/20/\", None),\n \"click\": (\"https://click.palletsprojects.com/en/8.1.x/\", None),\n \"redis\": (\"https://redis-py.readthedocs.io/en/stable/\", None),\n \"picologging\": (\"https://microsoft.github.io/picologging\", None),\n \"structlog\": (\"https://www.structlog.org/en/stable/\", None),\n \"tortoise\": (\"https://tortoise.github.io/\", None),\n \"piccolo\": (\"https://piccolo-orm.readthedocs.io/en/latest\", None),\n \"opentelemetry\": (\"https://opentelemetry-python.readthedocs.io/en/latest/\", None),\n}\n\n\nnapoleon_google_docstring = True\nnapoleon_include_special_with_doc = True\nnapoleon_use_admonition_for_examples = True\nnapoleon_use_admonition_for_notes = True\nnapoleon_use_admonition_for_references = False\nnapoleon_attr_annotations = True\n\nautoclass_content = \"class\"\nautodoc_class_signature = \"separated\"\nautodoc_default_options = {\"special-members\": \"__init__\", \"show-inheritance\": True, \"members\": True}\nautodoc_member_order = \"bysource\"\nautodoc_typehints_format = \"short\"\n\n\nnitpicky = True\nnitpick_ignore = [\n # external library / undocumented external\n (\"py:class\", \"BaseModel\"),\n (\"py:class\", \"pydantic.main.BaseModel\"),\n (\"py:class\", \"pydantic.generics.GenericModel\"),\n (\"py:class\", \"redis.asyncio.Redis\"),\n (\"py:class\", \"sqlalchemy.orm.decl_api.DeclarativeMeta\"),\n (\"py:class\", \"sqlalchemy.sql.sqltypes.TupleType\"),\n (\"py:class\", \"sqlalchemy.dialects.postgresql.named_types.ENUM\"),\n # type vars and aliases / intentionally undocumented\n (\"py:class\", \"RouteHandlerType\"),\n (\"py:obj\", \"litestar.security.base.AuthType\"),\n (\"py:class\", \"ControllerRouterHandler\"),\n (\"py:class\", \"PathParameterDefinition\"),\n (\"py:class\", \"BaseSessionBackendT\"),\n (\"py:class\", \"AnyIOBackend\"),\n (\"py:class\", \"T\"),\n (\"py:class\", \"C\"),\n (\"py:class\", \"EmptyType\"),\n # intentionally undocumented\n (\"py:class\", \"NoneType\"),\n (\"py:class\", \"litestar._signature.field.SignatureField\"),\n (\"py:class\", \"litestar.utils.signature.ParsedType\"),\n (\"py:class\", \"litestar.utils.signature.ParsedSignature\"),\n (\"py:class\", \"litestar.utils.signature.ParsedParameter\"),\n (\"py:class\", 
\"litestar.utils.sync.AsyncCallable\"),\n]\nnitpick_ignore_regex = [\n (r\"py:.*\", r\"litestar\\.types.*\"),\n (r\"py:.*\", r\"litestar.*\\.T\"),\n (r\"py:.*\", r\".*R_co\"),\n (r\"py:.*\", r\".*UserType\"),\n (r\"py:.*\", r\"litestar\\.middleware\\.session\\.base\\.BaseSessionBackendT\"),\n (r\"py:obj\", r\"typing\\..*\"),\n (r\"py:.*\", r\"httpx.*\"),\n # type vars\n (\"py:.*\", r\"litestar\\.pagination\\.C\"),\n (\"py:.*\", r\"litestar.middleware.session.base.ConfigT\"),\n (\"py:.*\", r\"multidict\\..*\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.UserT\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.AuthT\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.StateT\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.HandlerT\"),\n]\n\n# Warnings about missing references to those targets in the specified location will be ignored.\n# The source of the references is taken 1:1 from the warnings as reported by Sphinx, e.g\n# **/litestar/testing/client/async_client.py:docstring of litestar.testing.AsyncTestClient.exit_stack:1: WARNING: py:class reference target not found: AsyncExitStack\n# would be added as: \"litestar.testing.AsyncTestClient.exit_stack\": {\"AsyncExitStack\"},\nignore_missing_refs = {\n # No idea what autodoc is doing here. Possibly unfixable on our end\n \"litestar.template.base.TemplateEngineProtocol.get_template\": {\"litestar.template.base.T_co\"},\n \"litestar.template\": {\"litestar.template.base.T_co\"},\n \"litestar.openapi.OpenAPIController.security\": {\"SecurityRequirement\"},\n \"litestar.contrib.sqlalchemy_1.plugin.SQLAlchemyPlugin.handle_string_type\": {\"BINARY\", \"VARBINARY\", \"LargeBinary\"},\n \"litestar.contrib.sqlalchemy_1.plugin.SQLAlchemyPlugin.is_plugin_supported_type\": {\"DeclarativeMeta\"},\n re.compile(r\"litestar\\.plugins.*\"): re.compile(\".*(ModelT|DataContainerT)\"),\n re.compile(r\"litestar\\.contrib\\.sqlalchemy\\.init_plugin\\.config.*\"): re.compile(\n \".*(ConnectionT|EngineT|SessionT|SessionMakerT)\"\n ),\n}\n\n\nauto_pytabs_min_version = (3, 8)\nauto_pytabs_max_version = (3, 11)\nauto_pytabs_compat_mode = True\n\nautosectionlabel_prefix_document = True\n\nsuppress_warnings = [\n \"autosectionlabel.*\",\n \"ref.python\", # TODO: remove when https://github.com/sphinx-doc/sphinx/issues/4961 is fixed\n]\n\nhtml_theme = \"litestar_sphinx_theme\"\nhtml_static_path = [\"_static\"]\nhtml_js_files = [\"versioning.js\"]\nhtml_css_files = [\"style.css\"]\nhtml_show_sourcelink = False\nhtml_title = \"Litestar Framework\"\n\n\nhtml_theme_options = {\n \"use_page_nav\": False,\n \"github_repo_name\": \"litestar\",\n \"logo\": {\n \"link\": \"https://litestar.dev\",\n },\n \"extra_navbar_items\": {\n \"Documentation\": \"index\",\n \"Community\": {\n \"Contribution Guide\": \"contribution-guide\",\n \"Code of Conduct\": \"https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md\",\n },\n \"About\": {\n \"Organization\": \"https://litestar.dev/about/organization\",\n \"Releases\": \"https://litestar.dev/about/litestar-releases\",\n },\n \"Release notes\": {\n \"2.0 migration guide\": \"release-notes/migration_guide_2\",\n \"2.x Changelog\": \"https://docs.litestar.dev/2/release-notes/changelog.html\",\n \"1.x Changelog\": \"https://docs.litestar.dev/1/release-notes/changelog.html\",\n },\n },\n}\n\n\ndef update_html_context(\n app: Sphinx, pagename: str, templatename: str, context: dict[str, Any], doctree: document\n) -> None:\n context[\"generate_toctree_html\"] = partial(context[\"generate_toctree_html\"], 
startdepth=0)\n\n\ndef setup(app: Sphinx) -> dict[str, bool]:\n app.setup_extension(\"litestar_sphinx_theme\")\n app.setup_extension(\"pydata_sphinx_theme\")\n app.connect(\"html-page-context\", update_html_context)\n\n return {\"parallel_read_safe\": True, \"parallel_write_safe\": True}\n", "path": "docs/conf.py"}], "after_files": [{"content": "import importlib.metadata\nimport os\nimport re\nfrom functools import partial\nfrom typing import Any\n\nfrom sphinx.addnodes import document\nfrom sphinx.application import Sphinx\n\n__all__ = [\"setup\", \"update_html_context\"]\n\n\nproject = \"Litestar\"\ncopyright = \"2023, Litestar-Org\"\nauthor = \"Litestar-Org\"\nrelease = os.getenv(\"_LITESTAR_DOCS_BUILD_VERSION\", importlib.metadata.version(\"litestar\").rsplit(\".\")[0])\n\nextensions = [\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_design\",\n \"auto_pytabs.sphinx_ext\",\n \"tools.sphinx_ext\",\n \"sphinx_copybutton\",\n \"sphinxcontrib.mermaid\",\n]\n\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"msgspec\": (\"https://jcristharif.com/msgspec/\", None),\n \"anyio\": (\"https://anyio.readthedocs.io/en/stable/\", None),\n \"multidict\": (\"https://multidict.aio-libs.org/en/stable/\", None),\n \"sqlalchemy\": (\"https://docs.sqlalchemy.org/en/20/\", None),\n \"click\": (\"https://click.palletsprojects.com/en/8.1.x/\", None),\n \"redis\": (\"https://redis-py.readthedocs.io/en/stable/\", None),\n \"picologging\": (\"https://microsoft.github.io/picologging\", None),\n \"structlog\": (\"https://www.structlog.org/en/stable/\", None),\n \"tortoise\": (\"https://tortoise.github.io/\", None),\n \"piccolo\": (\"https://piccolo-orm.readthedocs.io/en/latest\", None),\n \"opentelemetry\": (\"https://opentelemetry-python.readthedocs.io/en/latest/\", None),\n}\n\n\nnapoleon_google_docstring = True\nnapoleon_include_special_with_doc = True\nnapoleon_use_admonition_for_examples = True\nnapoleon_use_admonition_for_notes = True\nnapoleon_use_admonition_for_references = False\nnapoleon_attr_annotations = True\n\nautoclass_content = \"class\"\nautodoc_class_signature = \"separated\"\nautodoc_default_options = {\"special-members\": \"__init__\", \"show-inheritance\": True, \"members\": True}\nautodoc_member_order = \"bysource\"\nautodoc_typehints_format = \"short\"\n\n\nnitpicky = True\nnitpick_ignore = [\n # external library / undocumented external\n (\"py:class\", \"BaseModel\"),\n (\"py:class\", \"pydantic.main.BaseModel\"),\n (\"py:class\", \"pydantic.generics.GenericModel\"),\n (\"py:class\", \"redis.asyncio.Redis\"),\n (\"py:class\", \"sqlalchemy.orm.decl_api.DeclarativeMeta\"),\n (\"py:class\", \"sqlalchemy.sql.sqltypes.TupleType\"),\n (\"py:class\", \"sqlalchemy.dialects.postgresql.named_types.ENUM\"),\n # type vars and aliases / intentionally undocumented\n (\"py:class\", \"RouteHandlerType\"),\n (\"py:obj\", \"litestar.security.base.AuthType\"),\n (\"py:class\", \"ControllerRouterHandler\"),\n (\"py:class\", \"PathParameterDefinition\"),\n (\"py:class\", \"BaseSessionBackendT\"),\n (\"py:class\", \"AnyIOBackend\"),\n (\"py:class\", \"T\"),\n (\"py:class\", \"C\"),\n (\"py:class\", \"EmptyType\"),\n # intentionally undocumented\n (\"py:class\", \"NoneType\"),\n (\"py:class\", \"litestar._signature.field.SignatureField\"),\n (\"py:class\", \"litestar.utils.signature.ParsedType\"),\n (\"py:class\", 
\"litestar.utils.signature.ParsedSignature\"),\n (\"py:class\", \"litestar.utils.signature.ParsedParameter\"),\n (\"py:class\", \"litestar.utils.sync.AsyncCallable\"),\n]\nnitpick_ignore_regex = [\n (r\"py:.*\", r\"litestar\\.types.*\"),\n (r\"py:.*\", r\"litestar.*\\.T\"),\n (r\"py:.*\", r\".*R_co\"),\n (r\"py:.*\", r\".*UserType\"),\n (r\"py:.*\", r\"litestar\\.middleware\\.session\\.base\\.BaseSessionBackendT\"),\n (r\"py:obj\", r\"typing\\..*\"),\n (r\"py:.*\", r\"httpx.*\"),\n # type vars\n (\"py:.*\", r\"litestar\\.pagination\\.C\"),\n (\"py:.*\", r\"litestar.middleware.session.base.ConfigT\"),\n (\"py:.*\", r\"multidict\\..*\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.UserT\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.AuthT\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.StateT\"),\n (r\"py:.*\", r\"litestar\\.connection\\.base\\.HandlerT\"),\n]\n\n# Warnings about missing references to those targets in the specified location will be ignored.\n# The source of the references is taken 1:1 from the warnings as reported by Sphinx, e.g\n# **/litestar/testing/client/async_client.py:docstring of litestar.testing.AsyncTestClient.exit_stack:1: WARNING: py:class reference target not found: AsyncExitStack\n# would be added as: \"litestar.testing.AsyncTestClient.exit_stack\": {\"AsyncExitStack\"},\nignore_missing_refs = {\n # No idea what autodoc is doing here. Possibly unfixable on our end\n \"litestar.template.base.TemplateEngineProtocol.get_template\": {\"litestar.template.base.T_co\"},\n \"litestar.template\": {\"litestar.template.base.T_co\"},\n \"litestar.openapi.OpenAPIController.security\": {\"SecurityRequirement\"},\n \"litestar.contrib.sqlalchemy_1.plugin.SQLAlchemyPlugin.handle_string_type\": {\"BINARY\", \"VARBINARY\", \"LargeBinary\"},\n \"litestar.contrib.sqlalchemy_1.plugin.SQLAlchemyPlugin.is_plugin_supported_type\": {\"DeclarativeMeta\"},\n re.compile(r\"litestar\\.plugins.*\"): re.compile(\".*(ModelT|DataContainerT)\"),\n re.compile(r\"litestar\\.contrib\\.sqlalchemy\\.init_plugin\\.config.*\"): re.compile(\n \".*(ConnectionT|EngineT|SessionT|SessionMakerT)\"\n ),\n}\n\n\nauto_pytabs_min_version = (3, 8)\nauto_pytabs_max_version = (3, 11)\nauto_pytabs_compat_mode = True\n\nautosectionlabel_prefix_document = True\n\nsuppress_warnings = [\n \"autosectionlabel.*\",\n \"ref.python\", # TODO: remove when https://github.com/sphinx-doc/sphinx/issues/4961 is fixed\n]\n\nhtml_theme = \"litestar_sphinx_theme\"\nhtml_static_path = [\"_static\"]\nhtml_js_files = [\"versioning.js\"]\nhtml_css_files = [\"style.css\"]\nhtml_show_sourcelink = False\nhtml_title = \"Litestar Framework\"\n\n\nhtml_theme_options = {\n \"use_page_nav\": False,\n \"github_repo_name\": \"litestar\",\n \"logo\": {\n \"link\": \"https://litestar.dev\",\n },\n \"extra_navbar_items\": {\n \"Documentation\": \"index\",\n \"Community\": {\n \"Contributing\": {\n \"description\": \"Learn how to contribute to the Litestar project\",\n \"link\": \"https://docs.litestar.dev/2/contribution-guide.html\",\n \"icon\": \"contributing\",\n },\n \"Code of Conduct\": {\n \"description\": \"Review the etiquette for interacting with the Litestar community\",\n \"link\": \"https://github.com/litestar-org/.github/blob/main/CODE_OF_CONDUCT.md\",\n \"icon\": \"coc\",\n },\n },\n \"About\": {\n \"Litestar Organization\": {\n \"description\": \"Details about the Litestar organization\",\n \"link\": \"about/organization\",\n \"icon\": \"org\",\n },\n \"Releases\": {\n \"description\": \"Details about the Litestar release 
process\",\n \"link\": \"about/litestar-releases\",\n \"icon\": \"releases\",\n },\n },\n \"Release notes\": {\n \"2.0 migration guide\": \"release-notes/migration_guide_2\",\n \"2.x Changelog\": \"https://docs.litestar.dev/2/release-notes/changelog.html\",\n \"1.x Changelog\": \"https://docs.litestar.dev/1/release-notes/changelog.html\",\n },\n \"Help\": \"https://github.com/orgs/litestar-org/discussions\",\n },\n}\n\n\ndef update_html_context(\n app: Sphinx, pagename: str, templatename: str, context: dict[str, Any], doctree: document\n) -> None:\n context[\"generate_toctree_html\"] = partial(context[\"generate_toctree_html\"], startdepth=0)\n\n\ndef setup(app: Sphinx) -> dict[str, bool]:\n app.setup_extension(\"litestar_sphinx_theme\")\n app.setup_extension(\"pydata_sphinx_theme\")\n app.connect(\"html-page-context\", update_html_context)\n\n return {\"parallel_read_safe\": True, \"parallel_write_safe\": True}\n", "path": "docs/conf.py"}]}
| 2,770 | 439 |
gh_patches_debug_1477
|
rasdani/github-patches
|
git_diff
|
ibis-project__ibis-5647
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug(postgres): cannot connect to postgres table with `tsvector` column
### What happened?
Reposting from https://stackoverflow.com/questions/74520302/why-cant-i-connect-ibis-to-a-postgres-table-with-a-tsvector-column
Implementing whatever postgres functionality exists around tsvector may not be something we want to do, but I think we should at least allow connecting to the table.
### What version of ibis are you using?
3.2.0
### What backend(s) are you using, if any?
postgres
### Relevant log output
```sh
KeyError Traceback (most recent call last)
File ~/anaconda3/envs/ec_extract/lib/python3.9/site-packages/multipledispatch/dispatcher.py:269, in Dispatcher.__call__(self, *args, **kwargs)
268 try:
--> 269 func = self._cache[types]
270 except KeyError:
KeyError: (<class 'sqlalchemy.dialects.postgresql.psycopg2.PGDialect_psycopg2'>, <class 'sqlalchemy.dialects.postgresql.base.TSVECTOR'>)
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
***
----> 29 main_table = con.table(table_name)[columns['column_list']]
File ~/anaconda3/envs/ec_extract/lib/python3.9/site-packages/ibis/backends/base/sql/alchemy/__init__.py:438, in BaseAlchemyBackend.table(self, name, database, schema)
428 return self.database(database=database).table(
429 name=name,
430 database=database,
431 schema=schema,
432 )
433 sqla_table = self._get_sqla_table(
434 name,
...
275 (self.name, str_signature(types)))
276 self._cache[types] = func
277 try:
NotImplementedError: Could not find signature for dtype: <PGDialect_psycopg2, TSVECTOR>
```
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/postgres/datatypes.py`
Content:
```
1 from __future__ import annotations
2
3 import parsy
4 import sqlalchemy as sa
5 import toolz
6 from sqlalchemy.dialects import postgresql
7 from sqlalchemy.dialects.postgresql.base import PGDialect
8
9 import ibis.expr.datatypes as dt
10 from ibis.backends.base.sql.alchemy import to_sqla_type
11 from ibis.common.parsing import (
12 COMMA,
13 LBRACKET,
14 LPAREN,
15 PRECISION,
16 RBRACKET,
17 RPAREN,
18 SCALE,
19 spaceless,
20 spaceless_string,
21 )
22
23 _BRACKETS = "[]"
24
25
26 def _parse_numeric(
27 text: str, default_decimal_parameters: tuple[int | None, int | None] = (None, None)
28 ) -> dt.DataType:
29 decimal = spaceless_string("decimal", "numeric").then(
30 parsy.seq(LPAREN.then(PRECISION.skip(COMMA)), SCALE.skip(RPAREN))
31 .optional(default_decimal_parameters)
32 .combine(dt.Decimal)
33 )
34
35 brackets = spaceless(LBRACKET).then(spaceless(RBRACKET))
36
37 pg_array = parsy.seq(decimal, brackets.at_least(1).map(len)).combine(
38 lambda value_type, n: toolz.nth(n, toolz.iterate(dt.Array, value_type))
39 )
40
41 ty = pg_array | decimal
42 return ty.parse(text)
43
44
45 def _get_type(typestr: str) -> dt.DataType:
46 is_array = typestr.endswith(_BRACKETS)
47 if (typ := _type_mapping.get(typestr.replace(_BRACKETS, ""))) is not None:
48 return dt.Array(typ) if is_array else typ
49 return _parse_numeric(typestr)
50
51
52 _type_mapping = {
53 "bigint": dt.int64,
54 "boolean": dt.bool,
55 "bytea": dt.binary,
56 "character varying": dt.string,
57 "character": dt.string,
58 "character(1)": dt.string,
59 "date": dt.date,
60 "double precision": dt.float64,
61 "geography": dt.geography,
62 "geometry": dt.geometry,
63 "inet": dt.inet,
64 "integer": dt.int32,
65 "interval": dt.interval,
66 "json": dt.json,
67 "jsonb": dt.json,
68 "line": dt.linestring,
69 "macaddr": dt.macaddr,
70 "macaddr8": dt.macaddr,
71 "numeric": dt.decimal,
72 "point": dt.point,
73 "polygon": dt.polygon,
74 "real": dt.float32,
75 "smallint": dt.int16,
76 "text": dt.string,
77 # NB: this isn't correct because we're losing the "with time zone"
78 # information (ibis doesn't have time type that is time-zone aware), but we
79 # try to do _something_ here instead of failing
80 "time with time zone": dt.time,
81 "time without time zone": dt.time,
82 "timestamp with time zone": dt.Timestamp("UTC"),
83 "timestamp without time zone": dt.timestamp,
84 "uuid": dt.uuid,
85 }
86
87
88 @to_sqla_type.register(PGDialect, dt.Array)
89 def _pg_array(dialect, itype):
90 # Unwrap the array element type because sqlalchemy doesn't allow arrays of
91 # arrays. This doesn't affect the underlying data.
92 while itype.is_array():
93 itype = itype.value_type
94 return sa.ARRAY(to_sqla_type(dialect, itype))
95
96
97 @to_sqla_type.register(PGDialect, dt.Map)
98 def _pg_map(dialect, itype):
99 if not (itype.key_type.is_string() and itype.value_type.is_string()):
100 raise TypeError(f"PostgreSQL only supports map<string, string>, got: {itype}")
101 return postgresql.HSTORE()
102
103
104 @dt.dtype.register(PGDialect, postgresql.DOUBLE_PRECISION)
105 def sa_double(_, satype, nullable=True):
106 return dt.Float64(nullable=nullable)
107
108
109 @dt.dtype.register(PGDialect, postgresql.UUID)
110 def sa_uuid(_, satype, nullable=True):
111 return dt.UUID(nullable=nullable)
112
113
114 @dt.dtype.register(PGDialect, postgresql.MACADDR)
115 def sa_macaddr(_, satype, nullable=True):
116 return dt.MACADDR(nullable=nullable)
117
118
119 @dt.dtype.register(PGDialect, postgresql.HSTORE)
120 def sa_hstore(_, satype, nullable=True):
121 return dt.Map(dt.string, dt.string, nullable=nullable)
122
123
124 @dt.dtype.register(PGDialect, postgresql.INET)
125 def sa_inet(_, satype, nullable=True):
126 return dt.INET(nullable=nullable)
127
128
129 @dt.dtype.register(PGDialect, postgresql.JSONB)
130 def sa_json(_, satype, nullable=True):
131 return dt.JSON(nullable=nullable)
132
133
134 _POSTGRES_FIELD_TO_IBIS_UNIT = {
135 "YEAR": "Y",
136 "MONTH": "M",
137 "DAY": "D",
138 "HOUR": "h",
139 "MINUTE": "m",
140 "SECOND": "s",
141 "YEAR TO MONTH": "M",
142 "DAY TO HOUR": "h",
143 "DAY TO MINUTE": "m",
144 "DAY TO SECOND": "s",
145 "HOUR TO MINUTE": "m",
146 "HOUR TO SECOND": "s",
147 "MINUTE TO SECOND": "s",
148 }
149
150
151 @dt.dtype.register(PGDialect, postgresql.INTERVAL)
152 def sa_postgres_interval(_, satype, nullable=True):
153 field = satype.fields.upper()
154 if (unit := _POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)) is None:
155 raise ValueError(f"Unknown PostgreSQL interval field {field!r}")
156 elif unit in {"Y", "M"}:
157 raise ValueError(
158 "Variable length intervals are not yet supported with PostgreSQL"
159 )
160 return dt.Interval(unit=unit, nullable=nullable)
161
162
163 @dt.dtype.register(PGDialect, sa.ARRAY)
164 def sa_pg_array(dialect, satype, nullable=True):
165 dimensions = satype.dimensions
166 if dimensions is not None and dimensions != 1:
167 raise NotImplementedError(
168 f"Nested array types not yet supported for {dialect.name} dialect"
169 )
170
171 value_dtype = dt.dtype(dialect, satype.item_type)
172 return dt.Array(value_dtype, nullable=nullable)
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ibis/backends/postgres/datatypes.py b/ibis/backends/postgres/datatypes.py
--- a/ibis/backends/postgres/datatypes.py
+++ b/ibis/backends/postgres/datatypes.py
@@ -170,3 +170,8 @@
value_dtype = dt.dtype(dialect, satype.item_type)
return dt.Array(value_dtype, nullable=nullable)
+
+
[email protected](PGDialect, postgresql.TSVECTOR)
+def sa_postgres_tsvector(_, satype, nullable=True):
+ return dt.String(nullable=nullable)
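A minimal sketch of how the added dispatch rule behaves, separate from the patch itself: it mirrors the `dt.dtype(dialect, satype)` dispatch pattern already used inside `datatypes.py`, and instantiating `PGDialect` directly is only for illustration (ibis normally obtains the dialect from a live connection). It assumes ibis with the postgres extra and SQLAlchemy are installed.

```python
# Sketch: with the TSVECTOR registration in place, the multipledispatch call
# that previously raised NotImplementedError now resolves to an ibis string.
import ibis.expr.datatypes as dt
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql.base import PGDialect

ibis_type = dt.dtype(PGDialect(), postgresql.TSVECTOR())
print(ibis_type)  # an ibis String dtype rather than an error
```

In practice the same code path is hit indirectly when `con.table(...)` reflects a table containing a `tsvector` column, which is the failure reported in the issue.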
|
{"golden_diff": "diff --git a/ibis/backends/postgres/datatypes.py b/ibis/backends/postgres/datatypes.py\n--- a/ibis/backends/postgres/datatypes.py\n+++ b/ibis/backends/postgres/datatypes.py\n@@ -170,3 +170,8 @@\n \n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n+\n+\[email protected](PGDialect, postgresql.TSVECTOR)\n+def sa_postgres_tsvector(_, satype, nullable=True):\n+ return dt.String(nullable=nullable)\n", "issue": "bug(postgres): cannot connect to postgres table with `tsvector` column\n### What happened?\n\nReposting from https://stackoverflow.com/questions/74520302/why-cant-i-connect-ibis-to-a-postgres-table-with-a-tsvector-column\r\n\r\nImplementing whatever postgres functionality exists around tsvector may not be something we want to do, but I think we should at least allow connecting to the table.\n\n### What version of ibis are you using?\n\n3.2.0\n\n### What backend(s) are you using, if any?\n\npostgres\n\n### Relevant log output\n\n```sh\nKeyError Traceback (most recent call last)\r\nFile ~/anaconda3/envs/ec_extract/lib/python3.9/site-packages/multipledispatch/dispatcher.py:269, in Dispatcher.__call__(self, *args, **kwargs)\r\n 268 try:\r\n--> 269 func = self._cache[types]\r\n 270 except KeyError:\r\n\r\nKeyError: (<class 'sqlalchemy.dialects.postgresql.psycopg2.PGDialect_psycopg2'>, <class 'sqlalchemy.dialects.postgresql.base.TSVECTOR'>)\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nNotImplementedError Traceback (most recent call last)\r\n*** \r\n----> 29 main_table = con.table(table_name)[columns['column_list']]\r\n\r\nFile ~/anaconda3/envs/ec_extract/lib/python3.9/site-packages/ibis/backends/base/sql/alchemy/__init__.py:438, in BaseAlchemyBackend.table(self, name, database, schema)\r\n 428 return self.database(database=database).table(\r\n 429 name=name,\r\n 430 database=database,\r\n 431 schema=schema,\r\n 432 )\r\n 433 sqla_table = self._get_sqla_table(\r\n 434 name,\r\n...\r\n 275 (self.name, str_signature(types)))\r\n 276 self._cache[types] = func\r\n 277 try:\r\n\r\nNotImplementedError: Could not find signature for dtype: <PGDialect_psycopg2, TSVECTOR>\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nimport parsy\nimport sqlalchemy as sa\nimport toolz\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\n\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy import to_sqla_type\nfrom ibis.common.parsing import (\n COMMA,\n LBRACKET,\n LPAREN,\n PRECISION,\n RBRACKET,\n RPAREN,\n SCALE,\n spaceless,\n spaceless_string,\n)\n\n_BRACKETS = \"[]\"\n\n\ndef _parse_numeric(\n text: str, default_decimal_parameters: tuple[int | None, int | None] = (None, None)\n) -> dt.DataType:\n decimal = spaceless_string(\"decimal\", \"numeric\").then(\n parsy.seq(LPAREN.then(PRECISION.skip(COMMA)), SCALE.skip(RPAREN))\n .optional(default_decimal_parameters)\n .combine(dt.Decimal)\n )\n\n brackets = spaceless(LBRACKET).then(spaceless(RBRACKET))\n\n pg_array = parsy.seq(decimal, brackets.at_least(1).map(len)).combine(\n lambda value_type, n: toolz.nth(n, toolz.iterate(dt.Array, value_type))\n )\n\n ty = pg_array | decimal\n return ty.parse(text)\n\n\ndef _get_type(typestr: str) -> dt.DataType:\n is_array = typestr.endswith(_BRACKETS)\n if (typ := _type_mapping.get(typestr.replace(_BRACKETS, \"\"))) is not None:\n return 
dt.Array(typ) if is_array else typ\n return _parse_numeric(typestr)\n\n\n_type_mapping = {\n \"bigint\": dt.int64,\n \"boolean\": dt.bool,\n \"bytea\": dt.binary,\n \"character varying\": dt.string,\n \"character\": dt.string,\n \"character(1)\": dt.string,\n \"date\": dt.date,\n \"double precision\": dt.float64,\n \"geography\": dt.geography,\n \"geometry\": dt.geometry,\n \"inet\": dt.inet,\n \"integer\": dt.int32,\n \"interval\": dt.interval,\n \"json\": dt.json,\n \"jsonb\": dt.json,\n \"line\": dt.linestring,\n \"macaddr\": dt.macaddr,\n \"macaddr8\": dt.macaddr,\n \"numeric\": dt.decimal,\n \"point\": dt.point,\n \"polygon\": dt.polygon,\n \"real\": dt.float32,\n \"smallint\": dt.int16,\n \"text\": dt.string,\n # NB: this isn't correct because we're losing the \"with time zone\"\n # information (ibis doesn't have time type that is time-zone aware), but we\n # try to do _something_ here instead of failing\n \"time with time zone\": dt.time,\n \"time without time zone\": dt.time,\n \"timestamp with time zone\": dt.Timestamp(\"UTC\"),\n \"timestamp without time zone\": dt.timestamp,\n \"uuid\": dt.uuid,\n}\n\n\n@to_sqla_type.register(PGDialect, dt.Array)\ndef _pg_array(dialect, itype):\n # Unwrap the array element type because sqlalchemy doesn't allow arrays of\n # arrays. This doesn't affect the underlying data.\n while itype.is_array():\n itype = itype.value_type\n return sa.ARRAY(to_sqla_type(dialect, itype))\n\n\n@to_sqla_type.register(PGDialect, dt.Map)\ndef _pg_map(dialect, itype):\n if not (itype.key_type.is_string() and itype.value_type.is_string()):\n raise TypeError(f\"PostgreSQL only supports map<string, string>, got: {itype}\")\n return postgresql.HSTORE()\n\n\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.HSTORE)\ndef sa_hstore(_, satype, nullable=True):\n return dt.Map(dt.string, dt.string, nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\n_POSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n if (unit := _POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)) is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length intervals are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](PGDialect, sa.ARRAY)\ndef sa_pg_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError(\n f\"Nested array types not 
yet supported for {dialect.name} dialect\"\n )\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n", "path": "ibis/backends/postgres/datatypes.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport parsy\nimport sqlalchemy as sa\nimport toolz\nfrom sqlalchemy.dialects import postgresql\nfrom sqlalchemy.dialects.postgresql.base import PGDialect\n\nimport ibis.expr.datatypes as dt\nfrom ibis.backends.base.sql.alchemy import to_sqla_type\nfrom ibis.common.parsing import (\n COMMA,\n LBRACKET,\n LPAREN,\n PRECISION,\n RBRACKET,\n RPAREN,\n SCALE,\n spaceless,\n spaceless_string,\n)\n\n_BRACKETS = \"[]\"\n\n\ndef _parse_numeric(\n text: str, default_decimal_parameters: tuple[int | None, int | None] = (None, None)\n) -> dt.DataType:\n decimal = spaceless_string(\"decimal\", \"numeric\").then(\n parsy.seq(LPAREN.then(PRECISION.skip(COMMA)), SCALE.skip(RPAREN))\n .optional(default_decimal_parameters)\n .combine(dt.Decimal)\n )\n\n brackets = spaceless(LBRACKET).then(spaceless(RBRACKET))\n\n pg_array = parsy.seq(decimal, brackets.at_least(1).map(len)).combine(\n lambda value_type, n: toolz.nth(n, toolz.iterate(dt.Array, value_type))\n )\n\n ty = pg_array | decimal\n return ty.parse(text)\n\n\ndef _get_type(typestr: str) -> dt.DataType:\n is_array = typestr.endswith(_BRACKETS)\n if (typ := _type_mapping.get(typestr.replace(_BRACKETS, \"\"))) is not None:\n return dt.Array(typ) if is_array else typ\n return _parse_numeric(typestr)\n\n\n_type_mapping = {\n \"bigint\": dt.int64,\n \"boolean\": dt.bool,\n \"bytea\": dt.binary,\n \"character varying\": dt.string,\n \"character\": dt.string,\n \"character(1)\": dt.string,\n \"date\": dt.date,\n \"double precision\": dt.float64,\n \"geography\": dt.geography,\n \"geometry\": dt.geometry,\n \"inet\": dt.inet,\n \"integer\": dt.int32,\n \"interval\": dt.interval,\n \"json\": dt.json,\n \"jsonb\": dt.json,\n \"line\": dt.linestring,\n \"macaddr\": dt.macaddr,\n \"macaddr8\": dt.macaddr,\n \"numeric\": dt.decimal,\n \"point\": dt.point,\n \"polygon\": dt.polygon,\n \"real\": dt.float32,\n \"smallint\": dt.int16,\n \"text\": dt.string,\n # NB: this isn't correct because we're losing the \"with time zone\"\n # information (ibis doesn't have time type that is time-zone aware), but we\n # try to do _something_ here instead of failing\n \"time with time zone\": dt.time,\n \"time without time zone\": dt.time,\n \"timestamp with time zone\": dt.Timestamp(\"UTC\"),\n \"timestamp without time zone\": dt.timestamp,\n \"uuid\": dt.uuid,\n}\n\n\n@to_sqla_type.register(PGDialect, dt.Array)\ndef _pg_array(dialect, itype):\n # Unwrap the array element type because sqlalchemy doesn't allow arrays of\n # arrays. 
This doesn't affect the underlying data.\n while itype.is_array():\n itype = itype.value_type\n return sa.ARRAY(to_sqla_type(dialect, itype))\n\n\n@to_sqla_type.register(PGDialect, dt.Map)\ndef _pg_map(dialect, itype):\n if not (itype.key_type.is_string() and itype.value_type.is_string()):\n raise TypeError(f\"PostgreSQL only supports map<string, string>, got: {itype}\")\n return postgresql.HSTORE()\n\n\[email protected](PGDialect, postgresql.DOUBLE_PRECISION)\ndef sa_double(_, satype, nullable=True):\n return dt.Float64(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.UUID)\ndef sa_uuid(_, satype, nullable=True):\n return dt.UUID(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.MACADDR)\ndef sa_macaddr(_, satype, nullable=True):\n return dt.MACADDR(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.HSTORE)\ndef sa_hstore(_, satype, nullable=True):\n return dt.Map(dt.string, dt.string, nullable=nullable)\n\n\[email protected](PGDialect, postgresql.INET)\ndef sa_inet(_, satype, nullable=True):\n return dt.INET(nullable=nullable)\n\n\[email protected](PGDialect, postgresql.JSONB)\ndef sa_json(_, satype, nullable=True):\n return dt.JSON(nullable=nullable)\n\n\n_POSTGRES_FIELD_TO_IBIS_UNIT = {\n \"YEAR\": \"Y\",\n \"MONTH\": \"M\",\n \"DAY\": \"D\",\n \"HOUR\": \"h\",\n \"MINUTE\": \"m\",\n \"SECOND\": \"s\",\n \"YEAR TO MONTH\": \"M\",\n \"DAY TO HOUR\": \"h\",\n \"DAY TO MINUTE\": \"m\",\n \"DAY TO SECOND\": \"s\",\n \"HOUR TO MINUTE\": \"m\",\n \"HOUR TO SECOND\": \"s\",\n \"MINUTE TO SECOND\": \"s\",\n}\n\n\[email protected](PGDialect, postgresql.INTERVAL)\ndef sa_postgres_interval(_, satype, nullable=True):\n field = satype.fields.upper()\n if (unit := _POSTGRES_FIELD_TO_IBIS_UNIT.get(field, None)) is None:\n raise ValueError(f\"Unknown PostgreSQL interval field {field!r}\")\n elif unit in {\"Y\", \"M\"}:\n raise ValueError(\n \"Variable length intervals are not yet supported with PostgreSQL\"\n )\n return dt.Interval(unit=unit, nullable=nullable)\n\n\[email protected](PGDialect, sa.ARRAY)\ndef sa_pg_array(dialect, satype, nullable=True):\n dimensions = satype.dimensions\n if dimensions is not None and dimensions != 1:\n raise NotImplementedError(\n f\"Nested array types not yet supported for {dialect.name} dialect\"\n )\n\n value_dtype = dt.dtype(dialect, satype.item_type)\n return dt.Array(value_dtype, nullable=nullable)\n\n\[email protected](PGDialect, postgresql.TSVECTOR)\ndef sa_postgres_tsvector(_, satype, nullable=True):\n return dt.String(nullable=nullable)\n", "path": "ibis/backends/postgres/datatypes.py"}]}
| 2,530 | 132 |
gh_patches_debug_6175
|
rasdani/github-patches
|
git_diff
|
google__fuzzbench-148
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[reports] Data.csv.gz don't need to contain id column
It has these columns because data.csv.gz contains data from a join query of snapshots on trials.
time_started and time_ended are from trials but they are probably not useful for the analysis people want to do so they just take up space at this point.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `analysis/queries.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Database queries for acquiring experiment data."""
15
16 import pandas as pd
17 import sqlalchemy
18
19 from database import models
20 from database import utils as db_utils
21
22
23 def get_experiment_data(experiment_names):
24 """Get measurements (such as coverage) on experiments from the database."""
25 snapshots_query = db_utils.query(models.Snapshot).options(
26 sqlalchemy.orm.joinedload('trial')).filter(
27 models.Snapshot.trial.has(
28 models.Trial.experiment.in_(experiment_names)))
29 return pd.read_sql_query(snapshots_query.statement, db_utils.engine)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/analysis/queries.py b/analysis/queries.py
--- a/analysis/queries.py
+++ b/analysis/queries.py
@@ -26,4 +26,8 @@
sqlalchemy.orm.joinedload('trial')).filter(
models.Snapshot.trial.has(
models.Trial.experiment.in_(experiment_names)))
- return pd.read_sql_query(snapshots_query.statement, db_utils.engine)
+
+ # id must be loaded to do the join but get rid of it now since
+ # trial_id provides the same info.
+ data = pd.read_sql_query(snapshots_query.statement, db_utils.engine)
+ return data.drop(columns=['id'])
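The issue also calls out `time_started` and `time_ended` as join-only columns, while the merged patch drops only `id`. A small stand-alone sketch of extending the same `drop` to those columns (the extra column names come from the issue text and are not verified against the `models` module):

```python
# Toy stand-in for the DataFrame returned by pd.read_sql_query; only the
# columns that are actually present get dropped.
import pandas as pd

data = pd.DataFrame(
    {"id": [1], "trial_id": [7], "time_started": [None], "edges_covered": [120]}
)
join_only = [c for c in ("id", "time_started", "time_ended") if c in data.columns]
print(data.drop(columns=join_only))  # keeps trial_id and the measurement columns
```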
|
{"golden_diff": "diff --git a/analysis/queries.py b/analysis/queries.py\n--- a/analysis/queries.py\n+++ b/analysis/queries.py\n@@ -26,4 +26,8 @@\n sqlalchemy.orm.joinedload('trial')).filter(\n models.Snapshot.trial.has(\n models.Trial.experiment.in_(experiment_names)))\n- return pd.read_sql_query(snapshots_query.statement, db_utils.engine)\n+\n+ # id must be loaded to do the join but get rid of it now since\n+ # trial_id provides the same info.\n+ data = pd.read_sql_query(snapshots_query.statement, db_utils.engine)\n+ return data.drop(columns=['id'])\n", "issue": "[reports] Data.csv.gz don't need to contain id column\nIt has these columns because data.csv.gz contains data from a join query of snapshots on trials.\r\ntime_started and time_ended are from trials but they are probably not useful for the analysis people want to do so they just take up space at this point.\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Database queries for acquiring experiment data.\"\"\"\n\nimport pandas as pd\nimport sqlalchemy\n\nfrom database import models\nfrom database import utils as db_utils\n\n\ndef get_experiment_data(experiment_names):\n \"\"\"Get measurements (such as coverage) on experiments from the database.\"\"\"\n snapshots_query = db_utils.query(models.Snapshot).options(\n sqlalchemy.orm.joinedload('trial')).filter(\n models.Snapshot.trial.has(\n models.Trial.experiment.in_(experiment_names)))\n return pd.read_sql_query(snapshots_query.statement, db_utils.engine)\n", "path": "analysis/queries.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Database queries for acquiring experiment data.\"\"\"\n\nimport pandas as pd\nimport sqlalchemy\n\nfrom database import models\nfrom database import utils as db_utils\n\n\ndef get_experiment_data(experiment_names):\n \"\"\"Get measurements (such as coverage) on experiments from the database.\"\"\"\n snapshots_query = db_utils.query(models.Snapshot).options(\n sqlalchemy.orm.joinedload('trial')).filter(\n models.Snapshot.trial.has(\n models.Trial.experiment.in_(experiment_names)))\n\n # id must be loaded to do the join but get rid of it now since\n # trial_id provides the same info.\n data = pd.read_sql_query(snapshots_query.statement, db_utils.engine)\n return data.drop(columns=['id'])\n", "path": "analysis/queries.py"}]}
| 620 | 150 |
gh_patches_debug_24520
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-4945
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
measure.label speed
This is triggered by [this Stackoverflow question](https://stackoverflow.com/questions/62804953/performance-differences-between-bwlabeln-on-matlab-and-skimage-measure-label-on/62842582#62842582). When I have large binary arrays to label and performance issues, I usually resort to calling the ndimage version. Could we imagine having a `fast_binary` flag which would call the ndimage function? A factor of 3-4 (from a few tests I just ran) is not bad...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/measure/_label.py`
Content:
```
1 from ._ccomp import label_cython as clabel
2
3
4 def label(input, background=None, return_num=False, connectivity=None):
5 r"""Label connected regions of an integer array.
6
7 Two pixels are connected when they are neighbors and have the same value.
8 In 2D, they can be neighbors either in a 1- or 2-connected sense.
9 The value refers to the maximum number of orthogonal hops to consider a
10 pixel/voxel a neighbor::
11
12 1-connectivity 2-connectivity diagonal connection close-up
13
14 [ ] [ ] [ ] [ ] [ ]
15 | \ | / | <- hop 2
16 [ ]--[x]--[ ] [ ]--[x]--[ ] [x]--[ ]
17 | / | \ hop 1
18 [ ] [ ] [ ] [ ]
19
20 Parameters
21 ----------
22 input : ndarray of dtype int
23 Image to label.
24 background : int, optional
25 Consider all pixels with this value as background pixels, and label
26 them as 0. By default, 0-valued pixels are considered as background
27 pixels.
28 return_num : bool, optional
29 Whether to return the number of assigned labels.
30 connectivity : int, optional
31 Maximum number of orthogonal hops to consider a pixel/voxel
32 as a neighbor.
33 Accepted values are ranging from 1 to input.ndim. If ``None``, a full
34 connectivity of ``input.ndim`` is used.
35
36 Returns
37 -------
38 labels : ndarray of dtype int
39 Labeled array, where all connected regions are assigned the
40 same integer value.
41 num : int, optional
42 Number of labels, which equals the maximum label index and is only
43 returned if return_num is `True`.
44
45 See Also
46 --------
47 regionprops
48
49 References
50 ----------
51 .. [1] Christophe Fiorio and Jens Gustedt, "Two linear time Union-Find
52 strategies for image processing", Theoretical Computer Science
53 154 (1996), pp. 165-181.
54 .. [2] Kensheng Wu, Ekow Otoo and Arie Shoshani, "Optimizing connected
55 component labeling algorithms", Paper LBNL-56864, 2005,
56 Lawrence Berkeley National Laboratory (University of California),
57 http://repositories.cdlib.org/lbnl/LBNL-56864
58
59 Examples
60 --------
61 >>> import numpy as np
62 >>> x = np.eye(3).astype(int)
63 >>> print(x)
64 [[1 0 0]
65 [0 1 0]
66 [0 0 1]]
67 >>> print(label(x, connectivity=1))
68 [[1 0 0]
69 [0 2 0]
70 [0 0 3]]
71 >>> print(label(x, connectivity=2))
72 [[1 0 0]
73 [0 1 0]
74 [0 0 1]]
75 >>> print(label(x, background=-1))
76 [[1 2 2]
77 [2 1 2]
78 [2 2 1]]
79 >>> x = np.array([[1, 0, 0],
80 ... [1, 1, 5],
81 ... [0, 0, 0]])
82 >>> print(label(x))
83 [[1 0 0]
84 [1 1 2]
85 [0 0 0]]
86 """
87 return clabel(input, background, return_num, connectivity)
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/measure/_label.py b/skimage/measure/_label.py
--- a/skimage/measure/_label.py
+++ b/skimage/measure/_label.py
@@ -1,6 +1,34 @@
+from scipy import ndimage
from ._ccomp import label_cython as clabel
+def _label_bool(image, background=None, return_num=False, connectivity=None):
+ """Faster implementation of clabel for boolean input.
+
+ See context: https://github.com/scikit-image/scikit-image/issues/4833
+ """
+ from ..morphology._util import _resolve_neighborhood
+ if background == 1:
+ image = ~image
+
+ if connectivity is None:
+ connectivity = image.ndim
+
+ if not 1 <= connectivity <= image.ndim:
+ raise ValueError(
+ f'Connectivity for {image.ndim}D image should '
+ f'be in [1, ..., {image.ndim}]. Got {connectivity}.'
+ )
+
+ selem = _resolve_neighborhood(None, connectivity, image.ndim)
+ result = ndimage.label(image, structure=selem)
+
+ if return_num:
+ return result
+ else:
+ return result[0]
+
+
def label(input, background=None, return_num=False, connectivity=None):
r"""Label connected regions of an integer array.
@@ -84,4 +112,8 @@
[1 1 2]
[0 0 0]]
"""
- return clabel(input, background, return_num, connectivity)
+ if input.dtype == bool:
+ return _label_bool(input, background=background,
+ return_num=return_num, connectivity=connectivity)
+ else:
+ return clabel(input, background, return_num, connectivity)
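For boolean input the patched `label` ends up calling `scipy.ndimage.label` with a structuring element that matches the requested connectivity. A quick self-contained check of that underlying call (toy-sized data; the 3-4x speedups mentioned in the issue only show up on large arrays):

```python
# Equivalent of label(x, connectivity=2) on a boolean image: a 3x3 structuring
# element of ones gives full (8-)connectivity, so the diagonal of an identity
# matrix becomes a single connected component.
import numpy as np
from scipy import ndimage

x = np.eye(3, dtype=bool)
labels, num = ndimage.label(x, structure=np.ones((3, 3), dtype=bool))
print(labels)
print(num)  # 1
```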
|
{"golden_diff": "diff --git a/skimage/measure/_label.py b/skimage/measure/_label.py\n--- a/skimage/measure/_label.py\n+++ b/skimage/measure/_label.py\n@@ -1,6 +1,34 @@\n+from scipy import ndimage\n from ._ccomp import label_cython as clabel\n \n \n+def _label_bool(image, background=None, return_num=False, connectivity=None):\n+ \"\"\"Faster implementation of clabel for boolean input.\n+\n+ See context: https://github.com/scikit-image/scikit-image/issues/4833\n+ \"\"\"\n+ from ..morphology._util import _resolve_neighborhood\n+ if background == 1:\n+ image = ~image\n+\n+ if connectivity is None:\n+ connectivity = image.ndim\n+\n+ if not 1 <= connectivity <= image.ndim:\n+ raise ValueError(\n+ f'Connectivity for {image.ndim}D image should '\n+ f'be in [1, ..., {image.ndim}]. Got {connectivity}.'\n+ )\n+\n+ selem = _resolve_neighborhood(None, connectivity, image.ndim)\n+ result = ndimage.label(image, structure=selem)\n+\n+ if return_num:\n+ return result\n+ else:\n+ return result[0]\n+\n+\n def label(input, background=None, return_num=False, connectivity=None):\n r\"\"\"Label connected regions of an integer array.\n \n@@ -84,4 +112,8 @@\n [1 1 2]\n [0 0 0]]\n \"\"\"\n- return clabel(input, background, return_num, connectivity)\n+ if input.dtype == bool:\n+ return _label_bool(input, background=background,\n+ return_num=return_num, connectivity=connectivity)\n+ else:\n+ return clabel(input, background, return_num, connectivity)\n", "issue": "measure.label speed\nThis is triggered by [this Stackoverflow question](https://stackoverflow.com/questions/62804953/performance-differences-between-bwlabeln-on-matlab-and-skimage-measure-label-on/62842582#62842582). When I have large binary arrays to label and performance issues, I usually resort to calling the ndimage version. Could we imagine having a `fast_binary` flag which would call the ndimage function? A factor of 3-4 (from a few tests I just ran) is not bad...\n", "before_files": [{"content": "from ._ccomp import label_cython as clabel\n\n\ndef label(input, background=None, return_num=False, connectivity=None):\n r\"\"\"Label connected regions of an integer array.\n\n Two pixels are connected when they are neighbors and have the same value.\n In 2D, they can be neighbors either in a 1- or 2-connected sense.\n The value refers to the maximum number of orthogonal hops to consider a\n pixel/voxel a neighbor::\n\n 1-connectivity 2-connectivity diagonal connection close-up\n\n [ ] [ ] [ ] [ ] [ ]\n | \\ | / | <- hop 2\n [ ]--[x]--[ ] [ ]--[x]--[ ] [x]--[ ]\n | / | \\ hop 1\n [ ] [ ] [ ] [ ]\n\n Parameters\n ----------\n input : ndarray of dtype int\n Image to label.\n background : int, optional\n Consider all pixels with this value as background pixels, and label\n them as 0. By default, 0-valued pixels are considered as background\n pixels.\n return_num : bool, optional\n Whether to return the number of assigned labels.\n connectivity : int, optional\n Maximum number of orthogonal hops to consider a pixel/voxel\n as a neighbor.\n Accepted values are ranging from 1 to input.ndim. If ``None``, a full\n connectivity of ``input.ndim`` is used.\n\n Returns\n -------\n labels : ndarray of dtype int\n Labeled array, where all connected regions are assigned the\n same integer value.\n num : int, optional\n Number of labels, which equals the maximum label index and is only\n returned if return_num is `True`.\n\n See Also\n --------\n regionprops\n\n References\n ----------\n .. 
[1] Christophe Fiorio and Jens Gustedt, \"Two linear time Union-Find\n strategies for image processing\", Theoretical Computer Science\n 154 (1996), pp. 165-181.\n .. [2] Kensheng Wu, Ekow Otoo and Arie Shoshani, \"Optimizing connected\n component labeling algorithms\", Paper LBNL-56864, 2005,\n Lawrence Berkeley National Laboratory (University of California),\n http://repositories.cdlib.org/lbnl/LBNL-56864\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.eye(3).astype(int)\n >>> print(x)\n [[1 0 0]\n [0 1 0]\n [0 0 1]]\n >>> print(label(x, connectivity=1))\n [[1 0 0]\n [0 2 0]\n [0 0 3]]\n >>> print(label(x, connectivity=2))\n [[1 0 0]\n [0 1 0]\n [0 0 1]]\n >>> print(label(x, background=-1))\n [[1 2 2]\n [2 1 2]\n [2 2 1]]\n >>> x = np.array([[1, 0, 0],\n ... [1, 1, 5],\n ... [0, 0, 0]])\n >>> print(label(x))\n [[1 0 0]\n [1 1 2]\n [0 0 0]]\n \"\"\"\n return clabel(input, background, return_num, connectivity)\n", "path": "skimage/measure/_label.py"}], "after_files": [{"content": "from scipy import ndimage\nfrom ._ccomp import label_cython as clabel\n\n\ndef _label_bool(image, background=None, return_num=False, connectivity=None):\n \"\"\"Faster implementation of clabel for boolean input.\n\n See context: https://github.com/scikit-image/scikit-image/issues/4833\n \"\"\"\n from ..morphology._util import _resolve_neighborhood\n if background == 1:\n image = ~image\n\n if connectivity is None:\n connectivity = image.ndim\n\n if not 1 <= connectivity <= image.ndim:\n raise ValueError(\n f'Connectivity for {image.ndim}D image should '\n f'be in [1, ..., {image.ndim}]. Got {connectivity}.'\n )\n\n selem = _resolve_neighborhood(None, connectivity, image.ndim)\n result = ndimage.label(image, structure=selem)\n\n if return_num:\n return result\n else:\n return result[0]\n\n\ndef label(input, background=None, return_num=False, connectivity=None):\n r\"\"\"Label connected regions of an integer array.\n\n Two pixels are connected when they are neighbors and have the same value.\n In 2D, they can be neighbors either in a 1- or 2-connected sense.\n The value refers to the maximum number of orthogonal hops to consider a\n pixel/voxel a neighbor::\n\n 1-connectivity 2-connectivity diagonal connection close-up\n\n [ ] [ ] [ ] [ ] [ ]\n | \\ | / | <- hop 2\n [ ]--[x]--[ ] [ ]--[x]--[ ] [x]--[ ]\n | / | \\ hop 1\n [ ] [ ] [ ] [ ]\n\n Parameters\n ----------\n input : ndarray of dtype int\n Image to label.\n background : int, optional\n Consider all pixels with this value as background pixels, and label\n them as 0. By default, 0-valued pixels are considered as background\n pixels.\n return_num : bool, optional\n Whether to return the number of assigned labels.\n connectivity : int, optional\n Maximum number of orthogonal hops to consider a pixel/voxel\n as a neighbor.\n Accepted values are ranging from 1 to input.ndim. If ``None``, a full\n connectivity of ``input.ndim`` is used.\n\n Returns\n -------\n labels : ndarray of dtype int\n Labeled array, where all connected regions are assigned the\n same integer value.\n num : int, optional\n Number of labels, which equals the maximum label index and is only\n returned if return_num is `True`.\n\n See Also\n --------\n regionprops\n\n References\n ----------\n .. [1] Christophe Fiorio and Jens Gustedt, \"Two linear time Union-Find\n strategies for image processing\", Theoretical Computer Science\n 154 (1996), pp. 165-181.\n .. 
[2] Kensheng Wu, Ekow Otoo and Arie Shoshani, \"Optimizing connected\n component labeling algorithms\", Paper LBNL-56864, 2005,\n Lawrence Berkeley National Laboratory (University of California),\n http://repositories.cdlib.org/lbnl/LBNL-56864\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.eye(3).astype(int)\n >>> print(x)\n [[1 0 0]\n [0 1 0]\n [0 0 1]]\n >>> print(label(x, connectivity=1))\n [[1 0 0]\n [0 2 0]\n [0 0 3]]\n >>> print(label(x, connectivity=2))\n [[1 0 0]\n [0 1 0]\n [0 0 1]]\n >>> print(label(x, background=-1))\n [[1 2 2]\n [2 1 2]\n [2 2 1]]\n >>> x = np.array([[1, 0, 0],\n ... [1, 1, 5],\n ... [0, 0, 0]])\n >>> print(label(x))\n [[1 0 0]\n [1 1 2]\n [0 0 0]]\n \"\"\"\n if input.dtype == bool:\n return _label_bool(input, background=background,\n return_num=return_num, connectivity=connectivity)\n else:\n return clabel(input, background, return_num, connectivity)\n", "path": "skimage/measure/_label.py"}]}
| 1,342 | 410 |
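For context on the scikit-image record above: the accepted patch routes boolean inputs through `scipy.ndimage.label`, which is the ndimage fallback the issue's 3-4x speed comparison refers to. A minimal benchmark sketch under assumed inputs (the array shape, seed, and repeat count are illustrative, not taken from the record):

```python
import numpy as np
from scipy import ndimage
from skimage import measure
from timeit import timeit

# Hypothetical boolean volume; shape chosen only for illustration.
rng = np.random.default_rng(0)
volume = rng.random((256, 256, 32)) > 0.5

t_ndimage = timeit(lambda: ndimage.label(volume), number=3)
t_skimage = timeit(lambda: measure.label(volume), number=3)
print(f"ndimage.label: {t_ndimage:.2f}s  measure.label: {t_skimage:.2f}s")
```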
gh_patches_debug_26316
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-424
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pin optional dependencies at the minor release level
# Description
To avoid having our prior releases break like `v0.0.15` did in Issue #396 it would be good to pin our optional dependencies at the minor release level for each release. This should safeguard us from old releases getting broken by API changes in the dependencies that we use as applications.
To be clear, I don't think we should limit the dependencies in `install_requires` beyond placing _lower_ bounds, but I do think that we should now be placing upper bounds on all of the optional dependencies as we are really more using those as **applications** in our library.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow>=1.12.0',
16 'tensorflow-probability>=0.5.0',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch>=1.0.0'],
21 'mxnet': [
22 'mxnet>=1.0.0',
23 'requests<2.19.0,>=2.18.4',
24 'numpy<1.15.0,>=1.8.2',
25 'requests<2.19.0,>=2.18.4',
26 ],
27 # 'dask': [
28 # 'dask[array]'
29 # ],
30 'xmlimport': ['uproot'],
31 'minuit': ['iminuit'],
32 'develop': [
33 'pyflakes',
34 'pytest<4.0.0,>=3.5.1',
35 'pytest-cov>=2.5.1',
36 'pytest-mock',
37 'pytest-benchmark[histogram]',
38 'pytest-console-scripts',
39 'python-coveralls',
40 'coverage>=4.0', # coveralls
41 'matplotlib',
42 'jupyter',
43 'nbdime',
44 'uproot>=3.3.0',
45 'papermill>=0.16.0',
46 'graphviz',
47 'bumpversion',
48 'sphinx',
49 'sphinxcontrib-bibtex',
50 'sphinxcontrib-napoleon',
51 'sphinx_rtd_theme',
52 'nbsphinx',
53 'sphinx-issues',
54 'm2r',
55 'jsonpatch',
56 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
57 'pre-commit',
58 'black;python_version>="3.6"', # Black is Python3 only
59 'twine',
60 ],
61 }
62 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
63
64 setup(
65 name='pyhf',
66 version='0.0.16',
67 description='(partial) pure python histfactory implementation',
68 long_description=long_description,
69 long_description_content_type='text/markdown',
70 url='https://github.com/diana-hep/pyhf',
71 author='Lukas Heinrich',
72 author_email='[email protected]',
73 license='Apache',
74 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
75 classifiers=[
76 "Programming Language :: Python :: 2",
77 "Programming Language :: Python :: 2.7",
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.6",
80 "Programming Language :: Python :: 3.7",
81 ],
82 packages=find_packages(),
83 include_package_data=True,
84 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
85 install_requires=[
86 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
87 'click>=6.0', # for console scripts,
88 'tqdm', # for readxml
89 'six', # for modifiers
90 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
91 'jsonpatch',
92 ],
93 extras_require=extras_require,
94 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
95 dependency_links=[],
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,18 +12,13 @@
extras_require = {
'tensorflow': [
- 'tensorflow>=1.12.0',
- 'tensorflow-probability>=0.5.0',
+ 'tensorflow~=1.13',
+ 'tensorflow-probability~=0.5',
'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
'setuptools<=39.1.0',
],
- 'torch': ['torch>=1.0.0'],
- 'mxnet': [
- 'mxnet>=1.0.0',
- 'requests<2.19.0,>=2.18.4',
- 'numpy<1.15.0,>=1.8.2',
- 'requests<2.19.0,>=2.18.4',
- ],
+ 'torch': ['torch~=1.0'],
+ 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],
# 'dask': [
# 'dask[array]'
# ],
@@ -31,7 +26,7 @@
'minuit': ['iminuit'],
'develop': [
'pyflakes',
- 'pytest<4.0.0,>=3.5.1',
+ 'pytest~=3.5',
'pytest-cov>=2.5.1',
'pytest-mock',
'pytest-benchmark[histogram]',
@@ -41,8 +36,8 @@
'matplotlib',
'jupyter',
'nbdime',
- 'uproot>=3.3.0',
- 'papermill>=0.16.0',
+ 'uproot~=3.3',
+ 'papermill~=0.16',
'graphviz',
'bumpversion',
'sphinx',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,18 +12,13 @@\n \n extras_require = {\n 'tensorflow': [\n- 'tensorflow>=1.12.0',\n- 'tensorflow-probability>=0.5.0',\n+ 'tensorflow~=1.13',\n+ 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n- 'torch': ['torch>=1.0.0'],\n- 'mxnet': [\n- 'mxnet>=1.0.0',\n- 'requests<2.19.0,>=2.18.4',\n- 'numpy<1.15.0,>=1.8.2',\n- 'requests<2.19.0,>=2.18.4',\n- ],\n+ 'torch': ['torch~=1.0'],\n+ 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n@@ -31,7 +26,7 @@\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n- 'pytest<4.0.0,>=3.5.1',\n+ 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n@@ -41,8 +36,8 @@\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n- 'uproot>=3.3.0',\n- 'papermill>=0.16.0',\n+ 'uproot~=3.3',\n+ 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n", "issue": "Pin optional dependencies at the minor release level\n# Description\r\n\r\nTo avoid having our prior releases break like `v0.0.15` did in Issue #396 it would be good to pin our optional dependencies at the minor release level for each release. This should safeguard us from old releases getting broken by API changes in the dependencies that we use as applications.\r\n\r\nTo be clear, I don't think we should limit the dependencies in `install_requires` beyond placing _lower_ bounds, but I do think that we should now be placing upper bounds on all of the optional dependencies as we are really more using those as **applications** in our library.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.12.0',\n 'tensorflow-probability>=0.5.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=1.0.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest<4.0.0,>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.3.0',\n 'papermill>=0.16.0',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.16',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas 
Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.16',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n 
entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
| 1,473 | 492 |
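A note on the specifier syntax the pyhf patch above switches to: `~=` is the PEP 440 compatible-release operator, so `tensorflow~=1.13` allows any 1.x release at or above 1.13 but excludes 2.0. A small sketch using the `packaging` library (the candidate version strings are only illustrative):

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=1.13")
for candidate in ["1.13.0", "1.15.2", "2.0.0"]:
    # __contains__ parses the string into a Version and checks it against the set
    print(candidate, candidate in spec)
# expected output: 1.13.0 True, 1.15.2 True, 2.0.0 False
```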
gh_patches_debug_17442
|
rasdani/github-patches
|
git_diff
|
spacetelescope__jwql-857
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add code to manage.py to create necessesary symlinks to run web app locally
In order to run the JWQL web app locally, one must create symbolic links to the `outputs`, `thumbnails`, `preview_images`, and `filesystem` directories. We can add some code in `website.manage.py` in order to do this automatically. Something like this:
```python
from jwql.utils.utils import get_config()
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql_proj.settings")
# Create symbolic links here (if they don't already exist)
for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnails_filesystem']:
path = get_config()[directory]
# code to create symlink
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
```
Credit @york-stsci for the suggestion!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jwql/website/manage.py`
Content:
```
1 #! /usr/bin/env python
2
3 """Utility module for administrative tasks.
4
5 A python script version of Django's command-line utility for
6 administrative tasks (``django-admin``). Additionally, puts the project
7 package on ``sys.path`` and defines the ``DJANGO_SETTINGS_MODULE``
8 variable to point to the jwql ``settings.py`` file.
9
10 Generated by ``django-admin startproject`` using Django 2.0.1.
11
12 Use
13 ---
14
15 To run the web app server:
16 ::
17
18 python manage.py runserver
19
20 To start the interactive shellL:
21 ::
22
23 python manage.py shell
24
25 To run tests for all installed apps:
26 ::
27
28 python manage.py test
29
30 References
31 ----------
32 For more information please see:
33 ``https://docs.djangoproject.com/en/2.0/ref/django-admin/``
34 """
35
36 import os
37 import sys
38
39 if __name__ == "__main__":
40
41 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql_proj.settings")
42
43 try:
44 from django.core.management import execute_from_command_line
45 except ImportError as exc:
46 raise ImportError(
47 "Couldn't import Django. Are you sure it's installed and "
48 "available on your PYTHONPATH environment variable? Did you "
49 "forget to activate a virtual environment?"
50 ) from exc
51 execute_from_command_line(sys.argv)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jwql/website/manage.py b/jwql/website/manage.py
--- a/jwql/website/manage.py
+++ b/jwql/website/manage.py
@@ -36,10 +36,25 @@
import os
import sys
+from jwql.utils.utils import get_config
+
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jwql_proj.settings")
+ directory_mapping = {
+ 'filesystem': 'filesystem',
+ 'outputs': 'outputs',
+ 'preview_image_filesystem': 'preview_images',
+ 'thumbnail_filesystem': 'thumbnails'
+ }
+
+ for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnail_filesystem']:
+ symlink_location = os.path.join(os.path.dirname(__file__), 'apps', 'jwql', 'static', directory_mapping[directory])
+ if not os.path.exists(symlink_location):
+ symlink_path = get_config()[directory]
+ os.symlink(symlink_path, symlink_location)
+
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
|
{"golden_diff": "diff --git a/jwql/website/manage.py b/jwql/website/manage.py\n--- a/jwql/website/manage.py\n+++ b/jwql/website/manage.py\n@@ -36,10 +36,25 @@\n import os\n import sys\n \n+from jwql.utils.utils import get_config\n+\n if __name__ == \"__main__\":\n \n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"jwql_proj.settings\")\n \n+ directory_mapping = {\n+ 'filesystem': 'filesystem',\n+ 'outputs': 'outputs',\n+ 'preview_image_filesystem': 'preview_images',\n+ 'thumbnail_filesystem': 'thumbnails'\n+ }\n+\n+ for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnail_filesystem']:\n+ symlink_location = os.path.join(os.path.dirname(__file__), 'apps', 'jwql', 'static', directory_mapping[directory])\n+ if not os.path.exists(symlink_location):\n+ symlink_path = get_config()[directory]\n+ os.symlink(symlink_path, symlink_location)\n+\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n", "issue": "Add code to manage.py to create necessesary symlinks to run web app locally \nIn order to run the JWQL web app locally, one must create symbolic links to the `outputs`, `thumbnails`, `preview_images`, and `filesystem` directories. We can add some code in `website.manage.py` in order to do this automatically. Something like this:\r\n\r\n\r\n```python\r\nfrom jwql.utils.utils import get_config()\r\n\r\nif __name__ == \"__main__\":\r\n\r\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"jwql_proj.settings\")\r\n\r\n # Create symbolic links here (if they don't already exist)\r\n for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnails_filesystem']:\r\n path = get_config()[directory]\r\n # code to create symlink\r\n\r\n try:\r\n from django.core.management import execute_from_command_line\r\n except ImportError as exc:\r\n raise ImportError(\r\n \"Couldn't import Django. Are you sure it's installed and \"\r\n \"available on your PYTHONPATH environment variable? Did you \"\r\n \"forget to activate a virtual environment?\"\r\n ) from exc\r\n execute_from_command_line(sys.argv)\r\n```\r\n\r\nCredit @york-stsci for the suggestion!\n", "before_files": [{"content": "#! /usr/bin/env python\n\n\"\"\"Utility module for administrative tasks.\n\nA python script version of Django's command-line utility for\nadministrative tasks (``django-admin``). Additionally, puts the project\npackage on ``sys.path`` and defines the ``DJANGO_SETTINGS_MODULE``\nvariable to point to the jwql ``settings.py`` file.\n\nGenerated by ``django-admin startproject`` using Django 2.0.1.\n\nUse\n---\n\n To run the web app server:\n ::\n\n python manage.py runserver\n\n To start the interactive shellL:\n ::\n\n python manage.py shell\n\n To run tests for all installed apps:\n ::\n\n python manage.py test\n\nReferences\n----------\nFor more information please see:\n ``https://docs.djangoproject.com/en/2.0/ref/django-admin/``\n\"\"\"\n\nimport os\nimport sys\n\nif __name__ == \"__main__\":\n\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"jwql_proj.settings\")\n\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n", "path": "jwql/website/manage.py"}], "after_files": [{"content": "#! 
/usr/bin/env python\n\n\"\"\"Utility module for administrative tasks.\n\nA python script version of Django's command-line utility for\nadministrative tasks (``django-admin``). Additionally, puts the project\npackage on ``sys.path`` and defines the ``DJANGO_SETTINGS_MODULE``\nvariable to point to the jwql ``settings.py`` file.\n\nGenerated by ``django-admin startproject`` using Django 2.0.1.\n\nUse\n---\n\n To run the web app server:\n ::\n\n python manage.py runserver\n\n To start the interactive shellL:\n ::\n\n python manage.py shell\n\n To run tests for all installed apps:\n ::\n\n python manage.py test\n\nReferences\n----------\nFor more information please see:\n ``https://docs.djangoproject.com/en/2.0/ref/django-admin/``\n\"\"\"\n\nimport os\nimport sys\n\nfrom jwql.utils.utils import get_config\n\nif __name__ == \"__main__\":\n\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"jwql_proj.settings\")\n\n directory_mapping = {\n 'filesystem': 'filesystem',\n 'outputs': 'outputs',\n 'preview_image_filesystem': 'preview_images',\n 'thumbnail_filesystem': 'thumbnails'\n }\n\n for directory in ['filesystem', 'outputs', 'preview_image_filesystem', 'thumbnail_filesystem']:\n symlink_location = os.path.join(os.path.dirname(__file__), 'apps', 'jwql', 'static', directory_mapping[directory])\n if not os.path.exists(symlink_location):\n symlink_path = get_config()[directory]\n os.symlink(symlink_path, symlink_location)\n\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line(sys.argv)\n", "path": "jwql/website/manage.py"}]}
| 900 | 255 |
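One caveat about the accepted jwql patch above: `os.path.exists` follows symlinks, so it returns False for a link whose target has moved, and calling `os.symlink` over that dangling link would raise `FileExistsError` on the next run. A slightly more defensive sketch (the helper name and its structure are assumptions for illustration, not part of the patch):

```python
import os

def ensure_symlink(target, link_location):
    """Create link_location -> target, replacing a stale or dangling link."""
    if os.path.islink(link_location):
        # islink() is True even for dangling links, which os.path.exists() misses.
        os.unlink(link_location)
    if not os.path.exists(link_location):
        os.symlink(target, link_location)
```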
gh_patches_debug_20691
|
rasdani/github-patches
|
git_diff
|
ephios-dev__ephios-525
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Select2 on disposition view shows error alert
Closing the select2 field in the disposition view without selecting an entry (e.g. by typing something into the field an then clicking somewhere outside the field) also triggers the form submission. This fails because no valid user has been selected and consequently shows an ugly alert to the user.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/core/context.py`
Content:
```
1 import importlib
2
3 from django.conf import settings
4 from django.templatetags.static import static
5 from django.utils.translation import get_language
6
7 from ephios.core.models import AbstractParticipation
8 from ephios.core.signals import footer_link
9
10 # suggested in https://github.com/python-poetry/poetry/issues/273
11 EPHIOS_VERSION = "v" + importlib.metadata.version("ephios")
12
13
14 def ephios_base_context(request):
15 footer = {}
16 for _, result in footer_link.send(None, request=request):
17 for label, url in result.items():
18 footer[label] = url
19
20 datatables_translation_url = None
21 if get_language() == "de-de":
22 datatables_translation_url = static("datatables/german.json")
23
24 return {
25 "ParticipationStates": AbstractParticipation.States,
26 "footer": footer,
27 "datatables_translation_url": datatables_translation_url,
28 "ephios_version": EPHIOS_VERSION,
29 "SITE_URL": settings.SITE_URL,
30 "PWA_APP_ICONS": settings.PWA_APP_ICONS,
31 }
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ephios/core/context.py b/ephios/core/context.py
--- a/ephios/core/context.py
+++ b/ephios/core/context.py
@@ -1,7 +1,6 @@
import importlib
from django.conf import settings
-from django.templatetags.static import static
from django.utils.translation import get_language
from ephios.core.models import AbstractParticipation
@@ -17,14 +16,10 @@
for label, url in result.items():
footer[label] = url
- datatables_translation_url = None
- if get_language() == "de-de":
- datatables_translation_url = static("datatables/german.json")
-
return {
"ParticipationStates": AbstractParticipation.States,
"footer": footer,
- "datatables_translation_url": datatables_translation_url,
+ "LANGUAGE_CODE": get_language(),
"ephios_version": EPHIOS_VERSION,
"SITE_URL": settings.SITE_URL,
"PWA_APP_ICONS": settings.PWA_APP_ICONS,
|
{"golden_diff": "diff --git a/ephios/core/context.py b/ephios/core/context.py\n--- a/ephios/core/context.py\n+++ b/ephios/core/context.py\n@@ -1,7 +1,6 @@\n import importlib\n \n from django.conf import settings\n-from django.templatetags.static import static\n from django.utils.translation import get_language\n \n from ephios.core.models import AbstractParticipation\n@@ -17,14 +16,10 @@\n for label, url in result.items():\n footer[label] = url\n \n- datatables_translation_url = None\n- if get_language() == \"de-de\":\n- datatables_translation_url = static(\"datatables/german.json\")\n-\n return {\n \"ParticipationStates\": AbstractParticipation.States,\n \"footer\": footer,\n- \"datatables_translation_url\": datatables_translation_url,\n+ \"LANGUAGE_CODE\": get_language(),\n \"ephios_version\": EPHIOS_VERSION,\n \"SITE_URL\": settings.SITE_URL,\n \"PWA_APP_ICONS\": settings.PWA_APP_ICONS,\n", "issue": "Select2 on disposition view shows error alert\nClosing the select2 field in the disposition view without selecting an entry (e.g. by typing something into the field an then clicking somewhere outside the field) also triggers the form submission. This fails because no valid user has been selected and consequently shows an ugly alert to the user.\n", "before_files": [{"content": "import importlib\n\nfrom django.conf import settings\nfrom django.templatetags.static import static\nfrom django.utils.translation import get_language\n\nfrom ephios.core.models import AbstractParticipation\nfrom ephios.core.signals import footer_link\n\n# suggested in https://github.com/python-poetry/poetry/issues/273\nEPHIOS_VERSION = \"v\" + importlib.metadata.version(\"ephios\")\n\n\ndef ephios_base_context(request):\n footer = {}\n for _, result in footer_link.send(None, request=request):\n for label, url in result.items():\n footer[label] = url\n\n datatables_translation_url = None\n if get_language() == \"de-de\":\n datatables_translation_url = static(\"datatables/german.json\")\n\n return {\n \"ParticipationStates\": AbstractParticipation.States,\n \"footer\": footer,\n \"datatables_translation_url\": datatables_translation_url,\n \"ephios_version\": EPHIOS_VERSION,\n \"SITE_URL\": settings.SITE_URL,\n \"PWA_APP_ICONS\": settings.PWA_APP_ICONS,\n }\n", "path": "ephios/core/context.py"}], "after_files": [{"content": "import importlib\n\nfrom django.conf import settings\nfrom django.utils.translation import get_language\n\nfrom ephios.core.models import AbstractParticipation\nfrom ephios.core.signals import footer_link\n\n# suggested in https://github.com/python-poetry/poetry/issues/273\nEPHIOS_VERSION = \"v\" + importlib.metadata.version(\"ephios\")\n\n\ndef ephios_base_context(request):\n footer = {}\n for _, result in footer_link.send(None, request=request):\n for label, url in result.items():\n footer[label] = url\n\n return {\n \"ParticipationStates\": AbstractParticipation.States,\n \"footer\": footer,\n \"LANGUAGE_CODE\": get_language(),\n \"ephios_version\": EPHIOS_VERSION,\n \"SITE_URL\": settings.SITE_URL,\n \"PWA_APP_ICONS\": settings.PWA_APP_ICONS,\n }\n", "path": "ephios/core/context.py"}]}
| 617 | 232 |
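For orientation on the ephios diff above: `ephios_base_context` is presumably registered as a Django context processor, so the keys it returns (now including `LANGUAGE_CODE` instead of the precomputed datatables URL) become available to every template render. A minimal way to exercise it, assuming a configured Django test environment (this test is not part of the record):

```python
from django.test import RequestFactory

from ephios.core.context import ephios_base_context

def test_language_code_in_base_context():
    request = RequestFactory().get("/")
    context = ephios_base_context(request)
    assert "LANGUAGE_CODE" in context
    assert "datatables_translation_url" not in context
```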
gh_patches_debug_2477
|
rasdani/github-patches
|
git_diff
|
evennia__evennia-2708
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG - Develop] Global scripts are not being started, only restarted.
#### Describe the bug
When creating a new game, global scripts are not automatically started.
#### To Reproduce
Steps to reproduce the behavior:
1. Create a new game dir.
2. Create a script with `at_repeat` and add it to your server conf.
3. `evennia migrate` and `evennia start`
4. Check global scripts status.
5. See error
#### Expected behavior
Global scripts are supposed to start automatically.
#### Develop-branch commit
f093c8bcb
#### Additional context
I used the following dict in my settings:
```python
GLOBAL_SCRIPTS = {
"test_script": {
"typeclass": "typeclasses.scripts.TestScript",
"persistent": True,
"interval": 20,
"desc": "Testing script starting and iteration."
},
}
```
And the following script class:
```python
class TestScript(Script):
def at_repeat(self):
print("hey, listen")
```
The script loaded correctly, and I was able to manually start it. After fully stopping and then starting the server, the script was automatically started again.
However, if I create a new game, start the server, and I _don't_ manually start the script, then fully stopping and then starting the server again does _not_ automatically start the script.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evennia/utils/containers.py`
Content:
```
1 """
2 Containers
3
4 Containers are storage classes usually initialized from a setting. They
5 represent Singletons and acts as a convenient place to find resources (
6 available as properties on the singleton)
7
8 evennia.GLOBAL_SCRIPTS
9 evennia.OPTION_CLASSES
10
11 """
12
13
14 from pickle import dumps
15 from django.conf import settings
16 from evennia.utils.utils import class_from_module, callables_from_module
17 from evennia.utils import logger
18
19
20 SCRIPTDB = None
21
22
23 class Container:
24 """
25 Base container class. A container is simply a storage object whose
26 properties can be acquired as a property on it. This is generally
27 considered a read-only affair.
28
29 The container is initialized by a list of modules containing callables.
30
31 """
32
33 storage_modules = []
34
35 def __init__(self):
36 """
37 Read data from module.
38
39 """
40 self.loaded_data = None
41
42 def load_data(self):
43 """
44 Delayed import to avoid eventual circular imports from inside
45 the storage modules.
46
47 """
48 if self.loaded_data is None:
49 self.loaded_data = {}
50 for module in self.storage_modules:
51 self.loaded_data.update(callables_from_module(module))
52
53 def __getattr__(self, key):
54 return self.get(key)
55
56 def get(self, key, default=None):
57 """
58 Retrive data by key (in case of not knowing it beforehand).
59
60 Args:
61 key (str): The name of the script.
62 default (any, optional): Value to return if key is not found.
63
64 Returns:
65 any (any): The data loaded on this container.
66
67 """
68 self.load_data()
69 return self.loaded_data.get(key, default)
70
71 def all(self):
72 """
73 Get all stored data
74
75 Returns:
76 scripts (list): All global script objects stored on the container.
77
78 """
79 self.load_data()
80 return list(self.loaded_data.values())
81
82
83 class OptionContainer(Container):
84 """
85 Loads and stores the final list of OPTION CLASSES.
86
87 Can access these as properties or dictionary-contents.
88 """
89
90 storage_modules = settings.OPTION_CLASS_MODULES
91
92
93 class GlobalScriptContainer(Container):
94 """
95 Simple Handler object loaded by the Evennia API to contain and manage a
96 game's Global Scripts. This will list global Scripts created on their own
97 but will also auto-(re)create scripts defined in `settings.GLOBAL_SCRIPTS`.
98
99 Example:
100 import evennia
101 evennia.GLOBAL_SCRIPTS.scriptname
102
103 Note:
104 This does not use much of the BaseContainer since it's not loading
105 callables from settings but a custom dict of tuples.
106
107 """
108
109 def __init__(self):
110 """
111 Note: We must delay loading of typeclasses since this module may get
112 initialized before Scripts are actually initialized.
113
114 """
115 self.typeclass_storage = None
116 self.loaded_data = {
117 key: {} if data is None else data for key, data in settings.GLOBAL_SCRIPTS.items()
118 }
119
120 def _get_scripts(self, key=None, default=None):
121 global SCRIPTDB
122 if not SCRIPTDB:
123 from evennia.scripts.models import ScriptDB as SCRIPTDB
124 if key:
125 try:
126 return SCRIPTDB.objects.get(db_key__exact=key, db_obj__isnull=True)
127 except SCRIPTDB.DoesNotExist:
128 return default
129 else:
130 return SCRIPTDB.objects.filter(db_obj__isnull=True)
131
132 def _load_script(self, key):
133 self.load_data()
134
135 typeclass = self.typeclass_storage[key]
136 script = typeclass.objects.filter(
137 db_key=key, db_account__isnull=True, db_obj__isnull=True
138 ).first()
139
140 kwargs = {**self.loaded_data[key]}
141 kwargs["key"] = key
142 kwargs["persistent"] = kwargs.get("persistent", True)
143
144 compare_hash = str(dumps(kwargs, protocol=4))
145
146 if script:
147 script_hash = script.attributes.get("global_script_settings", category="settings_hash")
148 if script_hash is None:
149 # legacy - store the hash anew and assume no change
150 script.attributes.add(
151 "global_script_settings", compare_hash, category="settings_hash"
152 )
153 elif script_hash != compare_hash:
154 # wipe the old version and create anew
155 logger.log_info(f"GLOBAL_SCRIPTS: Settings changed for {key} ({typeclass}).")
156 script.stop()
157 script.delete()
158 script = None
159
160 if not script:
161 logger.log_info(f"GLOBAL_SCRIPTS: (Re)creating {key} ({typeclass}).")
162
163 script, errors = typeclass.create(**kwargs)
164 if errors:
165 logger.log_err("\n".join(errors))
166 return None
167
168 # store a hash representation of the setup
169 script.attributes.add("_global_script_settings", compare_hash, category="settings_hash")
170 script.start()
171
172 return script
173
174 def start(self):
175 """
176 Called last in evennia.__init__ to initialize the container late
177 (after script typeclasses have finished loading).
178
179 We include all global scripts in the handler and
180 make sure to auto-load time-based scripts.
181
182 """
183 # populate self.typeclass_storage
184 self.load_data()
185
186 # start registered scripts
187 for key in self.loaded_data:
188 self._load_script(key)
189
190 def load_data(self):
191 """
192 This delayed import avoids trying to load Scripts before they are
193 initialized.
194
195 """
196 if self.typeclass_storage is None:
197 self.typeclass_storage = {}
198 for key, data in self.loaded_data.items():
199 try:
200 typeclass = data.get("typeclass", settings.BASE_SCRIPT_TYPECLASS)
201 self.typeclass_storage[key] = class_from_module(typeclass)
202 except Exception:
203 logger.log_trace(
204 f"GlobalScriptContainer could not start import global script {key}."
205 )
206
207 def get(self, key, default=None):
208 """
209 Retrive data by key (in case of not knowing it beforehand). Any
210 scripts that are in settings.GLOBAL_SCRIPTS that are not found
211 will be recreated on-demand.
212
213 Args:
214 key (str): The name of the script.
215 default (any, optional): Value to return if key is not found
216 at all on this container (i.e it cannot be loaded at all).
217
218 Returns:
219 any (any): The data loaded on this container.
220 """
221 res = self._get_scripts(key)
222 if not res:
223 if key in self.loaded_data:
224 # recreate if we have the info
225 return self._load_script(key) or default
226 return default
227 return res
228
229 def all(self):
230 """
231 Get all global scripts. Note that this will not auto-start
232 scripts defined in settings.
233
234 Returns:
235 scripts (list): All global script objects stored on the container.
236
237 """
238 self.typeclass_storage = None
239 self.load_data()
240 for key in self.loaded_data:
241 self._load_script(key)
242 return self._get_scripts(None)
243
244
245 # Create all singletons
246
247 GLOBAL_SCRIPTS = GlobalScriptContainer()
248 OPTION_CLASSES = OptionContainer()
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/evennia/utils/containers.py b/evennia/utils/containers.py
--- a/evennia/utils/containers.py
+++ b/evennia/utils/containers.py
@@ -167,7 +167,7 @@
# store a hash representation of the setup
script.attributes.add("_global_script_settings", compare_hash, category="settings_hash")
- script.start()
+ script.start()
return script
|
{"golden_diff": "diff --git a/evennia/utils/containers.py b/evennia/utils/containers.py\n--- a/evennia/utils/containers.py\n+++ b/evennia/utils/containers.py\n@@ -167,7 +167,7 @@\n \n # store a hash representation of the setup\n script.attributes.add(\"_global_script_settings\", compare_hash, category=\"settings_hash\")\n- script.start()\n+ script.start()\n \n return script\n", "issue": "[BUG - Develop] Global scripts are not being started, only restarted.\n#### Describe the bug\r\nWhen creating a new game, global scripts are not automatically started.\r\n\r\n#### To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Create a new game dir.\r\n2. Create a script with `at_repeat` and add it to your server conf.\r\n3. `evennia migrate` and `evennia start`\r\n4. Check global scripts status.\r\n5. See error\r\n\r\n#### Expected behavior\r\nGlobal scripts are supposed to start automatically.\r\n\r\n#### Develop-branch commit\r\nf093c8bcb\r\n\r\n#### Additional context\r\nI used the following dict in my settings:\r\n```python\r\nGLOBAL_SCRIPTS = {\r\n \"test_script\": {\r\n \"typeclass\": \"typeclasses.scripts.TestScript\",\r\n \"persistent\": True,\r\n \"interval\": 20,\r\n \"desc\": \"Testing script starting and iteration.\"\r\n },\r\n\r\n }\r\n```\r\nAnd the following script class:\r\n```python\r\nclass TestScript(Script):\r\n\r\n def at_repeat(self):\r\n print(\"hey, listen\")\r\n```\r\n\r\nThe script loaded correctly, and I was able to manually start it. After fully stopping and then starting the server, the script was automatically started again.\r\n\r\nHowever, if I create a new game, start the server, and I _don't_ manually start the script, then fully stopping and then starting the server again does _not_ automatically start the script.\n", "before_files": [{"content": "\"\"\"\nContainers\n\nContainers are storage classes usually initialized from a setting. They\nrepresent Singletons and acts as a convenient place to find resources (\navailable as properties on the singleton)\n\nevennia.GLOBAL_SCRIPTS\nevennia.OPTION_CLASSES\n\n\"\"\"\n\n\nfrom pickle import dumps\nfrom django.conf import settings\nfrom evennia.utils.utils import class_from_module, callables_from_module\nfrom evennia.utils import logger\n\n\nSCRIPTDB = None\n\n\nclass Container:\n \"\"\"\n Base container class. A container is simply a storage object whose\n properties can be acquired as a property on it. 
This is generally\n considered a read-only affair.\n\n The container is initialized by a list of modules containing callables.\n\n \"\"\"\n\n storage_modules = []\n\n def __init__(self):\n \"\"\"\n Read data from module.\n\n \"\"\"\n self.loaded_data = None\n\n def load_data(self):\n \"\"\"\n Delayed import to avoid eventual circular imports from inside\n the storage modules.\n\n \"\"\"\n if self.loaded_data is None:\n self.loaded_data = {}\n for module in self.storage_modules:\n self.loaded_data.update(callables_from_module(module))\n\n def __getattr__(self, key):\n return self.get(key)\n\n def get(self, key, default=None):\n \"\"\"\n Retrive data by key (in case of not knowing it beforehand).\n\n Args:\n key (str): The name of the script.\n default (any, optional): Value to return if key is not found.\n\n Returns:\n any (any): The data loaded on this container.\n\n \"\"\"\n self.load_data()\n return self.loaded_data.get(key, default)\n\n def all(self):\n \"\"\"\n Get all stored data\n\n Returns:\n scripts (list): All global script objects stored on the container.\n\n \"\"\"\n self.load_data()\n return list(self.loaded_data.values())\n\n\nclass OptionContainer(Container):\n \"\"\"\n Loads and stores the final list of OPTION CLASSES.\n\n Can access these as properties or dictionary-contents.\n \"\"\"\n\n storage_modules = settings.OPTION_CLASS_MODULES\n\n\nclass GlobalScriptContainer(Container):\n \"\"\"\n Simple Handler object loaded by the Evennia API to contain and manage a\n game's Global Scripts. This will list global Scripts created on their own\n but will also auto-(re)create scripts defined in `settings.GLOBAL_SCRIPTS`.\n\n Example:\n import evennia\n evennia.GLOBAL_SCRIPTS.scriptname\n\n Note:\n This does not use much of the BaseContainer since it's not loading\n callables from settings but a custom dict of tuples.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Note: We must delay loading of typeclasses since this module may get\n initialized before Scripts are actually initialized.\n\n \"\"\"\n self.typeclass_storage = None\n self.loaded_data = {\n key: {} if data is None else data for key, data in settings.GLOBAL_SCRIPTS.items()\n }\n\n def _get_scripts(self, key=None, default=None):\n global SCRIPTDB\n if not SCRIPTDB:\n from evennia.scripts.models import ScriptDB as SCRIPTDB\n if key:\n try:\n return SCRIPTDB.objects.get(db_key__exact=key, db_obj__isnull=True)\n except SCRIPTDB.DoesNotExist:\n return default\n else:\n return SCRIPTDB.objects.filter(db_obj__isnull=True)\n\n def _load_script(self, key):\n self.load_data()\n\n typeclass = self.typeclass_storage[key]\n script = typeclass.objects.filter(\n db_key=key, db_account__isnull=True, db_obj__isnull=True\n ).first()\n\n kwargs = {**self.loaded_data[key]}\n kwargs[\"key\"] = key\n kwargs[\"persistent\"] = kwargs.get(\"persistent\", True)\n\n compare_hash = str(dumps(kwargs, protocol=4))\n\n if script:\n script_hash = script.attributes.get(\"global_script_settings\", category=\"settings_hash\")\n if script_hash is None:\n # legacy - store the hash anew and assume no change\n script.attributes.add(\n \"global_script_settings\", compare_hash, category=\"settings_hash\"\n )\n elif script_hash != compare_hash:\n # wipe the old version and create anew\n logger.log_info(f\"GLOBAL_SCRIPTS: Settings changed for {key} ({typeclass}).\")\n script.stop()\n script.delete()\n script = None\n\n if not script:\n logger.log_info(f\"GLOBAL_SCRIPTS: (Re)creating {key} ({typeclass}).\")\n\n script, errors = typeclass.create(**kwargs)\n if 
errors:\n logger.log_err(\"\\n\".join(errors))\n return None\n\n # store a hash representation of the setup\n script.attributes.add(\"_global_script_settings\", compare_hash, category=\"settings_hash\")\n script.start()\n\n return script\n\n def start(self):\n \"\"\"\n Called last in evennia.__init__ to initialize the container late\n (after script typeclasses have finished loading).\n\n We include all global scripts in the handler and\n make sure to auto-load time-based scripts.\n\n \"\"\"\n # populate self.typeclass_storage\n self.load_data()\n\n # start registered scripts\n for key in self.loaded_data:\n self._load_script(key)\n\n def load_data(self):\n \"\"\"\n This delayed import avoids trying to load Scripts before they are\n initialized.\n\n \"\"\"\n if self.typeclass_storage is None:\n self.typeclass_storage = {}\n for key, data in self.loaded_data.items():\n try:\n typeclass = data.get(\"typeclass\", settings.BASE_SCRIPT_TYPECLASS)\n self.typeclass_storage[key] = class_from_module(typeclass)\n except Exception:\n logger.log_trace(\n f\"GlobalScriptContainer could not start import global script {key}.\"\n )\n\n def get(self, key, default=None):\n \"\"\"\n Retrive data by key (in case of not knowing it beforehand). Any\n scripts that are in settings.GLOBAL_SCRIPTS that are not found\n will be recreated on-demand.\n\n Args:\n key (str): The name of the script.\n default (any, optional): Value to return if key is not found\n at all on this container (i.e it cannot be loaded at all).\n\n Returns:\n any (any): The data loaded on this container.\n \"\"\"\n res = self._get_scripts(key)\n if not res:\n if key in self.loaded_data:\n # recreate if we have the info\n return self._load_script(key) or default\n return default\n return res\n\n def all(self):\n \"\"\"\n Get all global scripts. Note that this will not auto-start\n scripts defined in settings.\n\n Returns:\n scripts (list): All global script objects stored on the container.\n\n \"\"\"\n self.typeclass_storage = None\n self.load_data()\n for key in self.loaded_data:\n self._load_script(key)\n return self._get_scripts(None)\n\n\n# Create all singletons\n\nGLOBAL_SCRIPTS = GlobalScriptContainer()\nOPTION_CLASSES = OptionContainer()\n", "path": "evennia/utils/containers.py"}], "after_files": [{"content": "\"\"\"\nContainers\n\nContainers are storage classes usually initialized from a setting. They\nrepresent Singletons and acts as a convenient place to find resources (\navailable as properties on the singleton)\n\nevennia.GLOBAL_SCRIPTS\nevennia.OPTION_CLASSES\n\n\"\"\"\n\n\nfrom pickle import dumps\nfrom django.conf import settings\nfrom evennia.utils.utils import class_from_module, callables_from_module\nfrom evennia.utils import logger\n\n\nSCRIPTDB = None\n\n\nclass Container:\n \"\"\"\n Base container class. A container is simply a storage object whose\n properties can be acquired as a property on it. 
This is generally\n considered a read-only affair.\n\n The container is initialized by a list of modules containing callables.\n\n \"\"\"\n\n storage_modules = []\n\n def __init__(self):\n \"\"\"\n Read data from module.\n\n \"\"\"\n self.loaded_data = None\n\n def load_data(self):\n \"\"\"\n Delayed import to avoid eventual circular imports from inside\n the storage modules.\n\n \"\"\"\n if self.loaded_data is None:\n self.loaded_data = {}\n for module in self.storage_modules:\n self.loaded_data.update(callables_from_module(module))\n\n def __getattr__(self, key):\n return self.get(key)\n\n def get(self, key, default=None):\n \"\"\"\n Retrive data by key (in case of not knowing it beforehand).\n\n Args:\n key (str): The name of the script.\n default (any, optional): Value to return if key is not found.\n\n Returns:\n any (any): The data loaded on this container.\n\n \"\"\"\n self.load_data()\n return self.loaded_data.get(key, default)\n\n def all(self):\n \"\"\"\n Get all stored data\n\n Returns:\n scripts (list): All global script objects stored on the container.\n\n \"\"\"\n self.load_data()\n return list(self.loaded_data.values())\n\n\nclass OptionContainer(Container):\n \"\"\"\n Loads and stores the final list of OPTION CLASSES.\n\n Can access these as properties or dictionary-contents.\n \"\"\"\n\n storage_modules = settings.OPTION_CLASS_MODULES\n\n\nclass GlobalScriptContainer(Container):\n \"\"\"\n Simple Handler object loaded by the Evennia API to contain and manage a\n game's Global Scripts. This will list global Scripts created on their own\n but will also auto-(re)create scripts defined in `settings.GLOBAL_SCRIPTS`.\n\n Example:\n import evennia\n evennia.GLOBAL_SCRIPTS.scriptname\n\n Note:\n This does not use much of the BaseContainer since it's not loading\n callables from settings but a custom dict of tuples.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Note: We must delay loading of typeclasses since this module may get\n initialized before Scripts are actually initialized.\n\n \"\"\"\n self.typeclass_storage = None\n self.loaded_data = {\n key: {} if data is None else data for key, data in settings.GLOBAL_SCRIPTS.items()\n }\n\n def _get_scripts(self, key=None, default=None):\n global SCRIPTDB\n if not SCRIPTDB:\n from evennia.scripts.models import ScriptDB as SCRIPTDB\n if key:\n try:\n return SCRIPTDB.objects.get(db_key__exact=key, db_obj__isnull=True)\n except SCRIPTDB.DoesNotExist:\n return default\n else:\n return SCRIPTDB.objects.filter(db_obj__isnull=True)\n\n def _load_script(self, key):\n self.load_data()\n\n typeclass = self.typeclass_storage[key]\n script = typeclass.objects.filter(\n db_key=key, db_account__isnull=True, db_obj__isnull=True\n ).first()\n\n kwargs = {**self.loaded_data[key]}\n kwargs[\"key\"] = key\n kwargs[\"persistent\"] = kwargs.get(\"persistent\", True)\n\n compare_hash = str(dumps(kwargs, protocol=4))\n\n if script:\n script_hash = script.attributes.get(\"global_script_settings\", category=\"settings_hash\")\n if script_hash is None:\n # legacy - store the hash anew and assume no change\n script.attributes.add(\n \"global_script_settings\", compare_hash, category=\"settings_hash\"\n )\n elif script_hash != compare_hash:\n # wipe the old version and create anew\n logger.log_info(f\"GLOBAL_SCRIPTS: Settings changed for {key} ({typeclass}).\")\n script.stop()\n script.delete()\n script = None\n\n if not script:\n logger.log_info(f\"GLOBAL_SCRIPTS: (Re)creating {key} ({typeclass}).\")\n\n script, errors = typeclass.create(**kwargs)\n if 
errors:\n logger.log_err(\"\\n\".join(errors))\n return None\n\n # store a hash representation of the setup\n script.attributes.add(\"_global_script_settings\", compare_hash, category=\"settings_hash\")\n script.start()\n\n return script\n\n def start(self):\n \"\"\"\n Called last in evennia.__init__ to initialize the container late\n (after script typeclasses have finished loading).\n\n We include all global scripts in the handler and\n make sure to auto-load time-based scripts.\n\n \"\"\"\n # populate self.typeclass_storage\n self.load_data()\n\n # start registered scripts\n for key in self.loaded_data:\n self._load_script(key)\n\n def load_data(self):\n \"\"\"\n This delayed import avoids trying to load Scripts before they are\n initialized.\n\n \"\"\"\n if self.typeclass_storage is None:\n self.typeclass_storage = {}\n for key, data in self.loaded_data.items():\n try:\n typeclass = data.get(\"typeclass\", settings.BASE_SCRIPT_TYPECLASS)\n self.typeclass_storage[key] = class_from_module(typeclass)\n except Exception:\n logger.log_trace(\n f\"GlobalScriptContainer could not start import global script {key}.\"\n )\n\n def get(self, key, default=None):\n \"\"\"\n Retrive data by key (in case of not knowing it beforehand). Any\n scripts that are in settings.GLOBAL_SCRIPTS that are not found\n will be recreated on-demand.\n\n Args:\n key (str): The name of the script.\n default (any, optional): Value to return if key is not found\n at all on this container (i.e it cannot be loaded at all).\n\n Returns:\n any (any): The data loaded on this container.\n \"\"\"\n res = self._get_scripts(key)\n if not res:\n if key in self.loaded_data:\n # recreate if we have the info\n return self._load_script(key) or default\n return default\n return res\n\n def all(self):\n \"\"\"\n Get all global scripts. Note that this will not auto-start\n scripts defined in settings.\n\n Returns:\n scripts (list): All global script objects stored on the container.\n\n \"\"\"\n self.typeclass_storage = None\n self.load_data()\n for key in self.loaded_data:\n self._load_script(key)\n return self._get_scripts(None)\n\n\n# Create all singletons\n\nGLOBAL_SCRIPTS = GlobalScriptContainer()\nOPTION_CLASSES = OptionContainer()\n", "path": "evennia/utils/containers.py"}]}
| 2,723 | 97 |
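A reading note on the evennia diff above: the removed and added `script.start()` lines look identical here because this rendering collapses leading whitespace; the substance of the one-line patch appears to be an indentation change that moves `script.start()` out of the `if not script:` block, so it now runs for scripts that already exist in the database rather than only for freshly (re)created ones. A simplified before/after sketch of that control flow (the helper signatures are illustrative, not the real method):

```python
def load_script_before(script, typeclass, kwargs):
    # Pre-fix: start() is only reached when the script was just (re)created,
    # so an existing-but-stopped global script is never started here.
    if not script:
        script = typeclass.create(**kwargs)
        script.start()
    return script

def load_script_after(script, typeclass, kwargs):
    # Post-fix: start() runs unconditionally, covering existing scripts too.
    if not script:
        script = typeclass.create(**kwargs)
    script.start()
    return script
```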
gh_patches_debug_7077
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-21291
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cholesky_solve
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/tensor/linalg.py`
Content:
```
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from ivy.functional.frontends.paddle import promote_types_of_paddle_inputs
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @with_supported_dtypes(
11 {"2.5.1 and below": ("float32", "float64", "int32", "int64")}, "paddle"
12 )
13 @to_ivy_arrays_and_back
14 def cross(x, y, /, *, axis=9, name=None):
15 x, y = promote_types_of_paddle_inputs(x, y)
16 return ivy.cross(x, y, axis=axis)
17
18
19 # matmul
20 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
21 @to_ivy_arrays_and_back
22 def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
23 x, y = promote_types_of_paddle_inputs(x, y)
24 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
25
26
27 # norm
28 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
29 @to_ivy_arrays_and_back
30 def norm(x, p="fro", axis=None, keepdim=False, name=None):
31 if axis is None and p is not None:
32 if p == "fro":
33 p = 2
34 ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)
35 if keepdim:
36 ret = ret.reshape([1] * len(x.shape))
37 if len(ret.shape) == 0:
38 return ivy.array([ret])
39 return ret
40
41 if isinstance(axis, tuple):
42 axis = list(axis)
43 if isinstance(axis, list) and len(axis) == 1:
44 axis = axis[0]
45
46 if isinstance(axis, int):
47 if p == "fro":
48 p = 2
49 if p in [0, 1, 2, ivy.inf, -ivy.inf]:
50 ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)
51 elif isinstance(p, (int, float)):
52 ret = ivy.pow(
53 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
54 float(1.0 / p),
55 )
56
57 elif isinstance(axis, list) and len(axis) == 2:
58 if p == 0:
59 raise ValueError
60 elif p == 1:
61 ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)
62 elif p == 2 or p == "fro":
63 ret = ivy.matrix_norm(x, ord="fro", axis=axis, keepdims=keepdim)
64 elif p == ivy.inf:
65 ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)
66 elif p == -ivy.inf:
67 ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)
68 elif isinstance(p, (int, float)) and p > 0:
69 ret = ivy.pow(
70 ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),
71 float(1.0 / p),
72 )
73 else:
74 raise ValueError
75
76 else:
77 raise ValueError
78
79 if len(ret.shape) == 0:
80 ret = ivy.array(
81 [ret]
82 ) # this is done so as to match shape of output from paddle
83 return ret
84
85
86 # eig
87 @to_ivy_arrays_and_back
88 def eig(x, name=None):
89 return ivy.eig(x)
90
91
92 # eigvals
93 @to_ivy_arrays_and_back
94 def eigvals(x, name=None):
95 return ivy.eigvals(x)
96
97
98 # eigvalsh
99 @to_ivy_arrays_and_back
100 def eigvalsh(x, UPLO="L", name=None):
101 return ivy.eigvalsh(x, UPLO=UPLO)
102
103
104 # eigh
105 @to_ivy_arrays_and_back
106 def eigh(x, UPLO="L", name=None):
107 return ivy.eigh(x, UPLO=UPLO)
108
109
110 # pinv
111 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
112 @to_ivy_arrays_and_back
113 def pinv(x, rcond=1e-15, hermitian=False, name=None):
114 # TODO: Add hermitian functionality
115 return ivy.pinv(x, rtol=rcond)
116
117
118 # solve
119 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
120 @to_ivy_arrays_and_back
121 def solve(x1, x2, name=None):
122 return ivy.solve(x1, x2)
123
124
125 # cholesky
126 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
127 @to_ivy_arrays_and_back
128 def cholesky(x, /, *, upper=False, name=None):
129 return ivy.cholesky(x, upper=upper)
130
131
132 # bmm
133 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
134 @to_ivy_arrays_and_back
135 def bmm(x, y, transpose_x=False, transpose_y=False, name=None):
136 if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:
137 raise RuntimeError("input must be 3D matrices")
138 x, y = promote_types_of_paddle_inputs(x, y)
139 return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
140
141
142 # matrix_power
143 @with_unsupported_dtypes({"2.5.1 and below": ("float16", "bfloat16")}, "paddle")
144 @to_ivy_arrays_and_back
145 def matrix_power(x, n, name=None):
146 return ivy.matrix_power(x, n)
147
148
149 # cond
150 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
151 @to_ivy_arrays_and_back
152 def cond(x, p=None, name=None):
153 ret = ivy.cond(x, p=p, out=name)
154 if ret.shape == ():
155 ret = ret.reshape((1,))
156 return ret
157
158
159 # dot
160 @with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
161 @to_ivy_arrays_and_back
162 def dot(x, y, name=None):
163 x, y = promote_types_of_paddle_inputs(x, y)
164 out = ivy.multiply(x, y)
165 return ivy.sum(out, axis=ivy.get_num_dims(x) - 1, keepdims=False)
166
167
168 # transpose
169 @with_unsupported_dtypes({"2.5.1 and below": ("uint8", "int8", "int16")}, "paddle")
170 @to_ivy_arrays_and_back
171 def transpose(x, perm, name=None):
172 return ivy.permute_dims(x, axes=perm)
173
174
175 @with_supported_dtypes({"2.4.1 and above": ("int64",)}, "paddle")
176 @to_ivy_arrays_and_back
177 def bincount(x, weights=None, minlength=0, name=None):
178 return ivy.bincount(x, weights=weights, minlength=minlength)
179
180
181 @with_supported_dtypes({"2.4.1 and above": ("float64", "float32")}, "paddle")
182 @to_ivy_arrays_and_back
183 def dist(x, y, p=2):
184 ret = ivy.vector_norm(ivy.subtract(x, y), ord=p)
185 return ivy.reshape(ret, (1,))
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py
--- a/ivy/functional/frontends/paddle/tensor/linalg.py
+++ b/ivy/functional/frontends/paddle/tensor/linalg.py
@@ -122,6 +122,16 @@
return ivy.solve(x1, x2)
+# cholesky_solve
+@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
+@to_ivy_arrays_and_back
+def cholesky_solve(x, y, /, *, upper=False, name=None):
+ if upper:
+ y = ivy.matrix_transpose(y)
+ Y = ivy.solve(y, x)
+ return ivy.solve(ivy.matrix_transpose(y), Y)
+
+
# cholesky
@with_supported_dtypes({"2.5.1 and below": ("float32", "float64")}, "paddle")
@to_ivy_arrays_and_back
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/linalg.py b/ivy/functional/frontends/paddle/tensor/linalg.py\n--- a/ivy/functional/frontends/paddle/tensor/linalg.py\n+++ b/ivy/functional/frontends/paddle/tensor/linalg.py\n@@ -122,6 +122,16 @@\n return ivy.solve(x1, x2)\n \n \n+# cholesky_solve\n+@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def cholesky_solve(x, y, /, *, upper=False, name=None):\n+ if upper:\n+ y = ivy.matrix_transpose(y)\n+ Y = ivy.solve(y, x)\n+ return ivy.solve(ivy.matrix_transpose(y), Y)\n+\n+\n # cholesky\n @with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n @to_ivy_arrays_and_back\n", "issue": " cholesky_solve\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# norm\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# 
eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# solve\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef solve(x1, x2, name=None):\n return ivy.solve(x1, x2)\n\n\n# cholesky\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, n)\n\n\n# cond\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cond(x, p=None, name=None):\n ret = ivy.cond(x, p=p, out=name)\n if ret.shape == ():\n ret = ret.reshape((1,))\n return ret\n\n\n# dot\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dot(x, y, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n out = ivy.multiply(x, y)\n return ivy.sum(out, axis=ivy.get_num_dims(x) - 1, keepdims=False)\n\n\n# transpose\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"uint8\", \"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef transpose(x, perm, name=None):\n return ivy.permute_dims(x, axes=perm)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"int64\",)}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bincount(x, weights=None, minlength=0, name=None):\n return ivy.bincount(x, weights=weights, minlength=minlength)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"float64\", \"float32\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dist(x, y, p=2):\n ret = ivy.vector_norm(ivy.subtract(x, y), ord=p)\n return ivy.reshape(ret, (1,))\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle import promote_types_of_paddle_inputs\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef cross(x, y, /, *, axis=9, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.cross(x, y, axis=axis)\n\n\n# matmul\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matmul(x, y, transpose_x=False, transpose_y=False, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# 
norm\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef norm(x, p=\"fro\", axis=None, keepdim=False, name=None):\n if axis is None and p is not None:\n if p == \"fro\":\n p = 2\n ret = ivy.vector_norm(x.flatten(), ord=p, axis=-1)\n if keepdim:\n ret = ret.reshape([1] * len(x.shape))\n if len(ret.shape) == 0:\n return ivy.array([ret])\n return ret\n\n if isinstance(axis, tuple):\n axis = list(axis)\n if isinstance(axis, list) and len(axis) == 1:\n axis = axis[0]\n\n if isinstance(axis, int):\n if p == \"fro\":\n p = 2\n if p in [0, 1, 2, ivy.inf, -ivy.inf]:\n ret = ivy.vector_norm(x, ord=p, axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)):\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n\n elif isinstance(axis, list) and len(axis) == 2:\n if p == 0:\n raise ValueError\n elif p == 1:\n ret = ivy.sum(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == 2 or p == \"fro\":\n ret = ivy.matrix_norm(x, ord=\"fro\", axis=axis, keepdims=keepdim)\n elif p == ivy.inf:\n ret = ivy.max(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif p == -ivy.inf:\n ret = ivy.min(ivy.abs(x), axis=axis, keepdims=keepdim)\n elif isinstance(p, (int, float)) and p > 0:\n ret = ivy.pow(\n ivy.sum(ivy.pow(ivy.abs(x), p), axis=axis, keepdims=keepdim),\n float(1.0 / p),\n )\n else:\n raise ValueError\n\n else:\n raise ValueError\n\n if len(ret.shape) == 0:\n ret = ivy.array(\n [ret]\n ) # this is done so as to match shape of output from paddle\n return ret\n\n\n# eig\n@to_ivy_arrays_and_back\ndef eig(x, name=None):\n return ivy.eig(x)\n\n\n# eigvals\n@to_ivy_arrays_and_back\ndef eigvals(x, name=None):\n return ivy.eigvals(x)\n\n\n# eigvalsh\n@to_ivy_arrays_and_back\ndef eigvalsh(x, UPLO=\"L\", name=None):\n return ivy.eigvalsh(x, UPLO=UPLO)\n\n\n# eigh\n@to_ivy_arrays_and_back\ndef eigh(x, UPLO=\"L\", name=None):\n return ivy.eigh(x, UPLO=UPLO)\n\n\n# pinv\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pinv(x, rcond=1e-15, hermitian=False, name=None):\n # TODO: Add hermitian functionality\n return ivy.pinv(x, rtol=rcond)\n\n\n# solve\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef solve(x1, x2, name=None):\n return ivy.solve(x1, x2)\n\n\n# cholesky_solve\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky_solve(x, y, /, *, upper=False, name=None):\n if upper:\n y = ivy.matrix_transpose(y)\n Y = ivy.solve(y, x)\n return ivy.solve(ivy.matrix_transpose(y), Y)\n\n\n# cholesky\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cholesky(x, /, *, upper=False, name=None):\n return ivy.cholesky(x, upper=upper)\n\n\n# bmm\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bmm(x, y, transpose_x=False, transpose_y=False, name=None):\n if len(ivy.shape(x)) != 3 or len(ivy.shape(y)) != 3:\n raise RuntimeError(\"input must be 3D matrices\")\n x, y = promote_types_of_paddle_inputs(x, y)\n return ivy.matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)\n\n\n# matrix_power\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef matrix_power(x, n, name=None):\n return ivy.matrix_power(x, 
n)\n\n\n# cond\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cond(x, p=None, name=None):\n ret = ivy.cond(x, p=p, out=name)\n if ret.shape == ():\n ret = ret.reshape((1,))\n return ret\n\n\n# dot\n@with_supported_dtypes({\"2.5.1 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dot(x, y, name=None):\n x, y = promote_types_of_paddle_inputs(x, y)\n out = ivy.multiply(x, y)\n return ivy.sum(out, axis=ivy.get_num_dims(x) - 1, keepdims=False)\n\n\n# transpose\n@with_unsupported_dtypes({\"2.5.1 and below\": (\"uint8\", \"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef transpose(x, perm, name=None):\n return ivy.permute_dims(x, axes=perm)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"int64\",)}, \"paddle\")\n@to_ivy_arrays_and_back\ndef bincount(x, weights=None, minlength=0, name=None):\n return ivy.bincount(x, weights=weights, minlength=minlength)\n\n\n@with_supported_dtypes({\"2.4.1 and above\": (\"float64\", \"float32\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef dist(x, y, p=2):\n ret = ivy.vector_norm(ivy.subtract(x, y), ord=p)\n return ivy.reshape(ret, (1,))\n", "path": "ivy/functional/frontends/paddle/tensor/linalg.py"}]}
| 2,504 | 241 |
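The cholesky_solve patch in the record above reduces to two triangular solves against the Cholesky factor. A minimal NumPy sketch of that same computation (illustrative only — the matrix, vector, and variable names here are hypothetical and not part of the record):

```python
import numpy as np

A = np.array([[4.0, 2.0], [2.0, 3.0]])    # symmetric positive-definite matrix
b = np.array([[1.0], [2.0]])
L = np.linalg.cholesky(A)                  # lower-triangular factor, A = L @ L.T

y = np.linalg.solve(L, b)                  # forward solve:  L y = b
x = np.linalg.solve(L.T, y)                # backward solve: L.T x = y

assert np.allclose(A @ x, b)               # x solves the original system A x = b
```

The two `solve` calls mirror the patched function's `ivy.solve(y, x)` followed by `ivy.solve(ivy.matrix_transpose(y), Y)`.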
gh_patches_debug_26734
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-4268
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kivy/examples/android/takepicture/ fails on Android
Example cloned from GIT build with:
buildozer android debug
deployed to Android 4.4.4 crashes, from adb logcat output the following lines seem to be relevant:
I/python (25790): /data/data/org.test.takepicture/files/lib/python2.7/site-packages/kivy/core/image/img_pygame.py:13: RuntimeWarning: import cdrom: No module named cdrom
I/python (25790): Traceback (most recent call last):
I/python (25790): File "/home/jb/python/mread/.buildozer/android/app/main.py", line 32, in <module>
I/python (25790): ImportError: No module named PIL
I/python (25790): Python for android ended.
The second line indicates a problem with the image library; unfortunately I have no clue how to fix it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/android/takepicture/main.py`
Content:
```
1 '''
2 Take picture
3 ============
4
5 .. author:: Mathieu Virbel <[email protected]>
6
7 Little example to demonstrate how to start an Intent, and get the result.
8 When you use the Android.startActivityForResult(), the result will be dispatched
9 into onActivityResult. You can catch the event with the android.activity API
10 from python-for-android project.
11
12 If you want to compile it, don't forget to add the CAMERA permission::
13
14 ./build.py --name 'TakePicture' --package org.test.takepicture \
15 --permission CAMERA --version 1 \
16 --private ~/code/kivy/examples/android/takepicture \
17 debug installd
18
19 '''
20
21 __version__ = '0.1'
22
23 from kivy.app import App
24 from os.path import exists
25 from jnius import autoclass, cast
26 from android import activity
27 from functools import partial
28 from kivy.clock import Clock
29 from kivy.uix.scatter import Scatter
30 from kivy.properties import StringProperty
31
32 from PIL import Image
33
34 Intent = autoclass('android.content.Intent')
35 PythonActivity = autoclass('org.renpy.android.PythonActivity')
36 MediaStore = autoclass('android.provider.MediaStore')
37 Uri = autoclass('android.net.Uri')
38 Environment = autoclass('android.os.Environment')
39
40
41 class Picture(Scatter):
42 source = StringProperty(None)
43
44
45 class TakePictureApp(App):
46 def build(self):
47 self.index = 0
48 activity.bind(on_activity_result=self.on_activity_result)
49
50 def get_filename(self):
51 while True:
52 self.index += 1
53 fn = (Environment.getExternalStorageDirectory().getPath() +
54 '/takepicture{}.jpg'.format(self.index))
55 if not exists(fn):
56 return fn
57
58 def take_picture(self):
59 intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
60 self.last_fn = self.get_filename()
61 self.uri = Uri.parse('file://' + self.last_fn)
62 self.uri = cast('android.os.Parcelable', self.uri)
63 intent.putExtra(MediaStore.EXTRA_OUTPUT, self.uri)
64 PythonActivity.mActivity.startActivityForResult(intent, 0x123)
65
66 def on_activity_result(self, requestCode, resultCode, intent):
67 if requestCode == 0x123:
68 Clock.schedule_once(partial(self.add_picture, self.last_fn), 0)
69
70 def add_picture(self, fn, *args):
71 im = Image.open(fn)
72 width, height = im.size
73 im.thumbnail((width / 4, height / 4), Image.ANTIALIAS)
74 im.save(fn, quality=95)
75 self.root.add_widget(Picture(source=fn, center=self.root.center))
76
77 def on_pause(self):
78 return True
79
80 TakePictureApp().run()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/android/takepicture/main.py b/examples/android/takepicture/main.py
--- a/examples/android/takepicture/main.py
+++ b/examples/android/takepicture/main.py
@@ -23,7 +23,7 @@
from kivy.app import App
from os.path import exists
from jnius import autoclass, cast
-from android import activity
+from android import activity, mActivity
from functools import partial
from kivy.clock import Clock
from kivy.uix.scatter import Scatter
@@ -32,7 +32,6 @@
from PIL import Image
Intent = autoclass('android.content.Intent')
-PythonActivity = autoclass('org.renpy.android.PythonActivity')
MediaStore = autoclass('android.provider.MediaStore')
Uri = autoclass('android.net.Uri')
Environment = autoclass('android.os.Environment')
@@ -61,7 +60,7 @@
self.uri = Uri.parse('file://' + self.last_fn)
self.uri = cast('android.os.Parcelable', self.uri)
intent.putExtra(MediaStore.EXTRA_OUTPUT, self.uri)
- PythonActivity.mActivity.startActivityForResult(intent, 0x123)
+ mActivity.startActivityForResult(intent, 0x123)
def on_activity_result(self, requestCode, resultCode, intent):
if requestCode == 0x123:
|
{"golden_diff": "diff --git a/examples/android/takepicture/main.py b/examples/android/takepicture/main.py\n--- a/examples/android/takepicture/main.py\n+++ b/examples/android/takepicture/main.py\n@@ -23,7 +23,7 @@\n from kivy.app import App\n from os.path import exists\n from jnius import autoclass, cast\n-from android import activity\n+from android import activity, mActivity\n from functools import partial\n from kivy.clock import Clock\n from kivy.uix.scatter import Scatter\n@@ -32,7 +32,6 @@\n from PIL import Image\n \n Intent = autoclass('android.content.Intent')\n-PythonActivity = autoclass('org.renpy.android.PythonActivity')\n MediaStore = autoclass('android.provider.MediaStore')\n Uri = autoclass('android.net.Uri')\n Environment = autoclass('android.os.Environment')\n@@ -61,7 +60,7 @@\n self.uri = Uri.parse('file://' + self.last_fn)\n self.uri = cast('android.os.Parcelable', self.uri)\n intent.putExtra(MediaStore.EXTRA_OUTPUT, self.uri)\n- PythonActivity.mActivity.startActivityForResult(intent, 0x123)\n+ mActivity.startActivityForResult(intent, 0x123)\n \n def on_activity_result(self, requestCode, resultCode, intent):\n if requestCode == 0x123:\n", "issue": "kivy/examples/android/takepicture/ fails on Android\nExample cloned form GIT build with:\nbuildozer android debug\ndeployed to Android 4.4.4 crashes, from adb logcat output teh following lines seem to be relevant:\n\nI/python (25790): /data/data/org.test.takepicture/files/lib/python2.7/site-packages/kivy/core/image/img_pygame.py:13: RuntimeWarning: import cdrom: No module named cdrom\n\nI/python (25790): Traceback (most recent call last):\nI/python (25790): File \"/home/jb/python/mread/.buildozer/android/app/main.py\", line 32, in <module>\nI/python (25790): ImportError: No module named PIL\nI/python (25790): Python for android ended.\n\nSecond line indicates problem with image library, unfortunately I have no clue how to fix it.\n\n", "before_files": [{"content": "'''\nTake picture\n============\n\n.. author:: Mathieu Virbel <[email protected]>\n\nLittle example to demonstrate how to start an Intent, and get the result.\nWhen you use the Android.startActivityForResult(), the result will be dispatched\ninto onActivityResult. 
You can catch the event with the android.activity API\nfrom python-for-android project.\n\nIf you want to compile it, don't forget to add the CAMERA permission::\n\n ./build.py --name 'TakePicture' --package org.test.takepicture \\\n --permission CAMERA --version 1 \\\n --private ~/code/kivy/examples/android/takepicture \\\n debug installd\n\n'''\n\n__version__ = '0.1'\n\nfrom kivy.app import App\nfrom os.path import exists\nfrom jnius import autoclass, cast\nfrom android import activity\nfrom functools import partial\nfrom kivy.clock import Clock\nfrom kivy.uix.scatter import Scatter\nfrom kivy.properties import StringProperty\n\nfrom PIL import Image\n\nIntent = autoclass('android.content.Intent')\nPythonActivity = autoclass('org.renpy.android.PythonActivity')\nMediaStore = autoclass('android.provider.MediaStore')\nUri = autoclass('android.net.Uri')\nEnvironment = autoclass('android.os.Environment')\n\n\nclass Picture(Scatter):\n source = StringProperty(None)\n\n\nclass TakePictureApp(App):\n def build(self):\n self.index = 0\n activity.bind(on_activity_result=self.on_activity_result)\n\n def get_filename(self):\n while True:\n self.index += 1\n fn = (Environment.getExternalStorageDirectory().getPath() +\n '/takepicture{}.jpg'.format(self.index))\n if not exists(fn):\n return fn\n\n def take_picture(self):\n intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)\n self.last_fn = self.get_filename()\n self.uri = Uri.parse('file://' + self.last_fn)\n self.uri = cast('android.os.Parcelable', self.uri)\n intent.putExtra(MediaStore.EXTRA_OUTPUT, self.uri)\n PythonActivity.mActivity.startActivityForResult(intent, 0x123)\n\n def on_activity_result(self, requestCode, resultCode, intent):\n if requestCode == 0x123:\n Clock.schedule_once(partial(self.add_picture, self.last_fn), 0)\n\n def add_picture(self, fn, *args):\n im = Image.open(fn)\n width, height = im.size\n im.thumbnail((width / 4, height / 4), Image.ANTIALIAS)\n im.save(fn, quality=95)\n self.root.add_widget(Picture(source=fn, center=self.root.center))\n\n def on_pause(self):\n return True\n\nTakePictureApp().run()\n", "path": "examples/android/takepicture/main.py"}], "after_files": [{"content": "'''\nTake picture\n============\n\n.. author:: Mathieu Virbel <[email protected]>\n\nLittle example to demonstrate how to start an Intent, and get the result.\nWhen you use the Android.startActivityForResult(), the result will be dispatched\ninto onActivityResult. 
You can catch the event with the android.activity API\nfrom python-for-android project.\n\nIf you want to compile it, don't forget to add the CAMERA permission::\n\n ./build.py --name 'TakePicture' --package org.test.takepicture \\\n --permission CAMERA --version 1 \\\n --private ~/code/kivy/examples/android/takepicture \\\n debug installd\n\n'''\n\n__version__ = '0.1'\n\nfrom kivy.app import App\nfrom os.path import exists\nfrom jnius import autoclass, cast\nfrom android import activity, mActivity\nfrom functools import partial\nfrom kivy.clock import Clock\nfrom kivy.uix.scatter import Scatter\nfrom kivy.properties import StringProperty\n\nfrom PIL import Image\n\nIntent = autoclass('android.content.Intent')\nMediaStore = autoclass('android.provider.MediaStore')\nUri = autoclass('android.net.Uri')\nEnvironment = autoclass('android.os.Environment')\n\n\nclass Picture(Scatter):\n source = StringProperty(None)\n\n\nclass TakePictureApp(App):\n def build(self):\n self.index = 0\n activity.bind(on_activity_result=self.on_activity_result)\n\n def get_filename(self):\n while True:\n self.index += 1\n fn = (Environment.getExternalStorageDirectory().getPath() +\n '/takepicture{}.jpg'.format(self.index))\n if not exists(fn):\n return fn\n\n def take_picture(self):\n intent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)\n self.last_fn = self.get_filename()\n self.uri = Uri.parse('file://' + self.last_fn)\n self.uri = cast('android.os.Parcelable', self.uri)\n intent.putExtra(MediaStore.EXTRA_OUTPUT, self.uri)\n mActivity.startActivityForResult(intent, 0x123)\n\n def on_activity_result(self, requestCode, resultCode, intent):\n if requestCode == 0x123:\n Clock.schedule_once(partial(self.add_picture, self.last_fn), 0)\n\n def add_picture(self, fn, *args):\n im = Image.open(fn)\n width, height = im.size\n im.thumbnail((width / 4, height / 4), Image.ANTIALIAS)\n im.save(fn, quality=95)\n self.root.add_widget(Picture(source=fn, center=self.root.center))\n\n def on_pause(self):\n return True\n\nTakePictureApp().run()\n", "path": "examples/android/takepicture/main.py"}]}
| 1,194 | 286 |
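The kivy patch in the record above swaps the `autoclass('org.renpy.android.PythonActivity')` lookup for the `mActivity` object imported from the `android` module. A condensed sketch of the patched pattern, assuming the same python-for-android APIs shown in the diff (it only runs on a device inside a python-for-android build):

```python
from android import activity, mActivity    # mActivity replaces PythonActivity.mActivity
from jnius import autoclass

Intent = autoclass('android.content.Intent')
MediaStore = autoclass('android.provider.MediaStore')

def on_activity_result(requestCode, resultCode, intent):
    # Called after the camera intent started below finishes
    print('camera intent returned', requestCode, resultCode)

activity.bind(on_activity_result=on_activity_result)
mActivity.startActivityForResult(Intent(MediaStore.ACTION_IMAGE_CAPTURE), 0x123)
```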
gh_patches_debug_6843
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__models-211
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Encounter exception when running tools/compute_mean_std.py
```
Traceback (most recent call last):
File "tools/compute_mean_std.py", line 64, in <module>
main()
File "tools/compute_mean_std.py", line 59, in main
num_samples=args.num_samples)
File "/home/disk1/yangyaming/workspace/paddle/ds2/mandarin_unicom/default_conf/tools/../data_utils/normalizer.py", line 46, in __init__
self._compute_mean_std(manifest_path, featurize_func, num_samples)
File "/home/disk1/yangyaming/workspace/paddle/ds2/mandarin_unicom/default_conf/tools/../data_utils/normalizer.py", line 84, in _compute_mean_std
AudioSegment.from_file(instance["audio_filepath"])))
File "tools/compute_mean_std.py", line 53, in augment_and_featurize
return audio_featurizer.featurize(audio_segment)
File "/home/disk1/yangyaming/workspace/paddle/ds2/mandarin_unicom/default_conf/tools/../data_utils/featurizer/audio_featurizer.py", line 79, in featurize
allow_upsampling)):
NameError: global name 'allow_upsampling' is not defined
```
Looks like a typo problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deep_speech_2/data_utils/featurizer/audio_featurizer.py`
Content:
```
1 """Contains the audio featurizer class."""
2 from __future__ import absolute_import
3 from __future__ import division
4 from __future__ import print_function
5
6 import numpy as np
7 from data_utils import utils
8 from data_utils.audio import AudioSegment
9 from python_speech_features import mfcc
10 from python_speech_features import delta
11
12
13 class AudioFeaturizer(object):
14 """Audio featurizer, for extracting features from audio contents of
15 AudioSegment or SpeechSegment.
16
17 Currently, it supports feature types of linear spectrogram and mfcc.
18
19 :param specgram_type: Specgram feature type. Options: 'linear'.
20 :type specgram_type: str
21 :param stride_ms: Striding size (in milliseconds) for generating frames.
22 :type stride_ms: float
23 :param window_ms: Window size (in milliseconds) for generating frames.
24 :type window_ms: float
25 :param max_freq: When specgram_type is 'linear', only FFT bins
26 corresponding to frequencies between [0, max_freq] are
27 returned; when specgram_type is 'mfcc', max_feq is the
28 highest band edge of mel filters.
29 :types max_freq: None|float
30 :param target_sample_rate: Audio are resampled (if upsampling or
31 downsampling is allowed) to this before
32 extracting spectrogram features.
33 :type target_sample_rate: float
34 :param use_dB_normalization: Whether to normalize the audio to a certain
35 decibels before extracting the features.
36 :type use_dB_normalization: bool
37 :param target_dB: Target audio decibels for normalization.
38 :type target_dB: float
39 """
40
41 def __init__(self,
42 specgram_type='linear',
43 stride_ms=10.0,
44 window_ms=20.0,
45 max_freq=None,
46 target_sample_rate=16000,
47 use_dB_normalization=True,
48 target_dB=-20):
49 self._specgram_type = specgram_type
50 self._stride_ms = stride_ms
51 self._window_ms = window_ms
52 self._max_freq = max_freq
53 self._target_sample_rate = target_sample_rate
54 self._use_dB_normalization = use_dB_normalization
55 self._target_dB = target_dB
56
57 def featurize(self,
58 audio_segment,
59 allow_downsampling=True,
60 allow_upsamplling=True):
61 """Extract audio features from AudioSegment or SpeechSegment.
62
63 :param audio_segment: Audio/speech segment to extract features from.
64 :type audio_segment: AudioSegment|SpeechSegment
65 :param allow_downsampling: Whether to allow audio downsampling before
66 featurizing.
67 :type allow_downsampling: bool
68 :param allow_upsampling: Whether to allow audio upsampling before
69 featurizing.
70 :type allow_upsampling: bool
71 :return: Spectrogram audio feature in 2darray.
72 :rtype: ndarray
73 :raises ValueError: If audio sample rate is not supported.
74 """
75 # upsampling or downsampling
76 if ((audio_segment.sample_rate > self._target_sample_rate and
77 allow_downsampling) or
78 (audio_segment.sample_rate < self._target_sample_rate and
79 allow_upsampling)):
80 audio_segment.resample(self._target_sample_rate)
81 if audio_segment.sample_rate != self._target_sample_rate:
82 raise ValueError("Audio sample rate is not supported. "
83 "Turn allow_downsampling or allow up_sampling on.")
84 # decibel normalization
85 if self._use_dB_normalization:
86 audio_segment.normalize(target_db=self._target_dB)
87 # extract spectrogram
88 return self._compute_specgram(audio_segment.samples,
89 audio_segment.sample_rate)
90
91 def _compute_specgram(self, samples, sample_rate):
92 """Extract various audio features."""
93 if self._specgram_type == 'linear':
94 return self._compute_linear_specgram(
95 samples, sample_rate, self._stride_ms, self._window_ms,
96 self._max_freq)
97 elif self._specgram_type == 'mfcc':
98 return self._compute_mfcc(samples, sample_rate, self._stride_ms,
99 self._window_ms, self._max_freq)
100 else:
101 raise ValueError("Unknown specgram_type %s. "
102 "Supported values: linear." % self._specgram_type)
103
104 def _compute_linear_specgram(self,
105 samples,
106 sample_rate,
107 stride_ms=10.0,
108 window_ms=20.0,
109 max_freq=None,
110 eps=1e-14):
111 """Compute the linear spectrogram from FFT energy."""
112 if max_freq is None:
113 max_freq = sample_rate / 2
114 if max_freq > sample_rate / 2:
115 raise ValueError("max_freq must be greater than half of "
116 "sample rate.")
117 if stride_ms > window_ms:
118 raise ValueError("Stride size must not be greater than "
119 "window size.")
120 stride_size = int(0.001 * sample_rate * stride_ms)
121 window_size = int(0.001 * sample_rate * window_ms)
122 specgram, freqs = self._specgram_real(
123 samples,
124 window_size=window_size,
125 stride_size=stride_size,
126 sample_rate=sample_rate)
127 ind = np.where(freqs <= max_freq)[0][-1] + 1
128 return np.log(specgram[:ind, :] + eps)
129
130 def _specgram_real(self, samples, window_size, stride_size, sample_rate):
131 """Compute the spectrogram for samples from a real signal."""
132 # extract strided windows
133 truncate_size = (len(samples) - window_size) % stride_size
134 samples = samples[:len(samples) - truncate_size]
135 nshape = (window_size, (len(samples) - window_size) // stride_size + 1)
136 nstrides = (samples.strides[0], samples.strides[0] * stride_size)
137 windows = np.lib.stride_tricks.as_strided(
138 samples, shape=nshape, strides=nstrides)
139 assert np.all(
140 windows[:, 1] == samples[stride_size:(stride_size + window_size)])
141 # window weighting, squared Fast Fourier Transform (fft), scaling
142 weighting = np.hanning(window_size)[:, None]
143 fft = np.fft.rfft(windows * weighting, axis=0)
144 fft = np.absolute(fft)**2
145 scale = np.sum(weighting**2) * sample_rate
146 fft[1:-1, :] *= (2.0 / scale)
147 fft[(0, -1), :] /= scale
148 # prepare fft frequency list
149 freqs = float(sample_rate) / window_size * np.arange(fft.shape[0])
150 return fft, freqs
151
152 def _compute_mfcc(self,
153 samples,
154 sample_rate,
155 stride_ms=10.0,
156 window_ms=20.0,
157 max_freq=None):
158 """Compute mfcc from samples."""
159 if max_freq is None:
160 max_freq = sample_rate / 2
161 if max_freq > sample_rate / 2:
162 raise ValueError("max_freq must not be greater than half of "
163 "sample rate.")
164 if stride_ms > window_ms:
165 raise ValueError("Stride size must not be greater than "
166 "window size.")
167 # compute the 13 cepstral coefficients, and the first one is replaced
168 # by log(frame energy)
169 mfcc_feat = mfcc(
170 signal=samples,
171 samplerate=sample_rate,
172 winlen=0.001 * window_ms,
173 winstep=0.001 * stride_ms,
174 highfreq=max_freq)
175 # Deltas
176 d_mfcc_feat = delta(mfcc_feat, 2)
177 # Deltas-Deltas
178 dd_mfcc_feat = delta(d_mfcc_feat, 2)
179 # transpose
180 mfcc_feat = np.transpose(mfcc_feat)
181 d_mfcc_feat = np.transpose(d_mfcc_feat)
182 dd_mfcc_feat = np.transpose(dd_mfcc_feat)
183 # concat above three features
184 concat_mfcc_feat = np.concatenate(
185 (mfcc_feat, d_mfcc_feat, dd_mfcc_feat))
186 return concat_mfcc_feat
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deep_speech_2/data_utils/featurizer/audio_featurizer.py b/deep_speech_2/data_utils/featurizer/audio_featurizer.py
--- a/deep_speech_2/data_utils/featurizer/audio_featurizer.py
+++ b/deep_speech_2/data_utils/featurizer/audio_featurizer.py
@@ -57,7 +57,7 @@
def featurize(self,
audio_segment,
allow_downsampling=True,
- allow_upsamplling=True):
+ allow_upsampling=True):
"""Extract audio features from AudioSegment or SpeechSegment.
:param audio_segment: Audio/speech segment to extract features from.
|
{"golden_diff": "diff --git a/deep_speech_2/data_utils/featurizer/audio_featurizer.py b/deep_speech_2/data_utils/featurizer/audio_featurizer.py\n--- a/deep_speech_2/data_utils/featurizer/audio_featurizer.py\n+++ b/deep_speech_2/data_utils/featurizer/audio_featurizer.py\n@@ -57,7 +57,7 @@\n def featurize(self,\n audio_segment,\n allow_downsampling=True,\n- allow_upsamplling=True):\n+ allow_upsampling=True):\n \"\"\"Extract audio features from AudioSegment or SpeechSegment.\n \n :param audio_segment: Audio/speech segment to extract features from.\n", "issue": "Encounter exception when running tools/compute_mean_std.py\n```\r\nTraceback (most recent call last):\r\n File \"tools/compute_mean_std.py\", line 64, in <module>\r\n main()\r\n File \"tools/compute_mean_std.py\", line 59, in main\r\n num_samples=args.num_samples)\r\n File \"/home/disk1/yangyaming/workspace/paddle/ds2/mandarin_unicom/default_conf/tools/../data_utils/normalizer.py\", line 46, in __init__\r\n self._compute_mean_std(manifest_path, featurize_func, num_samples)\r\n File \"/home/disk1/yangyaming/workspace/paddle/ds2/mandarin_unicom/default_conf/tools/../data_utils/normalizer.py\", line 84, in _compute_mean_std\r\n AudioSegment.from_file(instance[\"audio_filepath\"])))\r\n File \"tools/compute_mean_std.py\", line 53, in augment_and_featurize\r\n return audio_featurizer.featurize(audio_segment)\r\n File \"/home/disk1/yangyaming/workspace/paddle/ds2/mandarin_unicom/default_conf/tools/../data_utils/featurizer/audio_featurizer.py\", line 79, in featurize\r\n allow_upsampling)):\r\nNameError: global name 'allow_upsampling' is not defined\r\n```\r\nLook like a typo problem.\n", "before_files": [{"content": "\"\"\"Contains the audio featurizer class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom data_utils import utils\nfrom data_utils.audio import AudioSegment\nfrom python_speech_features import mfcc\nfrom python_speech_features import delta\n\n\nclass AudioFeaturizer(object):\n \"\"\"Audio featurizer, for extracting features from audio contents of\n AudioSegment or SpeechSegment.\n\n Currently, it supports feature types of linear spectrogram and mfcc.\n\n :param specgram_type: Specgram feature type. 
Options: 'linear'.\n :type specgram_type: str\n :param stride_ms: Striding size (in milliseconds) for generating frames.\n :type stride_ms: float\n :param window_ms: Window size (in milliseconds) for generating frames.\n :type window_ms: float\n :param max_freq: When specgram_type is 'linear', only FFT bins\n corresponding to frequencies between [0, max_freq] are\n returned; when specgram_type is 'mfcc', max_feq is the\n highest band edge of mel filters.\n :types max_freq: None|float\n :param target_sample_rate: Audio are resampled (if upsampling or\n downsampling is allowed) to this before\n extracting spectrogram features.\n :type target_sample_rate: float\n :param use_dB_normalization: Whether to normalize the audio to a certain\n decibels before extracting the features.\n :type use_dB_normalization: bool\n :param target_dB: Target audio decibels for normalization.\n :type target_dB: float\n \"\"\"\n\n def __init__(self,\n specgram_type='linear',\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n target_sample_rate=16000,\n use_dB_normalization=True,\n target_dB=-20):\n self._specgram_type = specgram_type\n self._stride_ms = stride_ms\n self._window_ms = window_ms\n self._max_freq = max_freq\n self._target_sample_rate = target_sample_rate\n self._use_dB_normalization = use_dB_normalization\n self._target_dB = target_dB\n\n def featurize(self,\n audio_segment,\n allow_downsampling=True,\n allow_upsamplling=True):\n \"\"\"Extract audio features from AudioSegment or SpeechSegment.\n\n :param audio_segment: Audio/speech segment to extract features from.\n :type audio_segment: AudioSegment|SpeechSegment\n :param allow_downsampling: Whether to allow audio downsampling before\n featurizing.\n :type allow_downsampling: bool\n :param allow_upsampling: Whether to allow audio upsampling before\n featurizing.\n :type allow_upsampling: bool\n :return: Spectrogram audio feature in 2darray.\n :rtype: ndarray\n :raises ValueError: If audio sample rate is not supported.\n \"\"\"\n # upsampling or downsampling\n if ((audio_segment.sample_rate > self._target_sample_rate and\n allow_downsampling) or\n (audio_segment.sample_rate < self._target_sample_rate and\n allow_upsampling)):\n audio_segment.resample(self._target_sample_rate)\n if audio_segment.sample_rate != self._target_sample_rate:\n raise ValueError(\"Audio sample rate is not supported. \"\n \"Turn allow_downsampling or allow up_sampling on.\")\n # decibel normalization\n if self._use_dB_normalization:\n audio_segment.normalize(target_db=self._target_dB)\n # extract spectrogram\n return self._compute_specgram(audio_segment.samples,\n audio_segment.sample_rate)\n\n def _compute_specgram(self, samples, sample_rate):\n \"\"\"Extract various audio features.\"\"\"\n if self._specgram_type == 'linear':\n return self._compute_linear_specgram(\n samples, sample_rate, self._stride_ms, self._window_ms,\n self._max_freq)\n elif self._specgram_type == 'mfcc':\n return self._compute_mfcc(samples, sample_rate, self._stride_ms,\n self._window_ms, self._max_freq)\n else:\n raise ValueError(\"Unknown specgram_type %s. 
\"\n \"Supported values: linear.\" % self._specgram_type)\n\n def _compute_linear_specgram(self,\n samples,\n sample_rate,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n eps=1e-14):\n \"\"\"Compute the linear spectrogram from FFT energy.\"\"\"\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must be greater than half of \"\n \"sample rate.\")\n if stride_ms > window_ms:\n raise ValueError(\"Stride size must not be greater than \"\n \"window size.\")\n stride_size = int(0.001 * sample_rate * stride_ms)\n window_size = int(0.001 * sample_rate * window_ms)\n specgram, freqs = self._specgram_real(\n samples,\n window_size=window_size,\n stride_size=stride_size,\n sample_rate=sample_rate)\n ind = np.where(freqs <= max_freq)[0][-1] + 1\n return np.log(specgram[:ind, :] + eps)\n\n def _specgram_real(self, samples, window_size, stride_size, sample_rate):\n \"\"\"Compute the spectrogram for samples from a real signal.\"\"\"\n # extract strided windows\n truncate_size = (len(samples) - window_size) % stride_size\n samples = samples[:len(samples) - truncate_size]\n nshape = (window_size, (len(samples) - window_size) // stride_size + 1)\n nstrides = (samples.strides[0], samples.strides[0] * stride_size)\n windows = np.lib.stride_tricks.as_strided(\n samples, shape=nshape, strides=nstrides)\n assert np.all(\n windows[:, 1] == samples[stride_size:(stride_size + window_size)])\n # window weighting, squared Fast Fourier Transform (fft), scaling\n weighting = np.hanning(window_size)[:, None]\n fft = np.fft.rfft(windows * weighting, axis=0)\n fft = np.absolute(fft)**2\n scale = np.sum(weighting**2) * sample_rate\n fft[1:-1, :] *= (2.0 / scale)\n fft[(0, -1), :] /= scale\n # prepare fft frequency list\n freqs = float(sample_rate) / window_size * np.arange(fft.shape[0])\n return fft, freqs\n\n def _compute_mfcc(self,\n samples,\n sample_rate,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None):\n \"\"\"Compute mfcc from samples.\"\"\"\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must not be greater than half of \"\n \"sample rate.\")\n if stride_ms > window_ms:\n raise ValueError(\"Stride size must not be greater than \"\n \"window size.\")\n # compute the 13 cepstral coefficients, and the first one is replaced\n # by log(frame energy)\n mfcc_feat = mfcc(\n signal=samples,\n samplerate=sample_rate,\n winlen=0.001 * window_ms,\n winstep=0.001 * stride_ms,\n highfreq=max_freq)\n # Deltas\n d_mfcc_feat = delta(mfcc_feat, 2)\n # Deltas-Deltas\n dd_mfcc_feat = delta(d_mfcc_feat, 2)\n # transpose\n mfcc_feat = np.transpose(mfcc_feat)\n d_mfcc_feat = np.transpose(d_mfcc_feat)\n dd_mfcc_feat = np.transpose(dd_mfcc_feat)\n # concat above three features\n concat_mfcc_feat = np.concatenate(\n (mfcc_feat, d_mfcc_feat, dd_mfcc_feat))\n return concat_mfcc_feat\n", "path": "deep_speech_2/data_utils/featurizer/audio_featurizer.py"}], "after_files": [{"content": "\"\"\"Contains the audio featurizer class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom data_utils import utils\nfrom data_utils.audio import AudioSegment\nfrom python_speech_features import mfcc\nfrom python_speech_features import delta\n\n\nclass AudioFeaturizer(object):\n \"\"\"Audio featurizer, for extracting features from audio contents of\n AudioSegment or SpeechSegment.\n\n Currently, it supports feature types of 
linear spectrogram and mfcc.\n\n :param specgram_type: Specgram feature type. Options: 'linear'.\n :type specgram_type: str\n :param stride_ms: Striding size (in milliseconds) for generating frames.\n :type stride_ms: float\n :param window_ms: Window size (in milliseconds) for generating frames.\n :type window_ms: float\n :param max_freq: When specgram_type is 'linear', only FFT bins\n corresponding to frequencies between [0, max_freq] are\n returned; when specgram_type is 'mfcc', max_feq is the\n highest band edge of mel filters.\n :types max_freq: None|float\n :param target_sample_rate: Audio are resampled (if upsampling or\n downsampling is allowed) to this before\n extracting spectrogram features.\n :type target_sample_rate: float\n :param use_dB_normalization: Whether to normalize the audio to a certain\n decibels before extracting the features.\n :type use_dB_normalization: bool\n :param target_dB: Target audio decibels for normalization.\n :type target_dB: float\n \"\"\"\n\n def __init__(self,\n specgram_type='linear',\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n target_sample_rate=16000,\n use_dB_normalization=True,\n target_dB=-20):\n self._specgram_type = specgram_type\n self._stride_ms = stride_ms\n self._window_ms = window_ms\n self._max_freq = max_freq\n self._target_sample_rate = target_sample_rate\n self._use_dB_normalization = use_dB_normalization\n self._target_dB = target_dB\n\n def featurize(self,\n audio_segment,\n allow_downsampling=True,\n allow_upsampling=True):\n \"\"\"Extract audio features from AudioSegment or SpeechSegment.\n\n :param audio_segment: Audio/speech segment to extract features from.\n :type audio_segment: AudioSegment|SpeechSegment\n :param allow_downsampling: Whether to allow audio downsampling before\n featurizing.\n :type allow_downsampling: bool\n :param allow_upsampling: Whether to allow audio upsampling before\n featurizing.\n :type allow_upsampling: bool\n :return: Spectrogram audio feature in 2darray.\n :rtype: ndarray\n :raises ValueError: If audio sample rate is not supported.\n \"\"\"\n # upsampling or downsampling\n if ((audio_segment.sample_rate > self._target_sample_rate and\n allow_downsampling) or\n (audio_segment.sample_rate < self._target_sample_rate and\n allow_upsampling)):\n audio_segment.resample(self._target_sample_rate)\n if audio_segment.sample_rate != self._target_sample_rate:\n raise ValueError(\"Audio sample rate is not supported. \"\n \"Turn allow_downsampling or allow up_sampling on.\")\n # decibel normalization\n if self._use_dB_normalization:\n audio_segment.normalize(target_db=self._target_dB)\n # extract spectrogram\n return self._compute_specgram(audio_segment.samples,\n audio_segment.sample_rate)\n\n def _compute_specgram(self, samples, sample_rate):\n \"\"\"Extract various audio features.\"\"\"\n if self._specgram_type == 'linear':\n return self._compute_linear_specgram(\n samples, sample_rate, self._stride_ms, self._window_ms,\n self._max_freq)\n elif self._specgram_type == 'mfcc':\n return self._compute_mfcc(samples, sample_rate, self._stride_ms,\n self._window_ms, self._max_freq)\n else:\n raise ValueError(\"Unknown specgram_type %s. 
\"\n \"Supported values: linear.\" % self._specgram_type)\n\n def _compute_linear_specgram(self,\n samples,\n sample_rate,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None,\n eps=1e-14):\n \"\"\"Compute the linear spectrogram from FFT energy.\"\"\"\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must be greater than half of \"\n \"sample rate.\")\n if stride_ms > window_ms:\n raise ValueError(\"Stride size must not be greater than \"\n \"window size.\")\n stride_size = int(0.001 * sample_rate * stride_ms)\n window_size = int(0.001 * sample_rate * window_ms)\n specgram, freqs = self._specgram_real(\n samples,\n window_size=window_size,\n stride_size=stride_size,\n sample_rate=sample_rate)\n ind = np.where(freqs <= max_freq)[0][-1] + 1\n return np.log(specgram[:ind, :] + eps)\n\n def _specgram_real(self, samples, window_size, stride_size, sample_rate):\n \"\"\"Compute the spectrogram for samples from a real signal.\"\"\"\n # extract strided windows\n truncate_size = (len(samples) - window_size) % stride_size\n samples = samples[:len(samples) - truncate_size]\n nshape = (window_size, (len(samples) - window_size) // stride_size + 1)\n nstrides = (samples.strides[0], samples.strides[0] * stride_size)\n windows = np.lib.stride_tricks.as_strided(\n samples, shape=nshape, strides=nstrides)\n assert np.all(\n windows[:, 1] == samples[stride_size:(stride_size + window_size)])\n # window weighting, squared Fast Fourier Transform (fft), scaling\n weighting = np.hanning(window_size)[:, None]\n fft = np.fft.rfft(windows * weighting, axis=0)\n fft = np.absolute(fft)**2\n scale = np.sum(weighting**2) * sample_rate\n fft[1:-1, :] *= (2.0 / scale)\n fft[(0, -1), :] /= scale\n # prepare fft frequency list\n freqs = float(sample_rate) / window_size * np.arange(fft.shape[0])\n return fft, freqs\n\n def _compute_mfcc(self,\n samples,\n sample_rate,\n stride_ms=10.0,\n window_ms=20.0,\n max_freq=None):\n \"\"\"Compute mfcc from samples.\"\"\"\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must not be greater than half of \"\n \"sample rate.\")\n if stride_ms > window_ms:\n raise ValueError(\"Stride size must not be greater than \"\n \"window size.\")\n # compute the 13 cepstral coefficients, and the first one is replaced\n # by log(frame energy)\n mfcc_feat = mfcc(\n signal=samples,\n samplerate=sample_rate,\n winlen=0.001 * window_ms,\n winstep=0.001 * stride_ms,\n highfreq=max_freq)\n # Deltas\n d_mfcc_feat = delta(mfcc_feat, 2)\n # Deltas-Deltas\n dd_mfcc_feat = delta(d_mfcc_feat, 2)\n # transpose\n mfcc_feat = np.transpose(mfcc_feat)\n d_mfcc_feat = np.transpose(d_mfcc_feat)\n dd_mfcc_feat = np.transpose(dd_mfcc_feat)\n # concat above three features\n concat_mfcc_feat = np.concatenate(\n (mfcc_feat, d_mfcc_feat, dd_mfcc_feat))\n return concat_mfcc_feat\n", "path": "deep_speech_2/data_utils/featurizer/audio_featurizer.py"}]}
| 2,804 | 153 |
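The PaddlePaddle record above comes down to a misspelled keyword parameter (`allow_upsamplling`), which leaves the correctly spelled name in the function body unbound. A small, self-contained sketch (hypothetical function, for illustration only) of why that surfaces as a runtime `NameError` rather than an error at definition time:

```python
def featurize(segment, allow_downsampling=True, allow_upsamplling=True):  # note the typo
    if allow_upsampling:        # correctly spelled name, but it was never bound
        return 'resampled'
    return 'unchanged'

try:
    featurize('samples')
except NameError as err:
    print(err)                  # name 'allow_upsampling' is not defined
```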
gh_patches_debug_15821
|
rasdani/github-patches
|
git_diff
|
crytic__slither-387
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ABIEncoderV2 flagged as solidity version
The following Solidity code is flagged as using different Solidity versions:
```sol
pragma solidity 0.5.12;
pragma experimental ABIEncoderV2;
```
Outputs:
```
INFO:Detectors:
Different versions of Solidity is used in :
- Version used: ['0.5.12', 'ABIEncoderV2']
- 0.5.12 (Contract.sol#1)
- ABIEncoderV2 (Contract.sol#2)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/core/declarations/pragma_directive.py`
Content:
```
1 from slither.core.source_mapping.source_mapping import SourceMapping
2
3 class Pragma(SourceMapping):
4
5 def __init__(self, directive):
6 super(Pragma, self).__init__()
7 self._directive = directive
8
9 @property
10 def directive(self):
11 '''
12 list(str)
13 '''
14 return self._directive
15
16 @property
17 def version(self):
18 return ''.join(self.directive[1:])
19
20 @property
21 def name(self):
22 return self.version
23
24 def __str__(self):
25 return 'pragma '+''.join(self.directive)
26
```
Path: `slither/detectors/attributes/constant_pragma.py`
Content:
```
1 """
2 Check that the same pragma is used in all the files
3 """
4
5 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
6 from slither.formatters.attributes.constant_pragma import format
7
8
9 class ConstantPragma(AbstractDetector):
10 """
11 Check that the same pragma is used in all the files
12 """
13
14 ARGUMENT = 'pragma'
15 HELP = 'If different pragma directives are used'
16 IMPACT = DetectorClassification.INFORMATIONAL
17 CONFIDENCE = DetectorClassification.HIGH
18
19 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#different-pragma-directives-are-used'
20
21
22 WIKI_TITLE = 'Different pragma directives are used'
23 WIKI_DESCRIPTION = 'Detect if different Solidity versions are used.'
24 WIKI_RECOMMENDATION = 'Use one Solidity version.'
25
26 def _detect(self):
27 results = []
28 pragma = self.slither.pragma_directives
29 versions = [p.version for p in pragma]
30 versions = sorted(list(set(versions)))
31
32 if len(versions) > 1:
33 info = [f"Different versions of Solidity is used in {self.filename}:\n"]
34 info += [f"\t- Version used: {[str(v) for v in versions]}\n"]
35
36 for p in pragma:
37 info += ["\t- ", p, "\n"]
38
39 res = self.generate_result(info)
40
41 results.append(res)
42
43 return results
44
45 @staticmethod
46 def _format(slither, result):
47 format(slither, result)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/slither/core/declarations/pragma_directive.py b/slither/core/declarations/pragma_directive.py
--- a/slither/core/declarations/pragma_directive.py
+++ b/slither/core/declarations/pragma_directive.py
@@ -21,5 +21,11 @@
def name(self):
return self.version
+ @property
+ def is_solidity_version(self):
+ if len(self._directive) > 0:
+ return self._directive[0].lower() == 'solidity'
+ return False
+
def __str__(self):
return 'pragma '+''.join(self.directive)
diff --git a/slither/detectors/attributes/constant_pragma.py b/slither/detectors/attributes/constant_pragma.py
--- a/slither/detectors/attributes/constant_pragma.py
+++ b/slither/detectors/attributes/constant_pragma.py
@@ -26,7 +26,7 @@
def _detect(self):
results = []
pragma = self.slither.pragma_directives
- versions = [p.version for p in pragma]
+ versions = [p.version for p in pragma if p.is_solidity_version]
versions = sorted(list(set(versions)))
if len(versions) > 1:
|
{"golden_diff": "diff --git a/slither/core/declarations/pragma_directive.py b/slither/core/declarations/pragma_directive.py\n--- a/slither/core/declarations/pragma_directive.py\n+++ b/slither/core/declarations/pragma_directive.py\n@@ -21,5 +21,11 @@\n def name(self):\n return self.version\n \n+ @property\n+ def is_solidity_version(self):\n+ if len(self._directive) > 0:\n+ return self._directive[0].lower() == 'solidity'\n+ return False\n+\n def __str__(self):\n return 'pragma '+''.join(self.directive)\ndiff --git a/slither/detectors/attributes/constant_pragma.py b/slither/detectors/attributes/constant_pragma.py\n--- a/slither/detectors/attributes/constant_pragma.py\n+++ b/slither/detectors/attributes/constant_pragma.py\n@@ -26,7 +26,7 @@\n def _detect(self):\n results = []\n pragma = self.slither.pragma_directives\n- versions = [p.version for p in pragma]\n+ versions = [p.version for p in pragma if p.is_solidity_version]\n versions = sorted(list(set(versions)))\n \n if len(versions) > 1:\n", "issue": "ABIEncoderV2 flagged as solidity version\nThe following Solidity code is flagged as being different solidity versions:\r\n\r\n```sol\r\npragma solidity 0.5.12;\r\npragma experimental ABIEncoderV2;\r\n```\r\n\r\nOutputs:\r\n\r\n```\r\nINFO:Detectors:\r\nDifferent versions of Solidity is used in :\r\n\t- Version used: ['0.5.12', 'ABIEncoderV2']\r\n\t- 0.5.12 (Contract.sol#1)\r\n\t- ABIEncoderV2 (Contract.sol#2)\r\n```\n", "before_files": [{"content": "from slither.core.source_mapping.source_mapping import SourceMapping\n\nclass Pragma(SourceMapping):\n\n def __init__(self, directive):\n super(Pragma, self).__init__()\n self._directive = directive\n\n @property\n def directive(self):\n '''\n list(str)\n '''\n return self._directive\n\n @property\n def version(self):\n return ''.join(self.directive[1:])\n\n @property\n def name(self):\n return self.version\n\n def __str__(self):\n return 'pragma '+''.join(self.directive)\n", "path": "slither/core/declarations/pragma_directive.py"}, {"content": "\"\"\"\n Check that the same pragma is used in all the files\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.attributes.constant_pragma import format\n\n\nclass ConstantPragma(AbstractDetector):\n \"\"\"\n Check that the same pragma is used in all the files\n \"\"\"\n\n ARGUMENT = 'pragma'\n HELP = 'If different pragma directives are used'\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#different-pragma-directives-are-used'\n\n\n WIKI_TITLE = 'Different pragma directives are used'\n WIKI_DESCRIPTION = 'Detect if different Solidity versions are used.'\n WIKI_RECOMMENDATION = 'Use one Solidity version.'\n\n def _detect(self):\n results = []\n pragma = self.slither.pragma_directives\n versions = [p.version for p in pragma]\n versions = sorted(list(set(versions)))\n\n if len(versions) > 1:\n info = [f\"Different versions of Solidity is used in {self.filename}:\\n\"]\n info += [f\"\\t- Version used: {[str(v) for v in versions]}\\n\"]\n\n for p in pragma:\n info += [\"\\t- \", p, \"\\n\"]\n\n res = self.generate_result(info)\n\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n format(slither, result)\n", "path": "slither/detectors/attributes/constant_pragma.py"}], "after_files": [{"content": "from slither.core.source_mapping.source_mapping import SourceMapping\n\nclass 
Pragma(SourceMapping):\n\n def __init__(self, directive):\n super(Pragma, self).__init__()\n self._directive = directive\n\n @property\n def directive(self):\n '''\n list(str)\n '''\n return self._directive\n\n @property\n def version(self):\n return ''.join(self.directive[1:])\n\n @property\n def name(self):\n return self.version\n\n @property\n def is_solidity_version(self):\n if len(self._directive) > 0:\n return self._directive[0].lower() == 'solidity'\n return False\n\n def __str__(self):\n return 'pragma '+''.join(self.directive)\n", "path": "slither/core/declarations/pragma_directive.py"}, {"content": "\"\"\"\n Check that the same pragma is used in all the files\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\nfrom slither.formatters.attributes.constant_pragma import format\n\n\nclass ConstantPragma(AbstractDetector):\n \"\"\"\n Check that the same pragma is used in all the files\n \"\"\"\n\n ARGUMENT = 'pragma'\n HELP = 'If different pragma directives are used'\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#different-pragma-directives-are-used'\n\n\n WIKI_TITLE = 'Different pragma directives are used'\n WIKI_DESCRIPTION = 'Detect if different Solidity versions are used.'\n WIKI_RECOMMENDATION = 'Use one Solidity version.'\n\n def _detect(self):\n results = []\n pragma = self.slither.pragma_directives\n versions = [p.version for p in pragma if p.is_solidity_version]\n versions = sorted(list(set(versions)))\n\n if len(versions) > 1:\n info = [f\"Different versions of Solidity is used in {self.filename}:\\n\"]\n info += [f\"\\t- Version used: {[str(v) for v in versions]}\\n\"]\n\n for p in pragma:\n info += [\"\\t- \", p, \"\\n\"]\n\n res = self.generate_result(info)\n\n results.append(res)\n\n return results\n\n @staticmethod\n def _format(slither, result):\n format(slither, result)\n", "path": "slither/detectors/attributes/constant_pragma.py"}]}
| 999 | 294 |
gh_patches_debug_24379
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-11585
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
User invite accept screen leaks 'project names' of projects that the new user does not necessarily have access rights to
<!--
Do you want to ask a question? Are you looking for support? The Sentry message
board is the best place for getting support: https://forum.sentry.io
-->
## Important Details
How are you running Sentry?
* [ ] On-Premise docker [Version xyz]
* [x] Saas (sentry.io)
* [ ] Other [briefly describe your environment]
## Description
We are providing sentry as a service with our products to our customer, as a result we have internal teams but would also like to invite external users (customers) to specific projects. We are able to utilise the team construct for this and setup special customer teams with limited access.
When we however invite customers into our org to become member of this restricted team the customer received a generic org invite email and is redirected to a generic org invite screen:
https://sentry.io/accept/xyz/xyz
This invite accept screen provides a list of 'active' projects with full project name, however the user does not necessary have access to these projects.
We would classify this as a security concern and leak of 'restricted' information.
## Steps to Reproduce
1. open org
2. create 2 new teams
- team A
- team B
2. create 2 new projects
- project A, assign access to team A
- project B, assign access to team B
3. invite new user to org (as member level) and assign acces to team B only
4. user receives invite by email and opens the accept link
5. on the accept webpage notice that both project A and B are listed to be part of this org
### What you expected to happen
The recommendation is to hide the project names completely in this view, or restrict it to the projects the new user has access to based on his team assignment.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/web/frontend/accept_organization_invite.py`
Content:
```
1 from __future__ import absolute_import
2
3 from django import forms
4 from django.contrib import messages
5 from django.core.urlresolvers import reverse
6 from django.utils.crypto import constant_time_compare
7 from django.utils.translation import ugettext_lazy as _
8
9 from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project
10 from sentry.signals import member_joined
11 from sentry.utils import auth
12 from sentry.web.frontend.base import BaseView
13
14 ERR_INVITE_INVALID = _('The invite link you followed is not valid, or has expired.')
15 PENDING_INVITE = 'pending-invite'
16 COOKIE_MAX_AGE = 60 * 60 * 24 * 7 # 7 days
17
18
19 class AcceptInviteForm(forms.Form):
20 pass
21
22
23 class AcceptOrganizationInviteView(BaseView):
24 auth_required = False
25
26 def get_form(self, request):
27 if request.method == 'POST':
28 return AcceptInviteForm(request.POST)
29 return AcceptInviteForm()
30
31 def redirect_with_err_message(self, request):
32 messages.add_message(
33 request,
34 messages.ERROR,
35 ERR_INVITE_INVALID,
36 )
37
38 return self.redirect(reverse('sentry'))
39
40 def handle(self, request, member_id, token):
41 assert request.method in ('POST', 'GET')
42
43 try:
44 helper = WebInviteHelper(
45 instance=self,
46 request=request,
47 member_id=member_id,
48 token=token
49 )
50 except OrganizationMember.DoesNotExist:
51 return self.redirect_with_err_message(request)
52
53 if not helper.member_pending or not helper.valid_token:
54 return self.redirect_with_err_message(request)
55
56 om = helper.om
57 organization = om.organization
58
59 qs = Project.objects.filter(
60 organization=organization,
61 )
62 project_list = list(qs[:25])
63 project_count = qs.count()
64
65 context = {
66 'org_name': organization.name,
67 'project_list': project_list,
68 'project_count': project_count,
69 'needs_authentication': not helper.user_authenticated,
70 'needs_2fa': helper.needs_2fa,
71 'logout_url': u'{}?next={}'.format(
72 reverse('sentry-logout'),
73 request.path,
74 ),
75 'login_url': u'{}?next={}'.format(
76 reverse('sentry-login'),
77 request.path,
78 ),
79 'register_url': u'{}?next={}'.format(
80 reverse('sentry-register'),
81 request.path,
82 ),
83 }
84
85 if not helper.user_authenticated:
86 # Show login or register form
87 auth.initiate_login(request, next_url=request.get_full_path())
88 request.session['can_register'] = True
89 request.session['invite_email'] = om.email
90
91 return self.respond('sentry/accept-organization-invite.html', context)
92
93 if helper.needs_2fa:
94 # redirect to setup 2fa
95 response = self.respond('sentry/accept-organization-invite.html', context)
96 response.set_cookie(PENDING_INVITE, request.path, max_age=COOKIE_MAX_AGE)
97 return response
98
99 # if they're already a member of the organization its likely they're
100 # using a shared account and either previewing this invite or
101 # are incorrectly expecting this to create a new account for them
102 context['existing_member'] = helper.member_already_exists
103
104 form = self.get_form(request)
105 if form.is_valid():
106 helper.accept_invite()
107
108 request.session.pop('can_register', None)
109 response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))
110 return helper.remove_invite_cookie(response)
111
112 context['form'] = form
113 return self.respond('sentry/accept-organization-invite.html', context)
114
115
116 class BaseInviteHelper(object):
117 def __init__(self, instance, request, member_id, token, logger=None):
118 self.request = request
119 self.instance = instance
120 self.member_id = member_id
121 self.token = token
122 self.logger = logger
123 self.om = self.get_organization_member()
124
125 def handle_success(self):
126 pass
127
128 def handle_member_already_exists(self):
129 pass
130
131 def get_organization_member(self):
132 return OrganizationMember.objects.select_related('organization').get(pk=self.member_id)
133
134 @property
135 def member_pending(self):
136 return self.om.is_pending
137
138 @property
139 def valid_token(self):
140 if self.om.token_expired:
141 return False
142 return constant_time_compare(self.om.token or self.om.legacy_token, self.token)
143
144 @property
145 def user_authenticated(self):
146 return self.request.user.is_authenticated()
147
148 @property
149 def needs_2fa(self):
150 org_requires_2fa = self.om.organization.flags.require_2fa.is_set
151 user_has_2fa = Authenticator.objects.user_has_2fa(self.request.user.id)
152 return org_requires_2fa and not user_has_2fa
153
154 @property
155 def member_already_exists(self):
156 return OrganizationMember.objects.filter(
157 organization=self.om.organization, user=self.request.user
158 ).exists()
159
160 def accept_invite(self):
161 om = self.om
162
163 if self.member_already_exists:
164 self.handle_member_already_exists()
165 om.delete()
166 else:
167 om.set_user(self.request.user)
168 om.save()
169
170 self.instance.create_audit_entry(
171 self.request,
172 organization=om.organization,
173 target_object=om.id,
174 target_user=self.request.user,
175 event=AuditLogEntryEvent.MEMBER_ACCEPT,
176 data=om.get_audit_log_data(),
177 )
178
179 self.handle_success()
180
181 def remove_invite_cookie(self, response):
182 if PENDING_INVITE in self.request.COOKIES:
183 response.delete_cookie(PENDING_INVITE)
184 return response
185
186
187 class WebInviteHelper(BaseInviteHelper):
188 def handle_success(self):
189 messages.add_message(
190 self.request, messages.SUCCESS,
191 _('You have been added to the %r organization.') %
192 (self.om.organization.name.encode('utf-8'), )
193 )
194
195 member_joined.send_robust(
196 member=self.om, organization=self.om.organization, sender=self.instance)
197
198 def handle_member_already_exists(self):
199 messages.add_message(
200 self.request, messages.SUCCESS,
201 _('You are already a member of the %r organization.') %
202 (self.om.organization.name.encode('utf-8'), )
203 )
204
205
206 class ApiInviteHelper(BaseInviteHelper):
207 def handle_member_already_exists(self):
208 self.logger.info(
209 'Pending org invite not accepted - User already org member',
210 extra={
211 'organization_id': self.om.organization.id,
212 'user_id': self.request.user.id,
213 }
214 )
215
216 def valid_request(self):
217 if (not self.member_pending or
218 not self.valid_token or
219 not self.user_authenticated or
220 self.needs_2fa):
221 return False
222 return True
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/sentry/web/frontend/accept_organization_invite.py b/src/sentry/web/frontend/accept_organization_invite.py
--- a/src/sentry/web/frontend/accept_organization_invite.py
+++ b/src/sentry/web/frontend/accept_organization_invite.py
@@ -6,7 +6,7 @@
from django.utils.crypto import constant_time_compare
from django.utils.translation import ugettext_lazy as _
-from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project
+from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember
from sentry.signals import member_joined
from sentry.utils import auth
from sentry.web.frontend.base import BaseView
@@ -56,16 +56,8 @@
om = helper.om
organization = om.organization
- qs = Project.objects.filter(
- organization=organization,
- )
- project_list = list(qs[:25])
- project_count = qs.count()
-
context = {
'org_name': organization.name,
- 'project_list': project_list,
- 'project_count': project_count,
'needs_authentication': not helper.user_authenticated,
'needs_2fa': helper.needs_2fa,
'logout_url': u'{}?next={}'.format(
|
{"golden_diff": "diff --git a/src/sentry/web/frontend/accept_organization_invite.py b/src/sentry/web/frontend/accept_organization_invite.py\n--- a/src/sentry/web/frontend/accept_organization_invite.py\n+++ b/src/sentry/web/frontend/accept_organization_invite.py\n@@ -6,7 +6,7 @@\n from django.utils.crypto import constant_time_compare\n from django.utils.translation import ugettext_lazy as _\n \n-from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project\n+from sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember\n from sentry.signals import member_joined\n from sentry.utils import auth\n from sentry.web.frontend.base import BaseView\n@@ -56,16 +56,8 @@\n om = helper.om\n organization = om.organization\n \n- qs = Project.objects.filter(\n- organization=organization,\n- )\n- project_list = list(qs[:25])\n- project_count = qs.count()\n-\n context = {\n 'org_name': organization.name,\n- 'project_list': project_list,\n- 'project_count': project_count,\n 'needs_authentication': not helper.user_authenticated,\n 'needs_2fa': helper.needs_2fa,\n 'logout_url': u'{}?next={}'.format(\n", "issue": "User invite accept screen leaks 'project names' of projects that the new user does not necessarily have access rights to\n<!--\r\n\r\nDo you want to ask a question? Are you looking for support? The Sentry message\r\nboard is the best place for getting support: https://forum.sentry.io\r\n-->\r\n\r\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [ ] On-Premise docker [Version xyz]\r\n* [x] Saas (sentry.io)\r\n* [ ] Other [briefly describe your environment]\r\n\r\n## Description\r\n\r\nWe are providing sentry as a service with our products to our customer, as a result we have internal teams but would also like to invite external users (customers) to specific projects. We are able to utilise the team construct for this and setup special customer teams with limited access.\r\n\r\nWhen we however invite customers into our org to become member of this restricted team the customer received a generic org invite email and is redirected to a generic org invite screen:\r\nhttps://sentry.io/accept/xyz/xyz\r\n\r\nThis invite accept screen provides a list of 'active' projects with full project name, however the user does not necessary have access to these projects. \r\n\r\nWe would classify this as a security concern and leak of 'restricted' information. \r\n\r\n## Steps to Reproduce\r\n\r\n1. open org\r\n2. create 2 new teams\r\n - team A\r\n - team B\r\n2. create 2 new projects\r\n - project A, assign access to team A\r\n - project B, assign access to team B\r\n3. invite new user to org (as member level) and assign acces to team B only\r\n4. user receives invite by email and opens the accept link\r\n5. 
on the accept webpage notice that both project A and B are listed to be part of this org\r\n\r\n### What you expected to happen\r\n\r\nThe recommendation is to hide the project names completely in this view, or restrict it to the projects the new user has access to based on his team assignment.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.utils.crypto import constant_time_compare\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember, Project\nfrom sentry.signals import member_joined\nfrom sentry.utils import auth\nfrom sentry.web.frontend.base import BaseView\n\nERR_INVITE_INVALID = _('The invite link you followed is not valid, or has expired.')\nPENDING_INVITE = 'pending-invite'\nCOOKIE_MAX_AGE = 60 * 60 * 24 * 7 # 7 days\n\n\nclass AcceptInviteForm(forms.Form):\n pass\n\n\nclass AcceptOrganizationInviteView(BaseView):\n auth_required = False\n\n def get_form(self, request):\n if request.method == 'POST':\n return AcceptInviteForm(request.POST)\n return AcceptInviteForm()\n\n def redirect_with_err_message(self, request):\n messages.add_message(\n request,\n messages.ERROR,\n ERR_INVITE_INVALID,\n )\n\n return self.redirect(reverse('sentry'))\n\n def handle(self, request, member_id, token):\n assert request.method in ('POST', 'GET')\n\n try:\n helper = WebInviteHelper(\n instance=self,\n request=request,\n member_id=member_id,\n token=token\n )\n except OrganizationMember.DoesNotExist:\n return self.redirect_with_err_message(request)\n\n if not helper.member_pending or not helper.valid_token:\n return self.redirect_with_err_message(request)\n\n om = helper.om\n organization = om.organization\n\n qs = Project.objects.filter(\n organization=organization,\n )\n project_list = list(qs[:25])\n project_count = qs.count()\n\n context = {\n 'org_name': organization.name,\n 'project_list': project_list,\n 'project_count': project_count,\n 'needs_authentication': not helper.user_authenticated,\n 'needs_2fa': helper.needs_2fa,\n 'logout_url': u'{}?next={}'.format(\n reverse('sentry-logout'),\n request.path,\n ),\n 'login_url': u'{}?next={}'.format(\n reverse('sentry-login'),\n request.path,\n ),\n 'register_url': u'{}?next={}'.format(\n reverse('sentry-register'),\n request.path,\n ),\n }\n\n if not helper.user_authenticated:\n # Show login or register form\n auth.initiate_login(request, next_url=request.get_full_path())\n request.session['can_register'] = True\n request.session['invite_email'] = om.email\n\n return self.respond('sentry/accept-organization-invite.html', context)\n\n if helper.needs_2fa:\n # redirect to setup 2fa\n response = self.respond('sentry/accept-organization-invite.html', context)\n response.set_cookie(PENDING_INVITE, request.path, max_age=COOKIE_MAX_AGE)\n return response\n\n # if they're already a member of the organization its likely they're\n # using a shared account and either previewing this invite or\n # are incorrectly expecting this to create a new account for them\n context['existing_member'] = helper.member_already_exists\n\n form = self.get_form(request)\n if form.is_valid():\n helper.accept_invite()\n\n request.session.pop('can_register', None)\n response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))\n return helper.remove_invite_cookie(response)\n\n context['form'] = form\n return 
self.respond('sentry/accept-organization-invite.html', context)\n\n\nclass BaseInviteHelper(object):\n def __init__(self, instance, request, member_id, token, logger=None):\n self.request = request\n self.instance = instance\n self.member_id = member_id\n self.token = token\n self.logger = logger\n self.om = self.get_organization_member()\n\n def handle_success(self):\n pass\n\n def handle_member_already_exists(self):\n pass\n\n def get_organization_member(self):\n return OrganizationMember.objects.select_related('organization').get(pk=self.member_id)\n\n @property\n def member_pending(self):\n return self.om.is_pending\n\n @property\n def valid_token(self):\n if self.om.token_expired:\n return False\n return constant_time_compare(self.om.token or self.om.legacy_token, self.token)\n\n @property\n def user_authenticated(self):\n return self.request.user.is_authenticated()\n\n @property\n def needs_2fa(self):\n org_requires_2fa = self.om.organization.flags.require_2fa.is_set\n user_has_2fa = Authenticator.objects.user_has_2fa(self.request.user.id)\n return org_requires_2fa and not user_has_2fa\n\n @property\n def member_already_exists(self):\n return OrganizationMember.objects.filter(\n organization=self.om.organization, user=self.request.user\n ).exists()\n\n def accept_invite(self):\n om = self.om\n\n if self.member_already_exists:\n self.handle_member_already_exists()\n om.delete()\n else:\n om.set_user(self.request.user)\n om.save()\n\n self.instance.create_audit_entry(\n self.request,\n organization=om.organization,\n target_object=om.id,\n target_user=self.request.user,\n event=AuditLogEntryEvent.MEMBER_ACCEPT,\n data=om.get_audit_log_data(),\n )\n\n self.handle_success()\n\n def remove_invite_cookie(self, response):\n if PENDING_INVITE in self.request.COOKIES:\n response.delete_cookie(PENDING_INVITE)\n return response\n\n\nclass WebInviteHelper(BaseInviteHelper):\n def handle_success(self):\n messages.add_message(\n self.request, messages.SUCCESS,\n _('You have been added to the %r organization.') %\n (self.om.organization.name.encode('utf-8'), )\n )\n\n member_joined.send_robust(\n member=self.om, organization=self.om.organization, sender=self.instance)\n\n def handle_member_already_exists(self):\n messages.add_message(\n self.request, messages.SUCCESS,\n _('You are already a member of the %r organization.') %\n (self.om.organization.name.encode('utf-8'), )\n )\n\n\nclass ApiInviteHelper(BaseInviteHelper):\n def handle_member_already_exists(self):\n self.logger.info(\n 'Pending org invite not accepted - User already org member',\n extra={\n 'organization_id': self.om.organization.id,\n 'user_id': self.request.user.id,\n }\n )\n\n def valid_request(self):\n if (not self.member_pending or\n not self.valid_token or\n not self.user_authenticated or\n self.needs_2fa):\n return False\n return True\n", "path": "src/sentry/web/frontend/accept_organization_invite.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom django import forms\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.utils.crypto import constant_time_compare\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.models import AuditLogEntryEvent, Authenticator, OrganizationMember\nfrom sentry.signals import member_joined\nfrom sentry.utils import auth\nfrom sentry.web.frontend.base import BaseView\n\nERR_INVITE_INVALID = _('The invite link you followed is not valid, or has expired.')\nPENDING_INVITE = 'pending-invite'\nCOOKIE_MAX_AGE = 
60 * 60 * 24 * 7 # 7 days\n\n\nclass AcceptInviteForm(forms.Form):\n pass\n\n\nclass AcceptOrganizationInviteView(BaseView):\n auth_required = False\n\n def get_form(self, request):\n if request.method == 'POST':\n return AcceptInviteForm(request.POST)\n return AcceptInviteForm()\n\n def redirect_with_err_message(self, request):\n messages.add_message(\n request,\n messages.ERROR,\n ERR_INVITE_INVALID,\n )\n\n return self.redirect(reverse('sentry'))\n\n def handle(self, request, member_id, token):\n assert request.method in ('POST', 'GET')\n\n try:\n helper = WebInviteHelper(\n instance=self,\n request=request,\n member_id=member_id,\n token=token\n )\n except OrganizationMember.DoesNotExist:\n return self.redirect_with_err_message(request)\n\n if not helper.member_pending or not helper.valid_token:\n return self.redirect_with_err_message(request)\n\n om = helper.om\n organization = om.organization\n\n context = {\n 'org_name': organization.name,\n 'needs_authentication': not helper.user_authenticated,\n 'needs_2fa': helper.needs_2fa,\n 'logout_url': u'{}?next={}'.format(\n reverse('sentry-logout'),\n request.path,\n ),\n 'login_url': u'{}?next={}'.format(\n reverse('sentry-login'),\n request.path,\n ),\n 'register_url': u'{}?next={}'.format(\n reverse('sentry-register'),\n request.path,\n ),\n }\n\n if not helper.user_authenticated:\n # Show login or register form\n auth.initiate_login(request, next_url=request.get_full_path())\n request.session['can_register'] = True\n request.session['invite_email'] = om.email\n\n return self.respond('sentry/accept-organization-invite.html', context)\n\n if helper.needs_2fa:\n # redirect to setup 2fa\n response = self.respond('sentry/accept-organization-invite.html', context)\n response.set_cookie(PENDING_INVITE, request.path, max_age=COOKIE_MAX_AGE)\n return response\n\n # if they're already a member of the organization its likely they're\n # using a shared account and either previewing this invite or\n # are incorrectly expecting this to create a new account for them\n context['existing_member'] = helper.member_already_exists\n\n form = self.get_form(request)\n if form.is_valid():\n helper.accept_invite()\n\n request.session.pop('can_register', None)\n response = self.redirect(reverse('sentry-organization-home', args=[organization.slug]))\n return helper.remove_invite_cookie(response)\n\n context['form'] = form\n return self.respond('sentry/accept-organization-invite.html', context)\n\n\nclass BaseInviteHelper(object):\n def __init__(self, instance, request, member_id, token, logger=None):\n self.request = request\n self.instance = instance\n self.member_id = member_id\n self.token = token\n self.logger = logger\n self.om = self.get_organization_member()\n\n def handle_success(self):\n pass\n\n def handle_member_already_exists(self):\n pass\n\n def get_organization_member(self):\n return OrganizationMember.objects.select_related('organization').get(pk=self.member_id)\n\n @property\n def member_pending(self):\n return self.om.is_pending\n\n @property\n def valid_token(self):\n if self.om.token_expired:\n return False\n return constant_time_compare(self.om.token or self.om.legacy_token, self.token)\n\n @property\n def user_authenticated(self):\n return self.request.user.is_authenticated()\n\n @property\n def needs_2fa(self):\n org_requires_2fa = self.om.organization.flags.require_2fa.is_set\n user_has_2fa = Authenticator.objects.user_has_2fa(self.request.user.id)\n return org_requires_2fa and not user_has_2fa\n\n @property\n def 
member_already_exists(self):\n return OrganizationMember.objects.filter(\n organization=self.om.organization, user=self.request.user\n ).exists()\n\n def accept_invite(self):\n om = self.om\n\n if self.member_already_exists:\n self.handle_member_already_exists()\n om.delete()\n else:\n om.set_user(self.request.user)\n om.save()\n\n self.instance.create_audit_entry(\n self.request,\n organization=om.organization,\n target_object=om.id,\n target_user=self.request.user,\n event=AuditLogEntryEvent.MEMBER_ACCEPT,\n data=om.get_audit_log_data(),\n )\n\n self.handle_success()\n\n def remove_invite_cookie(self, response):\n if PENDING_INVITE in self.request.COOKIES:\n response.delete_cookie(PENDING_INVITE)\n return response\n\n\nclass WebInviteHelper(BaseInviteHelper):\n def handle_success(self):\n messages.add_message(\n self.request, messages.SUCCESS,\n _('You have been added to the %r organization.') %\n (self.om.organization.name.encode('utf-8'), )\n )\n\n member_joined.send_robust(\n member=self.om, organization=self.om.organization, sender=self.instance)\n\n def handle_member_already_exists(self):\n messages.add_message(\n self.request, messages.SUCCESS,\n _('You are already a member of the %r organization.') %\n (self.om.organization.name.encode('utf-8'), )\n )\n\n\nclass ApiInviteHelper(BaseInviteHelper):\n def handle_member_already_exists(self):\n self.logger.info(\n 'Pending org invite not accepted - User already org member',\n extra={\n 'organization_id': self.om.organization.id,\n 'user_id': self.request.user.id,\n }\n )\n\n def valid_request(self):\n if (not self.member_pending or\n not self.valid_token or\n not self.user_authenticated or\n self.needs_2fa):\n return False\n return True\n", "path": "src/sentry/web/frontend/accept_organization_invite.py"}]}
| 2,705 | 275 |
gh_patches_debug_6765
|
rasdani/github-patches
|
git_diff
|
conda-forge__conda-smithy-971
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix linter error on missing target_platform
Invoking `conda smithy recipe-lint` on the [conda-forge/go1.4-feedstock/meta.yaml](
https://github.com/conda-forge/go1.4-bootstrap-feedstock/blob/master/recipe/meta.yaml) file yields the following exception:
```
± conda smithy recipe-lint
Traceback (most recent call last):
File "/opt/conda/bin/conda-smithy", line 10, in <module>
sys.exit(main())
File "/opt/conda/lib/python3.6/site-packages/conda_smithy/cli.py", line 279, in main
args.subcommand_func(args)
File "/opt/conda/lib/python3.6/site-packages/conda_smithy/cli.py", line 203, in __call__
return_hints=True)
File "/opt/conda/lib/python3.6/site-packages/conda_smithy/lint_recipe.py", line 428, in main
content = render_meta_yaml(''.join(fh))
File "/opt/conda/lib/python3.6/site-packages/conda_smithy/utils.py", line 49, in render_meta_yaml
content = env.from_string(text).render(os=mockos, environ=mockos.environ)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "<template>", line 29, in top-level template code
jinja2.exceptions.UndefinedError: 'target_platform' is undefined
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_smithy/utils.py`
Content:
```
1 import shutil
2 import tempfile
3 import jinja2
4 import six
5 import datetime
6 import time
7 from collections import defaultdict
8 from contextlib import contextmanager
9
10 @contextmanager
11 def tmp_directory():
12 tmp_dir = tempfile.mkdtemp('_recipe')
13 yield tmp_dir
14 shutil.rmtree(tmp_dir)
15
16
17 class NullUndefined(jinja2.Undefined):
18 def __unicode__(self):
19 return self._undefined_name
20
21 def __getattr__(self, name):
22 return '{}.{}'.format(self, name)
23
24 def __getitem__(self, name):
25 return '{}["{}"]'.format(self, name)
26
27
28 class MockOS(dict):
29 def __init__(self):
30 self.environ = defaultdict(lambda: '')
31
32
33 def render_meta_yaml(text):
34 env = jinja2.Environment(undefined=NullUndefined)
35
36 # stub out cb3 jinja2 functions - they are not important for linting
37 # if we don't stub them out, the ruamel.yaml load fails to interpret them
38 # we can't just use conda-build's api.render functionality, because it would apply selectors
39 env.globals.update(dict(compiler=lambda x: x + '_compiler_stub',
40 pin_subpackage=lambda *args, **kwargs: 'subpackage_stub',
41 pin_compatible=lambda *args, **kwargs: 'compatible_pin_stub',
42 cdt=lambda *args, **kwargs: 'cdt_stub',
43 load_file_regex=lambda *args, **kwargs: \
44 defaultdict(lambda : ''),
45 datetime=datetime,
46 time=time,
47 ))
48 mockos = MockOS()
49 content = env.from_string(text).render(os=mockos, environ=mockos.environ)
50 return content
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py
--- a/conda_smithy/utils.py
+++ b/conda_smithy/utils.py
@@ -44,6 +44,7 @@
defaultdict(lambda : ''),
datetime=datetime,
time=time,
+ target_platform="linux-64",
))
mockos = MockOS()
content = env.from_string(text).render(os=mockos, environ=mockos.environ)
|
{"golden_diff": "diff --git a/conda_smithy/utils.py b/conda_smithy/utils.py\n--- a/conda_smithy/utils.py\n+++ b/conda_smithy/utils.py\n@@ -44,6 +44,7 @@\n defaultdict(lambda : ''),\n datetime=datetime,\n time=time,\n+ target_platform=\"linux-64\",\n ))\n mockos = MockOS()\n content = env.from_string(text).render(os=mockos, environ=mockos.environ)\n", "issue": "Fix linter error on missing target_platform\nInvoking `conda smithy recipe-lint` on the [conda-forge/go1.4-feedstock/meta.yaml](\r\nhttps://github.com/conda-forge/go1.4-bootstrap-feedstock/blob/master/recipe/meta.yaml) file yields the following exception:\r\n\r\n```\r\n\u00b1 conda smithy recipe-lint\r\nTraceback (most recent call last):\r\n File \"/opt/conda/bin/conda-smithy\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/opt/conda/lib/python3.6/site-packages/conda_smithy/cli.py\", line 279, in main\r\n args.subcommand_func(args)\r\n File \"/opt/conda/lib/python3.6/site-packages/conda_smithy/cli.py\", line 203, in __call__\r\n return_hints=True)\r\n File \"/opt/conda/lib/python3.6/site-packages/conda_smithy/lint_recipe.py\", line 428, in main\r\n content = render_meta_yaml(''.join(fh))\r\n File \"/opt/conda/lib/python3.6/site-packages/conda_smithy/utils.py\", line 49, in render_meta_yaml\r\n content = env.from_string(text).render(os=mockos, environ=mockos.environ)\r\n File \"/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py\", line 76, in render\r\n return original_render(self, *args, **kwargs)\r\n File \"/opt/conda/lib/python3.6/site-packages/jinja2/environment.py\", line 1008, in render\r\n return self.environment.handle_exception(exc_info, True)\r\n File \"/opt/conda/lib/python3.6/site-packages/jinja2/environment.py\", line 780, in handle_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py\", line 37, in reraise\r\n raise value.with_traceback(tb)\r\n File \"<template>\", line 29, in top-level template code\r\njinja2.exceptions.UndefinedError: 'target_platform' is undefined\r\n```\n", "before_files": [{"content": "import shutil\nimport tempfile\nimport jinja2\nimport six\nimport datetime\nimport time\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp('_recipe')\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return '{}.{}'.format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: '')\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(dict(compiler=lambda x: x + '_compiler_stub',\n pin_subpackage=lambda *args, **kwargs: 'subpackage_stub',\n pin_compatible=lambda *args, **kwargs: 'compatible_pin_stub',\n cdt=lambda *args, **kwargs: 'cdt_stub',\n load_file_regex=lambda *args, **kwargs: \\\n defaultdict(lambda : ''),\n datetime=datetime,\n time=time,\n ))\n mockos = MockOS()\n content = env.from_string(text).render(os=mockos, environ=mockos.environ)\n return content\n", "path": "conda_smithy/utils.py"}], "after_files": 
[{"content": "import shutil\nimport tempfile\nimport jinja2\nimport six\nimport datetime\nimport time\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\n@contextmanager\ndef tmp_directory():\n tmp_dir = tempfile.mkdtemp('_recipe')\n yield tmp_dir\n shutil.rmtree(tmp_dir)\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return self._undefined_name\n\n def __getattr__(self, name):\n return '{}.{}'.format(self, name)\n\n def __getitem__(self, name):\n return '{}[\"{}\"]'.format(self, name)\n\n\nclass MockOS(dict):\n def __init__(self):\n self.environ = defaultdict(lambda: '')\n\n\ndef render_meta_yaml(text):\n env = jinja2.Environment(undefined=NullUndefined)\n\n # stub out cb3 jinja2 functions - they are not important for linting\n # if we don't stub them out, the ruamel.yaml load fails to interpret them\n # we can't just use conda-build's api.render functionality, because it would apply selectors\n env.globals.update(dict(compiler=lambda x: x + '_compiler_stub',\n pin_subpackage=lambda *args, **kwargs: 'subpackage_stub',\n pin_compatible=lambda *args, **kwargs: 'compatible_pin_stub',\n cdt=lambda *args, **kwargs: 'cdt_stub',\n load_file_regex=lambda *args, **kwargs: \\\n defaultdict(lambda : ''),\n datetime=datetime,\n time=time,\n target_platform=\"linux-64\",\n ))\n mockos = MockOS()\n content = env.from_string(text).render(os=mockos, environ=mockos.environ)\n return content\n", "path": "conda_smithy/utils.py"}]}
| 1,181 | 104 |
gh_patches_debug_2928
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-3621
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[modin] Importing Modin before Ray can sometimes cause ImportError
### Describe the problem
<!-- Describe the problem clearly here. -->
When running Modin with Ray installed from source, I am sometimes running into `ImportError` and `ModuleNotFoundError` which is occurring when I am running a modified version of Modin. This forces me to modify Ray's source such that it does not try to use the Modin that is bundled with Ray.
I will work on a solution for this.
### Source code / logs
`import modin.pandas as pd`
```
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/function_manager.py", line 165, in fetch_and_register_remote_function
function = pickle.loads(serialized_function)
ModuleNotFoundError: No module named 'modin.data_management.utils'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/__init__.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import sys
7
8 if "pyarrow" in sys.modules:
9 raise ImportError("Ray must be imported before pyarrow because Ray "
10 "requires a specific version of pyarrow (which is "
11 "packaged along with Ray).")
12
13 # Add the directory containing pyarrow to the Python path so that we find the
14 # pyarrow version packaged with ray and not a pre-existing pyarrow.
15 pyarrow_path = os.path.join(
16 os.path.abspath(os.path.dirname(__file__)), "pyarrow_files")
17 sys.path.insert(0, pyarrow_path)
18
19 # See https://github.com/ray-project/ray/issues/131.
20 helpful_message = """
21
22 If you are using Anaconda, try fixing this problem by running:
23
24 conda install libgcc
25 """
26
27 try:
28 import pyarrow # noqa: F401
29 except ImportError as e:
30 if ((hasattr(e, "msg") and isinstance(e.msg, str)
31 and ("libstdc++" in e.msg or "CXX" in e.msg))):
32 # This code path should be taken with Python 3.
33 e.msg += helpful_message
34 elif (hasattr(e, "message") and isinstance(e.message, str)
35 and ("libstdc++" in e.message or "CXX" in e.message)):
36 # This code path should be taken with Python 2.
37 condition = (hasattr(e, "args") and isinstance(e.args, tuple)
38 and len(e.args) == 1 and isinstance(e.args[0], str))
39 if condition:
40 e.args = (e.args[0] + helpful_message, )
41 else:
42 if not hasattr(e, "args"):
43 e.args = ()
44 elif not isinstance(e.args, tuple):
45 e.args = (e.args, )
46 e.args += (helpful_message, )
47 raise
48
49 modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
50 sys.path.insert(0, modin_path)
51
52 from ray.raylet import ObjectID, _config # noqa: E402
53 from ray.profiling import profile # noqa: E402
54 from ray.worker import (error_info, init, connect, disconnect, get, put, wait,
55 remote, get_gpu_ids, get_resource_ids, get_webui_url,
56 register_custom_serializer, shutdown,
57 is_initialized) # noqa: E402
58 from ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,
59 PYTHON_MODE) # noqa: E402
60 from ray.worker import global_state # noqa: E402
61 import ray.internal # noqa: E402
62 # We import ray.actor because some code is run in actor.py which initializes
63 # some functions in the worker.
64 import ray.actor # noqa: F401
65 from ray.actor import method # noqa: E402
66
67 # Ray version string.
68 __version__ = "0.6.0"
69
70 __all__ = [
71 "error_info", "init", "connect", "disconnect", "get", "put", "wait",
72 "remote", "profile", "actor", "method", "get_gpu_ids", "get_resource_ids",
73 "get_webui_url", "register_custom_serializer", "shutdown",
74 "is_initialized", "SCRIPT_MODE", "WORKER_MODE", "LOCAL_MODE",
75 "PYTHON_MODE", "global_state", "ObjectID", "_config", "__version__",
76 "internal"
77 ]
78
79 import ctypes # noqa: E402
80 # Windows only
81 if hasattr(ctypes, "windll"):
82 # Makes sure that all child processes die when we die. Also makes sure that
83 # fatal crashes result in process termination rather than an error dialog
84 # (the latter is annoying since we have a lot of processes). This is done
85 # by associating all child processes with a "job" object that imposes this
86 # behavior.
87 (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, "\0" * 17 + chr(0x8 | 0x4 | 0x20) + "\0" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/python/ray/__init__.py b/python/ray/__init__.py
--- a/python/ray/__init__.py
+++ b/python/ray/__init__.py
@@ -47,7 +47,7 @@
raise
modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "modin")
-sys.path.insert(0, modin_path)
+sys.path.append(modin_path)
from ray.raylet import ObjectID, _config # noqa: E402
from ray.profiling import profile # noqa: E402
|
{"golden_diff": "diff --git a/python/ray/__init__.py b/python/ray/__init__.py\n--- a/python/ray/__init__.py\n+++ b/python/ray/__init__.py\n@@ -47,7 +47,7 @@\n raise\n \n modin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\n-sys.path.insert(0, modin_path)\n+sys.path.append(modin_path)\n \n from ray.raylet import ObjectID, _config # noqa: E402\n from ray.profiling import profile # noqa: E402\n", "issue": "[modin] Importing Modin before Ray can sometimes cause ImportError\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nWhen running Modin with Ray installed from source, I am sometimes running into `ImportError` and `ModuleNotFoundError` which is occurring when I am running a modified version of Modin. This forces me to modify Ray's source such that it does not try to use the Modin that is bundled with Ray.\r\n\r\nI will work on a solution for this.\r\n\r\n### Source code / logs\r\n\r\n`import modin.pandas as pd`\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/ray/python/ray/function_manager.py\", line 165, in fetch_and_register_remote_function\r\n function = pickle.loads(serialized_function)\r\nModuleNotFoundError: No module named 'modin.data_management.utils'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.insert(0, modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the 
worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.6.0\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nif \"pyarrow\" in sys.modules:\n raise ImportError(\"Ray must be imported before pyarrow because Ray \"\n \"requires a specific version of pyarrow (which is \"\n \"packaged along with Ray).\")\n\n# Add the directory containing pyarrow to the Python path so that we find the\n# pyarrow version packaged with ray and not a pre-existing pyarrow.\npyarrow_path = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"pyarrow_files\")\nsys.path.insert(0, pyarrow_path)\n\n# See https://github.com/ray-project/ray/issues/131.\nhelpful_message = \"\"\"\n\nIf you are using Anaconda, try fixing this problem by running:\n\n conda install libgcc\n\"\"\"\n\ntry:\n import pyarrow # noqa: F401\nexcept ImportError as e:\n if ((hasattr(e, \"msg\") and isinstance(e.msg, str)\n and (\"libstdc++\" in e.msg or \"CXX\" in e.msg))):\n # This code path should be taken with Python 3.\n e.msg += helpful_message\n elif (hasattr(e, \"message\") and isinstance(e.message, str)\n and (\"libstdc++\" in e.message or \"CXX\" in e.message)):\n # This code path should be taken with Python 2.\n condition = (hasattr(e, \"args\") and isinstance(e.args, tuple)\n and len(e.args) == 1 and isinstance(e.args[0], str))\n if condition:\n e.args = (e.args[0] + helpful_message, )\n else:\n if not hasattr(e, \"args\"):\n e.args = ()\n elif not isinstance(e.args, tuple):\n e.args = (e.args, )\n e.args += (helpful_message, )\n raise\n\nmodin_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), \"modin\")\nsys.path.append(modin_path)\n\nfrom ray.raylet import ObjectID, _config # noqa: E402\nfrom ray.profiling import profile # noqa: E402\nfrom ray.worker import (error_info, init, connect, disconnect, get, put, wait,\n remote, get_gpu_ids, get_resource_ids, get_webui_url,\n register_custom_serializer, shutdown,\n is_initialized) # noqa: E402\nfrom ray.worker import (SCRIPT_MODE, WORKER_MODE, LOCAL_MODE,\n PYTHON_MODE) # noqa: E402\nfrom 
ray.worker import global_state # noqa: E402\nimport ray.internal # noqa: E402\n# We import ray.actor because some code is run in actor.py which initializes\n# some functions in the worker.\nimport ray.actor # noqa: F401\nfrom ray.actor import method # noqa: E402\n\n# Ray version string.\n__version__ = \"0.6.0\"\n\n__all__ = [\n \"error_info\", \"init\", \"connect\", \"disconnect\", \"get\", \"put\", \"wait\",\n \"remote\", \"profile\", \"actor\", \"method\", \"get_gpu_ids\", \"get_resource_ids\",\n \"get_webui_url\", \"register_custom_serializer\", \"shutdown\",\n \"is_initialized\", \"SCRIPT_MODE\", \"WORKER_MODE\", \"LOCAL_MODE\",\n \"PYTHON_MODE\", \"global_state\", \"ObjectID\", \"_config\", \"__version__\",\n \"internal\"\n]\n\nimport ctypes # noqa: E402\n# Windows only\nif hasattr(ctypes, \"windll\"):\n # Makes sure that all child processes die when we die. Also makes sure that\n # fatal crashes result in process termination rather than an error dialog\n # (the latter is annoying since we have a lot of processes). This is done\n # by associating all child processes with a \"job\" object that imposes this\n # behavior.\n (lambda kernel32: (lambda job: (lambda n: kernel32.SetInformationJobObject(job, 9, \"\\0\" * 17 + chr(0x8 | 0x4 | 0x20) + \"\\0\" * (n - 18), n))(0x90 if ctypes.sizeof(ctypes.c_void_p) > ctypes.sizeof(ctypes.c_int) else 0x70) and kernel32.AssignProcessToJobObject(job, ctypes.c_void_p(kernel32.GetCurrentProcess())))(ctypes.c_void_p(kernel32.CreateJobObjectW(None, None))) if kernel32 is not None else None)(ctypes.windll.kernel32) # noqa: E501\n", "path": "python/ray/__init__.py"}]}
| 1,639 | 131 |
gh_patches_debug_34061
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1194
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Column API should support creating columns without names
## Problem
The `name` parameter for the column API shouldn't be required.
## Proposed solution
We should auto-generate a name if it was not specified.
## Additional context
Similar to #449. Please follow a similar naming scheme of `Column n`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/columns.py`
Content:
```
1 from rest_framework import serializers
2 from rest_framework.exceptions import ValidationError
3 from rest_framework.fields import empty
4 from rest_framework.settings import api_settings
5
6 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
7 from mathesar.api.serializers.shared_serializers import (
8 DisplayOptionsMappingSerializer,
9 DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,
10 )
11 from mathesar.models import Column
12
13
14 class InputValueField(serializers.CharField):
15 """
16 Takes in an arbitrary value. Emulates the record creation endpoint,
17 which takes in arbitrary values (un-validated and un-processed request.data).
18 This field replicates that behavior in a serializer.
19 """
20
21 def to_internal_value(self, data):
22 return data
23
24 def to_representation(self, value):
25 return value
26
27
28 class TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):
29 length = serializers.IntegerField(required=False)
30 precision = serializers.IntegerField(required=False)
31 scale = serializers.IntegerField(required=False)
32 fields = serializers.CharField(required=False)
33
34 def run_validation(self, data=empty):
35 # Ensure that there are no unknown type options passed in.
36 if data is not empty:
37 unknown = set(data) - set(self.fields)
38 if unknown:
39 errors = ['Unknown field: {}'.format(field) for field in unknown]
40 raise serializers.ValidationError({
41 api_settings.NON_FIELD_ERRORS_KEY: errors,
42 })
43
44 return super(TypeOptionSerializer, self).run_validation(data)
45
46
47 class SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
48 class Meta:
49 model = Column
50 fields = ('id',
51 'name',
52 'type',
53 'type_options',
54 'display_options'
55 )
56 name = serializers.CharField()
57 type = serializers.CharField(source='plain_type')
58 type_options = TypeOptionSerializer(required=False, allow_null=True)
59 display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)
60
61 def to_representation(self, instance):
62 if isinstance(instance, dict):
63 instance_type = instance.get('type')
64 else:
65 instance_type = instance.plain_type
66 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)
67 return super().to_representation(instance)
68
69 def to_internal_value(self, data):
70 if self.partial and 'type' not in data:
71 instance_type = getattr(self.instance, 'plain_type', None)
72 if instance_type is not None:
73 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)
74 else:
75 self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = data.get('type', None)
76 return super().to_internal_value(data)
77
78
79 class ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):
80 value = InputValueField()
81 is_dynamic = serializers.BooleanField(read_only=True)
82
83
84 class ColumnSerializer(SimpleColumnSerializer):
85 class Meta(SimpleColumnSerializer.Meta):
86 fields = SimpleColumnSerializer.Meta.fields + (
87 'nullable',
88 'primary_key',
89 'source_column',
90 'copy_source_data',
91 'copy_source_constraints',
92 'index',
93 'valid_target_types',
94 'default'
95 )
96 model_fields = ('display_options',)
97
98 name = serializers.CharField(required=False)
99
100 # From scratch fields
101 type = serializers.CharField(source='plain_type', required=False)
102 nullable = serializers.BooleanField(default=True)
103 primary_key = serializers.BooleanField(default=False)
104 default = ColumnDefaultSerializer(
105 source='column_default_dict', required=False, allow_null=True, default=None
106 )
107
108 # From duplication fields
109 source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)
110 copy_source_data = serializers.BooleanField(default=True, write_only=True)
111 copy_source_constraints = serializers.BooleanField(default=True, write_only=True)
112
113 # Read only fields
114 index = serializers.IntegerField(source='column_index', read_only=True)
115 valid_target_types = serializers.ListField(read_only=True)
116
117 def validate(self, data):
118 if not self.partial:
119 from_scratch_required_fields = ['name', 'type']
120 from_scratch_specific_fields = ['type', 'nullable', 'primary_key']
121 from_dupe_required_fields = ['source_column']
122 from_dupe_specific_fields = ['source_column', 'copy_source_data',
123 'copy_source_constraints']
124
125 # Note that we run validation on self.initial_data, as `data` has defaults
126 # filled in for fields that weren't specified by the request
127 from_scratch_required_all = all([
128 f in self.initial_data for f in from_scratch_required_fields
129 ])
130 from_scratch_specific_in = [
131 f for f in from_scratch_specific_fields if f in self.initial_data
132 ]
133 from_dupe_required_all = all([
134 f in self.initial_data for f in from_dupe_required_fields
135 ])
136 from_dupe_specific_in = [
137 f for f in from_dupe_specific_fields if f in self.initial_data
138 ]
139
140 if len(from_dupe_specific_in) and len(from_scratch_specific_in):
141 raise ValidationError(
142 f'{from_scratch_specific_in} cannot be passed in if '
143 f'{from_dupe_specific_in} has also been passed in.'
144 )
145 elif not from_dupe_required_all and not from_scratch_required_all:
146 # We default to from scratch required fields if no fields are passed
147 if len(from_dupe_specific_in) and not len(from_scratch_specific_in):
148 required_fields = from_dupe_required_fields
149 else:
150 required_fields = from_scratch_required_fields
151 raise ValidationError({
152 f: ['This field is required.']
153 for f in required_fields
154 if f not in self.initial_data
155 })
156 return data
157
158 @property
159 def validated_model_fields(self):
160 return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}
161
```
Path: `db/columns/operations/create.py`
Content:
```
1 from alembic.migration import MigrationContext
2 from alembic.operations import Operations
3 from sqlalchemy.ext import compiler
4 from sqlalchemy.exc import DataError
5 from sqlalchemy.schema import DDLElement
6 from psycopg2.errors import InvalidTextRepresentation, InvalidParameterValue
7
8 from db.columns.base import MathesarColumn
9 from db.columns.defaults import DEFAULT, NAME, NULLABLE, TYPE
10 from db.columns.exceptions import InvalidDefaultError, InvalidTypeError, InvalidTypeOptionError
11 from db.columns.operations.alter import set_column_default, change_column_nullable
12 from db.columns.operations.select import (
13 get_column_attnum_from_name, get_column_default, get_column_name_from_attnum,
14 )
15 from db.columns.utils import get_mathesar_column_with_engine
16 from db.constraints.operations.create import copy_constraint
17 from db.constraints.operations.select import get_column_constraints
18 from db.constraints import utils as constraint_utils
19 from db.tables.operations.select import reflect_table_from_oid
20 from db.types.operations.cast import get_supported_alter_column_types
21
22
23 def create_column(engine, table_oid, column_data):
24 column_type = column_data.get(TYPE, column_data.get("type"))
25 column_type_options = column_data.get("type_options", {})
26 column_nullable = column_data.get(NULLABLE, True)
27 default_value = column_data.get(DEFAULT, {}).get('value')
28 prepared_default_value = str(default_value) if default_value is not None else None
29 supported_types = get_supported_alter_column_types(
30 engine, friendly_names=False,
31 )
32 sa_type = supported_types.get(column_type)
33 if sa_type is None:
34 # Requested type not supported. falling back to VARCHAR
35 sa_type = supported_types["VARCHAR"]
36 column_type_options = {}
37 table = reflect_table_from_oid(table_oid, engine)
38
39 try:
40 column = MathesarColumn(
41 column_data[NAME], sa_type(**column_type_options), nullable=column_nullable,
42 server_default=prepared_default_value,
43 )
44 except DataError as e:
45 if type(e.orig) == InvalidTextRepresentation:
46 raise InvalidTypeError
47 else:
48 raise e
49
50 table = reflect_table_from_oid(table_oid, engine)
51 try:
52 with engine.begin() as conn:
53 ctx = MigrationContext.configure(conn)
54 op = Operations(ctx)
55 op.add_column(table.name, column, schema=table.schema)
56 except DataError as e:
57 if type(e.orig) == InvalidTextRepresentation:
58 raise InvalidDefaultError
59 elif type(e.orig) == InvalidParameterValue:
60 raise InvalidTypeOptionError
61 else:
62 raise e
63
64 return get_mathesar_column_with_engine(
65 reflect_table_from_oid(table_oid, engine).columns[column_data[NAME]],
66 engine
67 )
68
69
70 def _gen_col_name(table, column_name):
71 num = 1
72 new_column_name = f"{column_name}_{num}"
73 while new_column_name in table.c:
74 num += 1
75 new_column_name = f"{column_name}_{num}"
76 return new_column_name
77
78
79 class CopyColumn(DDLElement):
80 def __init__(self, schema, table, to_column, from_column):
81 self.schema = schema
82 self.table = table
83 self.to_column = to_column
84 self.from_column = from_column
85
86
87 @compiler.compiles(CopyColumn, "postgresql")
88 def compile_copy_column(element, compiler, **_):
89 return 'UPDATE "%s"."%s" SET "%s" = "%s"' % (
90 element.schema,
91 element.table,
92 element.to_column,
93 element.from_column
94 )
95
96
97 def _duplicate_column_data(table_oid, from_column_attnum, to_column_attnum, engine):
98 table = reflect_table_from_oid(table_oid, engine)
99 from_column_name = get_column_name_from_attnum(table_oid, from_column_attnum, engine)
100 to_column_name = get_column_name_from_attnum(table_oid, to_column_attnum, engine)
101 copy = CopyColumn(
102 table.schema,
103 table.name,
104 to_column_name,
105 from_column_name,
106 )
107 with engine.begin() as conn:
108 conn.execute(copy)
109 from_default = get_column_default(table_oid, from_column_attnum, engine)
110 if from_default is not None:
111 with engine.begin() as conn:
112 set_column_default(table_oid, to_column_attnum, engine, conn, from_default)
113
114
115 def _duplicate_column_constraints(table_oid, from_column_attnum, to_column_attnum, engine, copy_nullable=True):
116 table = reflect_table_from_oid(table_oid, engine)
117 from_column_name = get_column_name_from_attnum(table_oid, from_column_attnum, engine)
118 if copy_nullable:
119 with engine.begin() as conn:
120 change_column_nullable(table_oid, to_column_attnum, engine, conn, table.c[from_column_name].nullable)
121 constraints = get_column_constraints(from_column_attnum, table_oid, engine)
122 for constraint in constraints:
123 constraint_type = constraint_utils.get_constraint_type_from_char(constraint.contype)
124 if constraint_type != constraint_utils.ConstraintType.UNIQUE.value:
125 # Don't allow duplication of primary keys
126 continue
127 copy_constraint(
128 table_oid, engine, constraint, from_column_attnum, to_column_attnum
129 )
130
131
132 def duplicate_column(table_oid, copy_from_attnum, engine, new_column_name=None, copy_data=True, copy_constraints=True):
133 table = reflect_table_from_oid(table_oid, engine)
134 copy_from_name = get_column_name_from_attnum(table_oid, copy_from_attnum, engine)
135 from_column = table.c[copy_from_name]
136 if new_column_name is None:
137 new_column_name = _gen_col_name(table, from_column.name)
138
139 column_data = {
140 NAME: new_column_name,
141 "type": from_column.type.compile(dialect=engine.dialect),
142 NULLABLE: True,
143 }
144 new_column = create_column(engine, table_oid, column_data)
145 new_column_attnum = get_column_attnum_from_name(table_oid, new_column.name, engine)
146 if copy_data:
147 _duplicate_column_data(
148 table_oid,
149 copy_from_attnum,
150 new_column_attnum,
151 engine
152 )
153
154 if copy_constraints:
155 _duplicate_column_constraints(
156 table_oid,
157 copy_from_attnum,
158 new_column_attnum,
159 engine,
160 copy_nullable=copy_data
161 )
162
163 table = reflect_table_from_oid(table_oid, engine)
164 column_name = get_column_name_from_attnum(table_oid, new_column_attnum, engine)
165 return get_mathesar_column_with_engine(table.c[column_name], engine)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/columns/operations/create.py b/db/columns/operations/create.py
--- a/db/columns/operations/create.py
+++ b/db/columns/operations/create.py
@@ -19,8 +19,14 @@
from db.tables.operations.select import reflect_table_from_oid
from db.types.operations.cast import get_supported_alter_column_types
+COLUMN_NAME_TEMPLATE = 'Column'
+
def create_column(engine, table_oid, column_data):
+ table = reflect_table_from_oid(table_oid, engine)
+ column_name = column_data.get(NAME, '').strip()
+ if column_name == '':
+ column_data[NAME] = gen_col_name(table)
column_type = column_data.get(TYPE, column_data.get("type"))
column_type_options = column_data.get("type_options", {})
column_nullable = column_data.get(NULLABLE, True)
@@ -67,6 +73,13 @@
)
+def gen_col_name(table):
+ base_name = COLUMN_NAME_TEMPLATE
+ col_num = len(table.c)
+ name = f'{base_name} {col_num}'
+ return name
+
+
def _gen_col_name(table, column_name):
num = 1
new_column_name = f"{column_name}_{num}"
diff --git a/mathesar/api/serializers/columns.py b/mathesar/api/serializers/columns.py
--- a/mathesar/api/serializers/columns.py
+++ b/mathesar/api/serializers/columns.py
@@ -95,7 +95,7 @@
)
model_fields = ('display_options',)
- name = serializers.CharField(required=False)
+ name = serializers.CharField(required=False, allow_blank=True)
# From scratch fields
type = serializers.CharField(source='plain_type', required=False)
@@ -116,7 +116,7 @@
def validate(self, data):
if not self.partial:
- from_scratch_required_fields = ['name', 'type']
+ from_scratch_required_fields = ['type']
from_scratch_specific_fields = ['type', 'nullable', 'primary_key']
from_dupe_required_fields = ['source_column']
from_dupe_specific_fields = ['source_column', 'copy_source_data',
|
{"golden_diff": "diff --git a/db/columns/operations/create.py b/db/columns/operations/create.py\n--- a/db/columns/operations/create.py\n+++ b/db/columns/operations/create.py\n@@ -19,8 +19,14 @@\n from db.tables.operations.select import reflect_table_from_oid\n from db.types.operations.cast import get_supported_alter_column_types\n \n+COLUMN_NAME_TEMPLATE = 'Column'\n+\n \n def create_column(engine, table_oid, column_data):\n+ table = reflect_table_from_oid(table_oid, engine)\n+ column_name = column_data.get(NAME, '').strip()\n+ if column_name == '':\n+ column_data[NAME] = gen_col_name(table)\n column_type = column_data.get(TYPE, column_data.get(\"type\"))\n column_type_options = column_data.get(\"type_options\", {})\n column_nullable = column_data.get(NULLABLE, True)\n@@ -67,6 +73,13 @@\n )\n \n \n+def gen_col_name(table):\n+ base_name = COLUMN_NAME_TEMPLATE\n+ col_num = len(table.c)\n+ name = f'{base_name} {col_num}'\n+ return name\n+\n+\n def _gen_col_name(table, column_name):\n num = 1\n new_column_name = f\"{column_name}_{num}\"\ndiff --git a/mathesar/api/serializers/columns.py b/mathesar/api/serializers/columns.py\n--- a/mathesar/api/serializers/columns.py\n+++ b/mathesar/api/serializers/columns.py\n@@ -95,7 +95,7 @@\n )\n model_fields = ('display_options',)\n \n- name = serializers.CharField(required=False)\n+ name = serializers.CharField(required=False, allow_blank=True)\n \n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n@@ -116,7 +116,7 @@\n \n def validate(self, data):\n if not self.partial:\n- from_scratch_required_fields = ['name', 'type']\n+ from_scratch_required_fields = ['type']\n from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n", "issue": " Column API should support creating columns without names \n## Problem\r\nThe `name` parameter for the column API shouldn't be required.\r\n\r\n## Proposed solution\r\nWe should auto-generate a name if it was not specified.\r\n\r\n## Additional context\r\nSimilar to #449. Please follow a similar naming scheme of `Column n`.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty\nfrom rest_framework.settings import api_settings\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.shared_serializers import (\n DisplayOptionsMappingSerializer,\n DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,\n)\nfrom mathesar.models import Column\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. 
Emulates the record creation endpoint,\n which takes in arbitrary values (un-validated and un-processed request.data).\n This field replicates that behavior in a serializer.\n \"\"\"\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n length = serializers.IntegerField(required=False)\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n fields = serializers.CharField(required=False)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nclass SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = Column\n fields = ('id',\n 'name',\n 'type',\n 'type_options',\n 'display_options'\n )\n name = serializers.CharField()\n type = serializers.CharField(source='plain_type')\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)\n\n def to_representation(self, instance):\n if isinstance(instance, dict):\n instance_type = instance.get('type')\n else:\n instance_type = instance.plain_type\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n return super().to_representation(instance)\n\n def to_internal_value(self, data):\n if self.partial and 'type' not in data:\n instance_type = getattr(self.instance, 'plain_type', None)\n if instance_type is not None:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n else:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = data.get('type', None)\n return super().to_internal_value(data)\n\n\nclass ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n value = InputValueField()\n is_dynamic = serializers.BooleanField(read_only=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n class Meta(SimpleColumnSerializer.Meta):\n fields = SimpleColumnSerializer.Meta.fields + (\n 'nullable',\n 'primary_key',\n 'source_column',\n 'copy_source_data',\n 'copy_source_constraints',\n 'index',\n 'valid_target_types',\n 'default'\n )\n model_fields = ('display_options',)\n\n name = serializers.CharField(required=False)\n\n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n default = ColumnDefaultSerializer(\n source='column_default_dict', required=False, allow_null=True, default=None\n )\n\n # From duplication fields\n source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)\n copy_source_data = serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n index = serializers.IntegerField(source='column_index', read_only=True)\n valid_target_types = serializers.ListField(read_only=True)\n\n def validate(self, data):\n if not self.partial:\n from_scratch_required_fields = ['name', 'type']\n 
from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n @property\n def validated_model_fields(self):\n return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}\n", "path": "mathesar/api/serializers/columns.py"}, {"content": "from alembic.migration import MigrationContext\nfrom alembic.operations import Operations\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.exc import DataError\nfrom sqlalchemy.schema import DDLElement\nfrom psycopg2.errors import InvalidTextRepresentation, InvalidParameterValue\n\nfrom db.columns.base import MathesarColumn\nfrom db.columns.defaults import DEFAULT, NAME, NULLABLE, TYPE\nfrom db.columns.exceptions import InvalidDefaultError, InvalidTypeError, InvalidTypeOptionError\nfrom db.columns.operations.alter import set_column_default, change_column_nullable\nfrom db.columns.operations.select import (\n get_column_attnum_from_name, get_column_default, get_column_name_from_attnum,\n)\nfrom db.columns.utils import get_mathesar_column_with_engine\nfrom db.constraints.operations.create import copy_constraint\nfrom db.constraints.operations.select import get_column_constraints\nfrom db.constraints import utils as constraint_utils\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.types.operations.cast import get_supported_alter_column_types\n\n\ndef create_column(engine, table_oid, column_data):\n column_type = column_data.get(TYPE, column_data.get(\"type\"))\n column_type_options = column_data.get(\"type_options\", {})\n column_nullable = column_data.get(NULLABLE, True)\n default_value = column_data.get(DEFAULT, {}).get('value')\n prepared_default_value = str(default_value) if default_value is not None else None\n supported_types = get_supported_alter_column_types(\n engine, friendly_names=False,\n )\n sa_type = supported_types.get(column_type)\n if sa_type is None:\n # Requested type not supported. 
falling back to VARCHAR\n sa_type = supported_types[\"VARCHAR\"]\n column_type_options = {}\n table = reflect_table_from_oid(table_oid, engine)\n\n try:\n column = MathesarColumn(\n column_data[NAME], sa_type(**column_type_options), nullable=column_nullable,\n server_default=prepared_default_value,\n )\n except DataError as e:\n if type(e.orig) == InvalidTextRepresentation:\n raise InvalidTypeError\n else:\n raise e\n\n table = reflect_table_from_oid(table_oid, engine)\n try:\n with engine.begin() as conn:\n ctx = MigrationContext.configure(conn)\n op = Operations(ctx)\n op.add_column(table.name, column, schema=table.schema)\n except DataError as e:\n if type(e.orig) == InvalidTextRepresentation:\n raise InvalidDefaultError\n elif type(e.orig) == InvalidParameterValue:\n raise InvalidTypeOptionError\n else:\n raise e\n\n return get_mathesar_column_with_engine(\n reflect_table_from_oid(table_oid, engine).columns[column_data[NAME]],\n engine\n )\n\n\ndef _gen_col_name(table, column_name):\n num = 1\n new_column_name = f\"{column_name}_{num}\"\n while new_column_name in table.c:\n num += 1\n new_column_name = f\"{column_name}_{num}\"\n return new_column_name\n\n\nclass CopyColumn(DDLElement):\n def __init__(self, schema, table, to_column, from_column):\n self.schema = schema\n self.table = table\n self.to_column = to_column\n self.from_column = from_column\n\n\[email protected](CopyColumn, \"postgresql\")\ndef compile_copy_column(element, compiler, **_):\n return 'UPDATE \"%s\".\"%s\" SET \"%s\" = \"%s\"' % (\n element.schema,\n element.table,\n element.to_column,\n element.from_column\n )\n\n\ndef _duplicate_column_data(table_oid, from_column_attnum, to_column_attnum, engine):\n table = reflect_table_from_oid(table_oid, engine)\n from_column_name = get_column_name_from_attnum(table_oid, from_column_attnum, engine)\n to_column_name = get_column_name_from_attnum(table_oid, to_column_attnum, engine)\n copy = CopyColumn(\n table.schema,\n table.name,\n to_column_name,\n from_column_name,\n )\n with engine.begin() as conn:\n conn.execute(copy)\n from_default = get_column_default(table_oid, from_column_attnum, engine)\n if from_default is not None:\n with engine.begin() as conn:\n set_column_default(table_oid, to_column_attnum, engine, conn, from_default)\n\n\ndef _duplicate_column_constraints(table_oid, from_column_attnum, to_column_attnum, engine, copy_nullable=True):\n table = reflect_table_from_oid(table_oid, engine)\n from_column_name = get_column_name_from_attnum(table_oid, from_column_attnum, engine)\n if copy_nullable:\n with engine.begin() as conn:\n change_column_nullable(table_oid, to_column_attnum, engine, conn, table.c[from_column_name].nullable)\n constraints = get_column_constraints(from_column_attnum, table_oid, engine)\n for constraint in constraints:\n constraint_type = constraint_utils.get_constraint_type_from_char(constraint.contype)\n if constraint_type != constraint_utils.ConstraintType.UNIQUE.value:\n # Don't allow duplication of primary keys\n continue\n copy_constraint(\n table_oid, engine, constraint, from_column_attnum, to_column_attnum\n )\n\n\ndef duplicate_column(table_oid, copy_from_attnum, engine, new_column_name=None, copy_data=True, copy_constraints=True):\n table = reflect_table_from_oid(table_oid, engine)\n copy_from_name = get_column_name_from_attnum(table_oid, copy_from_attnum, engine)\n from_column = table.c[copy_from_name]\n if new_column_name is None:\n new_column_name = _gen_col_name(table, from_column.name)\n\n column_data = {\n NAME: new_column_name,\n 
\"type\": from_column.type.compile(dialect=engine.dialect),\n NULLABLE: True,\n }\n new_column = create_column(engine, table_oid, column_data)\n new_column_attnum = get_column_attnum_from_name(table_oid, new_column.name, engine)\n if copy_data:\n _duplicate_column_data(\n table_oid,\n copy_from_attnum,\n new_column_attnum,\n engine\n )\n\n if copy_constraints:\n _duplicate_column_constraints(\n table_oid,\n copy_from_attnum,\n new_column_attnum,\n engine,\n copy_nullable=copy_data\n )\n\n table = reflect_table_from_oid(table_oid, engine)\n column_name = get_column_name_from_attnum(table_oid, new_column_attnum, engine)\n return get_mathesar_column_with_engine(table.c[column_name], engine)\n", "path": "db/columns/operations/create.py"}], "after_files": [{"content": "from rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import empty\nfrom rest_framework.settings import api_settings\n\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.shared_serializers import (\n DisplayOptionsMappingSerializer,\n DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY,\n)\nfrom mathesar.models import Column\n\n\nclass InputValueField(serializers.CharField):\n \"\"\"\n Takes in an arbitrary value. Emulates the record creation endpoint,\n which takes in arbitrary values (un-validated and un-processed request.data).\n This field replicates that behavior in a serializer.\n \"\"\"\n\n def to_internal_value(self, data):\n return data\n\n def to_representation(self, value):\n return value\n\n\nclass TypeOptionSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n length = serializers.IntegerField(required=False)\n precision = serializers.IntegerField(required=False)\n scale = serializers.IntegerField(required=False)\n fields = serializers.CharField(required=False)\n\n def run_validation(self, data=empty):\n # Ensure that there are no unknown type options passed in.\n if data is not empty:\n unknown = set(data) - set(self.fields)\n if unknown:\n errors = ['Unknown field: {}'.format(field) for field in unknown]\n raise serializers.ValidationError({\n api_settings.NON_FIELD_ERRORS_KEY: errors,\n })\n\n return super(TypeOptionSerializer, self).run_validation(data)\n\n\nclass SimpleColumnSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n class Meta:\n model = Column\n fields = ('id',\n 'name',\n 'type',\n 'type_options',\n 'display_options'\n )\n name = serializers.CharField()\n type = serializers.CharField(source='plain_type')\n type_options = TypeOptionSerializer(required=False, allow_null=True)\n display_options = DisplayOptionsMappingSerializer(required=False, allow_null=True)\n\n def to_representation(self, instance):\n if isinstance(instance, dict):\n instance_type = instance.get('type')\n else:\n instance_type = instance.plain_type\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n return super().to_representation(instance)\n\n def to_internal_value(self, data):\n if self.partial and 'type' not in data:\n instance_type = getattr(self.instance, 'plain_type', None)\n if instance_type is not None:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = str(instance_type)\n else:\n self.context[DISPLAY_OPTIONS_SERIALIZER_MAPPING_KEY] = data.get('type', None)\n return super().to_internal_value(data)\n\n\nclass ColumnDefaultSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n value = InputValueField()\n is_dynamic = 
serializers.BooleanField(read_only=True)\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n class Meta(SimpleColumnSerializer.Meta):\n fields = SimpleColumnSerializer.Meta.fields + (\n 'nullable',\n 'primary_key',\n 'source_column',\n 'copy_source_data',\n 'copy_source_constraints',\n 'index',\n 'valid_target_types',\n 'default'\n )\n model_fields = ('display_options',)\n\n name = serializers.CharField(required=False, allow_blank=True)\n\n # From scratch fields\n type = serializers.CharField(source='plain_type', required=False)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n default = ColumnDefaultSerializer(\n source='column_default_dict', required=False, allow_null=True, default=None\n )\n\n # From duplication fields\n source_column = serializers.PrimaryKeyRelatedField(queryset=Column.current_objects.all(), required=False, write_only=True)\n copy_source_data = serializers.BooleanField(default=True, write_only=True)\n copy_source_constraints = serializers.BooleanField(default=True, write_only=True)\n\n # Read only fields\n index = serializers.IntegerField(source='column_index', read_only=True)\n valid_target_types = serializers.ListField(read_only=True)\n\n def validate(self, data):\n if not self.partial:\n from_scratch_required_fields = ['type']\n from_scratch_specific_fields = ['type', 'nullable', 'primary_key']\n from_dupe_required_fields = ['source_column']\n from_dupe_specific_fields = ['source_column', 'copy_source_data',\n 'copy_source_constraints']\n\n # Note that we run validation on self.initial_data, as `data` has defaults\n # filled in for fields that weren't specified by the request\n from_scratch_required_all = all([\n f in self.initial_data for f in from_scratch_required_fields\n ])\n from_scratch_specific_in = [\n f for f in from_scratch_specific_fields if f in self.initial_data\n ]\n from_dupe_required_all = all([\n f in self.initial_data for f in from_dupe_required_fields\n ])\n from_dupe_specific_in = [\n f for f in from_dupe_specific_fields if f in self.initial_data\n ]\n\n if len(from_dupe_specific_in) and len(from_scratch_specific_in):\n raise ValidationError(\n f'{from_scratch_specific_in} cannot be passed in if '\n f'{from_dupe_specific_in} has also been passed in.'\n )\n elif not from_dupe_required_all and not from_scratch_required_all:\n # We default to from scratch required fields if no fields are passed\n if len(from_dupe_specific_in) and not len(from_scratch_specific_in):\n required_fields = from_dupe_required_fields\n else:\n required_fields = from_scratch_required_fields\n raise ValidationError({\n f: ['This field is required.']\n for f in required_fields\n if f not in self.initial_data\n })\n return data\n\n @property\n def validated_model_fields(self):\n return {key: self.validated_data[key] for key in self.validated_data if key in self.Meta.model_fields}\n", "path": "mathesar/api/serializers/columns.py"}, {"content": "from alembic.migration import MigrationContext\nfrom alembic.operations import Operations\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.exc import DataError\nfrom sqlalchemy.schema import DDLElement\nfrom psycopg2.errors import InvalidTextRepresentation, InvalidParameterValue\n\nfrom db.columns.base import MathesarColumn\nfrom db.columns.defaults import DEFAULT, NAME, NULLABLE, TYPE\nfrom db.columns.exceptions import InvalidDefaultError, InvalidTypeError, InvalidTypeOptionError\nfrom db.columns.operations.alter import set_column_default, change_column_nullable\nfrom 
db.columns.operations.select import (\n get_column_attnum_from_name, get_column_default, get_column_name_from_attnum,\n)\nfrom db.columns.utils import get_mathesar_column_with_engine\nfrom db.constraints.operations.create import copy_constraint\nfrom db.constraints.operations.select import get_column_constraints\nfrom db.constraints import utils as constraint_utils\nfrom db.tables.operations.select import reflect_table_from_oid\nfrom db.types.operations.cast import get_supported_alter_column_types\n\nCOLUMN_NAME_TEMPLATE = 'Column'\n\n\ndef create_column(engine, table_oid, column_data):\n table = reflect_table_from_oid(table_oid, engine)\n column_name = column_data.get(NAME, '').strip()\n if column_name == '':\n column_data[NAME] = gen_col_name(table)\n column_type = column_data.get(TYPE, column_data.get(\"type\"))\n column_type_options = column_data.get(\"type_options\", {})\n column_nullable = column_data.get(NULLABLE, True)\n default_value = column_data.get(DEFAULT, {}).get('value')\n prepared_default_value = str(default_value) if default_value is not None else None\n supported_types = get_supported_alter_column_types(\n engine, friendly_names=False,\n )\n sa_type = supported_types.get(column_type)\n if sa_type is None:\n # Requested type not supported. falling back to VARCHAR\n sa_type = supported_types[\"VARCHAR\"]\n column_type_options = {}\n table = reflect_table_from_oid(table_oid, engine)\n\n try:\n column = MathesarColumn(\n column_data[NAME], sa_type(**column_type_options), nullable=column_nullable,\n server_default=prepared_default_value,\n )\n except DataError as e:\n if type(e.orig) == InvalidTextRepresentation:\n raise InvalidTypeError\n else:\n raise e\n\n table = reflect_table_from_oid(table_oid, engine)\n try:\n with engine.begin() as conn:\n ctx = MigrationContext.configure(conn)\n op = Operations(ctx)\n op.add_column(table.name, column, schema=table.schema)\n except DataError as e:\n if type(e.orig) == InvalidTextRepresentation:\n raise InvalidDefaultError\n elif type(e.orig) == InvalidParameterValue:\n raise InvalidTypeOptionError\n else:\n raise e\n\n return get_mathesar_column_with_engine(\n reflect_table_from_oid(table_oid, engine).columns[column_data[NAME]],\n engine\n )\n\n\ndef gen_col_name(table):\n base_name = COLUMN_NAME_TEMPLATE\n col_num = len(table.c)\n name = f'{base_name} {col_num}'\n return name\n\n\ndef _gen_col_name(table, column_name):\n num = 1\n new_column_name = f\"{column_name}_{num}\"\n while new_column_name in table.c:\n num += 1\n new_column_name = f\"{column_name}_{num}\"\n return new_column_name\n\n\nclass CopyColumn(DDLElement):\n def __init__(self, schema, table, to_column, from_column):\n self.schema = schema\n self.table = table\n self.to_column = to_column\n self.from_column = from_column\n\n\[email protected](CopyColumn, \"postgresql\")\ndef compile_copy_column(element, compiler, **_):\n return 'UPDATE \"%s\".\"%s\" SET \"%s\" = \"%s\"' % (\n element.schema,\n element.table,\n element.to_column,\n element.from_column\n )\n\n\ndef _duplicate_column_data(table_oid, from_column_attnum, to_column_attnum, engine):\n table = reflect_table_from_oid(table_oid, engine)\n from_column_name = get_column_name_from_attnum(table_oid, from_column_attnum, engine)\n to_column_name = get_column_name_from_attnum(table_oid, to_column_attnum, engine)\n copy = CopyColumn(\n table.schema,\n table.name,\n to_column_name,\n from_column_name,\n )\n with engine.begin() as conn:\n conn.execute(copy)\n from_default = get_column_default(table_oid, from_column_attnum, 
engine)\n if from_default is not None:\n with engine.begin() as conn:\n set_column_default(table_oid, to_column_attnum, engine, conn, from_default)\n\n\ndef _duplicate_column_constraints(table_oid, from_column_attnum, to_column_attnum, engine, copy_nullable=True):\n table = reflect_table_from_oid(table_oid, engine)\n from_column_name = get_column_name_from_attnum(table_oid, from_column_attnum, engine)\n if copy_nullable:\n with engine.begin() as conn:\n change_column_nullable(table_oid, to_column_attnum, engine, conn, table.c[from_column_name].nullable)\n constraints = get_column_constraints(from_column_attnum, table_oid, engine)\n for constraint in constraints:\n constraint_type = constraint_utils.get_constraint_type_from_char(constraint.contype)\n if constraint_type != constraint_utils.ConstraintType.UNIQUE.value:\n # Don't allow duplication of primary keys\n continue\n copy_constraint(\n table_oid, engine, constraint, from_column_attnum, to_column_attnum\n )\n\n\ndef duplicate_column(table_oid, copy_from_attnum, engine, new_column_name=None, copy_data=True, copy_constraints=True):\n table = reflect_table_from_oid(table_oid, engine)\n copy_from_name = get_column_name_from_attnum(table_oid, copy_from_attnum, engine)\n from_column = table.c[copy_from_name]\n if new_column_name is None:\n new_column_name = _gen_col_name(table, from_column.name)\n\n column_data = {\n NAME: new_column_name,\n \"type\": from_column.type.compile(dialect=engine.dialect),\n NULLABLE: True,\n }\n new_column = create_column(engine, table_oid, column_data)\n new_column_attnum = get_column_attnum_from_name(table_oid, new_column.name, engine)\n if copy_data:\n _duplicate_column_data(\n table_oid,\n copy_from_attnum,\n new_column_attnum,\n engine\n )\n\n if copy_constraints:\n _duplicate_column_constraints(\n table_oid,\n copy_from_attnum,\n new_column_attnum,\n engine,\n copy_nullable=copy_data\n )\n\n table = reflect_table_from_oid(table_oid, engine)\n column_name = get_column_name_from_attnum(table_oid, new_column_attnum, engine)\n return get_mathesar_column_with_engine(table.c[column_name], engine)\n", "path": "db/columns/operations/create.py"}]}
| 3,758 | 483 |
gh_patches_debug_35781
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-5114
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update "Masking out the solar disk" example to use maputils function
Example: https://docs.sunpy.org/en/stable/generated/gallery/computer_vision_techniques/mask_disk.html
Update to use `sunpy.map.coordinate_is_on_solar_disk()`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/computer_vision_techniques/mask_disk.py`
Content:
```
1 """
2 ==========================
3 Masking out the solar disk
4 ==========================
5
6 How to mask out all emission from the solar disk.
7 """
8 import matplotlib.pyplot as plt
9 import numpy as np
10 import numpy.ma as ma
11
12 import sunpy.map
13 from sunpy.data.sample import AIA_171_IMAGE
14 from sunpy.map.maputils import all_coordinates_from_map
15
16 ###############################################################################
17 # We start with the sample data
18 aia = sunpy.map.Map(AIA_171_IMAGE)
19
20 ###############################################################################
21 # A utility function gives us access to the helioprojective coordinate of each
22 # pixels. We can use that to create a new array which
23 # contains the normalized radial position for each pixel.
24 hpc_coords = all_coordinates_from_map(aia)
25 r = np.sqrt(hpc_coords.Tx ** 2 + hpc_coords.Ty ** 2) / aia.rsun_obs
26
27 ###############################################################################
28 # With this information, we create a mask where all values which are less then
29 # the solar radius are masked. We also make a slight change to the colormap
30 # so that masked values are shown as black instead of the default white.
31 mask = ma.masked_less_equal(r, 1)
32 palette = aia.cmap
33 palette.set_bad('black')
34
35 ###############################################################################
36 # Finally we create a new map with our new mask.
37 scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)
38
39 ###############################################################################
40 # Let's plot the results using our modified colormap
41 fig = plt.figure()
42 plt.subplot(projection=scaled_map)
43 scaled_map.plot(cmap=palette)
44 scaled_map.draw_limb()
45 plt.show()
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/computer_vision_techniques/mask_disk.py b/examples/computer_vision_techniques/mask_disk.py
--- a/examples/computer_vision_techniques/mask_disk.py
+++ b/examples/computer_vision_techniques/mask_disk.py
@@ -6,12 +6,10 @@
How to mask out all emission from the solar disk.
"""
import matplotlib.pyplot as plt
-import numpy as np
-import numpy.ma as ma
import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE
-from sunpy.map.maputils import all_coordinates_from_map
+from sunpy.map.maputils import all_coordinates_from_map, coordinate_is_on_solar_disk
###############################################################################
# We start with the sample data
@@ -19,22 +17,22 @@
###############################################################################
# A utility function gives us access to the helioprojective coordinate of each
-# pixels. We can use that to create a new array which
-# contains the normalized radial position for each pixel.
+# pixels. We can use that to create a new array of all the coordinates
+# that are on the solar disk.
hpc_coords = all_coordinates_from_map(aia)
-r = np.sqrt(hpc_coords.Tx ** 2 + hpc_coords.Ty ** 2) / aia.rsun_obs
###############################################################################
-# With this information, we create a mask where all values which are less then
-# the solar radius are masked. We also make a slight change to the colormap
-# so that masked values are shown as black instead of the default white.
-mask = ma.masked_less_equal(r, 1)
+# Now, we can create a mask from the coordinates by using another utility
+# function that gives us a mask that has `True` for those coordinates that are
+# on the solar disk. We also make a slight change to the colormap so that
+# masked values are shown as black instead of the default white.
+mask = coordinate_is_on_solar_disk(hpc_coords)
palette = aia.cmap
palette.set_bad('black')
###############################################################################
# Finally we create a new map with our new mask.
-scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)
+scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask)
###############################################################################
# Let's plot the results using our modified colormap
|
{"golden_diff": "diff --git a/examples/computer_vision_techniques/mask_disk.py b/examples/computer_vision_techniques/mask_disk.py\n--- a/examples/computer_vision_techniques/mask_disk.py\n+++ b/examples/computer_vision_techniques/mask_disk.py\n@@ -6,12 +6,10 @@\n How to mask out all emission from the solar disk.\n \"\"\"\n import matplotlib.pyplot as plt\n-import numpy as np\n-import numpy.ma as ma\n \n import sunpy.map\n from sunpy.data.sample import AIA_171_IMAGE\n-from sunpy.map.maputils import all_coordinates_from_map\n+from sunpy.map.maputils import all_coordinates_from_map, coordinate_is_on_solar_disk\n \n ###############################################################################\n # We start with the sample data\n@@ -19,22 +17,22 @@\n \n ###############################################################################\n # A utility function gives us access to the helioprojective coordinate of each\n-# pixels. We can use that to create a new array which\n-# contains the normalized radial position for each pixel.\n+# pixels. We can use that to create a new array of all the coordinates\n+# that are on the solar disk.\n hpc_coords = all_coordinates_from_map(aia)\n-r = np.sqrt(hpc_coords.Tx ** 2 + hpc_coords.Ty ** 2) / aia.rsun_obs\n \n ###############################################################################\n-# With this information, we create a mask where all values which are less then\n-# the solar radius are masked. We also make a slight change to the colormap\n-# so that masked values are shown as black instead of the default white.\n-mask = ma.masked_less_equal(r, 1)\n+# Now, we can create a mask from the coordinates by using another utility\n+# function that gives us a mask that has `True` for those coordinates that are\n+# on the solar disk. We also make a slight change to the colormap so that\n+# masked values are shown as black instead of the default white.\n+mask = coordinate_is_on_solar_disk(hpc_coords)\n palette = aia.cmap\n palette.set_bad('black')\n \n ###############################################################################\n # Finally we create a new map with our new mask.\n-scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)\n+scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask)\n \n ###############################################################################\n # Let's plot the results using our modified colormap\n", "issue": "Update \"Masking out the solar disk\" example to use maputils function\nExample: https://docs.sunpy.org/en/stable/generated/gallery/computer_vision_techniques/mask_disk.html\r\n\r\nUpdate to use `sunpy.map.coordinate_is_on_solar_disk()`\n", "before_files": [{"content": "\"\"\"\n==========================\nMasking out the solar disk\n==========================\n\nHow to mask out all emission from the solar disk.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.ma as ma\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_171_IMAGE\nfrom sunpy.map.maputils import all_coordinates_from_map\n\n###############################################################################\n# We start with the sample data\naia = sunpy.map.Map(AIA_171_IMAGE)\n\n###############################################################################\n# A utility function gives us access to the helioprojective coordinate of each\n# pixels. 
We can use that to create a new array which\n# contains the normalized radial position for each pixel.\nhpc_coords = all_coordinates_from_map(aia)\nr = np.sqrt(hpc_coords.Tx ** 2 + hpc_coords.Ty ** 2) / aia.rsun_obs\n\n###############################################################################\n# With this information, we create a mask where all values which are less then\n# the solar radius are masked. We also make a slight change to the colormap\n# so that masked values are shown as black instead of the default white.\nmask = ma.masked_less_equal(r, 1)\npalette = aia.cmap\npalette.set_bad('black')\n\n###############################################################################\n# Finally we create a new map with our new mask.\nscaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)\n\n###############################################################################\n# Let's plot the results using our modified colormap\nfig = plt.figure()\nplt.subplot(projection=scaled_map)\nscaled_map.plot(cmap=palette)\nscaled_map.draw_limb()\nplt.show()\n", "path": "examples/computer_vision_techniques/mask_disk.py"}], "after_files": [{"content": "\"\"\"\n==========================\nMasking out the solar disk\n==========================\n\nHow to mask out all emission from the solar disk.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport sunpy.map\nfrom sunpy.data.sample import AIA_171_IMAGE\nfrom sunpy.map.maputils import all_coordinates_from_map, coordinate_is_on_solar_disk\n\n###############################################################################\n# We start with the sample data\naia = sunpy.map.Map(AIA_171_IMAGE)\n\n###############################################################################\n# A utility function gives us access to the helioprojective coordinate of each\n# pixels. We can use that to create a new array of all the coordinates\n# that are on the solar disk.\nhpc_coords = all_coordinates_from_map(aia)\n\n###############################################################################\n# Now, we can create a mask from the coordinates by using another utility\n# function that gives us a mask that has `True` for those coordinates that are\n# on the solar disk. We also make a slight change to the colormap so that\n# masked values are shown as black instead of the default white.\nmask = coordinate_is_on_solar_disk(hpc_coords)\npalette = aia.cmap\npalette.set_bad('black')\n\n###############################################################################\n# Finally we create a new map with our new mask.\nscaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask)\n\n###############################################################################\n# Let's plot the results using our modified colormap\nfig = plt.figure()\nplt.subplot(projection=scaled_map)\nscaled_map.plot(cmap=palette)\nscaled_map.draw_limb()\nplt.show()\n", "path": "examples/computer_vision_techniques/mask_disk.py"}]}
| 744 | 513 |
gh_patches_debug_7803
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-712
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Python 3 to setup.py
We need to note in the setup.py that Python 3 is supported.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Use the following command to install retriever: python setup.py install"""
2 from __future__ import absolute_import
3
4 from setuptools import setup
5 from pkg_resources import parse_version
6 import platform
7
8
9 current_platform = platform.system().lower()
10 extra_includes = []
11 if current_platform == "darwin":
12 try:
13 import py2app
14 except ImportError:
15 pass
16 extra_includes = []
17 elif current_platform == "windows":
18 try:
19 import py2exe
20 except ImportError:
21 pass
22 import sys
23 extra_includes = ['pyodbc', 'inspect']
24 sys.path.append(
25 "C:\\Windows\\winsxs\\x86_microsoft.vc90.crt_1fc8b3b9a1e18e3b_9.0.21022.8_none_bcb86ed6ac711f91")
26
27 __version__ = 'v2.0.dev'
28 with open("_version.py", "w") as version_file:
29 version_file.write("__version__ = " + "'" + __version__ + "'\n")
30 version_file.close()
31
32
33 def clean_version(v):
34 return parse_version(v).__repr__().lstrip("<Version('").rstrip("')>")
35
36 packages = [
37 'retriever.lib',
38 'retriever.engines',
39 'retriever',
40 ]
41
42 includes = [
43 'xlrd',
44 'future'
45 'pymysql',
46 'psycopg2',
47 'sqlite3',
48 ] + extra_includes
49
50 excludes = [
51 'pyreadline',
52 'doctest',
53 'optparse',
54 'getopt',
55 'pickle',
56 'calendar',
57 'pdb',
58 'inspect',
59 'email',
60 'pywin', 'pywin.debugger',
61 'pywin.debugger.dbgcon',
62 'pywin.dialogs', 'pywin.dialogs.list',
63 'Tkconstants', 'Tkinter', 'tcl',
64 ]
65
66 setup(name='retriever',
67 version=clean_version(__version__),
68 description='Data Retriever',
69 author='Ben Morris, Ethan White, Henry Senyondo',
70 author_email='[email protected]',
71 url='https://github.com/weecology/retriever',
72 classifiers=['Intended Audience :: Science/Research',
73 'License :: OSI Approved :: MIT License',
74 'Programming Language :: Python',
75 'Programming Language :: Python :: 2', ],
76 packages=packages,
77 package_dir={
78 'retriever': ''
79 },
80 entry_points={
81 'console_scripts': [
82 'retriever = retriever.__main__:main',
83 ],
84 },
85 install_requires=[
86 'xlrd',
87 'future'
88 ],
89
90 # py2exe flags
91 console=[{'script': "__main__.py",
92 'dest_base': "retriever",
93 'icon_resources': [(1, 'icon.ico')]
94 }],
95 zipfile=None,
96
97 # py2app flags
98 app=['__main__.py'],
99 data_files=[('', ['CITATION'])],
100 setup_requires=['py2app'] if current_platform == 'darwin' else [],
101
102 # options
103 # optimize is set to 1 of py2app to avoid errors with pymysql
104 # bundle_files = 1 or 2 was causing failed builds so we moved
105 # to bundle_files = 3 and Inno Setup
106 options={'py2exe': {'bundle_files': 3,
107 'compressed': 2,
108 'optimize': 1,
109 'packages': packages,
110 'includes': includes,
111 'excludes': excludes,
112 },
113 'py2app': {'packages': ['retriever'],
114 'includes': includes,
115 'site_packages': True,
116 'resources': [],
117 'optimize': 1,
118 'argv_emulation': True,
119 'no_chdir': True,
120 'iconfile': 'osx_icon.icns',
121 },
122 },
123 )
124
125
126 try:
127 from retriever.compile import compile
128 from retriever.lib.repository import check_for_updates
129 compile()
130 check_for_updates()
131 except:
132 pass
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -72,7 +72,8 @@
classifiers=['Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2', ],
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 3',],
packages=packages,
package_dir={
'retriever': ''
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -72,7 +72,8 @@\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n- 'Programming Language :: Python :: 2', ],\n+ 'Programming Language :: Python :: 2',\n+ 'Programming Language :: Python :: 3',],\n packages=packages,\n package_dir={\n 'retriever': ''\n", "issue": "Add Python 3 to setup.py\nWe need to note in the setup.py that Python 3 is supported.\n", "before_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nfrom setuptools import setup\nfrom pkg_resources import parse_version\nimport platform\n\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"darwin\":\n try:\n import py2app\n except ImportError:\n pass\n extra_includes = []\nelif current_platform == \"windows\":\n try:\n import py2exe\n except ImportError:\n pass\n import sys\n extra_includes = ['pyodbc', 'inspect']\n sys.path.append(\n \"C:\\\\Windows\\\\winsxs\\\\x86_microsoft.vc90.crt_1fc8b3b9a1e18e3b_9.0.21022.8_none_bcb86ed6ac711f91\")\n\n__version__ = 'v2.0.dev'\nwith open(\"_version.py\", \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\npackages = [\n 'retriever.lib',\n 'retriever.engines',\n 'retriever',\n]\n\nincludes = [\n 'xlrd',\n 'future'\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'optparse',\n 'getopt',\n 'pickle',\n 'calendar',\n 'pdb',\n 'inspect',\n 'email',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl',\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n author='Ben Morris, Ethan White, Henry Senyondo',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2', ],\n packages=packages,\n package_dir={\n 'retriever': ''\n },\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future'\n ],\n\n # py2exe flags\n console=[{'script': \"__main__.py\",\n 'dest_base': \"retriever\",\n 'icon_resources': [(1, 'icon.ico')]\n }],\n zipfile=None,\n\n # py2app flags\n app=['__main__.py'],\n data_files=[('', ['CITATION'])],\n setup_requires=['py2app'] if current_platform == 'darwin' else [],\n\n # options\n # optimize is set to 1 of py2app to avoid errors with pymysql\n # bundle_files = 1 or 2 was causing failed builds so we moved\n # to bundle_files = 3 and Inno Setup\n options={'py2exe': {'bundle_files': 3,\n 'compressed': 2,\n 'optimize': 1,\n 'packages': packages,\n 'includes': includes,\n 'excludes': excludes,\n },\n 'py2app': {'packages': ['retriever'],\n 'includes': includes,\n 'site_packages': True,\n 'resources': [],\n 'optimize': 1,\n 'argv_emulation': True,\n 'no_chdir': True,\n 'iconfile': 'osx_icon.icns',\n },\n },\n )\n\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n compile()\n check_for_updates()\nexcept:\n 
pass\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Use the following command to install retriever: python setup.py install\"\"\"\nfrom __future__ import absolute_import\n\nfrom setuptools import setup\nfrom pkg_resources import parse_version\nimport platform\n\n\ncurrent_platform = platform.system().lower()\nextra_includes = []\nif current_platform == \"darwin\":\n try:\n import py2app\n except ImportError:\n pass\n extra_includes = []\nelif current_platform == \"windows\":\n try:\n import py2exe\n except ImportError:\n pass\n import sys\n extra_includes = ['pyodbc', 'inspect']\n sys.path.append(\n \"C:\\\\Windows\\\\winsxs\\\\x86_microsoft.vc90.crt_1fc8b3b9a1e18e3b_9.0.21022.8_none_bcb86ed6ac711f91\")\n\n__version__ = 'v2.0.dev'\nwith open(\"_version.py\", \"w\") as version_file:\n version_file.write(\"__version__ = \" + \"'\" + __version__ + \"'\\n\")\n version_file.close()\n\n\ndef clean_version(v):\n return parse_version(v).__repr__().lstrip(\"<Version('\").rstrip(\"')>\")\n\npackages = [\n 'retriever.lib',\n 'retriever.engines',\n 'retriever',\n]\n\nincludes = [\n 'xlrd',\n 'future'\n 'pymysql',\n 'psycopg2',\n 'sqlite3',\n] + extra_includes\n\nexcludes = [\n 'pyreadline',\n 'doctest',\n 'optparse',\n 'getopt',\n 'pickle',\n 'calendar',\n 'pdb',\n 'inspect',\n 'email',\n 'pywin', 'pywin.debugger',\n 'pywin.debugger.dbgcon',\n 'pywin.dialogs', 'pywin.dialogs.list',\n 'Tkconstants', 'Tkinter', 'tcl',\n]\n\nsetup(name='retriever',\n version=clean_version(__version__),\n description='Data Retriever',\n author='Ben Morris, Ethan White, Henry Senyondo',\n author_email='[email protected]',\n url='https://github.com/weecology/retriever',\n classifiers=['Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',],\n packages=packages,\n package_dir={\n 'retriever': ''\n },\n entry_points={\n 'console_scripts': [\n 'retriever = retriever.__main__:main',\n ],\n },\n install_requires=[\n 'xlrd',\n 'future'\n ],\n\n # py2exe flags\n console=[{'script': \"__main__.py\",\n 'dest_base': \"retriever\",\n 'icon_resources': [(1, 'icon.ico')]\n }],\n zipfile=None,\n\n # py2app flags\n app=['__main__.py'],\n data_files=[('', ['CITATION'])],\n setup_requires=['py2app'] if current_platform == 'darwin' else [],\n\n # options\n # optimize is set to 1 of py2app to avoid errors with pymysql\n # bundle_files = 1 or 2 was causing failed builds so we moved\n # to bundle_files = 3 and Inno Setup\n options={'py2exe': {'bundle_files': 3,\n 'compressed': 2,\n 'optimize': 1,\n 'packages': packages,\n 'includes': includes,\n 'excludes': excludes,\n },\n 'py2app': {'packages': ['retriever'],\n 'includes': includes,\n 'site_packages': True,\n 'resources': [],\n 'optimize': 1,\n 'argv_emulation': True,\n 'no_chdir': True,\n 'iconfile': 'osx_icon.icns',\n },\n },\n )\n\n\ntry:\n from retriever.compile import compile\n from retriever.lib.repository import check_for_updates\n compile()\n check_for_updates()\nexcept:\n pass\n", "path": "setup.py"}]}
| 1,459 | 111 |
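The golden diff in the record above boils down to advertising Python 3 support through one extra trove classifier. A minimal sketch of a `setup()` call carrying both classifiers follows; the package name and version are placeholders, not the retriever project's real metadata.

```python
from setuptools import setup

# Placeholder metadata; only the classifiers list mirrors the patch above.
setup(
    name="example-package",
    version="0.0.0",
    classifiers=[
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",  # the line the patch adds
    ],
)
```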
gh_patches_debug_38826
|
rasdani/github-patches
|
git_diff
|
sparcs-kaist__otlplus-979
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CHORE] Cache the serialized results of the new graduation planner models
## Motivation
**Is your feature request related to a problem? Please describe.**
Caching is implemented for OTL's core models, but the models newly created for the graduation planner do not have caching applied yet.
For the beta release they were shipped as-is for the time being, but caching needs to be introduced.
The track data in particular is loaded on page access, and a large amount of it is loaded at once, so there is considerable room for performance degradation.
## Description
**Describe the solution you'd like.**
A clear and concise description of what you want to happen.
## Screenshots
(OPTIONAL) If applicable, add screenshots to help explain your feature request.
## Development environment
- OS: [e.g. macOS]
- ```python --version```:
- ```node --version```:
## Test environment
(OPTIONAL)
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Web Version: [e.g. 1.1.0]
## Additional information
(OPTIONAL) Add any other context or screenshots about the feature request here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/graduation/models.py`
Content:
```
1 from django.db import models
2
3 from apps.subject.models import Department
4
5
6 UNBOUND_START_YEAR = 2000
7 UNBOUND_END_YEAR = 2100
8
9
10 class GeneralTrack(models.Model):
11 start_year = models.IntegerField(db_index=True)
12 end_year = models.IntegerField(db_index=True)
13 is_foreign = models.BooleanField(db_index=True)
14
15 total_credit = models.IntegerField()
16 total_au = models.IntegerField()
17 basic_required = models.IntegerField()
18 basic_elective = models.IntegerField()
19 thesis_study = models.IntegerField()
20 thesis_study_doublemajor = models.IntegerField()
21 general_required_credit = models.IntegerField()
22 general_required_au = models.IntegerField()
23 humanities = models.IntegerField()
24 humanities_doublemajor = models.IntegerField()
25
26 class Meta:
27 unique_together = [["start_year", "is_foreign"], ["end_year", "is_foreign"]]
28
29 def to_json(self):
30 result = {
31 "id": self.id,
32 "start_year": self.start_year,
33 "end_year": self.end_year,
34 "is_foreign": self.is_foreign,
35 "total_credit": self.total_credit,
36 "total_au": self.total_au,
37 "basic_required": self.basic_required,
38 "basic_elective": self.basic_elective,
39 "thesis_study": self.thesis_study,
40 "thesis_study_doublemajor": self.thesis_study_doublemajor,
41 "general_required_credit": self.general_required_credit,
42 "general_required_au": self.general_required_au,
43 "humanities": self.humanities,
44 "humanities_doublemajor": self.humanities_doublemajor,
45 }
46
47 return result
48
49
50 class MajorTrack(models.Model):
51 start_year = models.IntegerField(db_index=True)
52 end_year = models.IntegerField(db_index=True)
53 department = models.ForeignKey(Department,
54 on_delete=models.CASCADE, db_index=True)
55
56 basic_elective_doublemajor = models.IntegerField()
57 major_required = models.IntegerField()
58 major_elective = models.IntegerField()
59
60 class Meta:
61 unique_together = [["start_year", "department"], ["end_year", "department"]]
62
63 def to_json(self):
64 result = {
65 "id": self.id,
66 "start_year": self.start_year,
67 "end_year": self.end_year,
68 "department": self.department.to_json(nested=False),
69 "basic_elective_doublemajor": self.basic_elective_doublemajor,
70 "major_required": self.major_required,
71 "major_elective": self.major_elective,
72 }
73
74 return result
75
76
77 class AdditionalTrack(models.Model):
78 ADDITIONAL_TYPE_CHOICES = [
79 ('DOUBLE', 'DOUBLE'),
80 ('MINOR', 'MINOR'),
81 ('ADVANCED', 'ADVANCED'),
82 ('INTERDISCIPLINARY', 'INTERDISCIPLINARY'),
83 ]
84
85 start_year = models.IntegerField(db_index=True)
86 end_year = models.IntegerField(db_index=True)
87 type = models.CharField(db_index=True, max_length=32, choices=ADDITIONAL_TYPE_CHOICES)
88 department = models.ForeignKey(Department,
89 null=True, blank=True,
90 on_delete=models.CASCADE, db_index=True)
91
92 major_required = models.IntegerField()
93 major_elective = models.IntegerField()
94
95 class Meta:
96 unique_together = [["start_year", "type", "department"], ["end_year", "type", "department"]]
97
98 def to_json(self):
99 result = {
100 "id": self.id,
101 "start_year": self.start_year,
102 "end_year": self.end_year,
103 "type": self.type,
104 "department": self.department.to_json(nested=False) if self.department else None,
105 "major_required": self.major_required,
106 "major_elective": self.major_elective,
107 }
108
109 return result
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/apps/graduation/models.py b/apps/graduation/models.py
--- a/apps/graduation/models.py
+++ b/apps/graduation/models.py
@@ -1,4 +1,5 @@
from django.db import models
+from django.core.cache import cache
from apps.subject.models import Department
@@ -26,7 +27,15 @@
class Meta:
unique_together = [["start_year", "is_foreign"], ["end_year", "is_foreign"]]
+ def get_cache_key(self):
+ return "generaltrack:%d-%d-%s" % (self.start_year, self.end_year, self.is_foreign)
+
def to_json(self):
+ cache_id = self.get_cache_key()
+ result_cached = cache.get(cache_id)
+ if result_cached is not None:
+ return result_cached
+
result = {
"id": self.id,
"start_year": self.start_year,
@@ -44,6 +53,8 @@
"humanities_doublemajor": self.humanities_doublemajor,
}
+ cache.set(cache_id, result, 60 * 60)
+
return result
@@ -60,7 +71,15 @@
class Meta:
unique_together = [["start_year", "department"], ["end_year", "department"]]
+ def get_cache_key(self):
+ return "majortrack:%d-%d-%d" % (self.start_year, self.end_year, self.department.id)
+
def to_json(self):
+ cache_id = self.get_cache_key()
+ result_cached = cache.get(cache_id)
+ if result_cached is not None:
+ return result_cached
+
result = {
"id": self.id,
"start_year": self.start_year,
@@ -71,6 +90,8 @@
"major_elective": self.major_elective,
}
+ cache.set(cache_id, result, 60 * 60)
+
return result
@@ -95,7 +116,15 @@
class Meta:
unique_together = [["start_year", "type", "department"], ["end_year", "type", "department"]]
+ def get_cache_key(self):
+ return "additionaltrack:%d-%d-%s-%d" % (self.start_year, self.end_year, self.type, self.department.id if self.department else 0)
+
def to_json(self):
+ cache_id = self.get_cache_key()
+ result_cached = cache.get(cache_id)
+ if result_cached is not None:
+ return result_cached
+
result = {
"id": self.id,
"start_year": self.start_year,
@@ -106,4 +135,6 @@
"major_elective": self.major_elective,
}
+ cache.set(cache_id, result, 60 * 60)
+
return result
|
{"golden_diff": "diff --git a/apps/graduation/models.py b/apps/graduation/models.py\n--- a/apps/graduation/models.py\n+++ b/apps/graduation/models.py\n@@ -1,4 +1,5 @@\n from django.db import models\n+from django.core.cache import cache\n \n from apps.subject.models import Department\n \n@@ -26,7 +27,15 @@\n class Meta:\n unique_together = [[\"start_year\", \"is_foreign\"], [\"end_year\", \"is_foreign\"]]\n \n+ def get_cache_key(self):\n+ return \"generaltrack:%d-%d-%s\" % (self.start_year, self.end_year, self.is_foreign)\n+\n def to_json(self):\n+ cache_id = self.get_cache_key()\n+ result_cached = cache.get(cache_id)\n+ if result_cached is not None:\n+ return result_cached\n+\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n@@ -44,6 +53,8 @@\n \"humanities_doublemajor\": self.humanities_doublemajor,\n }\n \n+ cache.set(cache_id, result, 60 * 60)\n+\n return result\n \n \n@@ -60,7 +71,15 @@\n class Meta:\n unique_together = [[\"start_year\", \"department\"], [\"end_year\", \"department\"]]\n \n+ def get_cache_key(self):\n+ return \"majortrack:%d-%d-%d\" % (self.start_year, self.end_year, self.department.id)\n+\n def to_json(self):\n+ cache_id = self.get_cache_key()\n+ result_cached = cache.get(cache_id)\n+ if result_cached is not None:\n+ return result_cached\n+\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n@@ -71,6 +90,8 @@\n \"major_elective\": self.major_elective,\n }\n \n+ cache.set(cache_id, result, 60 * 60)\n+\n return result\n \n \n@@ -95,7 +116,15 @@\n class Meta:\n unique_together = [[\"start_year\", \"type\", \"department\"], [\"end_year\", \"type\", \"department\"]]\n \n+ def get_cache_key(self):\n+ return \"additionaltrack:%d-%d-%s-%d\" % (self.start_year, self.end_year, self.type, self.department.id if self.department else 0)\n+\n def to_json(self):\n+ cache_id = self.get_cache_key()\n+ result_cached = cache.get(cache_id)\n+ if result_cached is not None:\n+ return result_cached\n+\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n@@ -106,4 +135,6 @@\n \"major_elective\": self.major_elective,\n }\n \n+ cache.set(cache_id, result, 60 * 60)\n+\n return result\n", "issue": "[CHORE] \uc878\uc5c5\ud50c\ub798\ub108 \uc2e0\uaddc model\uc758 serialize\uacb0\uacfc \uce90\uc2f1\n## \ub3d9\uae30\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\n\r\nOTL\uc758 \uc8fc\uc694 \ubaa8\ub378\uc5d0\ub294 \uce90\uc2dc\uac00 \uad6c\ud604\ub418\uc5b4 \uc788\uc73c\ub098 \uc878\uc5c5\ud50c\ub798\ub108\uc5d0\uc11c \uc0c8\ub85c \uc0dd\uc131\ub41c model\uc740 \uc544\uc9c1 \uce90\uc2f1\uc774 \uc801\uc6a9\ub418\uc5b4 \uc788\uc9c0 \uc54a\uc2b5\ub2c8\ub2e4.\r\n\ubca0\ud0c0 \ucd9c\uc2dc \ub54c\ub294 \uc6b0\uc120 \uc784\uc2dc\ub85c \uadf8\ub300\ub85c \ucd9c\uc2dc\ud558\uc600\uc9c0\ub9cc \uce90\uc2dc \ub3c4\uc785\uc774 \ud544\uc694\ud569\ub2c8\ub2e4.\r\n\ud2b9\ud788 \ud2b8\ub799 \ubd80\ubd84\uc740 \ud398\uc774\uc9c0 \uc811\uc18d \uc2dc\uc5d0 \ub85c\ub529\ub418\uace0 \ud55c\ubc88\uc5d0 \ub9ce\uc740 \uc591\uc774 \ub85c\ub4dc\ub418\uae30 \ub54c\ubb38\uc5d0 \uc131\ub2a5\uc774 \uc0c1\ub2f9\ud788 \uc800\ud558\ub420 \uc5ec\uc9c0\uac00 \uc788\uc2b5\ub2c8\ub2e4.\r\n\r\n## \uc124\uba85\r\n\r\n**Describe the solution you'd like.**\r\n\r\nA clear and concise description of what you want to happen.\r\n\r\n## \uc2a4\ud06c\ub9b0\uc0f7\r\n\r\n(OPTIONAL) If applicable, add screenshots to help explain your feature request.\r\n\r\n## \uac1c\ubc1c \ud658\uacbd\r\n\r\n- OS: [e.g. 
macOS]\r\n- ```python --version```:\r\n- ```node --version```:\r\n\r\n## \ud14c\uc2a4\ud2b8 \ud658\uacbd\r\n\r\n(OPTIONAL)\r\n\r\n- Device: [e.g. iPhone6]\r\n- OS: [e.g. iOS8.1]\r\n- Web Version: [e.g. 1.1.0]\r\n\r\n## \ucd94\uac00 \uc815\ubcf4\r\n\r\n(OPTIONAL) Add any other context or screenshots about the feature request here.\r\n\n", "before_files": [{"content": "from django.db import models\n\nfrom apps.subject.models import Department\n\n\nUNBOUND_START_YEAR = 2000\nUNBOUND_END_YEAR = 2100\n\n\nclass GeneralTrack(models.Model):\n start_year = models.IntegerField(db_index=True)\n end_year = models.IntegerField(db_index=True)\n is_foreign = models.BooleanField(db_index=True)\n\n total_credit = models.IntegerField()\n total_au = models.IntegerField()\n basic_required = models.IntegerField()\n basic_elective = models.IntegerField()\n thesis_study = models.IntegerField()\n thesis_study_doublemajor = models.IntegerField()\n general_required_credit = models.IntegerField()\n general_required_au = models.IntegerField()\n humanities = models.IntegerField()\n humanities_doublemajor = models.IntegerField()\n\n class Meta:\n unique_together = [[\"start_year\", \"is_foreign\"], [\"end_year\", \"is_foreign\"]]\n\n def to_json(self):\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n \"end_year\": self.end_year,\n \"is_foreign\": self.is_foreign,\n \"total_credit\": self.total_credit,\n \"total_au\": self.total_au,\n \"basic_required\": self.basic_required,\n \"basic_elective\": self.basic_elective,\n \"thesis_study\": self.thesis_study,\n \"thesis_study_doublemajor\": self.thesis_study_doublemajor,\n \"general_required_credit\": self.general_required_credit,\n \"general_required_au\": self.general_required_au,\n \"humanities\": self.humanities,\n \"humanities_doublemajor\": self.humanities_doublemajor,\n }\n\n return result\n\n\nclass MajorTrack(models.Model):\n start_year = models.IntegerField(db_index=True)\n end_year = models.IntegerField(db_index=True)\n department = models.ForeignKey(Department,\n on_delete=models.CASCADE, db_index=True)\n\n basic_elective_doublemajor = models.IntegerField()\n major_required = models.IntegerField()\n major_elective = models.IntegerField()\n\n class Meta:\n unique_together = [[\"start_year\", \"department\"], [\"end_year\", \"department\"]]\n\n def to_json(self):\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n \"end_year\": self.end_year,\n \"department\": self.department.to_json(nested=False),\n \"basic_elective_doublemajor\": self.basic_elective_doublemajor,\n \"major_required\": self.major_required,\n \"major_elective\": self.major_elective,\n }\n\n return result\n\n\nclass AdditionalTrack(models.Model):\n ADDITIONAL_TYPE_CHOICES = [\n ('DOUBLE', 'DOUBLE'),\n ('MINOR', 'MINOR'),\n ('ADVANCED', 'ADVANCED'),\n ('INTERDISCIPLINARY', 'INTERDISCIPLINARY'),\n ]\n\n start_year = models.IntegerField(db_index=True)\n end_year = models.IntegerField(db_index=True)\n type = models.CharField(db_index=True, max_length=32, choices=ADDITIONAL_TYPE_CHOICES)\n department = models.ForeignKey(Department,\n null=True, blank=True,\n on_delete=models.CASCADE, db_index=True)\n\n major_required = models.IntegerField()\n major_elective = models.IntegerField()\n\n class Meta:\n unique_together = [[\"start_year\", \"type\", \"department\"], [\"end_year\", \"type\", \"department\"]]\n\n def to_json(self):\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n \"end_year\": self.end_year,\n \"type\": self.type,\n \"department\": 
self.department.to_json(nested=False) if self.department else None,\n \"major_required\": self.major_required,\n \"major_elective\": self.major_elective,\n }\n\n return result\n", "path": "apps/graduation/models.py"}], "after_files": [{"content": "from django.db import models\nfrom django.core.cache import cache\n\nfrom apps.subject.models import Department\n\n\nUNBOUND_START_YEAR = 2000\nUNBOUND_END_YEAR = 2100\n\n\nclass GeneralTrack(models.Model):\n start_year = models.IntegerField(db_index=True)\n end_year = models.IntegerField(db_index=True)\n is_foreign = models.BooleanField(db_index=True)\n\n total_credit = models.IntegerField()\n total_au = models.IntegerField()\n basic_required = models.IntegerField()\n basic_elective = models.IntegerField()\n thesis_study = models.IntegerField()\n thesis_study_doublemajor = models.IntegerField()\n general_required_credit = models.IntegerField()\n general_required_au = models.IntegerField()\n humanities = models.IntegerField()\n humanities_doublemajor = models.IntegerField()\n\n class Meta:\n unique_together = [[\"start_year\", \"is_foreign\"], [\"end_year\", \"is_foreign\"]]\n\n def get_cache_key(self):\n return \"generaltrack:%d-%d-%s\" % (self.start_year, self.end_year, self.is_foreign)\n\n def to_json(self):\n cache_id = self.get_cache_key()\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n return result_cached\n\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n \"end_year\": self.end_year,\n \"is_foreign\": self.is_foreign,\n \"total_credit\": self.total_credit,\n \"total_au\": self.total_au,\n \"basic_required\": self.basic_required,\n \"basic_elective\": self.basic_elective,\n \"thesis_study\": self.thesis_study,\n \"thesis_study_doublemajor\": self.thesis_study_doublemajor,\n \"general_required_credit\": self.general_required_credit,\n \"general_required_au\": self.general_required_au,\n \"humanities\": self.humanities,\n \"humanities_doublemajor\": self.humanities_doublemajor,\n }\n\n cache.set(cache_id, result, 60 * 60)\n\n return result\n\n\nclass MajorTrack(models.Model):\n start_year = models.IntegerField(db_index=True)\n end_year = models.IntegerField(db_index=True)\n department = models.ForeignKey(Department,\n on_delete=models.CASCADE, db_index=True)\n\n basic_elective_doublemajor = models.IntegerField()\n major_required = models.IntegerField()\n major_elective = models.IntegerField()\n\n class Meta:\n unique_together = [[\"start_year\", \"department\"], [\"end_year\", \"department\"]]\n\n def get_cache_key(self):\n return \"majortrack:%d-%d-%d\" % (self.start_year, self.end_year, self.department.id)\n\n def to_json(self):\n cache_id = self.get_cache_key()\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n return result_cached\n\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n \"end_year\": self.end_year,\n \"department\": self.department.to_json(nested=False),\n \"basic_elective_doublemajor\": self.basic_elective_doublemajor,\n \"major_required\": self.major_required,\n \"major_elective\": self.major_elective,\n }\n\n cache.set(cache_id, result, 60 * 60)\n\n return result\n\n\nclass AdditionalTrack(models.Model):\n ADDITIONAL_TYPE_CHOICES = [\n ('DOUBLE', 'DOUBLE'),\n ('MINOR', 'MINOR'),\n ('ADVANCED', 'ADVANCED'),\n ('INTERDISCIPLINARY', 'INTERDISCIPLINARY'),\n ]\n\n start_year = models.IntegerField(db_index=True)\n end_year = models.IntegerField(db_index=True)\n type = models.CharField(db_index=True, max_length=32, 
choices=ADDITIONAL_TYPE_CHOICES)\n department = models.ForeignKey(Department,\n null=True, blank=True,\n on_delete=models.CASCADE, db_index=True)\n\n major_required = models.IntegerField()\n major_elective = models.IntegerField()\n\n class Meta:\n unique_together = [[\"start_year\", \"type\", \"department\"], [\"end_year\", \"type\", \"department\"]]\n\n def get_cache_key(self):\n return \"additionaltrack:%d-%d-%s-%d\" % (self.start_year, self.end_year, self.type, self.department.id if self.department else 0)\n\n def to_json(self):\n cache_id = self.get_cache_key()\n result_cached = cache.get(cache_id)\n if result_cached is not None:\n return result_cached\n\n result = {\n \"id\": self.id,\n \"start_year\": self.start_year,\n \"end_year\": self.end_year,\n \"type\": self.type,\n \"department\": self.department.to_json(nested=False) if self.department else None,\n \"major_required\": self.major_required,\n \"major_elective\": self.major_elective,\n }\n\n cache.set(cache_id, result, 60 * 60)\n\n return result\n", "path": "apps/graduation/models.py"}]}
| 1,590 | 648 |
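The fix recorded above is a read-through cache wrapped around each model's `to_json()`. Below is a minimal sketch of that pattern with a one-hour TTL as in the patch; it assumes a configured Django app with a cache backend, and the model fields and key format are illustrative rather than copied from the record.

```python
from django.core.cache import cache
from django.db import models


class ExampleTrack(models.Model):
    # Illustrative fields only; the real track models carry many more columns.
    start_year = models.IntegerField(db_index=True)
    end_year = models.IntegerField(db_index=True)

    def get_cache_key(self) -> str:
        # One key per logical row, in the "<table>:<fields>" style used by the patch.
        return "exampletrack:%d-%d" % (self.start_year, self.end_year)

    def to_json(self) -> dict:
        cache_id = self.get_cache_key()
        cached = cache.get(cache_id)
        if cached is not None:
            return cached

        result = {
            "id": self.id,
            "start_year": self.start_year,
            "end_year": self.end_year,
        }
        cache.set(cache_id, result, 60 * 60)  # cache the serialized dict for one hour
        return result
```

Cache invalidation is the usual trade-off of this approach: whenever a row changes, its key has to be deleted or allowed to expire, otherwise `to_json()` keeps serving the stale dict for up to an hour.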
gh_patches_debug_28860
|
rasdani/github-patches
|
git_diff
|
rotki__rotki-4382
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed at database upgrade from version 31 to 32
## Problem Definition
Login fails with the following message:
`Failed at database upgrade from version 31 to 32: UNIQUE constraint failed: history_events_copy.event_identifier, history_events_copy.sequence_index`
## Logs
Which logs do you need, if you need any?
### System Description
Operating system: Debian Buster (using the AppImage)
Rotki version: 1.24.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/db/upgrades/v31_v32.py`
Content:
```
1 from collections import defaultdict
2 from typing import TYPE_CHECKING, Dict, Set
3
4 from rotkehlchen.db.constants import BINANCE_MARKETS_KEY
5
6 if TYPE_CHECKING:
7 from sqlite3 import Cursor
8
9 from rotkehlchen.db.dbhandler import DBHandler
10
11
12 def _upgrade_history_events(cursor: 'Cursor') -> None:
13 cursor.execute("""
14 CREATE TABLE IF NOT EXISTS history_events_copy (
15 identifier INTEGER NOT NULL PRIMARY KEY,
16 event_identifier TEXT NOT NULL,
17 sequence_index INTEGER NOT NULL,
18 timestamp INTEGER NOT NULL,
19 location TEXT NOT NULL,
20 location_label TEXT,
21 asset TEXT NOT NULL,
22 amount TEXT NOT NULL,
23 usd_value TEXT NOT NULL,
24 notes TEXT,
25 type TEXT NOT NULL,
26 subtype TEXT,
27 counterparty TEXT,
28 extra_data TEXT,
29 UNIQUE(event_identifier, sequence_index)
30 );""")
31 cursor.execute('UPDATE history_events SET timestamp = timestamp / 10;')
32 cursor.execute('UPDATE history_events SET subtype = "deposit asset" WHERE subtype = "staking deposit asset";') # noqa: E501
33 cursor.execute('UPDATE history_events SET subtype = "receive wrapped" WHERE subtype = "staking receive asset";') # noqa: E501
34 cursor.execute('UPDATE history_events SET subtype = "remove asset", type = "staking" WHERE subtype = "staking remove asset" AND type = "unstaking";') # noqa: E501
35 cursor.execute('UPDATE history_events SET subtype = "return wrapped", type = "staking" WHERE subtype = "staking receive asset" AND type = "unstaking";') # noqa: E501
36 cursor.execute('UPDATE history_events SET type = "informational" WHERE subtype = "unknown";')
37 cursor.execute("""
38 INSERT INTO history_events_copy (event_identifier, sequence_index, timestamp, location,
39 location_label, asset, amount, usd_value, notes, type, subtype)
40 SELECT event_identifier, sequence_index, timestamp, location, location_label, asset,
41 amount, usd_value, notes, type, subtype
42 FROM history_events;
43 """)
44 cursor.execute('DROP TABLE history_events;')
45 cursor.execute('ALTER TABLE history_events_copy RENAME TO history_events;')
46 cursor.execute(
47 'UPDATE history_events SET subtype="reward" WHERE type="staking" AND subtype IS NULL;',
48 )
49
50
51 def _remove_gitcoin(cursor: 'Cursor') -> None:
52 cursor.execute('DELETE from ledger_actions WHERE identifier IN (SELECT parent_id FROM ledger_actions_gitcoin_data)') # noqa: E501
53 cursor.execute('DELETE from used_query_ranges WHERE name LIKE "gitcoingrants_%"')
54 cursor.execute('DROP TABLE IF exists gitcoin_grant_metadata')
55 cursor.execute('DROP TABLE IF exists ledger_actions_gitcoin_data')
56 cursor.execute('DROP TABLE IF exists gitcoin_tx_type')
57
58
59 def _add_new_tables(cursor: 'Cursor') -> None:
60 cursor.execute('INSERT OR IGNORE INTO location(location, seq) VALUES ("d", 36)')
61 cursor.execute("""
62 CREATE TABLE IF NOT EXISTS ethereum_internal_transactions (
63 parent_tx_hash BLOB NOT NULL,
64 trace_id INTEGER NOT NULL,
65 timestamp INTEGER NOT NULL,
66 block_number INTEGER NOT NULL,
67 from_address TEXT NOT NULL,
68 to_address TEXT,
69 value TEXT NOT NULL,
70 FOREIGN KEY(parent_tx_hash) REFERENCES ethereum_transactions(tx_hash) ON DELETE CASCADE ON UPDATE CASCADE,
71 PRIMARY KEY(parent_tx_hash, trace_id)
72 );""") # noqa: E501
73 cursor.execute("""
74 CREATE TABLE IF NOT EXISTS ethtx_address_mappings (
75 address TEXT NOT NULL,
76 tx_hash BLOB NOT NULL,
77 blockchain TEXT NOT NULL,
78 FOREIGN KEY(blockchain, address) REFERENCES blockchain_accounts(blockchain, account) ON DELETE CASCADE,
79 FOREIGN KEY(tx_hash) references ethereum_transactions(tx_hash) ON UPDATE CASCADE ON DELETE CASCADE,
80 PRIMARY KEY (address, tx_hash, blockchain)
81 );""") # noqa: E501
82 cursor.execute("""
83 CREATE TABLE IF NOT EXISTS evm_tx_mappings (
84 tx_hash BLOB NOT NULL,
85 blockchain TEXT NOT NULL,
86 value TEXT NOT NULL,
87 FOREIGN KEY(tx_hash) references ethereum_transactions(tx_hash) ON UPDATE CASCADE ON DELETE CASCADE,
88 PRIMARY KEY (tx_hash, value)
89 );""") # noqa: E501
90 cursor.execute("""
91 CREATE TABLE IF NOT EXISTS history_events_mappings (
92 parent_identifier INTEGER NOT NULL,
93 value TEXT NOT NULL,
94 FOREIGN KEY(parent_identifier) references history_events(identifier) ON UPDATE CASCADE ON DELETE CASCADE,
95 PRIMARY KEY (parent_identifier, value)
96 );""") # noqa: E501
97 cursor.execute("""
98 CREATE TABLE IF NOT EXISTS ens_mappings (
99 address TEXT NOT NULL PRIMARY KEY,
100 ens_name TEXT UNIQUE,
101 last_update INTEGER NOT NULL
102 );
103 """)
104
105
106 def _refactor_manual_balance_id(cursor: 'Cursor') -> None:
107 cursor.execute("""
108 CREATE TABLE manually_tracked_balances_copy (
109 id INTEGER PRIMARY KEY,
110 asset TEXT NOT NULL,
111 label TEXT NOT NULL,
112 amount TEXT,
113 location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location),
114 category CHAR(1) NOT NULL DEFAULT('A') REFERENCES balance_category(category),
115 FOREIGN KEY(asset) REFERENCES assets(identifier) ON UPDATE CASCADE
116 );""")
117 cursor.execute("""
118 INSERT INTO manually_tracked_balances_copy(asset, label, amount, location, category)
119 SELECT asset, label, amount, location, category
120 FROM manually_tracked_balances;
121 """)
122 cursor.execute('DROP TABLE manually_tracked_balances;')
123 cursor.execute(
124 'ALTER TABLE manually_tracked_balances_copy RENAME TO '
125 'manually_tracked_balances;',
126 )
127
128
129 def _update_fee_for_existing_trades(cursor: 'Cursor') -> None:
130 cursor.execute('UPDATE trades SET fee = NULL WHERE fee_currency IS NULL')
131 cursor.execute('UPDATE trades SET fee_currency = NULL WHERE fee IS NULL')
132
133
134 def _update_history_entries_from_kraken(cursor: 'Cursor') -> None:
135 """The logic for kraken was adding additional entries for trades when fee + kfee was
136 being used. This function makes the state of the database consistent with the upgraded
137 logic by:
138 - Removing extra row additions
139 - Make sure that no other event has duplicated sequence indexes
140 """
141 cursor.execute("""
142 DELETE FROM history_events where location="B" AND asset="KFEE" AND
143 type="trade" AND subtype=NULL;
144 """)
145 cursor.execute("""
146 SELECT e.event_identifier, e.sequence_index, e.identifier from history_events e JOIN (SELECT event_identifier,
147 sequence_index, COUNT(*) as cnt FROM history_events GROUP BY event_identifier, sequence_index)
148 other ON e.event_identifier = other.event_identifier and e.sequence_index=other.sequence_index
149 WHERE other.cnt > 1;
150 """) # noqa: E501
151
152 update_tuples = []
153 eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)
154 for event_identifier, sequence_index, identifier in cursor:
155 last_indices = eventid_to_indices.get(event_identifier)
156 if last_indices is None:
157 # Let the first one be the same as it was in the database
158 eventid_to_indices[event_identifier].add(sequence_index)
159 continue
160
161 new_index = sequence_index + 1
162 while new_index in eventid_to_indices[event_identifier]:
163 new_index += 1
164 eventid_to_indices[event_identifier].add(new_index)
165 update_tuples.append((new_index, identifier))
166
167 if len(update_tuples) != 0:
168 cursor.executemany(
169 'UPDATE history_events SET sequence_index=? WHERE identifier=?',
170 update_tuples,
171 )
172
173
174 def _update_settings_name_for_selected_binance_markets(cursor: 'Cursor') -> None:
175 cursor.execute("""
176 UPDATE user_credentials_mappings SET setting_name = ? WHERE setting_name = "PAIRS"
177 """, (BINANCE_MARKETS_KEY,))
178
179
180 def _update_manual_balances_tags(cursor_fetch: 'Cursor', cursor_update: 'Cursor') -> None:
181 manual_balances = cursor_fetch.execute('SELECT id, label FROM manually_tracked_balances')
182 for balance_id, label in manual_balances:
183 cursor_update.execute('UPDATE tag_mappings SET object_reference=? WHERE object_reference=?', (balance_id, label)) # noqa: E501
184
185
186 def upgrade_v31_to_v32(db: 'DBHandler') -> None:
187 """Upgrades the DB from v31 to v32
188 - use new identifiers for the history_events table. The id will be generated by sqlite
189 and will be the column rowid
190
191 -Add the subtype REWARD to staking rewards (before they had type staking
192 and no subtype)
193
194 -Remove all gitcoin grant related data that was pulled from their API and saved in
195 specific tables along with the tables themselves
196
197 -Sets fee to null for existing trades if fee_currency is missing.
198 """
199 primary_cursor = db.conn.cursor()
200 secondary_cursor = db.conn.cursor()
201 _update_history_entries_from_kraken(primary_cursor)
202 _upgrade_history_events(primary_cursor)
203 _remove_gitcoin(primary_cursor)
204 _add_new_tables(primary_cursor)
205 _refactor_manual_balance_id(primary_cursor)
206 _update_fee_for_existing_trades(primary_cursor)
207 _update_settings_name_for_selected_binance_markets(primary_cursor)
208 _update_manual_balances_tags(cursor_fetch=primary_cursor, cursor_update=secondary_cursor)
209 db.conn.commit()
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/rotkehlchen/db/upgrades/v31_v32.py b/rotkehlchen/db/upgrades/v31_v32.py
--- a/rotkehlchen/db/upgrades/v31_v32.py
+++ b/rotkehlchen/db/upgrades/v31_v32.py
@@ -142,22 +142,20 @@
DELETE FROM history_events where location="B" AND asset="KFEE" AND
type="trade" AND subtype=NULL;
""")
+
+ cursor.execute('SELECT event_identifier, sequence_index from history_events')
+ eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)
+ for event_identifier, sequence_index in cursor:
+ eventid_to_indices[event_identifier].add(sequence_index)
+
cursor.execute("""
SELECT e.event_identifier, e.sequence_index, e.identifier from history_events e JOIN (SELECT event_identifier,
sequence_index, COUNT(*) as cnt FROM history_events GROUP BY event_identifier, sequence_index)
other ON e.event_identifier = other.event_identifier and e.sequence_index=other.sequence_index
WHERE other.cnt > 1;
""") # noqa: E501
-
update_tuples = []
- eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)
for event_identifier, sequence_index, identifier in cursor:
- last_indices = eventid_to_indices.get(event_identifier)
- if last_indices is None:
- # Let the first one be the same as it was in the database
- eventid_to_indices[event_identifier].add(sequence_index)
- continue
-
new_index = sequence_index + 1
while new_index in eventid_to_indices[event_identifier]:
new_index += 1
|
{"golden_diff": "diff --git a/rotkehlchen/db/upgrades/v31_v32.py b/rotkehlchen/db/upgrades/v31_v32.py\n--- a/rotkehlchen/db/upgrades/v31_v32.py\n+++ b/rotkehlchen/db/upgrades/v31_v32.py\n@@ -142,22 +142,20 @@\n DELETE FROM history_events where location=\"B\" AND asset=\"KFEE\" AND\n type=\"trade\" AND subtype=NULL;\n \"\"\")\n+\n+ cursor.execute('SELECT event_identifier, sequence_index from history_events')\n+ eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)\n+ for event_identifier, sequence_index in cursor:\n+ eventid_to_indices[event_identifier].add(sequence_index)\n+\n cursor.execute(\"\"\"\n SELECT e.event_identifier, e.sequence_index, e.identifier from history_events e JOIN (SELECT event_identifier,\n sequence_index, COUNT(*) as cnt FROM history_events GROUP BY event_identifier, sequence_index)\n other ON e.event_identifier = other.event_identifier and e.sequence_index=other.sequence_index\n WHERE other.cnt > 1;\n \"\"\") # noqa: E501\n-\n update_tuples = []\n- eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)\n for event_identifier, sequence_index, identifier in cursor:\n- last_indices = eventid_to_indices.get(event_identifier)\n- if last_indices is None:\n- # Let the first one be the same as it was in the database\n- eventid_to_indices[event_identifier].add(sequence_index)\n- continue\n-\n new_index = sequence_index + 1\n while new_index in eventid_to_indices[event_identifier]:\n new_index += 1\n", "issue": "Failed at database upgrade from version 31 to 32\n## Problem Definition\r\n\r\nLogin fails with following message:\r\n`Failed at database upgrade from version 31 to 32: UNIQUE constraint failed: history_events_copy.event_identifier, history_events_copy.sequence_index`\r\n\r\n## Logs\r\n\r\nWhich logs do you need, if you need any?\r\n\r\n### System Description\r\n\r\nOperating system: Debian Buster (using the AppImage)\r\nRotki version: 1.24.0\r\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, Set\n\nfrom rotkehlchen.db.constants import BINANCE_MARKETS_KEY\n\nif TYPE_CHECKING:\n from sqlite3 import Cursor\n\n from rotkehlchen.db.dbhandler import DBHandler\n\n\ndef _upgrade_history_events(cursor: 'Cursor') -> None:\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS history_events_copy (\n identifier INTEGER NOT NULL PRIMARY KEY,\n event_identifier TEXT NOT NULL,\n sequence_index INTEGER NOT NULL,\n timestamp INTEGER NOT NULL,\n location TEXT NOT NULL,\n location_label TEXT,\n asset TEXT NOT NULL,\n amount TEXT NOT NULL,\n usd_value TEXT NOT NULL,\n notes TEXT,\n type TEXT NOT NULL,\n subtype TEXT,\n counterparty TEXT,\n extra_data TEXT,\n UNIQUE(event_identifier, sequence_index)\n );\"\"\")\n cursor.execute('UPDATE history_events SET timestamp = timestamp / 10;')\n cursor.execute('UPDATE history_events SET subtype = \"deposit asset\" WHERE subtype = \"staking deposit asset\";') # noqa: E501\n cursor.execute('UPDATE history_events SET subtype = \"receive wrapped\" WHERE subtype = \"staking receive asset\";') # noqa: E501\n cursor.execute('UPDATE history_events SET subtype = \"remove asset\", type = \"staking\" WHERE subtype = \"staking remove asset\" AND type = \"unstaking\";') # noqa: E501\n cursor.execute('UPDATE history_events SET subtype = \"return wrapped\", type = \"staking\" WHERE subtype = \"staking receive asset\" AND type = \"unstaking\";') # noqa: E501\n cursor.execute('UPDATE history_events SET type = \"informational\" WHERE subtype = \"unknown\";')\n cursor.execute(\"\"\"\n INSERT INTO 
history_events_copy (event_identifier, sequence_index, timestamp, location,\n location_label, asset, amount, usd_value, notes, type, subtype)\n SELECT event_identifier, sequence_index, timestamp, location, location_label, asset,\n amount, usd_value, notes, type, subtype\n FROM history_events;\n \"\"\")\n cursor.execute('DROP TABLE history_events;')\n cursor.execute('ALTER TABLE history_events_copy RENAME TO history_events;')\n cursor.execute(\n 'UPDATE history_events SET subtype=\"reward\" WHERE type=\"staking\" AND subtype IS NULL;',\n )\n\n\ndef _remove_gitcoin(cursor: 'Cursor') -> None:\n cursor.execute('DELETE from ledger_actions WHERE identifier IN (SELECT parent_id FROM ledger_actions_gitcoin_data)') # noqa: E501\n cursor.execute('DELETE from used_query_ranges WHERE name LIKE \"gitcoingrants_%\"')\n cursor.execute('DROP TABLE IF exists gitcoin_grant_metadata')\n cursor.execute('DROP TABLE IF exists ledger_actions_gitcoin_data')\n cursor.execute('DROP TABLE IF exists gitcoin_tx_type')\n\n\ndef _add_new_tables(cursor: 'Cursor') -> None:\n cursor.execute('INSERT OR IGNORE INTO location(location, seq) VALUES (\"d\", 36)')\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ethereum_internal_transactions (\n parent_tx_hash BLOB NOT NULL,\n trace_id INTEGER NOT NULL,\n timestamp INTEGER NOT NULL,\n block_number INTEGER NOT NULL,\n from_address TEXT NOT NULL,\n to_address TEXT,\n value TEXT NOT NULL,\n FOREIGN KEY(parent_tx_hash) REFERENCES ethereum_transactions(tx_hash) ON DELETE CASCADE ON UPDATE CASCADE,\n PRIMARY KEY(parent_tx_hash, trace_id)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ethtx_address_mappings (\n address TEXT NOT NULL,\n tx_hash BLOB NOT NULL,\n blockchain TEXT NOT NULL,\n FOREIGN KEY(blockchain, address) REFERENCES blockchain_accounts(blockchain, account) ON DELETE CASCADE,\n FOREIGN KEY(tx_hash) references ethereum_transactions(tx_hash) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY (address, tx_hash, blockchain)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS evm_tx_mappings (\n tx_hash BLOB NOT NULL,\n blockchain TEXT NOT NULL,\n value TEXT NOT NULL,\n FOREIGN KEY(tx_hash) references ethereum_transactions(tx_hash) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY (tx_hash, value)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS history_events_mappings (\n parent_identifier INTEGER NOT NULL,\n value TEXT NOT NULL,\n FOREIGN KEY(parent_identifier) references history_events(identifier) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY (parent_identifier, value)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ens_mappings (\n address TEXT NOT NULL PRIMARY KEY,\n ens_name TEXT UNIQUE,\n last_update INTEGER NOT NULL\n);\n\"\"\")\n\n\ndef _refactor_manual_balance_id(cursor: 'Cursor') -> None:\n cursor.execute(\"\"\"\n CREATE TABLE manually_tracked_balances_copy (\n id INTEGER PRIMARY KEY,\n asset TEXT NOT NULL,\n label TEXT NOT NULL,\n amount TEXT,\n location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location),\n category CHAR(1) NOT NULL DEFAULT('A') REFERENCES balance_category(category),\n FOREIGN KEY(asset) REFERENCES assets(identifier) ON UPDATE CASCADE\n );\"\"\")\n cursor.execute(\"\"\"\n INSERT INTO manually_tracked_balances_copy(asset, label, amount, location, category)\n SELECT asset, label, amount, location, category\n FROM manually_tracked_balances;\n \"\"\")\n cursor.execute('DROP TABLE manually_tracked_balances;')\n 
cursor.execute(\n 'ALTER TABLE manually_tracked_balances_copy RENAME TO '\n 'manually_tracked_balances;',\n )\n\n\ndef _update_fee_for_existing_trades(cursor: 'Cursor') -> None:\n cursor.execute('UPDATE trades SET fee = NULL WHERE fee_currency IS NULL')\n cursor.execute('UPDATE trades SET fee_currency = NULL WHERE fee IS NULL')\n\n\ndef _update_history_entries_from_kraken(cursor: 'Cursor') -> None:\n \"\"\"The logic for kraken was adding additional entries for trades when fee + kfee was\n being used. This function makes the state of the database consistent with the upgraded\n logic by:\n - Removing extra row additions\n - Make sure that no other event has duplicated sequence indexes\n \"\"\"\n cursor.execute(\"\"\"\n DELETE FROM history_events where location=\"B\" AND asset=\"KFEE\" AND\n type=\"trade\" AND subtype=NULL;\n \"\"\")\n cursor.execute(\"\"\"\n SELECT e.event_identifier, e.sequence_index, e.identifier from history_events e JOIN (SELECT event_identifier,\n sequence_index, COUNT(*) as cnt FROM history_events GROUP BY event_identifier, sequence_index)\n other ON e.event_identifier = other.event_identifier and e.sequence_index=other.sequence_index\n WHERE other.cnt > 1;\n \"\"\") # noqa: E501\n\n update_tuples = []\n eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)\n for event_identifier, sequence_index, identifier in cursor:\n last_indices = eventid_to_indices.get(event_identifier)\n if last_indices is None:\n # Let the first one be the same as it was in the database\n eventid_to_indices[event_identifier].add(sequence_index)\n continue\n\n new_index = sequence_index + 1\n while new_index in eventid_to_indices[event_identifier]:\n new_index += 1\n eventid_to_indices[event_identifier].add(new_index)\n update_tuples.append((new_index, identifier))\n\n if len(update_tuples) != 0:\n cursor.executemany(\n 'UPDATE history_events SET sequence_index=? WHERE identifier=?',\n update_tuples,\n )\n\n\ndef _update_settings_name_for_selected_binance_markets(cursor: 'Cursor') -> None:\n cursor.execute(\"\"\"\n UPDATE user_credentials_mappings SET setting_name = ? WHERE setting_name = \"PAIRS\"\n \"\"\", (BINANCE_MARKETS_KEY,))\n\n\ndef _update_manual_balances_tags(cursor_fetch: 'Cursor', cursor_update: 'Cursor') -> None:\n manual_balances = cursor_fetch.execute('SELECT id, label FROM manually_tracked_balances')\n for balance_id, label in manual_balances:\n cursor_update.execute('UPDATE tag_mappings SET object_reference=? WHERE object_reference=?', (balance_id, label)) # noqa: E501\n\n\ndef upgrade_v31_to_v32(db: 'DBHandler') -> None:\n \"\"\"Upgrades the DB from v31 to v32\n - use new identifiers for the history_events table. 
The id will be generated by sqlite\n and will be the column rowid\n\n -Add the subtype REWARD to staking rewards (before they had type staking\n and no subtype)\n\n -Remove all gitcoin grant related data that was pulled from their API and saved in\n specific tables along with the tables themselves\n\n -Sets fee to null for existing trades if fee_currency is missing.\n \"\"\"\n primary_cursor = db.conn.cursor()\n secondary_cursor = db.conn.cursor()\n _update_history_entries_from_kraken(primary_cursor)\n _upgrade_history_events(primary_cursor)\n _remove_gitcoin(primary_cursor)\n _add_new_tables(primary_cursor)\n _refactor_manual_balance_id(primary_cursor)\n _update_fee_for_existing_trades(primary_cursor)\n _update_settings_name_for_selected_binance_markets(primary_cursor)\n _update_manual_balances_tags(cursor_fetch=primary_cursor, cursor_update=secondary_cursor)\n db.conn.commit()\n", "path": "rotkehlchen/db/upgrades/v31_v32.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, Set\n\nfrom rotkehlchen.db.constants import BINANCE_MARKETS_KEY\n\nif TYPE_CHECKING:\n from sqlite3 import Cursor\n\n from rotkehlchen.db.dbhandler import DBHandler\n\n\ndef _upgrade_history_events(cursor: 'Cursor') -> None:\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS history_events_copy (\n identifier INTEGER NOT NULL PRIMARY KEY,\n event_identifier TEXT NOT NULL,\n sequence_index INTEGER NOT NULL,\n timestamp INTEGER NOT NULL,\n location TEXT NOT NULL,\n location_label TEXT,\n asset TEXT NOT NULL,\n amount TEXT NOT NULL,\n usd_value TEXT NOT NULL,\n notes TEXT,\n type TEXT NOT NULL,\n subtype TEXT,\n counterparty TEXT,\n extra_data TEXT,\n UNIQUE(event_identifier, sequence_index)\n );\"\"\")\n cursor.execute('UPDATE history_events SET timestamp = timestamp / 10;')\n cursor.execute('UPDATE history_events SET subtype = \"deposit asset\" WHERE subtype = \"staking deposit asset\";') # noqa: E501\n cursor.execute('UPDATE history_events SET subtype = \"receive wrapped\" WHERE subtype = \"staking receive asset\";') # noqa: E501\n cursor.execute('UPDATE history_events SET subtype = \"remove asset\", type = \"staking\" WHERE subtype = \"staking remove asset\" AND type = \"unstaking\";') # noqa: E501\n cursor.execute('UPDATE history_events SET subtype = \"return wrapped\", type = \"staking\" WHERE subtype = \"staking receive asset\" AND type = \"unstaking\";') # noqa: E501\n cursor.execute('UPDATE history_events SET type = \"informational\" WHERE subtype = \"unknown\";')\n cursor.execute(\"\"\"\n INSERT INTO history_events_copy (event_identifier, sequence_index, timestamp, location,\n location_label, asset, amount, usd_value, notes, type, subtype)\n SELECT event_identifier, sequence_index, timestamp, location, location_label, asset,\n amount, usd_value, notes, type, subtype\n FROM history_events;\n \"\"\")\n cursor.execute('DROP TABLE history_events;')\n cursor.execute('ALTER TABLE history_events_copy RENAME TO history_events;')\n cursor.execute(\n 'UPDATE history_events SET subtype=\"reward\" WHERE type=\"staking\" AND subtype IS NULL;',\n )\n\n\ndef _remove_gitcoin(cursor: 'Cursor') -> None:\n cursor.execute('DELETE from ledger_actions WHERE identifier IN (SELECT parent_id FROM ledger_actions_gitcoin_data)') # noqa: E501\n cursor.execute('DELETE from used_query_ranges WHERE name LIKE \"gitcoingrants_%\"')\n cursor.execute('DROP TABLE IF exists gitcoin_grant_metadata')\n cursor.execute('DROP TABLE IF exists ledger_actions_gitcoin_data')\n 
cursor.execute('DROP TABLE IF exists gitcoin_tx_type')\n\n\ndef _add_new_tables(cursor: 'Cursor') -> None:\n cursor.execute('INSERT OR IGNORE INTO location(location, seq) VALUES (\"d\", 36)')\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ethereum_internal_transactions (\n parent_tx_hash BLOB NOT NULL,\n trace_id INTEGER NOT NULL,\n timestamp INTEGER NOT NULL,\n block_number INTEGER NOT NULL,\n from_address TEXT NOT NULL,\n to_address TEXT,\n value TEXT NOT NULL,\n FOREIGN KEY(parent_tx_hash) REFERENCES ethereum_transactions(tx_hash) ON DELETE CASCADE ON UPDATE CASCADE,\n PRIMARY KEY(parent_tx_hash, trace_id)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ethtx_address_mappings (\n address TEXT NOT NULL,\n tx_hash BLOB NOT NULL,\n blockchain TEXT NOT NULL,\n FOREIGN KEY(blockchain, address) REFERENCES blockchain_accounts(blockchain, account) ON DELETE CASCADE,\n FOREIGN KEY(tx_hash) references ethereum_transactions(tx_hash) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY (address, tx_hash, blockchain)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS evm_tx_mappings (\n tx_hash BLOB NOT NULL,\n blockchain TEXT NOT NULL,\n value TEXT NOT NULL,\n FOREIGN KEY(tx_hash) references ethereum_transactions(tx_hash) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY (tx_hash, value)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS history_events_mappings (\n parent_identifier INTEGER NOT NULL,\n value TEXT NOT NULL,\n FOREIGN KEY(parent_identifier) references history_events(identifier) ON UPDATE CASCADE ON DELETE CASCADE,\n PRIMARY KEY (parent_identifier, value)\n);\"\"\") # noqa: E501\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS ens_mappings (\n address TEXT NOT NULL PRIMARY KEY,\n ens_name TEXT UNIQUE,\n last_update INTEGER NOT NULL\n);\n\"\"\")\n\n\ndef _refactor_manual_balance_id(cursor: 'Cursor') -> None:\n cursor.execute(\"\"\"\n CREATE TABLE manually_tracked_balances_copy (\n id INTEGER PRIMARY KEY,\n asset TEXT NOT NULL,\n label TEXT NOT NULL,\n amount TEXT,\n location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location),\n category CHAR(1) NOT NULL DEFAULT('A') REFERENCES balance_category(category),\n FOREIGN KEY(asset) REFERENCES assets(identifier) ON UPDATE CASCADE\n );\"\"\")\n cursor.execute(\"\"\"\n INSERT INTO manually_tracked_balances_copy(asset, label, amount, location, category)\n SELECT asset, label, amount, location, category\n FROM manually_tracked_balances;\n \"\"\")\n cursor.execute('DROP TABLE manually_tracked_balances;')\n cursor.execute(\n 'ALTER TABLE manually_tracked_balances_copy RENAME TO '\n 'manually_tracked_balances;',\n )\n\n\ndef _update_fee_for_existing_trades(cursor: 'Cursor') -> None:\n cursor.execute('UPDATE trades SET fee = NULL WHERE fee_currency IS NULL')\n cursor.execute('UPDATE trades SET fee_currency = NULL WHERE fee IS NULL')\n\n\ndef _update_history_entries_from_kraken(cursor: 'Cursor') -> None:\n \"\"\"The logic for kraken was adding additional entries for trades when fee + kfee was\n being used. 
This function makes the state of the database consistent with the upgraded\n logic by:\n - Removing extra row additions\n - Make sure that no other event has duplicated sequence indexes\n \"\"\"\n cursor.execute(\"\"\"\n DELETE FROM history_events where location=\"B\" AND asset=\"KFEE\" AND\n type=\"trade\" AND subtype=NULL;\n \"\"\")\n\n cursor.execute('SELECT event_identifier, sequence_index from history_events')\n eventid_to_indices: Dict[str, Set[int]] = defaultdict(set)\n for event_identifier, sequence_index in cursor:\n eventid_to_indices[event_identifier].add(sequence_index)\n\n cursor.execute(\"\"\"\n SELECT e.event_identifier, e.sequence_index, e.identifier from history_events e JOIN (SELECT event_identifier,\n sequence_index, COUNT(*) as cnt FROM history_events GROUP BY event_identifier, sequence_index)\n other ON e.event_identifier = other.event_identifier and e.sequence_index=other.sequence_index\n WHERE other.cnt > 1;\n \"\"\") # noqa: E501\n update_tuples = []\n for event_identifier, sequence_index, identifier in cursor:\n new_index = sequence_index + 1\n while new_index in eventid_to_indices[event_identifier]:\n new_index += 1\n eventid_to_indices[event_identifier].add(new_index)\n update_tuples.append((new_index, identifier))\n\n if len(update_tuples) != 0:\n cursor.executemany(\n 'UPDATE history_events SET sequence_index=? WHERE identifier=?',\n update_tuples,\n )\n\n\ndef _update_settings_name_for_selected_binance_markets(cursor: 'Cursor') -> None:\n cursor.execute(\"\"\"\n UPDATE user_credentials_mappings SET setting_name = ? WHERE setting_name = \"PAIRS\"\n \"\"\", (BINANCE_MARKETS_KEY,))\n\n\ndef _update_manual_balances_tags(cursor_fetch: 'Cursor', cursor_update: 'Cursor') -> None:\n manual_balances = cursor_fetch.execute('SELECT id, label FROM manually_tracked_balances')\n for balance_id, label in manual_balances:\n cursor_update.execute('UPDATE tag_mappings SET object_reference=? WHERE object_reference=?', (balance_id, label)) # noqa: E501\n\n\ndef upgrade_v31_to_v32(db: 'DBHandler') -> None:\n \"\"\"Upgrades the DB from v31 to v32\n - use new identifiers for the history_events table. The id will be generated by sqlite\n and will be the column rowid\n\n -Add the subtype REWARD to staking rewards (before they had type staking\n and no subtype)\n\n -Remove all gitcoin grant related data that was pulled from their API and saved in\n specific tables along with the tables themselves\n\n -Sets fee to null for existing trades if fee_currency is missing.\n \"\"\"\n primary_cursor = db.conn.cursor()\n secondary_cursor = db.conn.cursor()\n _update_history_entries_from_kraken(primary_cursor)\n _upgrade_history_events(primary_cursor)\n _remove_gitcoin(primary_cursor)\n _add_new_tables(primary_cursor)\n _refactor_manual_balance_id(primary_cursor)\n _update_fee_for_existing_trades(primary_cursor)\n _update_settings_name_for_selected_binance_markets(primary_cursor)\n _update_manual_balances_tags(cursor_fetch=primary_cursor, cursor_update=secondary_cursor)\n db.conn.commit()\n", "path": "rotkehlchen/db/upgrades/v31_v32.py"}]}
| 2,913 | 381 |
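The essence of the fix above is to seed the set of used `(event_identifier, sequence_index)` pairs from every existing row before renumbering the duplicated ones, so a freshly assigned index can never collide with a row that is not being touched. A minimal sketch of that renumbering logic over plain tuples, with made-up data and no SQLite involved:

```python
from collections import defaultdict
from typing import Dict, List, Set, Tuple

# Each row: (event_identifier, sequence_index, row_id); the data is made up.
rows: List[Tuple[str, int, int]] = [
    ("trade-1", 0, 10),
    ("trade-1", 1, 11),
    ("trade-1", 1, 12),  # duplicated (event_identifier, sequence_index) pair
]

# Seed the used-index sets from *every* row first -- the step the patch adds.
used: Dict[str, Set[int]] = defaultdict(set)
for event_identifier, sequence_index, _ in rows:
    used[event_identifier].add(sequence_index)

# Every row belonging to a duplicated pair is moved to the next free index,
# mirroring the rows returned by the grouped self-join in the upgrade helper.
duplicated = [r for r in rows if sum(1 for o in rows if o[:2] == r[:2]) > 1]
updates = []  # (new_index, row_id) pairs, analogous to the UPDATE tuples
for event_identifier, sequence_index, row_id in duplicated:
    new_index = sequence_index + 1
    while new_index in used[event_identifier]:
        new_index += 1
    used[event_identifier].add(new_index)
    updates.append((new_index, row_id))

print(updates)  # [(2, 11), (3, 12)]
```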
gh_patches_debug_4968
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-2649
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dbt clean regression
### Describe the bug
In dbt 0.16.1 `dbt clean` fails without a profile:
```bash
(dbt) dbt$ dbt --version
installed version: 0.16.1
latest version: 0.17.0
Your version of dbt is out of date! You can find instructions for upgrading here:
https://docs.getdbt.com/docs/installation
(dbt) dbt$ dbt clean
Running with dbt=0.16.1
Encountered an error while reading the project:
ERROR: Runtime Error
Could not find profile named 'profile'
Encountered an error:
Runtime Error
Could not run dbt
```
In dbt 0.15.1, `dbt clean` works.
```bash
(dbt) dbt$ dbt --version
installed version: 0.15.1
latest version: 0.17.0
Your version of dbt is out of date! You can find instructions for upgrading here:
https://docs.getdbt.com/docs/installation
(dbt) dbt$ dbt clean
Running with dbt=0.15.1
Checking target/*
Cleaned target/*
Finished cleaning all paths.
```
### Steps To Reproduce
Delete any profile found in `~/.dbt/profile.yml`.
Install 0.16.1:
```bash
pip install dbt==0.16.1
```
Navigate to dbt project:
```
dbt clean
```
Repeat for 0.15.1 to confirm regression.
### Expected behavior
I expected `dbt clean` to work without a profile. This broke some of our automated jobs when we tried to upgrade.
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [X] redshift
- [ ] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
Multiple versions. See above.
**The operating system you're using:**
macOS 10.14.6
**The output of `python --version`:**
```
(dbt) dbt$ python --version
Python 3.7.3
```
### Additional context
Most people probably don't run `dbt clean` without a profile, but it was causing us confusion, so we wanted to document it as a breaking change at least.
I also tested this with 0.17.0: same error as 0.16.1.
```
(dbt) dbt$ dbt --version
installed version: 0.17.0
latest version: 0.17.0
Up to date!
Plugins:
- bigquery: 0.17.0
- snowflake: 0.17.0
- redshift: 0.17.0
- postgres: 0.17.0
(dbt) dbt$ dbt clean
Running with dbt=0.17.0
Encountered an error while reading the project:
ERROR: Runtime Error
Could not find profile named 'profile'
Encountered an error:
Runtime Error
Could not run dbt
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/task/clean.py`
Content:
```
1 import os.path
2 import os
3 import shutil
4
5 from dbt.task.base import ConfiguredTask
6 from dbt.logger import GLOBAL_LOGGER as logger
7
8
9 class CleanTask(ConfiguredTask):
10
11 def __is_project_path(self, path):
12 proj_path = os.path.abspath('.')
13 return not os.path.commonprefix(
14 [proj_path, os.path.abspath(path)]
15 ) == proj_path
16
17 def __is_protected_path(self, path):
18 """
19 This function identifies protected paths, so as not to clean them.
20 """
21 abs_path = os.path.abspath(path)
22 protected_paths = self.config.source_paths + \
23 self.config.test_paths + ['.']
24 protected_abs_paths = [os.path.abspath(p) for p in protected_paths]
25 return abs_path in set(protected_abs_paths) or \
26 self.__is_project_path(abs_path)
27
28 def run(self):
29 """
30 This function takes all the paths in the target file
31 and cleans the project paths that are not protected.
32 """
33 for path in self.config.clean_targets:
34 logger.info("Checking {}/*".format(path))
35 if not self.__is_protected_path(path):
36 shutil.rmtree(path, True)
37 logger.info(" Cleaned {}/*".format(path))
38 else:
39 logger.info("ERROR: not cleaning {}/* because it is "
40 "protected".format(path))
41 logger.info("Finished cleaning all paths.")
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/core/dbt/task/clean.py b/core/dbt/task/clean.py
--- a/core/dbt/task/clean.py
+++ b/core/dbt/task/clean.py
@@ -2,11 +2,13 @@
import os
import shutil
-from dbt.task.base import ConfiguredTask
+from dbt.task.base import BaseTask
from dbt.logger import GLOBAL_LOGGER as logger
+from dbt.config import UnsetProfileConfig
-class CleanTask(ConfiguredTask):
+class CleanTask(BaseTask):
+ ConfigType = UnsetProfileConfig
def __is_project_path(self, path):
proj_path = os.path.abspath('.')
|
{"golden_diff": "diff --git a/core/dbt/task/clean.py b/core/dbt/task/clean.py\n--- a/core/dbt/task/clean.py\n+++ b/core/dbt/task/clean.py\n@@ -2,11 +2,13 @@\n import os\n import shutil\n \n-from dbt.task.base import ConfiguredTask\n+from dbt.task.base import BaseTask\n from dbt.logger import GLOBAL_LOGGER as logger\n+from dbt.config import UnsetProfileConfig\n \n \n-class CleanTask(ConfiguredTask):\n+class CleanTask(BaseTask):\n+ ConfigType = UnsetProfileConfig\n \n def __is_project_path(self, path):\n proj_path = os.path.abspath('.')\n", "issue": "dbt clean regression\n### Describe the bug\r\nIn dbt 0.16.1 `dbt clean` fails without a profile: \r\n\r\n```bash\r\n(dbt) dbt$ dbt --version\r\ninstalled version: 0.16.1\r\n latest version: 0.17.0\r\n\r\nYour version of dbt is out of date! You can find instructions for upgrading here:\r\nhttps://docs.getdbt.com/docs/installation\r\n(dbt) dbt$ dbt clean\r\nRunning with dbt=0.16.1\r\nEncountered an error while reading the project:\r\n ERROR: Runtime Error\r\n Could not find profile named 'profile'\r\nEncountered an error:\r\nRuntime Error\r\n Could not run dbt\r\n```\r\n\r\nIn dbt 0.15.1, `dbt clean` works.\r\n\r\n```bash\r\n(dbt) dbt$ dbt --version\r\ninstalled version: 0.15.1\r\n latest version: 0.17.0\r\n\r\nYour version of dbt is out of date! You can find instructions for upgrading here:\r\nhttps://docs.getdbt.com/docs/installation\r\n(dbt) dbt$ dbt clean\r\nRunning with dbt=0.15.1\r\nChecking target/*\r\n Cleaned target/*\r\nFinished cleaning all paths.\r\n```\r\n\r\n### Steps To Reproduce\r\nDelete any profile found in `~/.dbt/profile.yml`. \r\n\r\nInstall 0.16.1:\r\n```bash\r\npip install dbt==0.16.1\r\n```\r\nNavigate to dbt project:\r\n```\r\ndbt clean\r\n```\r\n\r\nRepeat for 0.15.1 to confirm regression.\r\n\r\n### Expected behavior\r\nI expected `dbt clean` to work without a profile. This broke some of our automated jobs when we tried to upgrade.\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [X] redshift\r\n- [ ] bigquery\r\n- [ ] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\nMultiple versions. 
See above.\r\n\r\n**The operating system you're using:**\r\nmacOS 10.14.6\r\n\r\n**The output of `python --version`:**\r\n```\r\n(dbt) dbt$ python --version\r\nPython 3.7.3\r\n```\r\n\r\n### Additional context\r\nMost people probably don't run `dbt clean` without a profile, but it was causing us confusion, so wanted to document it as a breaking change at least.\r\n\r\nI also tested this with 0.17.0: same error as 0.16.1.\r\n\r\n```\r\n(dbt) dbt$ dbt --version\r\ninstalled version: 0.17.0\r\n latest version: 0.17.0\r\n\r\nUp to date!\r\n\r\nPlugins:\r\n - bigquery: 0.17.0\r\n - snowflake: 0.17.0\r\n - redshift: 0.17.0\r\n - postgres: 0.17.0\r\n(dbt) dbt$ dbt clean\r\nRunning with dbt=0.17.0\r\nEncountered an error while reading the project:\r\n ERROR: Runtime Error\r\n Could not find profile named 'profile'\r\nEncountered an error:\r\nRuntime Error\r\n Could not run dbt\r\n```\r\n\n", "before_files": [{"content": "import os.path\nimport os\nimport shutil\n\nfrom dbt.task.base import ConfiguredTask\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\n\nclass CleanTask(ConfiguredTask):\n\n def __is_project_path(self, path):\n proj_path = os.path.abspath('.')\n return not os.path.commonprefix(\n [proj_path, os.path.abspath(path)]\n ) == proj_path\n\n def __is_protected_path(self, path):\n \"\"\"\n This function identifies protected paths, so as not to clean them.\n \"\"\"\n abs_path = os.path.abspath(path)\n protected_paths = self.config.source_paths + \\\n self.config.test_paths + ['.']\n protected_abs_paths = [os.path.abspath(p) for p in protected_paths]\n return abs_path in set(protected_abs_paths) or \\\n self.__is_project_path(abs_path)\n\n def run(self):\n \"\"\"\n This function takes all the paths in the target file\n and cleans the project paths that are not protected.\n \"\"\"\n for path in self.config.clean_targets:\n logger.info(\"Checking {}/*\".format(path))\n if not self.__is_protected_path(path):\n shutil.rmtree(path, True)\n logger.info(\" Cleaned {}/*\".format(path))\n else:\n logger.info(\"ERROR: not cleaning {}/* because it is \"\n \"protected\".format(path))\n logger.info(\"Finished cleaning all paths.\")\n", "path": "core/dbt/task/clean.py"}], "after_files": [{"content": "import os.path\nimport os\nimport shutil\n\nfrom dbt.task.base import BaseTask\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.config import UnsetProfileConfig\n\n\nclass CleanTask(BaseTask):\n ConfigType = UnsetProfileConfig\n\n def __is_project_path(self, path):\n proj_path = os.path.abspath('.')\n return not os.path.commonprefix(\n [proj_path, os.path.abspath(path)]\n ) == proj_path\n\n def __is_protected_path(self, path):\n \"\"\"\n This function identifies protected paths, so as not to clean them.\n \"\"\"\n abs_path = os.path.abspath(path)\n protected_paths = self.config.source_paths + \\\n self.config.test_paths + ['.']\n protected_abs_paths = [os.path.abspath(p) for p in protected_paths]\n return abs_path in set(protected_abs_paths) or \\\n self.__is_project_path(abs_path)\n\n def run(self):\n \"\"\"\n This function takes all the paths in the target file\n and cleans the project paths that are not protected.\n \"\"\"\n for path in self.config.clean_targets:\n logger.info(\"Checking {}/*\".format(path))\n if not self.__is_protected_path(path):\n shutil.rmtree(path, True)\n logger.info(\" Cleaned {}/*\".format(path))\n else:\n logger.info(\"ERROR: not cleaning {}/* because it is \"\n \"protected\".format(path))\n logger.info(\"Finished cleaning all paths.\")\n", "path": 
"core/dbt/task/clean.py"}]}
| 1,358 | 142 |
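An editorial aside on the dbt record above, for readers skimming the patch: the regression exists because `CleanTask` inherited from `ConfiguredTask`, which resolves a full profile before any task logic runs, so `dbt clean` fails the moment `~/.dbt/profiles.yml` has no matching profile. The golden diff sidesteps that by subclassing `BaseTask` and pointing its `ConfigType` at `UnsetProfileConfig`, a config built from the project file alone. The sketch below shows the shape of that pattern; everything except the names taken from the diff (`BaseTask`, `UnsetProfileConfig`, `ConfigType`, `clean_targets`) is a hypothetical stand-in, not dbt's actual implementation.

```python
# Illustrative sketch only -- the bodies below are hypothetical stand-ins,
# not dbt's real internals.

class UnsetProfileConfigSketch:
    """Stand-in for dbt.config.UnsetProfileConfig: reads project settings
    but leaves every profile/credential field unset."""

    def __init__(self, clean_targets, source_paths, test_paths):
        self.clean_targets = clean_targets
        self.source_paths = source_paths
        self.test_paths = test_paths

    @classmethod
    def from_args(cls, args):
        # Only project-level settings are read here; nothing looks up
        # ~/.dbt/profiles.yml, so a missing profile can no longer raise
        # "Could not find profile named 'profile'".
        return cls(clean_targets=["target"], source_paths=["models"], test_paths=["tests"])


class BaseTaskSketch:
    # Mirrors the `ConfigType = UnsetProfileConfig` hook added by the patch:
    # each task class declares how much configuration it actually needs.
    ConfigType = UnsetProfileConfigSketch

    def __init__(self, args):
        self.config = self.ConfigType.from_args(args)


class CleanTaskSketch(BaseTaskSketch):
    def run(self):
        for path in self.config.clean_targets:
            print(f"Checking {path}/*")


if __name__ == "__main__":
    CleanTaskSketch(args=None).run()  # runs even with no profiles.yml present
```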
gh_patches_debug_39629
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-7411
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in `skimage.measure.find_contours`
### Description:
There is a typo in the `skimage.measure.find_contours` docstring:
Uses the “marching squares” method to compute **a the** iso-valued contours
### Way to reproduce:
_No response_
### Version information:
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/measure/_find_contours.py`
Content:
```
1 import numpy as np
2
3 from ._find_contours_cy import _get_contour_segments
4
5 from collections import deque
6
7 _param_options = ('high', 'low')
8
9
10 def find_contours(
11 image, level=None, fully_connected='low', positive_orientation='low', *, mask=None
12 ):
13 """Find iso-valued contours in a 2D array for a given level value.
14
15 Uses the "marching squares" method to compute a the iso-valued contours of
16 the input 2D array for a particular level value. Array values are linearly
17 interpolated to provide better precision for the output contours.
18
19 Parameters
20 ----------
21 image : (M, N) ndarray of double
22 Input image in which to find contours.
23 level : float, optional
24 Value along which to find contours in the array. By default, the level
25 is set to (max(image) + min(image)) / 2
26
27 .. versionchanged:: 0.18
28 This parameter is now optional.
29 fully_connected : str, {'low', 'high'}
30 Indicates whether array elements below the given level value are to be
31 considered fully-connected (and hence elements above the value will
32 only be face connected), or vice-versa. (See notes below for details.)
33 positive_orientation : str, {'low', 'high'}
34 Indicates whether the output contours will produce positively-oriented
35 polygons around islands of low- or high-valued elements. If 'low' then
36 contours will wind counter- clockwise around elements below the
37 iso-value. Alternately, this means that low-valued elements are always
38 on the left of the contour. (See below for details.)
39 mask : (M, N) ndarray of bool or None
40 A boolean mask, True where we want to draw contours.
41 Note that NaN values are always excluded from the considered region
42 (``mask`` is set to ``False`` wherever ``array`` is ``NaN``).
43
44 Returns
45 -------
46 contours : list of (K, 2) ndarrays
47 Each contour is a ndarray of ``(row, column)`` coordinates along the contour.
48
49 See Also
50 --------
51 skimage.measure.marching_cubes
52
53 Notes
54 -----
55 The marching squares algorithm is a special case of the marching cubes
56 algorithm [1]_. A simple explanation is available here:
57
58 http://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html
59
60 There is a single ambiguous case in the marching squares algorithm: when
61 a given ``2 x 2``-element square has two high-valued and two low-valued
62 elements, each pair diagonally adjacent. (Where high- and low-valued is
63 with respect to the contour value sought.) In this case, either the
64 high-valued elements can be 'connected together' via a thin isthmus that
65 separates the low-valued elements, or vice-versa. When elements are
66 connected together across a diagonal, they are considered 'fully
67 connected' (also known as 'face+vertex-connected' or '8-connected'). Only
68 high-valued or low-valued elements can be fully-connected, the other set
69 will be considered as 'face-connected' or '4-connected'. By default,
70 low-valued elements are considered fully-connected; this can be altered
71 with the 'fully_connected' parameter.
72
73 Output contours are not guaranteed to be closed: contours which intersect
74 the array edge or a masked-off region (either where mask is False or where
75 array is NaN) will be left open. All other contours will be closed. (The
76 closed-ness of a contours can be tested by checking whether the beginning
77 point is the same as the end point.)
78
79 Contours are oriented. By default, array values lower than the contour
80 value are to the left of the contour and values greater than the contour
81 value are to the right. This means that contours will wind
82 counter-clockwise (i.e. in 'positive orientation') around islands of
83 low-valued pixels. This behavior can be altered with the
84 'positive_orientation' parameter.
85
86 The order of the contours in the output list is determined by the position
87 of the smallest ``x,y`` (in lexicographical order) coordinate in the
88 contour. This is a side-effect of how the input array is traversed, but
89 can be relied upon.
90
91 .. warning::
92
93 Array coordinates/values are assumed to refer to the *center* of the
94 array element. Take a simple example input: ``[0, 1]``. The interpolated
95 position of 0.5 in this array is midway between the 0-element (at
96 ``x=0``) and the 1-element (at ``x=1``), and thus would fall at
97 ``x=0.5``.
98
99 This means that to find reasonable contours, it is best to find contours
100 midway between the expected "light" and "dark" values. In particular,
101 given a binarized array, *do not* choose to find contours at the low or
102 high value of the array. This will often yield degenerate contours,
103 especially around structures that are a single array element wide. Instead
104 choose a middle value, as above.
105
106 References
107 ----------
108 .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High
109 Resolution 3D Surface Construction Algorithm. Computer Graphics
110 (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).
111 :DOI:`10.1145/37401.37422`
112
113 Examples
114 --------
115 >>> a = np.zeros((3, 3))
116 >>> a[0, 0] = 1
117 >>> a
118 array([[1., 0., 0.],
119 [0., 0., 0.],
120 [0., 0., 0.]])
121 >>> find_contours(a, 0.5)
122 [array([[0. , 0.5],
123 [0.5, 0. ]])]
124 """
125 if fully_connected not in _param_options:
126 raise ValueError(
127 'Parameters "fully_connected" must be either ' '"high" or "low".'
128 )
129 if positive_orientation not in _param_options:
130 raise ValueError(
131 'Parameters "positive_orientation" must be either ' '"high" or "low".'
132 )
133 if image.shape[0] < 2 or image.shape[1] < 2:
134 raise ValueError("Input array must be at least 2x2.")
135 if image.ndim != 2:
136 raise ValueError('Only 2D arrays are supported.')
137 if mask is not None:
138 if mask.shape != image.shape:
139 raise ValueError('Parameters "array" and "mask"' ' must have same shape.')
140 if not np.can_cast(mask.dtype, bool, casting='safe'):
141 raise TypeError('Parameter "mask" must be a binary array.')
142 mask = mask.astype(np.uint8, copy=False)
143 if level is None:
144 level = (np.nanmin(image) + np.nanmax(image)) / 2.0
145
146 segments = _get_contour_segments(
147 image.astype(np.float64), float(level), fully_connected == 'high', mask=mask
148 )
149 contours = _assemble_contours(segments)
150 if positive_orientation == 'high':
151 contours = [c[::-1] for c in contours]
152 return contours
153
154
155 def _assemble_contours(segments):
156 current_index = 0
157 contours = {}
158 starts = {}
159 ends = {}
160 for from_point, to_point in segments:
161 # Ignore degenerate segments.
162 # This happens when (and only when) one vertex of the square is
163 # exactly the contour level, and the rest are above or below.
164 # This degenerate vertex will be picked up later by neighboring
165 # squares.
166 if from_point == to_point:
167 continue
168
169 tail, tail_num = starts.pop(to_point, (None, None))
170 head, head_num = ends.pop(from_point, (None, None))
171
172 if tail is not None and head is not None:
173 # We need to connect these two contours.
174 if tail is head:
175 # We need to closed a contour: add the end point
176 head.append(to_point)
177 else: # tail is not head
178 # We need to join two distinct contours.
179 # We want to keep the first contour segment created, so that
180 # the final contours are ordered left->right, top->bottom.
181 if tail_num > head_num:
182 # tail was created second. Append tail to head.
183 head.extend(tail)
184 # Remove tail from the detected contours
185 contours.pop(tail_num, None)
186 # Update starts and ends
187 starts[head[0]] = (head, head_num)
188 ends[head[-1]] = (head, head_num)
189 else: # tail_num <= head_num
190 # head was created second. Prepend head to tail.
191 tail.extendleft(reversed(head))
192 # Remove head from the detected contours
193 starts.pop(head[0], None) # head[0] can be == to_point!
194 contours.pop(head_num, None)
195 # Update starts and ends
196 starts[tail[0]] = (tail, tail_num)
197 ends[tail[-1]] = (tail, tail_num)
198 elif tail is None and head is None:
199 # We need to add a new contour
200 new_contour = deque((from_point, to_point))
201 contours[current_index] = new_contour
202 starts[from_point] = (new_contour, current_index)
203 ends[to_point] = (new_contour, current_index)
204 current_index += 1
205 elif head is None: # tail is not None
206 # tail first element is to_point: the new segment should be
207 # prepended.
208 tail.appendleft(from_point)
209 # Update starts
210 starts[from_point] = (tail, tail_num)
211 else: # tail is None and head is not None:
212 # head last element is from_point: the new segment should be
213 # appended
214 head.append(to_point)
215 # Update ends
216 ends[to_point] = (head, head_num)
217
218 return [np.array(contour) for _, contour in sorted(contours.items())]
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/skimage/measure/_find_contours.py b/skimage/measure/_find_contours.py
--- a/skimage/measure/_find_contours.py
+++ b/skimage/measure/_find_contours.py
@@ -12,7 +12,7 @@
):
"""Find iso-valued contours in a 2D array for a given level value.
- Uses the "marching squares" method to compute a the iso-valued contours of
+ Uses the "marching squares" method to compute the iso-valued contours of
the input 2D array for a particular level value. Array values are linearly
interpolated to provide better precision for the output contours.
@@ -33,7 +33,7 @@
positive_orientation : str, {'low', 'high'}
Indicates whether the output contours will produce positively-oriented
polygons around islands of low- or high-valued elements. If 'low' then
- contours will wind counter- clockwise around elements below the
+ contours will wind counter-clockwise around elements below the
iso-value. Alternately, this means that low-valued elements are always
on the left of the contour. (See below for details.)
mask : (M, N) ndarray of bool or None
@@ -55,7 +55,7 @@
The marching squares algorithm is a special case of the marching cubes
algorithm [1]_. A simple explanation is available here:
- http://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html
+ https://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html
There is a single ambiguous case in the marching squares algorithm: when
a given ``2 x 2``-element square has two high-valued and two low-valued
@@ -85,7 +85,7 @@
The order of the contours in the output list is determined by the position
of the smallest ``x,y`` (in lexicographical order) coordinate in the
- contour. This is a side-effect of how the input array is traversed, but
+ contour. This is a side effect of how the input array is traversed, but
can be relied upon.
.. warning::
@@ -100,7 +100,7 @@
midway between the expected "light" and "dark" values. In particular,
given a binarized array, *do not* choose to find contours at the low or
high value of the array. This will often yield degenerate contours,
- especially around structures that are a single array element wide. Instead
+ especially around structures that are a single array element wide. Instead,
choose a middle value, as above.
References
|
{"golden_diff": "diff --git a/skimage/measure/_find_contours.py b/skimage/measure/_find_contours.py\n--- a/skimage/measure/_find_contours.py\n+++ b/skimage/measure/_find_contours.py\n@@ -12,7 +12,7 @@\n ):\n \"\"\"Find iso-valued contours in a 2D array for a given level value.\n \n- Uses the \"marching squares\" method to compute a the iso-valued contours of\n+ Uses the \"marching squares\" method to compute the iso-valued contours of\n the input 2D array for a particular level value. Array values are linearly\n interpolated to provide better precision for the output contours.\n \n@@ -33,7 +33,7 @@\n positive_orientation : str, {'low', 'high'}\n Indicates whether the output contours will produce positively-oriented\n polygons around islands of low- or high-valued elements. If 'low' then\n- contours will wind counter- clockwise around elements below the\n+ contours will wind counter-clockwise around elements below the\n iso-value. Alternately, this means that low-valued elements are always\n on the left of the contour. (See below for details.)\n mask : (M, N) ndarray of bool or None\n@@ -55,7 +55,7 @@\n The marching squares algorithm is a special case of the marching cubes\n algorithm [1]_. A simple explanation is available here:\n \n- http://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html\n+ https://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html\n \n There is a single ambiguous case in the marching squares algorithm: when\n a given ``2 x 2``-element square has two high-valued and two low-valued\n@@ -85,7 +85,7 @@\n \n The order of the contours in the output list is determined by the position\n of the smallest ``x,y`` (in lexicographical order) coordinate in the\n- contour. This is a side-effect of how the input array is traversed, but\n+ contour. This is a side effect of how the input array is traversed, but\n can be relied upon.\n \n .. warning::\n@@ -100,7 +100,7 @@\n midway between the expected \"light\" and \"dark\" values. In particular,\n given a binarized array, *do not* choose to find contours at the low or\n high value of the array. This will often yield degenerate contours,\n- especially around structures that are a single array element wide. Instead\n+ especially around structures that are a single array element wide. Instead,\n choose a middle value, as above.\n \n References\n", "issue": "Typo in `skimage.measure.find_contours`\n### Description:\n\nThere is a typo in the `skimage.measure.find_contours` docstring: \r\nUses the \u201cmarching squares\u201d method to compute **a the** iso-valued contours\n\n### Way to reproduce:\n\n_No response_\n\n### Version information:\n\n_No response_\n", "before_files": [{"content": "import numpy as np\n\nfrom ._find_contours_cy import _get_contour_segments\n\nfrom collections import deque\n\n_param_options = ('high', 'low')\n\n\ndef find_contours(\n image, level=None, fully_connected='low', positive_orientation='low', *, mask=None\n):\n \"\"\"Find iso-valued contours in a 2D array for a given level value.\n\n Uses the \"marching squares\" method to compute a the iso-valued contours of\n the input 2D array for a particular level value. Array values are linearly\n interpolated to provide better precision for the output contours.\n\n Parameters\n ----------\n image : (M, N) ndarray of double\n Input image in which to find contours.\n level : float, optional\n Value along which to find contours in the array. By default, the level\n is set to (max(image) + min(image)) / 2\n\n .. 
versionchanged:: 0.18\n This parameter is now optional.\n fully_connected : str, {'low', 'high'}\n Indicates whether array elements below the given level value are to be\n considered fully-connected (and hence elements above the value will\n only be face connected), or vice-versa. (See notes below for details.)\n positive_orientation : str, {'low', 'high'}\n Indicates whether the output contours will produce positively-oriented\n polygons around islands of low- or high-valued elements. If 'low' then\n contours will wind counter- clockwise around elements below the\n iso-value. Alternately, this means that low-valued elements are always\n on the left of the contour. (See below for details.)\n mask : (M, N) ndarray of bool or None\n A boolean mask, True where we want to draw contours.\n Note that NaN values are always excluded from the considered region\n (``mask`` is set to ``False`` wherever ``array`` is ``NaN``).\n\n Returns\n -------\n contours : list of (K, 2) ndarrays\n Each contour is a ndarray of ``(row, column)`` coordinates along the contour.\n\n See Also\n --------\n skimage.measure.marching_cubes\n\n Notes\n -----\n The marching squares algorithm is a special case of the marching cubes\n algorithm [1]_. A simple explanation is available here:\n\n http://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html\n\n There is a single ambiguous case in the marching squares algorithm: when\n a given ``2 x 2``-element square has two high-valued and two low-valued\n elements, each pair diagonally adjacent. (Where high- and low-valued is\n with respect to the contour value sought.) In this case, either the\n high-valued elements can be 'connected together' via a thin isthmus that\n separates the low-valued elements, or vice-versa. When elements are\n connected together across a diagonal, they are considered 'fully\n connected' (also known as 'face+vertex-connected' or '8-connected'). Only\n high-valued or low-valued elements can be fully-connected, the other set\n will be considered as 'face-connected' or '4-connected'. By default,\n low-valued elements are considered fully-connected; this can be altered\n with the 'fully_connected' parameter.\n\n Output contours are not guaranteed to be closed: contours which intersect\n the array edge or a masked-off region (either where mask is False or where\n array is NaN) will be left open. All other contours will be closed. (The\n closed-ness of a contours can be tested by checking whether the beginning\n point is the same as the end point.)\n\n Contours are oriented. By default, array values lower than the contour\n value are to the left of the contour and values greater than the contour\n value are to the right. This means that contours will wind\n counter-clockwise (i.e. in 'positive orientation') around islands of\n low-valued pixels. This behavior can be altered with the\n 'positive_orientation' parameter.\n\n The order of the contours in the output list is determined by the position\n of the smallest ``x,y`` (in lexicographical order) coordinate in the\n contour. This is a side-effect of how the input array is traversed, but\n can be relied upon.\n\n .. warning::\n\n Array coordinates/values are assumed to refer to the *center* of the\n array element. Take a simple example input: ``[0, 1]``. 
The interpolated\n position of 0.5 in this array is midway between the 0-element (at\n ``x=0``) and the 1-element (at ``x=1``), and thus would fall at\n ``x=0.5``.\n\n This means that to find reasonable contours, it is best to find contours\n midway between the expected \"light\" and \"dark\" values. In particular,\n given a binarized array, *do not* choose to find contours at the low or\n high value of the array. This will often yield degenerate contours,\n especially around structures that are a single array element wide. Instead\n choose a middle value, as above.\n\n References\n ----------\n .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High\n Resolution 3D Surface Construction Algorithm. Computer Graphics\n (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).\n :DOI:`10.1145/37401.37422`\n\n Examples\n --------\n >>> a = np.zeros((3, 3))\n >>> a[0, 0] = 1\n >>> a\n array([[1., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]])\n >>> find_contours(a, 0.5)\n [array([[0. , 0.5],\n [0.5, 0. ]])]\n \"\"\"\n if fully_connected not in _param_options:\n raise ValueError(\n 'Parameters \"fully_connected\" must be either ' '\"high\" or \"low\".'\n )\n if positive_orientation not in _param_options:\n raise ValueError(\n 'Parameters \"positive_orientation\" must be either ' '\"high\" or \"low\".'\n )\n if image.shape[0] < 2 or image.shape[1] < 2:\n raise ValueError(\"Input array must be at least 2x2.\")\n if image.ndim != 2:\n raise ValueError('Only 2D arrays are supported.')\n if mask is not None:\n if mask.shape != image.shape:\n raise ValueError('Parameters \"array\" and \"mask\"' ' must have same shape.')\n if not np.can_cast(mask.dtype, bool, casting='safe'):\n raise TypeError('Parameter \"mask\" must be a binary array.')\n mask = mask.astype(np.uint8, copy=False)\n if level is None:\n level = (np.nanmin(image) + np.nanmax(image)) / 2.0\n\n segments = _get_contour_segments(\n image.astype(np.float64), float(level), fully_connected == 'high', mask=mask\n )\n contours = _assemble_contours(segments)\n if positive_orientation == 'high':\n contours = [c[::-1] for c in contours]\n return contours\n\n\ndef _assemble_contours(segments):\n current_index = 0\n contours = {}\n starts = {}\n ends = {}\n for from_point, to_point in segments:\n # Ignore degenerate segments.\n # This happens when (and only when) one vertex of the square is\n # exactly the contour level, and the rest are above or below.\n # This degenerate vertex will be picked up later by neighboring\n # squares.\n if from_point == to_point:\n continue\n\n tail, tail_num = starts.pop(to_point, (None, None))\n head, head_num = ends.pop(from_point, (None, None))\n\n if tail is not None and head is not None:\n # We need to connect these two contours.\n if tail is head:\n # We need to closed a contour: add the end point\n head.append(to_point)\n else: # tail is not head\n # We need to join two distinct contours.\n # We want to keep the first contour segment created, so that\n # the final contours are ordered left->right, top->bottom.\n if tail_num > head_num:\n # tail was created second. Append tail to head.\n head.extend(tail)\n # Remove tail from the detected contours\n contours.pop(tail_num, None)\n # Update starts and ends\n starts[head[0]] = (head, head_num)\n ends[head[-1]] = (head, head_num)\n else: # tail_num <= head_num\n # head was created second. 
Prepend head to tail.\n tail.extendleft(reversed(head))\n # Remove head from the detected contours\n starts.pop(head[0], None) # head[0] can be == to_point!\n contours.pop(head_num, None)\n # Update starts and ends\n starts[tail[0]] = (tail, tail_num)\n ends[tail[-1]] = (tail, tail_num)\n elif tail is None and head is None:\n # We need to add a new contour\n new_contour = deque((from_point, to_point))\n contours[current_index] = new_contour\n starts[from_point] = (new_contour, current_index)\n ends[to_point] = (new_contour, current_index)\n current_index += 1\n elif head is None: # tail is not None\n # tail first element is to_point: the new segment should be\n # prepended.\n tail.appendleft(from_point)\n # Update starts\n starts[from_point] = (tail, tail_num)\n else: # tail is None and head is not None:\n # head last element is from_point: the new segment should be\n # appended\n head.append(to_point)\n # Update ends\n ends[to_point] = (head, head_num)\n\n return [np.array(contour) for _, contour in sorted(contours.items())]\n", "path": "skimage/measure/_find_contours.py"}], "after_files": [{"content": "import numpy as np\n\nfrom ._find_contours_cy import _get_contour_segments\n\nfrom collections import deque\n\n_param_options = ('high', 'low')\n\n\ndef find_contours(\n image, level=None, fully_connected='low', positive_orientation='low', *, mask=None\n):\n \"\"\"Find iso-valued contours in a 2D array for a given level value.\n\n Uses the \"marching squares\" method to compute the iso-valued contours of\n the input 2D array for a particular level value. Array values are linearly\n interpolated to provide better precision for the output contours.\n\n Parameters\n ----------\n image : (M, N) ndarray of double\n Input image in which to find contours.\n level : float, optional\n Value along which to find contours in the array. By default, the level\n is set to (max(image) + min(image)) / 2\n\n .. versionchanged:: 0.18\n This parameter is now optional.\n fully_connected : str, {'low', 'high'}\n Indicates whether array elements below the given level value are to be\n considered fully-connected (and hence elements above the value will\n only be face connected), or vice-versa. (See notes below for details.)\n positive_orientation : str, {'low', 'high'}\n Indicates whether the output contours will produce positively-oriented\n polygons around islands of low- or high-valued elements. If 'low' then\n contours will wind counter-clockwise around elements below the\n iso-value. Alternately, this means that low-valued elements are always\n on the left of the contour. (See below for details.)\n mask : (M, N) ndarray of bool or None\n A boolean mask, True where we want to draw contours.\n Note that NaN values are always excluded from the considered region\n (``mask`` is set to ``False`` wherever ``array`` is ``NaN``).\n\n Returns\n -------\n contours : list of (K, 2) ndarrays\n Each contour is a ndarray of ``(row, column)`` coordinates along the contour.\n\n See Also\n --------\n skimage.measure.marching_cubes\n\n Notes\n -----\n The marching squares algorithm is a special case of the marching cubes\n algorithm [1]_. A simple explanation is available here:\n\n https://users.polytech.unice.fr/~lingrand/MarchingCubes/algo.html\n\n There is a single ambiguous case in the marching squares algorithm: when\n a given ``2 x 2``-element square has two high-valued and two low-valued\n elements, each pair diagonally adjacent. (Where high- and low-valued is\n with respect to the contour value sought.) 
In this case, either the\n high-valued elements can be 'connected together' via a thin isthmus that\n separates the low-valued elements, or vice-versa. When elements are\n connected together across a diagonal, they are considered 'fully\n connected' (also known as 'face+vertex-connected' or '8-connected'). Only\n high-valued or low-valued elements can be fully-connected, the other set\n will be considered as 'face-connected' or '4-connected'. By default,\n low-valued elements are considered fully-connected; this can be altered\n with the 'fully_connected' parameter.\n\n Output contours are not guaranteed to be closed: contours which intersect\n the array edge or a masked-off region (either where mask is False or where\n array is NaN) will be left open. All other contours will be closed. (The\n closed-ness of a contours can be tested by checking whether the beginning\n point is the same as the end point.)\n\n Contours are oriented. By default, array values lower than the contour\n value are to the left of the contour and values greater than the contour\n value are to the right. This means that contours will wind\n counter-clockwise (i.e. in 'positive orientation') around islands of\n low-valued pixels. This behavior can be altered with the\n 'positive_orientation' parameter.\n\n The order of the contours in the output list is determined by the position\n of the smallest ``x,y`` (in lexicographical order) coordinate in the\n contour. This is a side effect of how the input array is traversed, but\n can be relied upon.\n\n .. warning::\n\n Array coordinates/values are assumed to refer to the *center* of the\n array element. Take a simple example input: ``[0, 1]``. The interpolated\n position of 0.5 in this array is midway between the 0-element (at\n ``x=0``) and the 1-element (at ``x=1``), and thus would fall at\n ``x=0.5``.\n\n This means that to find reasonable contours, it is best to find contours\n midway between the expected \"light\" and \"dark\" values. In particular,\n given a binarized array, *do not* choose to find contours at the low or\n high value of the array. This will often yield degenerate contours,\n especially around structures that are a single array element wide. Instead,\n choose a middle value, as above.\n\n References\n ----------\n .. [1] Lorensen, William and Harvey E. Cline. Marching Cubes: A High\n Resolution 3D Surface Construction Algorithm. Computer Graphics\n (SIGGRAPH 87 Proceedings) 21(4) July 1987, p. 163-170).\n :DOI:`10.1145/37401.37422`\n\n Examples\n --------\n >>> a = np.zeros((3, 3))\n >>> a[0, 0] = 1\n >>> a\n array([[1., 0., 0.],\n [0., 0., 0.],\n [0., 0., 0.]])\n >>> find_contours(a, 0.5)\n [array([[0. , 0.5],\n [0.5, 0. 
]])]\n \"\"\"\n if fully_connected not in _param_options:\n raise ValueError(\n 'Parameters \"fully_connected\" must be either ' '\"high\" or \"low\".'\n )\n if positive_orientation not in _param_options:\n raise ValueError(\n 'Parameters \"positive_orientation\" must be either ' '\"high\" or \"low\".'\n )\n if image.shape[0] < 2 or image.shape[1] < 2:\n raise ValueError(\"Input array must be at least 2x2.\")\n if image.ndim != 2:\n raise ValueError('Only 2D arrays are supported.')\n if mask is not None:\n if mask.shape != image.shape:\n raise ValueError('Parameters \"array\" and \"mask\"' ' must have same shape.')\n if not np.can_cast(mask.dtype, bool, casting='safe'):\n raise TypeError('Parameter \"mask\" must be a binary array.')\n mask = mask.astype(np.uint8, copy=False)\n if level is None:\n level = (np.nanmin(image) + np.nanmax(image)) / 2.0\n\n segments = _get_contour_segments(\n image.astype(np.float64), float(level), fully_connected == 'high', mask=mask\n )\n contours = _assemble_contours(segments)\n if positive_orientation == 'high':\n contours = [c[::-1] for c in contours]\n return contours\n\n\ndef _assemble_contours(segments):\n current_index = 0\n contours = {}\n starts = {}\n ends = {}\n for from_point, to_point in segments:\n # Ignore degenerate segments.\n # This happens when (and only when) one vertex of the square is\n # exactly the contour level, and the rest are above or below.\n # This degenerate vertex will be picked up later by neighboring\n # squares.\n if from_point == to_point:\n continue\n\n tail, tail_num = starts.pop(to_point, (None, None))\n head, head_num = ends.pop(from_point, (None, None))\n\n if tail is not None and head is not None:\n # We need to connect these two contours.\n if tail is head:\n # We need to closed a contour: add the end point\n head.append(to_point)\n else: # tail is not head\n # We need to join two distinct contours.\n # We want to keep the first contour segment created, so that\n # the final contours are ordered left->right, top->bottom.\n if tail_num > head_num:\n # tail was created second. Append tail to head.\n head.extend(tail)\n # Remove tail from the detected contours\n contours.pop(tail_num, None)\n # Update starts and ends\n starts[head[0]] = (head, head_num)\n ends[head[-1]] = (head, head_num)\n else: # tail_num <= head_num\n # head was created second. Prepend head to tail.\n tail.extendleft(reversed(head))\n # Remove head from the detected contours\n starts.pop(head[0], None) # head[0] can be == to_point!\n contours.pop(head_num, None)\n # Update starts and ends\n starts[tail[0]] = (tail, tail_num)\n ends[tail[-1]] = (tail, tail_num)\n elif tail is None and head is None:\n # We need to add a new contour\n new_contour = deque((from_point, to_point))\n contours[current_index] = new_contour\n starts[from_point] = (new_contour, current_index)\n ends[to_point] = (new_contour, current_index)\n current_index += 1\n elif head is None: # tail is not None\n # tail first element is to_point: the new segment should be\n # prepended.\n tail.appendleft(from_point)\n # Update starts\n starts[from_point] = (tail, tail_num)\n else: # tail is None and head is not None:\n # head last element is from_point: the new segment should be\n # appended\n head.append(to_point)\n # Update ends\n ends[to_point] = (head, head_num)\n\n return [np.array(contour) for _, contour in sorted(contours.items())]\n", "path": "skimage/measure/_find_contours.py"}]}
| 3,161 | 612 |
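A brief aside on the scikit-image record above: the patch only touches wording, but the docstring's warning about choosing a contour level midway between the "light" and "dark" values is easy to miss. The snippet below is a minimal usage check of the public `skimage.measure.find_contours` API described in that docstring; it assumes `numpy` and `scikit-image` are installed and is not part of the patch.

```python
# Minimal usage check for find_contours; assumes numpy and scikit-image
# are installed. Not part of the patch above.
import numpy as np
from skimage.measure import find_contours

# A bright 3x3 block on a dark background.
image = np.zeros((8, 8), dtype=float)
image[2:5, 2:5] = 1.0

# Per the docstring's warning, pick a level midway between the dark (0.0)
# and light (1.0) values rather than 0.0 or 1.0 themselves.
contours = find_contours(image, level=0.5)

for contour in contours:
    # Each contour is a (K, 2) array of (row, column) coordinates;
    # a closed contour starts and ends at the same point.
    print(contour.shape, np.allclose(contour[0], contour[-1]))
```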
gh_patches_debug_19311
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-3617
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No registration message no longer accepts links
### Describe the bug
Since #2458 the event registration status (specifically no_registration_message) doesn't support URLs any more on the website (app is unaltered I think). I aslo think the formatting that happens now might be a bit risky.
### How to reproduce
Make an event with URL in the no_registration message and view it.
### Expected behaviour
A nice link.
### Screenshots
<img width="627" alt="afbeelding" src="https://github.com/svthalia/concrexit/assets/41264528/66d61a15-0ceb-43bf-aa5a-a6536a02739d">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/admin/event.py`
Content:
```
1 """Registers admin interfaces for the event model."""
2
3 from django.contrib import admin, messages
4 from django.template.defaultfilters import date as _date
5 from django.urls import path, reverse
6 from django.utils import timezone
7 from django.utils.html import format_html
8 from django.utils.translation import gettext_lazy as _
9
10 from events import emails, models, services
11 from events.admin.filters import LectureYearFilter
12 from events.admin.forms import EventAdminForm, RegistrationInformationFieldForm
13 from events.admin.inlines import (
14 PizzaEventInline,
15 PromotionRequestInline,
16 RegistrationInformationFieldInline,
17 )
18 from events.admin.views import (
19 EventAdminDetails,
20 EventMarkPresentQR,
21 EventRegistrationsExport,
22 )
23 from utils.admin import DoNextModelAdmin
24
25
26 @admin.register(models.Event)
27 class EventAdmin(DoNextModelAdmin):
28 """Manage the events."""
29
30 form = EventAdminForm
31
32 inlines = (
33 RegistrationInformationFieldInline,
34 PizzaEventInline,
35 PromotionRequestInline,
36 )
37
38 list_display = (
39 "overview_link",
40 "event_date",
41 "registration_date",
42 "num_participants",
43 "get_organisers",
44 "category",
45 "published",
46 "edit_link",
47 )
48 list_display_links = ("edit_link",)
49 list_filter = (LectureYearFilter, "start", "published", "category")
50 actions = ("make_published", "make_unpublished")
51 date_hierarchy = "start"
52 search_fields = ("title", "description")
53 prepopulated_fields = {
54 "map_location": ("location",),
55 }
56
57 filter_horizontal = ("documents", "organisers")
58
59 fieldsets = (
60 (
61 _("General"),
62 {
63 "fields": (
64 "title",
65 "slug",
66 "published",
67 "organisers",
68 )
69 },
70 ),
71 (
72 _("Detail"),
73 {
74 "fields": (
75 "category",
76 "start",
77 "end",
78 "description",
79 "caption",
80 "location",
81 "map_location",
82 "show_map_location",
83 ),
84 "classes": ("collapse", "start-open"),
85 },
86 ),
87 (
88 _("Registrations"),
89 {
90 "fields": (
91 "price",
92 "fine",
93 "tpay_allowed",
94 "max_participants",
95 "registration_without_membership",
96 "registration_start",
97 "registration_end",
98 "cancel_deadline",
99 "send_cancel_email",
100 "optional_registrations",
101 "no_registration_message",
102 ),
103 "classes": ("collapse",),
104 },
105 ),
106 (
107 _("Extra"),
108 {"fields": ("documents", "shift"), "classes": ("collapse",)},
109 ),
110 )
111
112 def get_queryset(self, request):
113 queryset = (
114 super()
115 .get_queryset(request)
116 .select_properties("participant_count")
117 .prefetch_related("organisers")
118 )
119 if not (
120 request.user.has_perm("events.override_organiser")
121 or request.user.has_perm("events.view_unpublished")
122 ):
123 queryset_published = queryset.filter(published=True)
124 queryset_unpublished = queryset.filter(
125 published=False,
126 organisers__in=list(
127 request.member.get_member_groups().values_list("id", flat=True)
128 ),
129 )
130 queryset = queryset_published | queryset_unpublished
131 return queryset
132
133 def get_form(self, request, obj=None, change=False, **kwargs):
134 form = super().get_form(request, obj, change, **kwargs)
135 form.request = request
136 return form
137
138 def overview_link(self, obj):
139 return format_html(
140 '<a href="{link}">{title}</a>',
141 link=reverse("admin:events_event_details", kwargs={"pk": obj.pk}),
142 title=obj.title,
143 )
144
145 def has_delete_permission(self, request, obj=None):
146 """Only allow deleting an event if the user is an organiser."""
147 if obj is not None and not services.is_organiser(request.member, obj):
148 return False
149 return super().has_delete_permission(request, obj)
150
151 def has_change_permission(self, request, obj=None):
152 """Only allow access to the change form if the user is an organiser."""
153 if obj is not None and not services.is_organiser(request.member, obj):
154 return False
155 return super().has_change_permission(request, obj)
156
157 def event_date(self, obj):
158 event_date = timezone.make_naive(obj.start)
159 return _date(event_date, "l d b Y, G:i")
160
161 event_date.short_description = _("Event Date")
162 event_date.admin_order_field = "start"
163
164 def registration_date(self, obj):
165 if obj.registration_start is not None:
166 start_date = timezone.make_naive(obj.registration_start)
167 else:
168 start_date = obj.registration_start
169
170 return _date(start_date, "l d b Y, G:i")
171
172 registration_date.short_description = _("Registration Start")
173 registration_date.admin_order_field = "registration_start"
174
175 def edit_link(self, obj):
176 return _("Edit")
177
178 edit_link.short_description = ""
179
180 def num_participants(self, obj):
181 """Pretty-print the number of participants."""
182 num = obj.participant_count # prefetched aggregateproperty
183 if not obj.max_participants:
184 return f"{num}/∞"
185 return f"{num}/{obj.max_participants}"
186
187 num_participants.short_description = _("Number of participants")
188
189 def get_organisers(self, obj):
190 return ", ".join(str(o) for o in obj.organisers.all())
191
192 get_organisers.short_description = _("Organisers")
193
194 def make_published(self, request, queryset):
195 """Change the status of the event to published."""
196 self._change_published(request, queryset, True)
197
198 make_published.short_description = _("Publish selected events")
199
200 def make_unpublished(self, request, queryset):
201 """Change the status of the event to unpublished."""
202 self._change_published(request, queryset, False)
203
204 make_unpublished.short_description = _("Unpublish selected events")
205
206 @staticmethod
207 def _change_published(request, queryset, published):
208 if not request.user.is_superuser:
209 queryset = queryset.filter(
210 organisers__in=request.member.get_member_groups()
211 )
212 queryset.update(published=published)
213
214 def save_formset(self, request, form, formset, change):
215 """Save formsets with their order."""
216 formset.save()
217
218 informationfield_forms = (
219 x
220 for x in formset.forms
221 if isinstance(x, RegistrationInformationFieldForm)
222 and "DELETE" not in x.changed_data
223 )
224 form.instance.set_registrationinformationfield_order(
225 [
226 f.instance.pk
227 for f in sorted(
228 informationfield_forms,
229 key=lambda x: (x.cleaned_data["order"], x.instance.pk),
230 )
231 ]
232 )
233 form.instance.save()
234
235 def save_model(self, request, obj, form, change):
236 if change and "max_participants" in form.changed_data:
237 prev = self.model.objects.get(id=obj.id)
238 prev_limit = prev.max_participants
239 self_limit = obj.max_participants
240 if prev_limit is None:
241 prev_limit = prev.participant_count
242 if self_limit is None:
243 self_limit = obj.participant_count
244
245 if prev_limit < self_limit and prev_limit < obj.participant_count:
246 diff = self_limit - prev_limit
247 joiners = prev.queue[:diff]
248 for registration in joiners:
249 emails.notify_waiting(obj, registration)
250 messages.info(
251 request,
252 "The maximum number of participants was increased. Any members that moved from the waiting list to the participants list have been notified.",
253 )
254 elif self_limit < prev_limit and self_limit < obj.participant_count:
255 diff = self_limit - prev_limit
256 leavers = prev.registrations[self_limit:]
257 address = map(lambda r: r.email, leavers)
258 link = "mailto:?bcc=" + ",".join(address)
259 messages.warning(
260 request,
261 format_html(
262 "The maximum number of participants was decreased and some members moved to the waiting list. <a href='{}' style='text-decoration: underline;'>Use this link to send them an email.</a>",
263 link,
264 ),
265 )
266 super().save_model(request, obj, form, change)
267
268 def get_actions(self, request):
269 actions = super().get_actions(request)
270 if "delete_selected" in actions:
271 del actions["delete_selected"]
272 return actions
273
274 def get_formsets_with_inlines(self, request, obj=None):
275 for inline in self.get_inline_instances(request, obj):
276 if self.has_change_permission(request, obj) or obj is None:
277 yield inline.get_formset(request, obj), inline
278
279 def get_urls(self):
280 urls = super().get_urls()
281 custom_urls = [
282 path(
283 "<int:pk>/details/",
284 self.admin_site.admin_view(EventAdminDetails.as_view()),
285 name="events_event_details",
286 ),
287 path(
288 "<int:pk>/export/",
289 self.admin_site.admin_view(EventRegistrationsExport.as_view()),
290 name="events_event_export",
291 ),
292 path(
293 "<int:pk>/mark-present-qr/",
294 self.admin_site.admin_view(EventMarkPresentQR.as_view()),
295 name="events_event_mark_present_qr",
296 ),
297 ]
298 return custom_urls + urls
299
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/events/admin/event.py b/website/events/admin/event.py
--- a/website/events/admin/event.py
+++ b/website/events/admin/event.py
@@ -98,7 +98,26 @@
"cancel_deadline",
"send_cancel_email",
"optional_registrations",
+ ),
+ "classes": ("collapse",),
+ },
+ ),
+ (
+ _("Registration status messages"),
+ {
+ "fields": (
"no_registration_message",
+ "registration_msg_optional",
+ "registration_msg_optional_registered",
+ "registration_msg_registered",
+ "registration_msg_open",
+ "registration_msg_full",
+ "registration_msg_waitinglist",
+ "registration_msg_will_open",
+ "registration_msg_expired",
+ "registration_msg_cancelled",
+ "registration_msg_cancelled_late",
+ "registration_msg_cancelled_final",
),
"classes": ("collapse",),
},
|
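An editorial note on the diff just above: it reorganises the admin fieldsets and exposes the per-status `registration_msg_*` fields, but the clickable-link part of the complaint is a template-rendering concern rather than an admin one. As a hedged illustration only (nothing in this record shows how concrexit actually renders the field), Django's built-in `django.utils.html.urlize` helper, or the equivalent `|urlize` template filter, is one common way to turn plain URLs inside such a message into anchors:

```python
# Hedged illustration only: whether concrexit renders no_registration_message
# through urlize is an assumption, not something shown in this record.
from django.utils.html import urlize

message = "Sign up via https://example.com/external-form before Friday."

# Wraps the bare URL in an <a> tag while escaping the surrounding text.
print(urlize(message, autoescape=True))
```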
{"golden_diff": "diff --git a/website/events/admin/event.py b/website/events/admin/event.py\n--- a/website/events/admin/event.py\n+++ b/website/events/admin/event.py\n@@ -98,7 +98,26 @@\n \"cancel_deadline\",\n \"send_cancel_email\",\n \"optional_registrations\",\n+ ),\n+ \"classes\": (\"collapse\",),\n+ },\n+ ),\n+ (\n+ _(\"Registration status messages\"),\n+ {\n+ \"fields\": (\n \"no_registration_message\",\n+ \"registration_msg_optional\",\n+ \"registration_msg_optional_registered\",\n+ \"registration_msg_registered\",\n+ \"registration_msg_open\",\n+ \"registration_msg_full\",\n+ \"registration_msg_waitinglist\",\n+ \"registration_msg_will_open\",\n+ \"registration_msg_expired\",\n+ \"registration_msg_cancelled\",\n+ \"registration_msg_cancelled_late\",\n+ \"registration_msg_cancelled_final\",\n ),\n \"classes\": (\"collapse\",),\n },\n", "issue": "No registration message no longer accepts links\n### Describe the bug\r\nSince #2458 the event registration status (specifically no_registration_message) doesn't support URLs any more on the website (app is unaltered I think). I aslo think the formatting that happens now might be a bit risky.\r\n\r\n### How to reproduce\r\nMake an event with URL in the no_registration message and view it.\r\n\r\n### Expected behaviour\r\nA nice link.\r\n\r\n### Screenshots\r\n<img width=\"627\" alt=\"afbeelding\" src=\"https://github.com/svthalia/concrexit/assets/41264528/66d61a15-0ceb-43bf-aa5a-a6536a02739d\">\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the event model.\"\"\"\n\nfrom django.contrib import admin, messages\nfrom django.template.defaultfilters import date as _date\nfrom django.urls import path, reverse\nfrom django.utils import timezone\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\nfrom events import emails, models, services\nfrom events.admin.filters import LectureYearFilter\nfrom events.admin.forms import EventAdminForm, RegistrationInformationFieldForm\nfrom events.admin.inlines import (\n PizzaEventInline,\n PromotionRequestInline,\n RegistrationInformationFieldInline,\n)\nfrom events.admin.views import (\n EventAdminDetails,\n EventMarkPresentQR,\n EventRegistrationsExport,\n)\nfrom utils.admin import DoNextModelAdmin\n\n\[email protected](models.Event)\nclass EventAdmin(DoNextModelAdmin):\n \"\"\"Manage the events.\"\"\"\n\n form = EventAdminForm\n\n inlines = (\n RegistrationInformationFieldInline,\n PizzaEventInline,\n PromotionRequestInline,\n )\n\n list_display = (\n \"overview_link\",\n \"event_date\",\n \"registration_date\",\n \"num_participants\",\n \"get_organisers\",\n \"category\",\n \"published\",\n \"edit_link\",\n )\n list_display_links = (\"edit_link\",)\n list_filter = (LectureYearFilter, \"start\", \"published\", \"category\")\n actions = (\"make_published\", \"make_unpublished\")\n date_hierarchy = \"start\"\n search_fields = (\"title\", \"description\")\n prepopulated_fields = {\n \"map_location\": (\"location\",),\n }\n\n filter_horizontal = (\"documents\", \"organisers\")\n\n fieldsets = (\n (\n _(\"General\"),\n {\n \"fields\": (\n \"title\",\n \"slug\",\n \"published\",\n \"organisers\",\n )\n },\n ),\n (\n _(\"Detail\"),\n {\n \"fields\": (\n \"category\",\n \"start\",\n \"end\",\n \"description\",\n \"caption\",\n \"location\",\n \"map_location\",\n \"show_map_location\",\n ),\n \"classes\": (\"collapse\", \"start-open\"),\n },\n ),\n (\n _(\"Registrations\"),\n {\n \"fields\": (\n \"price\",\n \"fine\",\n 
\"tpay_allowed\",\n \"max_participants\",\n \"registration_without_membership\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"send_cancel_email\",\n \"optional_registrations\",\n \"no_registration_message\",\n ),\n \"classes\": (\"collapse\",),\n },\n ),\n (\n _(\"Extra\"),\n {\"fields\": (\"documents\", \"shift\"), \"classes\": (\"collapse\",)},\n ),\n )\n\n def get_queryset(self, request):\n queryset = (\n super()\n .get_queryset(request)\n .select_properties(\"participant_count\")\n .prefetch_related(\"organisers\")\n )\n if not (\n request.user.has_perm(\"events.override_organiser\")\n or request.user.has_perm(\"events.view_unpublished\")\n ):\n queryset_published = queryset.filter(published=True)\n queryset_unpublished = queryset.filter(\n published=False,\n organisers__in=list(\n request.member.get_member_groups().values_list(\"id\", flat=True)\n ),\n )\n queryset = queryset_published | queryset_unpublished\n return queryset\n\n def get_form(self, request, obj=None, change=False, **kwargs):\n form = super().get_form(request, obj, change, **kwargs)\n form.request = request\n return form\n\n def overview_link(self, obj):\n return format_html(\n '<a href=\"{link}\">{title}</a>',\n link=reverse(\"admin:events_event_details\", kwargs={\"pk\": obj.pk}),\n title=obj.title,\n )\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow deleting an event if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj):\n return False\n return super().has_delete_permission(request, obj)\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj):\n return False\n return super().has_change_permission(request, obj)\n\n def event_date(self, obj):\n event_date = timezone.make_naive(obj.start)\n return _date(event_date, \"l d b Y, G:i\")\n\n event_date.short_description = _(\"Event Date\")\n event_date.admin_order_field = \"start\"\n\n def registration_date(self, obj):\n if obj.registration_start is not None:\n start_date = timezone.make_naive(obj.registration_start)\n else:\n start_date = obj.registration_start\n\n return _date(start_date, \"l d b Y, G:i\")\n\n registration_date.short_description = _(\"Registration Start\")\n registration_date.admin_order_field = \"registration_start\"\n\n def edit_link(self, obj):\n return _(\"Edit\")\n\n edit_link.short_description = \"\"\n\n def num_participants(self, obj):\n \"\"\"Pretty-print the number of participants.\"\"\"\n num = obj.participant_count # prefetched aggregateproperty\n if not obj.max_participants:\n return f\"{num}/\u221e\"\n return f\"{num}/{obj.max_participants}\"\n\n num_participants.short_description = _(\"Number of participants\")\n\n def get_organisers(self, obj):\n return \", \".join(str(o) for o in obj.organisers.all())\n\n get_organisers.short_description = _(\"Organisers\")\n\n def make_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(request, queryset, True)\n\n make_published.short_description = _(\"Publish selected events\")\n\n def make_unpublished(self, request, queryset):\n \"\"\"Change the status of the event to unpublished.\"\"\"\n self._change_published(request, queryset, False)\n\n make_unpublished.short_description = _(\"Unpublish selected events\")\n\n @staticmethod\n def _change_published(request, queryset, 
published):\n if not request.user.is_superuser:\n queryset = queryset.filter(\n organisers__in=request.member.get_member_groups()\n )\n queryset.update(published=published)\n\n def save_formset(self, request, form, formset, change):\n \"\"\"Save formsets with their order.\"\"\"\n formset.save()\n\n informationfield_forms = (\n x\n for x in formset.forms\n if isinstance(x, RegistrationInformationFieldForm)\n and \"DELETE\" not in x.changed_data\n )\n form.instance.set_registrationinformationfield_order(\n [\n f.instance.pk\n for f in sorted(\n informationfield_forms,\n key=lambda x: (x.cleaned_data[\"order\"], x.instance.pk),\n )\n ]\n )\n form.instance.save()\n\n def save_model(self, request, obj, form, change):\n if change and \"max_participants\" in form.changed_data:\n prev = self.model.objects.get(id=obj.id)\n prev_limit = prev.max_participants\n self_limit = obj.max_participants\n if prev_limit is None:\n prev_limit = prev.participant_count\n if self_limit is None:\n self_limit = obj.participant_count\n\n if prev_limit < self_limit and prev_limit < obj.participant_count:\n diff = self_limit - prev_limit\n joiners = prev.queue[:diff]\n for registration in joiners:\n emails.notify_waiting(obj, registration)\n messages.info(\n request,\n \"The maximum number of participants was increased. Any members that moved from the waiting list to the participants list have been notified.\",\n )\n elif self_limit < prev_limit and self_limit < obj.participant_count:\n diff = self_limit - prev_limit\n leavers = prev.registrations[self_limit:]\n address = map(lambda r: r.email, leavers)\n link = \"mailto:?bcc=\" + \",\".join(address)\n messages.warning(\n request,\n format_html(\n \"The maximum number of participants was decreased and some members moved to the waiting list. 
<a href='{}' style='text-decoration: underline;'>Use this link to send them an email.</a>\",\n link,\n ),\n )\n super().save_model(request, obj, form, change)\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n if \"delete_selected\" in actions:\n del actions[\"delete_selected\"]\n return actions\n\n def get_formsets_with_inlines(self, request, obj=None):\n for inline in self.get_inline_instances(request, obj):\n if self.has_change_permission(request, obj) or obj is None:\n yield inline.get_formset(request, obj), inline\n\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\n \"<int:pk>/details/\",\n self.admin_site.admin_view(EventAdminDetails.as_view()),\n name=\"events_event_details\",\n ),\n path(\n \"<int:pk>/export/\",\n self.admin_site.admin_view(EventRegistrationsExport.as_view()),\n name=\"events_event_export\",\n ),\n path(\n \"<int:pk>/mark-present-qr/\",\n self.admin_site.admin_view(EventMarkPresentQR.as_view()),\n name=\"events_event_mark_present_qr\",\n ),\n ]\n return custom_urls + urls\n", "path": "website/events/admin/event.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the event model.\"\"\"\n\nfrom django.contrib import admin, messages\nfrom django.template.defaultfilters import date as _date\nfrom django.urls import path, reverse\nfrom django.utils import timezone\nfrom django.utils.html import format_html\nfrom django.utils.translation import gettext_lazy as _\n\nfrom events import emails, models, services\nfrom events.admin.filters import LectureYearFilter\nfrom events.admin.forms import EventAdminForm, RegistrationInformationFieldForm\nfrom events.admin.inlines import (\n PizzaEventInline,\n PromotionRequestInline,\n RegistrationInformationFieldInline,\n)\nfrom events.admin.views import (\n EventAdminDetails,\n EventMarkPresentQR,\n EventRegistrationsExport,\n)\nfrom utils.admin import DoNextModelAdmin\n\n\[email protected](models.Event)\nclass EventAdmin(DoNextModelAdmin):\n \"\"\"Manage the events.\"\"\"\n\n form = EventAdminForm\n\n inlines = (\n RegistrationInformationFieldInline,\n PizzaEventInline,\n PromotionRequestInline,\n )\n\n list_display = (\n \"overview_link\",\n \"event_date\",\n \"registration_date\",\n \"num_participants\",\n \"get_organisers\",\n \"category\",\n \"published\",\n \"edit_link\",\n )\n list_display_links = (\"edit_link\",)\n list_filter = (LectureYearFilter, \"start\", \"published\", \"category\")\n actions = (\"make_published\", \"make_unpublished\")\n date_hierarchy = \"start\"\n search_fields = (\"title\", \"description\")\n prepopulated_fields = {\n \"map_location\": (\"location\",),\n }\n\n filter_horizontal = (\"documents\", \"organisers\")\n\n fieldsets = (\n (\n _(\"General\"),\n {\n \"fields\": (\n \"title\",\n \"slug\",\n \"published\",\n \"organisers\",\n )\n },\n ),\n (\n _(\"Detail\"),\n {\n \"fields\": (\n \"category\",\n \"start\",\n \"end\",\n \"description\",\n \"caption\",\n \"location\",\n \"map_location\",\n \"show_map_location\",\n ),\n \"classes\": (\"collapse\", \"start-open\"),\n },\n ),\n (\n _(\"Registrations\"),\n {\n \"fields\": (\n \"price\",\n \"fine\",\n \"tpay_allowed\",\n \"max_participants\",\n \"registration_without_membership\",\n \"registration_start\",\n \"registration_end\",\n \"cancel_deadline\",\n \"send_cancel_email\",\n \"optional_registrations\",\n ),\n \"classes\": (\"collapse\",),\n },\n ),\n (\n _(\"Registration status messages\"),\n {\n \"fields\": (\n \"no_registration_message\",\n 
\"registration_msg_optional\",\n \"registration_msg_optional_registered\",\n \"registration_msg_registered\",\n \"registration_msg_open\",\n \"registration_msg_full\",\n \"registration_msg_waitinglist\",\n \"registration_msg_will_open\",\n \"registration_msg_expired\",\n \"registration_msg_cancelled\",\n \"registration_msg_cancelled_late\",\n \"registration_msg_cancelled_final\",\n ),\n \"classes\": (\"collapse\",),\n },\n ),\n (\n _(\"Extra\"),\n {\"fields\": (\"documents\", \"shift\"), \"classes\": (\"collapse\",)},\n ),\n )\n\n def get_queryset(self, request):\n queryset = (\n super()\n .get_queryset(request)\n .select_properties(\"participant_count\")\n .prefetch_related(\"organisers\")\n )\n if not (\n request.user.has_perm(\"events.override_organiser\")\n or request.user.has_perm(\"events.view_unpublished\")\n ):\n queryset_published = queryset.filter(published=True)\n queryset_unpublished = queryset.filter(\n published=False,\n organisers__in=list(\n request.member.get_member_groups().values_list(\"id\", flat=True)\n ),\n )\n queryset = queryset_published | queryset_unpublished\n return queryset\n\n def get_form(self, request, obj=None, change=False, **kwargs):\n form = super().get_form(request, obj, change, **kwargs)\n form.request = request\n return form\n\n def overview_link(self, obj):\n return format_html(\n '<a href=\"{link}\">{title}</a>',\n link=reverse(\"admin:events_event_details\", kwargs={\"pk\": obj.pk}),\n title=obj.title,\n )\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Only allow deleting an event if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj):\n return False\n return super().has_delete_permission(request, obj)\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Only allow access to the change form if the user is an organiser.\"\"\"\n if obj is not None and not services.is_organiser(request.member, obj):\n return False\n return super().has_change_permission(request, obj)\n\n def event_date(self, obj):\n event_date = timezone.make_naive(obj.start)\n return _date(event_date, \"l d b Y, G:i\")\n\n event_date.short_description = _(\"Event Date\")\n event_date.admin_order_field = \"start\"\n\n def registration_date(self, obj):\n if obj.registration_start is not None:\n start_date = timezone.make_naive(obj.registration_start)\n else:\n start_date = obj.registration_start\n\n return _date(start_date, \"l d b Y, G:i\")\n\n registration_date.short_description = _(\"Registration Start\")\n registration_date.admin_order_field = \"registration_start\"\n\n def edit_link(self, obj):\n return _(\"Edit\")\n\n edit_link.short_description = \"\"\n\n def num_participants(self, obj):\n \"\"\"Pretty-print the number of participants.\"\"\"\n num = obj.participant_count # prefetched aggregateproperty\n if not obj.max_participants:\n return f\"{num}/\u221e\"\n return f\"{num}/{obj.max_participants}\"\n\n num_participants.short_description = _(\"Number of participants\")\n\n def get_organisers(self, obj):\n return \", \".join(str(o) for o in obj.organisers.all())\n\n get_organisers.short_description = _(\"Organisers\")\n\n def make_published(self, request, queryset):\n \"\"\"Change the status of the event to published.\"\"\"\n self._change_published(request, queryset, True)\n\n make_published.short_description = _(\"Publish selected events\")\n\n def make_unpublished(self, request, queryset):\n \"\"\"Change the status of the event to unpublished.\"\"\"\n self._change_published(request, queryset, 
False)\n\n make_unpublished.short_description = _(\"Unpublish selected events\")\n\n @staticmethod\n def _change_published(request, queryset, published):\n if not request.user.is_superuser:\n queryset = queryset.filter(\n organisers__in=request.member.get_member_groups()\n )\n queryset.update(published=published)\n\n def save_formset(self, request, form, formset, change):\n \"\"\"Save formsets with their order.\"\"\"\n formset.save()\n\n informationfield_forms = (\n x\n for x in formset.forms\n if isinstance(x, RegistrationInformationFieldForm)\n and \"DELETE\" not in x.changed_data\n )\n form.instance.set_registrationinformationfield_order(\n [\n f.instance.pk\n for f in sorted(\n informationfield_forms,\n key=lambda x: (x.cleaned_data[\"order\"], x.instance.pk),\n )\n ]\n )\n form.instance.save()\n\n def save_model(self, request, obj, form, change):\n if change and \"max_participants\" in form.changed_data:\n prev = self.model.objects.get(id=obj.id)\n prev_limit = prev.max_participants\n self_limit = obj.max_participants\n if prev_limit is None:\n prev_limit = prev.participant_count\n if self_limit is None:\n self_limit = obj.participant_count\n\n if prev_limit < self_limit and prev_limit < obj.participant_count:\n diff = self_limit - prev_limit\n joiners = prev.queue[:diff]\n for registration in joiners:\n emails.notify_waiting(obj, registration)\n messages.info(\n request,\n \"The maximum number of participants was increased. Any members that moved from the waiting list to the participants list have been notified.\",\n )\n elif self_limit < prev_limit and self_limit < obj.participant_count:\n diff = self_limit - prev_limit\n leavers = prev.registrations[self_limit:]\n address = map(lambda r: r.email, leavers)\n link = \"mailto:?bcc=\" + \",\".join(address)\n messages.warning(\n request,\n format_html(\n \"The maximum number of participants was decreased and some members moved to the waiting list. <a href='{}' style='text-decoration: underline;'>Use this link to send them an email.</a>\",\n link,\n ),\n )\n super().save_model(request, obj, form, change)\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n if \"delete_selected\" in actions:\n del actions[\"delete_selected\"]\n return actions\n\n def get_formsets_with_inlines(self, request, obj=None):\n for inline in self.get_inline_instances(request, obj):\n if self.has_change_permission(request, obj) or obj is None:\n yield inline.get_formset(request, obj), inline\n\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\n \"<int:pk>/details/\",\n self.admin_site.admin_view(EventAdminDetails.as_view()),\n name=\"events_event_details\",\n ),\n path(\n \"<int:pk>/export/\",\n self.admin_site.admin_view(EventRegistrationsExport.as_view()),\n name=\"events_event_export\",\n ),\n path(\n \"<int:pk>/mark-present-qr/\",\n self.admin_site.admin_view(EventMarkPresentQR.as_view()),\n name=\"events_event_mark_present_qr\",\n ),\n ]\n return custom_urls + urls\n", "path": "website/events/admin/event.py"}]}
| 3,204 | 210 |
gh_patches_debug_2478
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1767
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Delete tpay payment if order is modified
### Summary
Right now it is possible to order a pizza, pay it with tpay, change the order to a pizza with a different price, and the payment will not match the order anymore.
### How to test
1. Order a pizza
2. Pay with tpay
3. Change the order
4. The payment should be deleted
5. If the event is over, or the payment is batched, then changing the order should crash
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/pizzas/views.py`
Content:
```
1 """Views provided by the pizzas package."""
2 from django.contrib import messages
3 from django.contrib.auth.decorators import login_required
4 from django.http import Http404
5 from django.shortcuts import get_object_or_404, render, redirect
6 from django.utils.translation import gettext_lazy as _
7 from django.views.decorators.http import require_http_methods
8
9 from payments.services import delete_payment
10 from .models import FoodOrder, FoodEvent, Product
11
12
13 @login_required
14 def index(request):
15 """Overview of user order for a pizza event."""
16 products = Product.available_products.order_by("name")
17 if not request.user.has_perm("pizzas.order_restricted_products"):
18 products = products.exclude(restricted=True)
19 event = FoodEvent.current()
20 try:
21 obj = FoodOrder.objects.get(food_event=event, member=request.member)
22 except FoodOrder.DoesNotExist:
23 obj = None
24 context = {"event": event, "products": products, "order": obj}
25 return render(request, "pizzas/index.html", context)
26
27
28 @require_http_methods(["POST"])
29 def cancel_order(request):
30 """View that cancels a user's order."""
31 if "order" in request.POST:
32 try:
33 order = get_object_or_404(FoodOrder, pk=int(request.POST["order"]))
34 if not order.can_be_changed:
35 messages.error(request, _("You can no longer cancel."))
36 elif order.member == request.member:
37 order.delete()
38 messages.success(request, _("Your order has been cancelled."))
39 except Http404:
40 messages.error(request, _("Your order could not be found."))
41 return redirect("pizzas:index")
42
43
44 @login_required
45 def place_order(request):
46 """View that shows the detail of the current order."""
47 event = FoodEvent.current()
48 if not event:
49 return redirect("pizzas:index")
50
51 try:
52 obj = FoodOrder.objects.get(food_event=event, member=request.member)
53 current_order_locked = not obj.can_be_changed
54 except FoodOrder.DoesNotExist:
55 obj = None
56 current_order_locked = False
57
58 if "product" in request.POST and not current_order_locked:
59 productset = Product.available_products.all()
60 if not request.user.has_perm("pizzas.order_restricted_products"):
61 productset = productset.exclude(restricted=True)
62 try:
63 product = productset.get(pk=int(request.POST["product"]))
64 except Product.DoesNotExist as e:
65 raise Http404("Pizza does not exist") from e
66 if not obj:
67 obj = FoodOrder(food_event=event, member=request.member)
68 obj.product = product
69 if obj.payment:
70 delete_payment(obj.payment)
71 obj.save()
72 return redirect("pizzas:index")
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/pizzas/views.py b/website/pizzas/views.py
--- a/website/pizzas/views.py
+++ b/website/pizzas/views.py
@@ -67,6 +67,6 @@
             obj = FoodOrder(food_event=event, member=request.member)
         obj.product = product
         if obj.payment:
-            delete_payment(obj.payment)
+            delete_payment(obj)
         obj.save()
     return redirect("pizzas:index")
|
{"golden_diff": "diff --git a/website/pizzas/views.py b/website/pizzas/views.py\n--- a/website/pizzas/views.py\n+++ b/website/pizzas/views.py\n@@ -67,6 +67,6 @@\n obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n if obj.payment:\n- delete_payment(obj.payment)\n+ delete_payment(obj)\n obj.save()\n return redirect(\"pizzas:index\")\n", "issue": "Delete tpay payment if order is modified\n### Summary\r\nRight now it is possible to order a pizza, pay it with tpay, change the order to a pizza with a different price, and the payment will not match the order anymore.\r\n\r\n### How to test\r\n1. Order a pizza\r\n2. Pay with tpay\r\n3. Change the order\r\n4. The payment should be deleted\r\n5. If the event is over, or the payment is batched, then changing the order should crash\n", "before_files": [{"content": "\"\"\"Views provided by the pizzas package.\"\"\"\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom payments.services import delete_payment\nfrom .models import FoodOrder, FoodEvent, Product\n\n\n@login_required\ndef index(request):\n \"\"\"Overview of user order for a pizza event.\"\"\"\n products = Product.available_products.order_by(\"name\")\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n return render(request, \"pizzas/index.html\", context)\n\n\n@require_http_methods([\"POST\"])\ndef cancel_order(request):\n \"\"\"View that cancels a user's order.\"\"\"\n if \"order\" in request.POST:\n try:\n order = get_object_or_404(FoodOrder, pk=int(request.POST[\"order\"]))\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n order.delete()\n messages.success(request, _(\"Your order has been cancelled.\"))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n\n\n@login_required\ndef place_order(request):\n \"\"\"View that shows the detail of the current order.\"\"\"\n event = FoodEvent.current()\n if not event:\n return redirect(\"pizzas:index\")\n\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n current_order_locked = False\n\n if \"product\" in request.POST and not current_order_locked:\n productset = Product.available_products.all()\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n productset = productset.exclude(restricted=True)\n try:\n product = productset.get(pk=int(request.POST[\"product\"]))\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n if obj.payment:\n delete_payment(obj.payment)\n obj.save()\n return redirect(\"pizzas:index\")\n", "path": "website/pizzas/views.py"}], "after_files": [{"content": "\"\"\"Views provided by the pizzas package.\"\"\"\nfrom django.contrib import 
messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.decorators.http import require_http_methods\n\nfrom payments.services import delete_payment\nfrom .models import FoodOrder, FoodEvent, Product\n\n\n@login_required\ndef index(request):\n \"\"\"Overview of user order for a pizza event.\"\"\"\n products = Product.available_products.order_by(\"name\")\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n products = products.exclude(restricted=True)\n event = FoodEvent.current()\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n except FoodOrder.DoesNotExist:\n obj = None\n context = {\"event\": event, \"products\": products, \"order\": obj}\n return render(request, \"pizzas/index.html\", context)\n\n\n@require_http_methods([\"POST\"])\ndef cancel_order(request):\n \"\"\"View that cancels a user's order.\"\"\"\n if \"order\" in request.POST:\n try:\n order = get_object_or_404(FoodOrder, pk=int(request.POST[\"order\"]))\n if not order.can_be_changed:\n messages.error(request, _(\"You can no longer cancel.\"))\n elif order.member == request.member:\n order.delete()\n messages.success(request, _(\"Your order has been cancelled.\"))\n except Http404:\n messages.error(request, _(\"Your order could not be found.\"))\n return redirect(\"pizzas:index\")\n\n\n@login_required\ndef place_order(request):\n \"\"\"View that shows the detail of the current order.\"\"\"\n event = FoodEvent.current()\n if not event:\n return redirect(\"pizzas:index\")\n\n try:\n obj = FoodOrder.objects.get(food_event=event, member=request.member)\n current_order_locked = not obj.can_be_changed\n except FoodOrder.DoesNotExist:\n obj = None\n current_order_locked = False\n\n if \"product\" in request.POST and not current_order_locked:\n productset = Product.available_products.all()\n if not request.user.has_perm(\"pizzas.order_restricted_products\"):\n productset = productset.exclude(restricted=True)\n try:\n product = productset.get(pk=int(request.POST[\"product\"]))\n except Product.DoesNotExist as e:\n raise Http404(\"Pizza does not exist\") from e\n if not obj:\n obj = FoodOrder(food_event=event, member=request.member)\n obj.product = product\n if obj.payment:\n delete_payment(obj)\n obj.save()\n return redirect(\"pizzas:index\")\n", "path": "website/pizzas/views.py"}]}
| 1,068 | 97 |
gh_patches_debug_9752
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-4452
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ListView` initial index is off by one
Setting the `initial_index` of a `ListView` seems to be off by one, introduced somewhere in v0.48.2:
```python
from textual.app import App, ComposeResult
from textual.widgets import Label, ListItem, ListView


class ExampleApp(App):
    def compose(self) -> ComposeResult:
        list_items = [ListItem(Label(str(i))) for i in range(10)]
        yield ListView(
            *list_items,
            initial_index=9,
        )


if __name__ == "__main__":
    app = ExampleApp()
    app.run()
```

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_list_view.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import ClassVar, Iterable, Optional
4
5 from typing_extensions import TypeGuard
6
7 from .. import _widget_navigation
8 from ..await_remove import AwaitRemove
9 from ..binding import Binding, BindingType
10 from ..containers import VerticalScroll
11 from ..events import Mount
12 from ..message import Message
13 from ..reactive import reactive
14 from ..widget import AwaitMount
15 from ..widgets._list_item import ListItem
16
17
18 class ListView(VerticalScroll, can_focus=True, can_focus_children=False):
19 """A vertical list view widget.
20
21 Displays a vertical list of `ListItem`s which can be highlighted and
22 selected using the mouse or keyboard.
23
24 Attributes:
25 index: The index in the list that's currently highlighted.
26 """
27
28 BINDINGS: ClassVar[list[BindingType]] = [
29 Binding("enter", "select_cursor", "Select", show=False),
30 Binding("up", "cursor_up", "Cursor Up", show=False),
31 Binding("down", "cursor_down", "Cursor Down", show=False),
32 ]
33 """
34 | Key(s) | Description |
35 | :- | :- |
36 | enter | Select the current item. |
37 | up | Move the cursor up. |
38 | down | Move the cursor down. |
39 """
40
41 index = reactive[Optional[int]](0, always_update=True, init=False)
42 """The index of the currently highlighted item."""
43
44 class Highlighted(Message):
45 """Posted when the highlighted item changes.
46
47 Highlighted item is controlled using up/down keys.
48 Can be handled using `on_list_view_highlighted` in a subclass of `ListView`
49 or in a parent widget in the DOM.
50 """
51
52 ALLOW_SELECTOR_MATCH = {"item"}
53 """Additional message attributes that can be used with the [`on` decorator][textual.on]."""
54
55 def __init__(self, list_view: ListView, item: ListItem | None) -> None:
56 super().__init__()
57 self.list_view: ListView = list_view
58 """The view that contains the item highlighted."""
59 self.item: ListItem | None = item
60 """The highlighted item, if there is one highlighted."""
61
62 @property
63 def control(self) -> ListView:
64 """The view that contains the item highlighted.
65
66 This is an alias for [`Highlighted.list_view`][textual.widgets.ListView.Highlighted.list_view]
67 and is used by the [`on`][textual.on] decorator.
68 """
69 return self.list_view
70
71 class Selected(Message):
72 """Posted when a list item is selected, e.g. when you press the enter key on it.
73
74 Can be handled using `on_list_view_selected` in a subclass of `ListView` or in
75 a parent widget in the DOM.
76 """
77
78 ALLOW_SELECTOR_MATCH = {"item"}
79 """Additional message attributes that can be used with the [`on` decorator][textual.on]."""
80
81 def __init__(self, list_view: ListView, item: ListItem) -> None:
82 super().__init__()
83 self.list_view: ListView = list_view
84 """The view that contains the item selected."""
85 self.item: ListItem = item
86 """The selected item."""
87
88 @property
89 def control(self) -> ListView:
90 """The view that contains the item selected.
91
92 This is an alias for [`Selected.list_view`][textual.widgets.ListView.Selected.list_view]
93 and is used by the [`on`][textual.on] decorator.
94 """
95 return self.list_view
96
97 def __init__(
98 self,
99 *children: ListItem,
100 initial_index: int | None = 0,
101 name: str | None = None,
102 id: str | None = None,
103 classes: str | None = None,
104 disabled: bool = False,
105 ) -> None:
106 """
107 Initialize a ListView.
108
109 Args:
110 *children: The ListItems to display in the list.
111 initial_index: The index that should be highlighted when the list is first mounted.
112 name: The name of the widget.
113 id: The unique ID of the widget used in CSS/query selection.
114 classes: The CSS classes of the widget.
115 disabled: Whether the ListView is disabled or not.
116 """
117 super().__init__(
118 *children, name=name, id=id, classes=classes, disabled=disabled
119 )
120 # Set the index to the given initial index, or the first available index after.
121 self._index = _widget_navigation.find_next_enabled(
122 self._nodes,
123 anchor=initial_index - 1 if initial_index is not None else None,
124 direction=1,
125 )
126
127 def _on_mount(self, _: Mount) -> None:
128 """Ensure the ListView is fully-settled after mounting."""
129 self.index = self._index
130
131 @property
132 def highlighted_child(self) -> ListItem | None:
133 """The currently highlighted ListItem, or None if nothing is highlighted."""
134 if self.index is not None and 0 <= self.index < len(self._nodes):
135 list_item = self._nodes[self.index]
136 assert isinstance(list_item, ListItem)
137 return list_item
138 else:
139 return None
140
141 def validate_index(self, index: int | None) -> int | None:
142 """Clamp the index to the valid range, or set to None if there's nothing to highlight.
143
144 Args:
145 index: The index to clamp.
146
147 Returns:
148 The clamped index.
149 """
150 if index is None or not self._nodes:
151 return None
152 elif index < 0:
153 return 0
154 elif index >= len(self._nodes):
155 return len(self._nodes) - 1
156
157 return index
158
159 def _is_valid_index(self, index: int | None) -> TypeGuard[int]:
160 """Determine whether the current index is valid into the list of children."""
161 if index is None:
162 return False
163 return 0 <= index < len(self._nodes)
164
165 def watch_index(self, old_index: int | None, new_index: int | None) -> None:
166 """Updates the highlighting when the index changes."""
167 if self._is_valid_index(old_index):
168 old_child = self._nodes[old_index]
169 assert isinstance(old_child, ListItem)
170 old_child.highlighted = False
171
172 if self._is_valid_index(new_index) and not self._nodes[new_index].disabled:
173 new_child = self._nodes[new_index]
174 assert isinstance(new_child, ListItem)
175 new_child.highlighted = True
176 self._scroll_highlighted_region()
177 self.post_message(self.Highlighted(self, new_child))
178 else:
179 self.post_message(self.Highlighted(self, None))
180
181 def extend(self, items: Iterable[ListItem]) -> AwaitMount:
182 """Append multiple new ListItems to the end of the ListView.
183
184 Args:
185 items: The ListItems to append.
186
187 Returns:
188 An awaitable that yields control to the event loop
189 until the DOM has been updated with the new child items.
190 """
191 await_mount = self.mount(*items)
192 if len(self) == 1:
193 self.index = 0
194 return await_mount
195
196 def append(self, item: ListItem) -> AwaitMount:
197 """Append a new ListItem to the end of the ListView.
198
199 Args:
200 item: The ListItem to append.
201
202 Returns:
203 An awaitable that yields control to the event loop
204 until the DOM has been updated with the new child item.
205 """
206 return self.extend([item])
207
208 def clear(self) -> AwaitRemove:
209 """Clear all items from the ListView.
210
211 Returns:
212 An awaitable that yields control to the event loop until
213 the DOM has been updated to reflect all children being removed.
214 """
215 await_remove = self.query("ListView > ListItem").remove()
216 self.index = None
217 return await_remove
218
219 def insert(self, index: int, items: Iterable[ListItem]) -> AwaitMount:
220 """Insert new ListItem(s) to specified index.
221
222 Args:
223 index: index to insert new ListItem.
224 items: The ListItems to insert.
225
226 Returns:
227 An awaitable that yields control to the event loop
228 until the DOM has been updated with the new child item.
229 """
230 await_mount = self.mount(*items, before=index)
231 return await_mount
232
233 def pop(self, index: Optional[int] = None) -> AwaitRemove:
234 """Remove last ListItem from ListView or
235 Remove ListItem from ListView by index
236
237 Args:
238 index: index of ListItem to remove from ListView
239
240 Returns:
241 An awaitable that yields control to the event loop until
242 the DOM has been updated to reflect item being removed.
243 """
244 if index is None:
245 await_remove = self.query("ListItem").last().remove()
246 else:
247 await_remove = self.query("ListItem")[index].remove()
248 return await_remove
249
250 def remove_items(self, indices: Iterable[int]) -> AwaitRemove:
251 """Remove ListItems from ListView by indices
252
253 Args:
254 indices: index(s) of ListItems to remove from ListView
255
256 Returns:
257 An awaitable object that waits for the direct children to be removed.
258 """
259 items = self.query("ListItem")
260 items_to_remove = []
261 for index in indices:
262 items_to_remove.append(items[index])
263
264 await_remove = self.app._remove_nodes(items_to_remove, self)
265 return await_remove
266
267 def action_select_cursor(self) -> None:
268 """Select the current item in the list."""
269 selected_child = self.highlighted_child
270 if selected_child is None:
271 return
272 self.post_message(self.Selected(self, selected_child))
273
274 def action_cursor_down(self) -> None:
275 """Highlight the next item in the list."""
276 candidate = _widget_navigation.find_next_enabled(
277 self._nodes,
278 anchor=self.index,
279 direction=1,
280 )
281 if self.index is not None and candidate is not None and candidate < self.index:
282 return # Avoid wrapping around.
283
284 self.index = candidate
285
286 def action_cursor_up(self) -> None:
287 """Highlight the previous item in the list."""
288 candidate = _widget_navigation.find_next_enabled(
289 self._nodes,
290 anchor=self.index,
291 direction=-1,
292 )
293 if self.index is not None and candidate is not None and candidate > self.index:
294 return # Avoid wrapping around.
295
296 self.index = candidate
297
298 def _on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:
299 event.stop()
300 self.focus()
301 self.index = self._nodes.index(event.item)
302 self.post_message(self.Selected(self, event.item))
303
304 def _scroll_highlighted_region(self) -> None:
305 """Used to keep the highlighted index within vision"""
306 if self.highlighted_child is not None:
307 self.call_after_refresh(
308 self.scroll_to_widget, self.highlighted_child, animate=False
309 )
310
311 def __len__(self) -> int:
312 """Compute the length (in number of items) of the list view."""
313 return len(self._nodes)
314
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/textual/widgets/_list_view.py b/src/textual/widgets/_list_view.py
--- a/src/textual/widgets/_list_view.py
+++ b/src/textual/widgets/_list_view.py
@@ -119,9 +119,10 @@
         )
         # Set the index to the given initial index, or the first available index after.
         self._index = _widget_navigation.find_next_enabled(
-            self._nodes,
-            anchor=initial_index - 1 if initial_index is not None else None,
+            children,
+            anchor=initial_index if initial_index is not None else None,
             direction=1,
+            with_anchor=True,
         )
 
     def _on_mount(self, _: Mount) -> None:
|
{"golden_diff": "diff --git a/src/textual/widgets/_list_view.py b/src/textual/widgets/_list_view.py\n--- a/src/textual/widgets/_list_view.py\n+++ b/src/textual/widgets/_list_view.py\n@@ -119,9 +119,10 @@\n )\n # Set the index to the given initial index, or the first available index after.\n self._index = _widget_navigation.find_next_enabled(\n- self._nodes,\n- anchor=initial_index - 1 if initial_index is not None else None,\n+ children,\n+ anchor=initial_index if initial_index is not None else None,\n direction=1,\n+ with_anchor=True,\n )\n \n def _on_mount(self, _: Mount) -> None:\n", "issue": "`ListView` initial index is off by one\nSetting the `initial_index` of a `ListView` seems to be off by one, introduced somewhere in v0.48.2:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Label, ListItem, ListView\r\n\r\n\r\nclass ExampleApp(App):\r\n def compose(self) -> ComposeResult:\r\n list_items = [ListItem(Label(str(i))) for i in range(10)]\r\n yield ListView(\r\n *list_items,\r\n initial_index=9,\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = ExampleApp()\r\n app.run()\r\n```\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import ClassVar, Iterable, Optional\n\nfrom typing_extensions import TypeGuard\n\nfrom .. import _widget_navigation\nfrom ..await_remove import AwaitRemove\nfrom ..binding import Binding, BindingType\nfrom ..containers import VerticalScroll\nfrom ..events import Mount\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..widget import AwaitMount\nfrom ..widgets._list_item import ListItem\n\n\nclass ListView(VerticalScroll, can_focus=True, can_focus_children=False):\n \"\"\"A vertical list view widget.\n\n Displays a vertical list of `ListItem`s which can be highlighted and\n selected using the mouse or keyboard.\n\n Attributes:\n index: The index in the list that's currently highlighted.\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"enter\", \"select_cursor\", \"Select\", show=False),\n Binding(\"up\", \"cursor_up\", \"Cursor Up\", show=False),\n Binding(\"down\", \"cursor_down\", \"Cursor Down\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Select the current item. |\n | up | Move the cursor up. |\n | down | Move the cursor down. 
|\n \"\"\"\n\n index = reactive[Optional[int]](0, always_update=True, init=False)\n \"\"\"The index of the currently highlighted item.\"\"\"\n\n class Highlighted(Message):\n \"\"\"Posted when the highlighted item changes.\n\n Highlighted item is controlled using up/down keys.\n Can be handled using `on_list_view_highlighted` in a subclass of `ListView`\n or in a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem | None) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item highlighted.\"\"\"\n self.item: ListItem | None = item\n \"\"\"The highlighted item, if there is one highlighted.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item highlighted.\n\n This is an alias for [`Highlighted.list_view`][textual.widgets.ListView.Highlighted.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n class Selected(Message):\n \"\"\"Posted when a list item is selected, e.g. when you press the enter key on it.\n\n Can be handled using `on_list_view_selected` in a subclass of `ListView` or in\n a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item selected.\"\"\"\n self.item: ListItem = item\n \"\"\"The selected item.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item selected.\n\n This is an alias for [`Selected.list_view`][textual.widgets.ListView.Selected.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n def __init__(\n self,\n *children: ListItem,\n initial_index: int | None = 0,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"\n Initialize a ListView.\n\n Args:\n *children: The ListItems to display in the list.\n initial_index: The index that should be highlighted when the list is first mounted.\n name: The name of the widget.\n id: The unique ID of the widget used in CSS/query selection.\n classes: The CSS classes of the widget.\n disabled: Whether the ListView is disabled or not.\n \"\"\"\n super().__init__(\n *children, name=name, id=id, classes=classes, disabled=disabled\n )\n # Set the index to the given initial index, or the first available index after.\n self._index = _widget_navigation.find_next_enabled(\n self._nodes,\n anchor=initial_index - 1 if initial_index is not None else None,\n direction=1,\n )\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Ensure the ListView is fully-settled after mounting.\"\"\"\n self.index = self._index\n\n @property\n def highlighted_child(self) -> ListItem | None:\n \"\"\"The currently highlighted ListItem, or None if nothing is highlighted.\"\"\"\n if self.index is not None and 0 <= self.index < len(self._nodes):\n list_item = self._nodes[self.index]\n assert isinstance(list_item, ListItem)\n return list_item\n else:\n return None\n\n def validate_index(self, index: int | None) -> int | None:\n \"\"\"Clamp the index to the valid range, or set to None if there's nothing to 
highlight.\n\n Args:\n index: The index to clamp.\n\n Returns:\n The clamped index.\n \"\"\"\n if index is None or not self._nodes:\n return None\n elif index < 0:\n return 0\n elif index >= len(self._nodes):\n return len(self._nodes) - 1\n\n return index\n\n def _is_valid_index(self, index: int | None) -> TypeGuard[int]:\n \"\"\"Determine whether the current index is valid into the list of children.\"\"\"\n if index is None:\n return False\n return 0 <= index < len(self._nodes)\n\n def watch_index(self, old_index: int | None, new_index: int | None) -> None:\n \"\"\"Updates the highlighting when the index changes.\"\"\"\n if self._is_valid_index(old_index):\n old_child = self._nodes[old_index]\n assert isinstance(old_child, ListItem)\n old_child.highlighted = False\n\n if self._is_valid_index(new_index) and not self._nodes[new_index].disabled:\n new_child = self._nodes[new_index]\n assert isinstance(new_child, ListItem)\n new_child.highlighted = True\n self._scroll_highlighted_region()\n self.post_message(self.Highlighted(self, new_child))\n else:\n self.post_message(self.Highlighted(self, None))\n\n def extend(self, items: Iterable[ListItem]) -> AwaitMount:\n \"\"\"Append multiple new ListItems to the end of the ListView.\n\n Args:\n items: The ListItems to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child items.\n \"\"\"\n await_mount = self.mount(*items)\n if len(self) == 1:\n self.index = 0\n return await_mount\n\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n\n Args:\n item: The ListItem to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n return self.extend([item])\n\n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n\n Returns:\n An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n \"\"\"\n await_remove = self.query(\"ListView > ListItem\").remove()\n self.index = None\n return await_remove\n\n def insert(self, index: int, items: Iterable[ListItem]) -> AwaitMount:\n \"\"\"Insert new ListItem(s) to specified index.\n\n Args:\n index: index to insert new ListItem.\n items: The ListItems to insert.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n await_mount = self.mount(*items, before=index)\n return await_mount\n\n def pop(self, index: Optional[int] = None) -> AwaitRemove:\n \"\"\"Remove last ListItem from ListView or\n Remove ListItem from ListView by index\n\n Args:\n index: index of ListItem to remove from ListView\n\n Returns:\n An awaitable that yields control to the event loop until\n the DOM has been updated to reflect item being removed.\n \"\"\"\n if index is None:\n await_remove = self.query(\"ListItem\").last().remove()\n else:\n await_remove = self.query(\"ListItem\")[index].remove()\n return await_remove\n\n def remove_items(self, indices: Iterable[int]) -> AwaitRemove:\n \"\"\"Remove ListItems from ListView by indices\n\n Args:\n indices: index(s) of ListItems to remove from ListView\n\n Returns:\n An awaitable object that waits for the direct children to be removed.\n \"\"\"\n items = self.query(\"ListItem\")\n items_to_remove = []\n for index in indices:\n items_to_remove.append(items[index])\n\n await_remove = self.app._remove_nodes(items_to_remove, self)\n 
return await_remove\n\n def action_select_cursor(self) -> None:\n \"\"\"Select the current item in the list.\"\"\"\n selected_child = self.highlighted_child\n if selected_child is None:\n return\n self.post_message(self.Selected(self, selected_child))\n\n def action_cursor_down(self) -> None:\n \"\"\"Highlight the next item in the list.\"\"\"\n candidate = _widget_navigation.find_next_enabled(\n self._nodes,\n anchor=self.index,\n direction=1,\n )\n if self.index is not None and candidate is not None and candidate < self.index:\n return # Avoid wrapping around.\n\n self.index = candidate\n\n def action_cursor_up(self) -> None:\n \"\"\"Highlight the previous item in the list.\"\"\"\n candidate = _widget_navigation.find_next_enabled(\n self._nodes,\n anchor=self.index,\n direction=-1,\n )\n if self.index is not None and candidate is not None and candidate > self.index:\n return # Avoid wrapping around.\n\n self.index = candidate\n\n def _on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:\n event.stop()\n self.focus()\n self.index = self._nodes.index(event.item)\n self.post_message(self.Selected(self, event.item))\n\n def _scroll_highlighted_region(self) -> None:\n \"\"\"Used to keep the highlighted index within vision\"\"\"\n if self.highlighted_child is not None:\n self.call_after_refresh(\n self.scroll_to_widget, self.highlighted_child, animate=False\n )\n\n def __len__(self) -> int:\n \"\"\"Compute the length (in number of items) of the list view.\"\"\"\n return len(self._nodes)\n", "path": "src/textual/widgets/_list_view.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import ClassVar, Iterable, Optional\n\nfrom typing_extensions import TypeGuard\n\nfrom .. import _widget_navigation\nfrom ..await_remove import AwaitRemove\nfrom ..binding import Binding, BindingType\nfrom ..containers import VerticalScroll\nfrom ..events import Mount\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..widget import AwaitMount\nfrom ..widgets._list_item import ListItem\n\n\nclass ListView(VerticalScroll, can_focus=True, can_focus_children=False):\n \"\"\"A vertical list view widget.\n\n Displays a vertical list of `ListItem`s which can be highlighted and\n selected using the mouse or keyboard.\n\n Attributes:\n index: The index in the list that's currently highlighted.\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"enter\", \"select_cursor\", \"Select\", show=False),\n Binding(\"up\", \"cursor_up\", \"Cursor Up\", show=False),\n Binding(\"down\", \"cursor_down\", \"Cursor Down\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Select the current item. |\n | up | Move the cursor up. |\n | down | Move the cursor down. 
|\n \"\"\"\n\n index = reactive[Optional[int]](0, always_update=True, init=False)\n \"\"\"The index of the currently highlighted item.\"\"\"\n\n class Highlighted(Message):\n \"\"\"Posted when the highlighted item changes.\n\n Highlighted item is controlled using up/down keys.\n Can be handled using `on_list_view_highlighted` in a subclass of `ListView`\n or in a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem | None) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item highlighted.\"\"\"\n self.item: ListItem | None = item\n \"\"\"The highlighted item, if there is one highlighted.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item highlighted.\n\n This is an alias for [`Highlighted.list_view`][textual.widgets.ListView.Highlighted.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n class Selected(Message):\n \"\"\"Posted when a list item is selected, e.g. when you press the enter key on it.\n\n Can be handled using `on_list_view_selected` in a subclass of `ListView` or in\n a parent widget in the DOM.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"item\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, list_view: ListView, item: ListItem) -> None:\n super().__init__()\n self.list_view: ListView = list_view\n \"\"\"The view that contains the item selected.\"\"\"\n self.item: ListItem = item\n \"\"\"The selected item.\"\"\"\n\n @property\n def control(self) -> ListView:\n \"\"\"The view that contains the item selected.\n\n This is an alias for [`Selected.list_view`][textual.widgets.ListView.Selected.list_view]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.list_view\n\n def __init__(\n self,\n *children: ListItem,\n initial_index: int | None = 0,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"\n Initialize a ListView.\n\n Args:\n *children: The ListItems to display in the list.\n initial_index: The index that should be highlighted when the list is first mounted.\n name: The name of the widget.\n id: The unique ID of the widget used in CSS/query selection.\n classes: The CSS classes of the widget.\n disabled: Whether the ListView is disabled or not.\n \"\"\"\n super().__init__(\n *children, name=name, id=id, classes=classes, disabled=disabled\n )\n # Set the index to the given initial index, or the first available index after.\n self._index = _widget_navigation.find_next_enabled(\n children,\n anchor=initial_index if initial_index is not None else None,\n direction=1,\n with_anchor=True,\n )\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Ensure the ListView is fully-settled after mounting.\"\"\"\n self.index = self._index\n\n @property\n def highlighted_child(self) -> ListItem | None:\n \"\"\"The currently highlighted ListItem, or None if nothing is highlighted.\"\"\"\n if self.index is not None and 0 <= self.index < len(self._nodes):\n list_item = self._nodes[self.index]\n assert isinstance(list_item, ListItem)\n return list_item\n else:\n return None\n\n def validate_index(self, index: int | None) -> int | None:\n \"\"\"Clamp the index to the valid range, or set to None if there's nothing to 
highlight.\n\n Args:\n index: The index to clamp.\n\n Returns:\n The clamped index.\n \"\"\"\n if index is None or not self._nodes:\n return None\n elif index < 0:\n return 0\n elif index >= len(self._nodes):\n return len(self._nodes) - 1\n\n return index\n\n def _is_valid_index(self, index: int | None) -> TypeGuard[int]:\n \"\"\"Determine whether the current index is valid into the list of children.\"\"\"\n if index is None:\n return False\n return 0 <= index < len(self._nodes)\n\n def watch_index(self, old_index: int | None, new_index: int | None) -> None:\n \"\"\"Updates the highlighting when the index changes.\"\"\"\n if self._is_valid_index(old_index):\n old_child = self._nodes[old_index]\n assert isinstance(old_child, ListItem)\n old_child.highlighted = False\n\n if self._is_valid_index(new_index) and not self._nodes[new_index].disabled:\n new_child = self._nodes[new_index]\n assert isinstance(new_child, ListItem)\n new_child.highlighted = True\n self._scroll_highlighted_region()\n self.post_message(self.Highlighted(self, new_child))\n else:\n self.post_message(self.Highlighted(self, None))\n\n def extend(self, items: Iterable[ListItem]) -> AwaitMount:\n \"\"\"Append multiple new ListItems to the end of the ListView.\n\n Args:\n items: The ListItems to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child items.\n \"\"\"\n await_mount = self.mount(*items)\n if len(self) == 1:\n self.index = 0\n return await_mount\n\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n\n Args:\n item: The ListItem to append.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n return self.extend([item])\n\n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n\n Returns:\n An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n \"\"\"\n await_remove = self.query(\"ListView > ListItem\").remove()\n self.index = None\n return await_remove\n\n def insert(self, index: int, items: Iterable[ListItem]) -> AwaitMount:\n \"\"\"Insert new ListItem(s) to specified index.\n\n Args:\n index: index to insert new ListItem.\n items: The ListItems to insert.\n\n Returns:\n An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n await_mount = self.mount(*items, before=index)\n return await_mount\n\n def pop(self, index: Optional[int] = None) -> AwaitRemove:\n \"\"\"Remove last ListItem from ListView or\n Remove ListItem from ListView by index\n\n Args:\n index: index of ListItem to remove from ListView\n\n Returns:\n An awaitable that yields control to the event loop until\n the DOM has been updated to reflect item being removed.\n \"\"\"\n if index is None:\n await_remove = self.query(\"ListItem\").last().remove()\n else:\n await_remove = self.query(\"ListItem\")[index].remove()\n return await_remove\n\n def remove_items(self, indices: Iterable[int]) -> AwaitRemove:\n \"\"\"Remove ListItems from ListView by indices\n\n Args:\n indices: index(s) of ListItems to remove from ListView\n\n Returns:\n An awaitable object that waits for the direct children to be removed.\n \"\"\"\n items = self.query(\"ListItem\")\n items_to_remove = []\n for index in indices:\n items_to_remove.append(items[index])\n\n await_remove = self.app._remove_nodes(items_to_remove, self)\n 
return await_remove\n\n def action_select_cursor(self) -> None:\n \"\"\"Select the current item in the list.\"\"\"\n selected_child = self.highlighted_child\n if selected_child is None:\n return\n self.post_message(self.Selected(self, selected_child))\n\n def action_cursor_down(self) -> None:\n \"\"\"Highlight the next item in the list.\"\"\"\n candidate = _widget_navigation.find_next_enabled(\n self._nodes,\n anchor=self.index,\n direction=1,\n )\n if self.index is not None and candidate is not None and candidate < self.index:\n return # Avoid wrapping around.\n\n self.index = candidate\n\n def action_cursor_up(self) -> None:\n \"\"\"Highlight the previous item in the list.\"\"\"\n candidate = _widget_navigation.find_next_enabled(\n self._nodes,\n anchor=self.index,\n direction=-1,\n )\n if self.index is not None and candidate is not None and candidate > self.index:\n return # Avoid wrapping around.\n\n self.index = candidate\n\n def _on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:\n event.stop()\n self.focus()\n self.index = self._nodes.index(event.item)\n self.post_message(self.Selected(self, event.item))\n\n def _scroll_highlighted_region(self) -> None:\n \"\"\"Used to keep the highlighted index within vision\"\"\"\n if self.highlighted_child is not None:\n self.call_after_refresh(\n self.scroll_to_widget, self.highlighted_child, animate=False\n )\n\n def __len__(self) -> int:\n \"\"\"Compute the length (in number of items) of the list view.\"\"\"\n return len(self._nodes)\n", "path": "src/textual/widgets/_list_view.py"}]}
| 3,655 | 161 |
gh_patches_debug_12655
|
rasdani/github-patches
|
git_diff
|
deis__deis-3535
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with `deis certs:remove`
Getting the following error when trying to remove a cert.
```
$ deis certs:remove '*.brandfolder.com'
Removing *.brandfolder.com... 405 METHOD NOT ALLOWED
Detail:
Method 'DELETE' not allowed.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/api/urls.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.conf.urls import include, patterns, url
5
6 from api import routers, views
7
8
9 router = routers.ApiRouter()
10
11 # Add the generated REST URLs and login/logout endpoint
12 urlpatterns = patterns(
13 '',
14 url(r'^', include(router.urls)),
15 # application release components
16 url(r'^apps/(?P<id>{})/config/?'.format(settings.APP_URL_REGEX),
17 views.ConfigViewSet.as_view({'get': 'retrieve', 'post': 'create'})),
18 url(r'^apps/(?P<id>{})/builds/(?P<uuid>[-_\w]+)/?'.format(settings.APP_URL_REGEX),
19 views.BuildViewSet.as_view({'get': 'retrieve'})),
20 url(r'^apps/(?P<id>{})/builds/?'.format(settings.APP_URL_REGEX),
21 views.BuildViewSet.as_view({'get': 'list', 'post': 'create'})),
22 url(r'^apps/(?P<id>{})/releases/v(?P<version>[0-9]+)/?'.format(settings.APP_URL_REGEX),
23 views.ReleaseViewSet.as_view({'get': 'retrieve'})),
24 url(r'^apps/(?P<id>{})/releases/rollback/?'.format(settings.APP_URL_REGEX),
25 views.ReleaseViewSet.as_view({'post': 'rollback'})),
26 url(r'^apps/(?P<id>{})/releases/?'.format(settings.APP_URL_REGEX),
27 views.ReleaseViewSet.as_view({'get': 'list'})),
28 # application infrastructure
29 url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\w]+)/(?P<num>[-_\w]+)/?'.format(
30 settings.APP_URL_REGEX),
31 views.ContainerViewSet.as_view({'get': 'retrieve'})),
32 url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\w.]+)/?'.format(settings.APP_URL_REGEX),
33 views.ContainerViewSet.as_view({'get': 'list'})),
34 url(r'^apps/(?P<id>{})/containers/?'.format(settings.APP_URL_REGEX),
35 views.ContainerViewSet.as_view({'get': 'list'})),
36 # application domains
37 url(r'^apps/(?P<id>{})/domains/(?P<domain>[-\._\w]+)/?'.format(settings.APP_URL_REGEX),
38 views.DomainViewSet.as_view({'delete': 'destroy'})),
39 url(r'^apps/(?P<id>{})/domains/?'.format(settings.APP_URL_REGEX),
40 views.DomainViewSet.as_view({'post': 'create', 'get': 'list'})),
41 # application actions
42 url(r'^apps/(?P<id>{})/scale/?'.format(settings.APP_URL_REGEX),
43 views.AppViewSet.as_view({'post': 'scale'})),
44 url(r'^apps/(?P<id>{})/logs/?'.format(settings.APP_URL_REGEX),
45 views.AppViewSet.as_view({'get': 'logs'})),
46 url(r'^apps/(?P<id>{})/run/?'.format(settings.APP_URL_REGEX),
47 views.AppViewSet.as_view({'post': 'run'})),
48 # apps sharing
49 url(r'^apps/(?P<id>{})/perms/(?P<username>[-_\w]+)/?'.format(settings.APP_URL_REGEX),
50 views.AppPermsViewSet.as_view({'delete': 'destroy'})),
51 url(r'^apps/(?P<id>{})/perms/?'.format(settings.APP_URL_REGEX),
52 views.AppPermsViewSet.as_view({'get': 'list', 'post': 'create'})),
53 # apps base endpoint
54 url(r'^apps/(?P<id>{})/?'.format(settings.APP_URL_REGEX),
55 views.AppViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),
56 url(r'^apps/?',
57 views.AppViewSet.as_view({'get': 'list', 'post': 'create'})),
58 # key
59 url(r'^keys/(?P<id>.+)/?',
60 views.KeyViewSet.as_view({
61 'get': 'retrieve', 'delete': 'destroy'})),
62 url(r'^keys/?',
63 views.KeyViewSet.as_view({'get': 'list', 'post': 'create'})),
64 # hooks
65 url(r'^hooks/push/?',
66 views.PushHookViewSet.as_view({'post': 'create'})),
67 url(r'^hooks/build/?',
68 views.BuildHookViewSet.as_view({'post': 'create'})),
69 url(r'^hooks/config/?',
70 views.ConfigHookViewSet.as_view({'post': 'create'})),
71 # authn / authz
72 url(r'^auth/register/?',
73 views.UserRegistrationViewSet.as_view({'post': 'create'})),
74 url(r'^auth/cancel/?',
75 views.UserManagementViewSet.as_view({'delete': 'destroy'})),
76 url(r'^auth/passwd/?',
77 views.UserManagementViewSet.as_view({'post': 'passwd'})),
78 url(r'^auth/login/',
79 'rest_framework.authtoken.views.obtain_auth_token'),
80 # admin sharing
81 url(r'^admin/perms/(?P<username>[-_\w]+)/?',
82 views.AdminPermsViewSet.as_view({'delete': 'destroy'})),
83 url(r'^admin/perms/?',
84 views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})),
85 url(r'^certs/(?P<common_name>[-_.\w]+)/?'.format(settings.APP_URL_REGEX),
86 views.CertificateViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),
87 url(r'^certs/?',
88 views.CertificateViewSet.as_view({'get': 'list', 'post': 'create'})),
89 # list users
90 url(r'^users/', views.UserView.as_view({'get': 'list'})),
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/controller/api/urls.py b/controller/api/urls.py
--- a/controller/api/urls.py
+++ b/controller/api/urls.py
@@ -82,7 +82,7 @@
views.AdminPermsViewSet.as_view({'delete': 'destroy'})),
url(r'^admin/perms/?',
views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})),
- url(r'^certs/(?P<common_name>[-_.\w]+)/?'.format(settings.APP_URL_REGEX),
+ url(r'^certs/(?P<common_name>[-_*.\w]+)/?'.format(settings.APP_URL_REGEX),
views.CertificateViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),
url(r'^certs/?',
views.CertificateViewSet.as_view({'get': 'list', 'post': 'create'})),
|
{"golden_diff": "diff --git a/controller/api/urls.py b/controller/api/urls.py\n--- a/controller/api/urls.py\n+++ b/controller/api/urls.py\n@@ -82,7 +82,7 @@\n views.AdminPermsViewSet.as_view({'delete': 'destroy'})),\n url(r'^admin/perms/?',\n views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})),\n- url(r'^certs/(?P<common_name>[-_.\\w]+)/?'.format(settings.APP_URL_REGEX),\n+ url(r'^certs/(?P<common_name>[-_*.\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.CertificateViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^certs/?',\n views.CertificateViewSet.as_view({'get': 'list', 'post': 'create'})),\n", "issue": "Error with `deis certs:remove`\nGetting the following error when trying to remove a cert.\n\n```\n$ deis certs:remove '*.brandfolder.com'\nRemoving *.brandfolder.com... 405 METHOD NOT ALLOWED\nDetail:\nMethod 'DELETE' not allowed.\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import include, patterns, url\n\nfrom api import routers, views\n\n\nrouter = routers.ApiRouter()\n\n# Add the generated REST URLs and login/logout endpoint\nurlpatterns = patterns(\n '',\n url(r'^', include(router.urls)),\n # application release components\n url(r'^apps/(?P<id>{})/config/?'.format(settings.APP_URL_REGEX),\n views.ConfigViewSet.as_view({'get': 'retrieve', 'post': 'create'})),\n url(r'^apps/(?P<id>{})/builds/(?P<uuid>[-_\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.BuildViewSet.as_view({'get': 'retrieve'})),\n url(r'^apps/(?P<id>{})/builds/?'.format(settings.APP_URL_REGEX),\n views.BuildViewSet.as_view({'get': 'list', 'post': 'create'})),\n url(r'^apps/(?P<id>{})/releases/v(?P<version>[0-9]+)/?'.format(settings.APP_URL_REGEX),\n views.ReleaseViewSet.as_view({'get': 'retrieve'})),\n url(r'^apps/(?P<id>{})/releases/rollback/?'.format(settings.APP_URL_REGEX),\n views.ReleaseViewSet.as_view({'post': 'rollback'})),\n url(r'^apps/(?P<id>{})/releases/?'.format(settings.APP_URL_REGEX),\n views.ReleaseViewSet.as_view({'get': 'list'})),\n # application infrastructure\n url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\\w]+)/(?P<num>[-_\\w]+)/?'.format(\n settings.APP_URL_REGEX),\n views.ContainerViewSet.as_view({'get': 'retrieve'})),\n url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\\w.]+)/?'.format(settings.APP_URL_REGEX),\n views.ContainerViewSet.as_view({'get': 'list'})),\n url(r'^apps/(?P<id>{})/containers/?'.format(settings.APP_URL_REGEX),\n views.ContainerViewSet.as_view({'get': 'list'})),\n # application domains\n url(r'^apps/(?P<id>{})/domains/(?P<domain>[-\\._\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.DomainViewSet.as_view({'delete': 'destroy'})),\n url(r'^apps/(?P<id>{})/domains/?'.format(settings.APP_URL_REGEX),\n views.DomainViewSet.as_view({'post': 'create', 'get': 'list'})),\n # application actions\n url(r'^apps/(?P<id>{})/scale/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'post': 'scale'})),\n url(r'^apps/(?P<id>{})/logs/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'get': 'logs'})),\n url(r'^apps/(?P<id>{})/run/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'post': 'run'})),\n # apps sharing\n url(r'^apps/(?P<id>{})/perms/(?P<username>[-_\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.AppPermsViewSet.as_view({'delete': 'destroy'})),\n url(r'^apps/(?P<id>{})/perms/?'.format(settings.APP_URL_REGEX),\n views.AppPermsViewSet.as_view({'get': 'list', 'post': 'create'})),\n # apps base endpoint\n 
url(r'^apps/(?P<id>{})/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^apps/?',\n views.AppViewSet.as_view({'get': 'list', 'post': 'create'})),\n # key\n url(r'^keys/(?P<id>.+)/?',\n views.KeyViewSet.as_view({\n 'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^keys/?',\n views.KeyViewSet.as_view({'get': 'list', 'post': 'create'})),\n # hooks\n url(r'^hooks/push/?',\n views.PushHookViewSet.as_view({'post': 'create'})),\n url(r'^hooks/build/?',\n views.BuildHookViewSet.as_view({'post': 'create'})),\n url(r'^hooks/config/?',\n views.ConfigHookViewSet.as_view({'post': 'create'})),\n # authn / authz\n url(r'^auth/register/?',\n views.UserRegistrationViewSet.as_view({'post': 'create'})),\n url(r'^auth/cancel/?',\n views.UserManagementViewSet.as_view({'delete': 'destroy'})),\n url(r'^auth/passwd/?',\n views.UserManagementViewSet.as_view({'post': 'passwd'})),\n url(r'^auth/login/',\n 'rest_framework.authtoken.views.obtain_auth_token'),\n # admin sharing\n url(r'^admin/perms/(?P<username>[-_\\w]+)/?',\n views.AdminPermsViewSet.as_view({'delete': 'destroy'})),\n url(r'^admin/perms/?',\n views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})),\n url(r'^certs/(?P<common_name>[-_.\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.CertificateViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^certs/?',\n views.CertificateViewSet.as_view({'get': 'list', 'post': 'create'})),\n # list users\n url(r'^users/', views.UserView.as_view({'get': 'list'})),\n)\n", "path": "controller/api/urls.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import include, patterns, url\n\nfrom api import routers, views\n\n\nrouter = routers.ApiRouter()\n\n# Add the generated REST URLs and login/logout endpoint\nurlpatterns = patterns(\n '',\n url(r'^', include(router.urls)),\n # application release components\n url(r'^apps/(?P<id>{})/config/?'.format(settings.APP_URL_REGEX),\n views.ConfigViewSet.as_view({'get': 'retrieve', 'post': 'create'})),\n url(r'^apps/(?P<id>{})/builds/(?P<uuid>[-_\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.BuildViewSet.as_view({'get': 'retrieve'})),\n url(r'^apps/(?P<id>{})/builds/?'.format(settings.APP_URL_REGEX),\n views.BuildViewSet.as_view({'get': 'list', 'post': 'create'})),\n url(r'^apps/(?P<id>{})/releases/v(?P<version>[0-9]+)/?'.format(settings.APP_URL_REGEX),\n views.ReleaseViewSet.as_view({'get': 'retrieve'})),\n url(r'^apps/(?P<id>{})/releases/rollback/?'.format(settings.APP_URL_REGEX),\n views.ReleaseViewSet.as_view({'post': 'rollback'})),\n url(r'^apps/(?P<id>{})/releases/?'.format(settings.APP_URL_REGEX),\n views.ReleaseViewSet.as_view({'get': 'list'})),\n # application infrastructure\n url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\\w]+)/(?P<num>[-_\\w]+)/?'.format(\n settings.APP_URL_REGEX),\n views.ContainerViewSet.as_view({'get': 'retrieve'})),\n url(r'^apps/(?P<id>{})/containers/(?P<type>[-_\\w.]+)/?'.format(settings.APP_URL_REGEX),\n views.ContainerViewSet.as_view({'get': 'list'})),\n url(r'^apps/(?P<id>{})/containers/?'.format(settings.APP_URL_REGEX),\n views.ContainerViewSet.as_view({'get': 'list'})),\n # application domains\n url(r'^apps/(?P<id>{})/domains/(?P<domain>[-\\._\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.DomainViewSet.as_view({'delete': 'destroy'})),\n url(r'^apps/(?P<id>{})/domains/?'.format(settings.APP_URL_REGEX),\n views.DomainViewSet.as_view({'post': 'create', 'get': 
'list'})),\n # application actions\n url(r'^apps/(?P<id>{})/scale/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'post': 'scale'})),\n url(r'^apps/(?P<id>{})/logs/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'get': 'logs'})),\n url(r'^apps/(?P<id>{})/run/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'post': 'run'})),\n # apps sharing\n url(r'^apps/(?P<id>{})/perms/(?P<username>[-_\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.AppPermsViewSet.as_view({'delete': 'destroy'})),\n url(r'^apps/(?P<id>{})/perms/?'.format(settings.APP_URL_REGEX),\n views.AppPermsViewSet.as_view({'get': 'list', 'post': 'create'})),\n # apps base endpoint\n url(r'^apps/(?P<id>{})/?'.format(settings.APP_URL_REGEX),\n views.AppViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^apps/?',\n views.AppViewSet.as_view({'get': 'list', 'post': 'create'})),\n # key\n url(r'^keys/(?P<id>.+)/?',\n views.KeyViewSet.as_view({\n 'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^keys/?',\n views.KeyViewSet.as_view({'get': 'list', 'post': 'create'})),\n # hooks\n url(r'^hooks/push/?',\n views.PushHookViewSet.as_view({'post': 'create'})),\n url(r'^hooks/build/?',\n views.BuildHookViewSet.as_view({'post': 'create'})),\n url(r'^hooks/config/?',\n views.ConfigHookViewSet.as_view({'post': 'create'})),\n # authn / authz\n url(r'^auth/register/?',\n views.UserRegistrationViewSet.as_view({'post': 'create'})),\n url(r'^auth/cancel/?',\n views.UserManagementViewSet.as_view({'delete': 'destroy'})),\n url(r'^auth/passwd/?',\n views.UserManagementViewSet.as_view({'post': 'passwd'})),\n url(r'^auth/login/',\n 'rest_framework.authtoken.views.obtain_auth_token'),\n # admin sharing\n url(r'^admin/perms/(?P<username>[-_\\w]+)/?',\n views.AdminPermsViewSet.as_view({'delete': 'destroy'})),\n url(r'^admin/perms/?',\n views.AdminPermsViewSet.as_view({'get': 'list', 'post': 'create'})),\n url(r'^certs/(?P<common_name>[-_*.\\w]+)/?'.format(settings.APP_URL_REGEX),\n views.CertificateViewSet.as_view({'get': 'retrieve', 'delete': 'destroy'})),\n url(r'^certs/?',\n views.CertificateViewSet.as_view({'get': 'list', 'post': 'create'})),\n # list users\n url(r'^users/', views.UserView.as_view({'get': 'list'})),\n)\n", "path": "controller/api/urls.py"}]}
| 1,699 | 189 |
gh_patches_debug_11545 | rasdani/github-patches | git_diff | lisa-lab__pylearn2-1300 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test_video.test_spatiotemporal_cubes is randomly failing
The test `utils.tests.test_video.test_spatiotemporal_cubes` seems to be randomly failing on some Travis builds. Both @jych and @daemonmaker have experienced this. From [comments in the code](https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/utils/tests/test_video.py#L27-L28) it seems like this is not the first time. Was a reason ever discovered?
test_video.test_spatiotemporal_cubes is randomly failing
The test `utils.tests.test_video.test_spatiotemporal_cubes` seems to be randomly failing on some Travis builds. Both @jych and @daemonmaker have experienced this. From [comments in the code](https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/utils/tests/test_video.py#L27-L28) it seems like this is not the first time. Was a reason ever discovered?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pylearn2/utils/video.py`
Content:
```
1 """
2 Utilities for working with videos, pulling out patches, etc.
3 """
4 import numpy
5
6 from pylearn2.compat import OrderedDict
7 from pylearn2.utils.rng import make_np_rng
8
9 __author__ = "David Warde-Farley"
10 __copyright__ = "Copyright 2011, David Warde-Farley / Universite de Montreal"
11 __license__ = "BSD"
12 __maintainer__ = "David Warde-Farley"
13 __email__ = "wardefar@iro"
14 __all__ = ["get_video_dims", "spatiotemporal_cubes"]
15
16
17 def get_video_dims(fname):
18 """
19 Pull out the frame length, spatial height and spatial width of
20 a video file using ffmpeg.
21
22 Parameters
23 ----------
24 fname : str
25 Path to video file to be inspected.
26
27 Returns
28 -------
29 shape : tuple
30 The spatiotemporal dimensions of the video
31 (length, height, width).
32 """
33 try:
34 import pyffmpeg
35 except ImportError:
36 raise ImportError("This function requires pyffmpeg "
37 "<http://code.google.com/p/pyffmpeg/>")
38 mp = pyffmpeg.FFMpegReader()
39 try:
40 mp.open(fname)
41 tracks = mp.get_tracks()
42 for track in tracks:
43 if isinstance(track, pyffmpeg.VideoTrack):
44 break
45 else:
46 raise ValueError('no video track found')
47 return (track.duration(),) + track.get_orig_size()
48 finally:
49 mp.close()
50
51
52 class FrameLookup(object):
53 """
54 Class encapsulating the logic of turning a frame index into a
55 collection of files into the frame index of a specific video file.
56
57 Item-indexing on this object will yield a (filename, nframes, frame_no)
58 tuple, where nframes is the number of frames in the given file
59 (mainly for checking that we're far enough from the end so that we
60 can sample a big enough chunk).
61
62 Parameters
63 ----------
64 names_ang_lengths : WRITEME
65 """
66 def __init__(self, names_and_lengths):
67 self.files, self.lengths = zip(*names_and_lengths)
68 self.terminals = numpy.cumsum([s[1] for s in names_and_lengths])
69
70 def __getitem__(self, i):
71 idx = (i < self.terminals).nonzero()[0][0]
72 frame_no = i
73 if idx > 0:
74 frame_no -= self.terminals[idx - 1]
75 return self.files[idx], self.lengths[idx], frame_no
76
77 def __len__(self):
78 return self.terminals[-1]
79
80 def __iter__(self):
81 raise TypeError('iteration not supported')
82
83
84 def spatiotemporal_cubes(file_tuples, shape, n_patches=numpy.inf, rng=None):
85 """
86 Generator function that yields a stream of (filename, slicetuple)
87 representing a spatiotemporal patch of that file.
88
89 Parameters
90 ----------
91 file_tuples : list of tuples
92 Each element should be a 2-tuple consisting of a filename
93 (or arbitrary identifier) and a (length, height, width)
94 shape tuple of the dimensions (number of frames in the video,
95 height and width of each frame).
96
97 shape : tuple
98 A shape tuple consisting of the desired (length, height, width)
99 of each spatiotemporal patch.
100
101 n_patches : int, optional
102 The number of patches to generate. By default, generates patches
103 infinitely.
104
105 rng : RandomState object or seed, optional
106 The random number generator (or seed) to use. Defaults to None,
107 meaning it will be seeded from /dev/urandom or the clock.
108
109 Returns
110 -------
111 generator : generator object
112 A generator that yields a stream of (filename, slicetuple) tuples.
113 The slice tuple is such that it indexes into a 3D array containing
114 the entire clip with frames indexed along the first axis, rows
115 along the second and columns along the third.
116 """
117 frame_lookup = FrameLookup([(a, b[0]) for a, b in file_tuples])
118 file_lookup = OrderedDict(file_tuples)
119 patch_length, patch_height, patch_width = shape
120 done = 0
121 rng = make_np_rng(rng, which_method="random_integers")
122 while done < n_patches:
123 frame = rng.random_integers(0, len(frame_lookup) - 1)
124 filename, file_length, frame_no = frame_lookup[frame]
125 # Check that there is a contiguous block of frames starting at
126 # frame_no that is at least as long as our desired cube length.
127 if file_length - frame_no < patch_length:
128 continue
129 _, video_height, video_width = file_lookup[filename][:3]
130 # The last row and column in which a patch could "start" to still
131 # fall within frame.
132 last_row = video_height - patch_height
133 last_col = video_width - patch_width
134 row = numpy.random.random_integers(0, last_row)
135 col = numpy.random.random_integers(0, last_col)
136 patch_slice = (slice(frame_no, frame_no + patch_length),
137 slice(row, row + patch_height),
138 slice(col, col + patch_width))
139 done += 1
140 yield filename, patch_slice
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pylearn2/utils/video.py b/pylearn2/utils/video.py
--- a/pylearn2/utils/video.py
+++ b/pylearn2/utils/video.py
@@ -131,8 +131,8 @@
# fall within frame.
last_row = video_height - patch_height
last_col = video_width - patch_width
- row = numpy.random.random_integers(0, last_row)
- col = numpy.random.random_integers(0, last_col)
+ row = rng.random_integers(0, last_row)
+ col = rng.random_integers(0, last_col)
patch_slice = (slice(frame_no, frame_no + patch_length),
slice(row, row + patch_height),
slice(col, col + patch_width))
|
{"golden_diff": "diff --git a/pylearn2/utils/video.py b/pylearn2/utils/video.py\n--- a/pylearn2/utils/video.py\n+++ b/pylearn2/utils/video.py\n@@ -131,8 +131,8 @@\n # fall within frame.\n last_row = video_height - patch_height\n last_col = video_width - patch_width\n- row = numpy.random.random_integers(0, last_row)\n- col = numpy.random.random_integers(0, last_col)\n+ row = rng.random_integers(0, last_row)\n+ col = rng.random_integers(0, last_col)\n patch_slice = (slice(frame_no, frame_no + patch_length),\n slice(row, row + patch_height),\n slice(col, col + patch_width))\n", "issue": "test_video.test_spatiotemporal_cubes is randomly failing\nThe test `utils.tests.test_video.test_spatiotemporal_cubes` seems to be randomly failing on some Travis builds. Both @jych and @daemonmaker have experienced this. From [comments in the code](https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/utils/tests/test_video.py#L27-L28) it seems like this is not the first time. Was a reason ever discovered?\n\ntest_video.test_spatiotemporal_cubes is randomly failing\nThe test `utils.tests.test_video.test_spatiotemporal_cubes` seems to be randomly failing on some Travis builds. Both @jych and @daemonmaker have experienced this. From [comments in the code](https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/utils/tests/test_video.py#L27-L28) it seems like this is not the first time. Was a reason ever discovered?\n\n", "before_files": [{"content": "\"\"\"\nUtilities for working with videos, pulling out patches, etc.\n\"\"\"\nimport numpy\n\nfrom pylearn2.compat import OrderedDict\nfrom pylearn2.utils.rng import make_np_rng\n\n__author__ = \"David Warde-Farley\"\n__copyright__ = \"Copyright 2011, David Warde-Farley / Universite de Montreal\"\n__license__ = \"BSD\"\n__maintainer__ = \"David Warde-Farley\"\n__email__ = \"wardefar@iro\"\n__all__ = [\"get_video_dims\", \"spatiotemporal_cubes\"]\n\n\ndef get_video_dims(fname):\n \"\"\"\n Pull out the frame length, spatial height and spatial width of\n a video file using ffmpeg.\n\n Parameters\n ----------\n fname : str\n Path to video file to be inspected.\n\n Returns\n -------\n shape : tuple\n The spatiotemporal dimensions of the video\n (length, height, width).\n \"\"\"\n try:\n import pyffmpeg\n except ImportError:\n raise ImportError(\"This function requires pyffmpeg \"\n \"<http://code.google.com/p/pyffmpeg/>\")\n mp = pyffmpeg.FFMpegReader()\n try:\n mp.open(fname)\n tracks = mp.get_tracks()\n for track in tracks:\n if isinstance(track, pyffmpeg.VideoTrack):\n break\n else:\n raise ValueError('no video track found')\n return (track.duration(),) + track.get_orig_size()\n finally:\n mp.close()\n\n\nclass FrameLookup(object):\n \"\"\"\n Class encapsulating the logic of turning a frame index into a\n collection of files into the frame index of a specific video file.\n\n Item-indexing on this object will yield a (filename, nframes, frame_no)\n tuple, where nframes is the number of frames in the given file\n (mainly for checking that we're far enough from the end so that we\n can sample a big enough chunk).\n\n Parameters\n ----------\n names_ang_lengths : WRITEME\n \"\"\"\n def __init__(self, names_and_lengths):\n self.files, self.lengths = zip(*names_and_lengths)\n self.terminals = numpy.cumsum([s[1] for s in names_and_lengths])\n\n def __getitem__(self, i):\n idx = (i < self.terminals).nonzero()[0][0]\n frame_no = i\n if idx > 0:\n frame_no -= self.terminals[idx - 1]\n return self.files[idx], self.lengths[idx], frame_no\n\n def __len__(self):\n return 
self.terminals[-1]\n\n def __iter__(self):\n raise TypeError('iteration not supported')\n\n\ndef spatiotemporal_cubes(file_tuples, shape, n_patches=numpy.inf, rng=None):\n \"\"\"\n Generator function that yields a stream of (filename, slicetuple)\n representing a spatiotemporal patch of that file.\n\n Parameters\n ----------\n file_tuples : list of tuples\n Each element should be a 2-tuple consisting of a filename\n (or arbitrary identifier) and a (length, height, width)\n shape tuple of the dimensions (number of frames in the video,\n height and width of each frame).\n\n shape : tuple\n A shape tuple consisting of the desired (length, height, width)\n of each spatiotemporal patch.\n\n n_patches : int, optional\n The number of patches to generate. By default, generates patches\n infinitely.\n\n rng : RandomState object or seed, optional\n The random number generator (or seed) to use. Defaults to None,\n meaning it will be seeded from /dev/urandom or the clock.\n\n Returns\n -------\n generator : generator object\n A generator that yields a stream of (filename, slicetuple) tuples.\n The slice tuple is such that it indexes into a 3D array containing\n the entire clip with frames indexed along the first axis, rows\n along the second and columns along the third.\n \"\"\"\n frame_lookup = FrameLookup([(a, b[0]) for a, b in file_tuples])\n file_lookup = OrderedDict(file_tuples)\n patch_length, patch_height, patch_width = shape\n done = 0\n rng = make_np_rng(rng, which_method=\"random_integers\")\n while done < n_patches:\n frame = rng.random_integers(0, len(frame_lookup) - 1)\n filename, file_length, frame_no = frame_lookup[frame]\n # Check that there is a contiguous block of frames starting at\n # frame_no that is at least as long as our desired cube length.\n if file_length - frame_no < patch_length:\n continue\n _, video_height, video_width = file_lookup[filename][:3]\n # The last row and column in which a patch could \"start\" to still\n # fall within frame.\n last_row = video_height - patch_height\n last_col = video_width - patch_width\n row = numpy.random.random_integers(0, last_row)\n col = numpy.random.random_integers(0, last_col)\n patch_slice = (slice(frame_no, frame_no + patch_length),\n slice(row, row + patch_height),\n slice(col, col + patch_width))\n done += 1\n yield filename, patch_slice\n", "path": "pylearn2/utils/video.py"}], "after_files": [{"content": "\"\"\"\nUtilities for working with videos, pulling out patches, etc.\n\"\"\"\nimport numpy\n\nfrom pylearn2.compat import OrderedDict\nfrom pylearn2.utils.rng import make_np_rng\n\n__author__ = \"David Warde-Farley\"\n__copyright__ = \"Copyright 2011, David Warde-Farley / Universite de Montreal\"\n__license__ = \"BSD\"\n__maintainer__ = \"David Warde-Farley\"\n__email__ = \"wardefar@iro\"\n__all__ = [\"get_video_dims\", \"spatiotemporal_cubes\"]\n\n\ndef get_video_dims(fname):\n \"\"\"\n Pull out the frame length, spatial height and spatial width of\n a video file using ffmpeg.\n\n Parameters\n ----------\n fname : str\n Path to video file to be inspected.\n\n Returns\n -------\n shape : tuple\n The spatiotemporal dimensions of the video\n (length, height, width).\n \"\"\"\n try:\n import pyffmpeg\n except ImportError:\n raise ImportError(\"This function requires pyffmpeg \"\n \"<http://code.google.com/p/pyffmpeg/>\")\n mp = pyffmpeg.FFMpegReader()\n try:\n mp.open(fname)\n tracks = mp.get_tracks()\n for track in tracks:\n if isinstance(track, pyffmpeg.VideoTrack):\n break\n else:\n raise ValueError('no video track found')\n 
return (track.duration(),) + track.get_orig_size()\n finally:\n mp.close()\n\n\nclass FrameLookup(object):\n \"\"\"\n Class encapsulating the logic of turning a frame index into a\n collection of files into the frame index of a specific video file.\n\n Item-indexing on this object will yield a (filename, nframes, frame_no)\n tuple, where nframes is the number of frames in the given file\n (mainly for checking that we're far enough from the end so that we\n can sample a big enough chunk).\n\n Parameters\n ----------\n names_ang_lengths : WRITEME\n \"\"\"\n def __init__(self, names_and_lengths):\n self.files, self.lengths = zip(*names_and_lengths)\n self.terminals = numpy.cumsum([s[1] for s in names_and_lengths])\n\n def __getitem__(self, i):\n idx = (i < self.terminals).nonzero()[0][0]\n frame_no = i\n if idx > 0:\n frame_no -= self.terminals[idx - 1]\n return self.files[idx], self.lengths[idx], frame_no\n\n def __len__(self):\n return self.terminals[-1]\n\n def __iter__(self):\n raise TypeError('iteration not supported')\n\n\ndef spatiotemporal_cubes(file_tuples, shape, n_patches=numpy.inf, rng=None):\n \"\"\"\n Generator function that yields a stream of (filename, slicetuple)\n representing a spatiotemporal patch of that file.\n\n Parameters\n ----------\n file_tuples : list of tuples\n Each element should be a 2-tuple consisting of a filename\n (or arbitrary identifier) and a (length, height, width)\n shape tuple of the dimensions (number of frames in the video,\n height and width of each frame).\n\n shape : tuple\n A shape tuple consisting of the desired (length, height, width)\n of each spatiotemporal patch.\n\n n_patches : int, optional\n The number of patches to generate. By default, generates patches\n infinitely.\n\n rng : RandomState object or seed, optional\n The random number generator (or seed) to use. Defaults to None,\n meaning it will be seeded from /dev/urandom or the clock.\n\n Returns\n -------\n generator : generator object\n A generator that yields a stream of (filename, slicetuple) tuples.\n The slice tuple is such that it indexes into a 3D array containing\n the entire clip with frames indexed along the first axis, rows\n along the second and columns along the third.\n \"\"\"\n frame_lookup = FrameLookup([(a, b[0]) for a, b in file_tuples])\n file_lookup = OrderedDict(file_tuples)\n patch_length, patch_height, patch_width = shape\n done = 0\n rng = make_np_rng(rng, which_method=\"random_integers\")\n while done < n_patches:\n frame = rng.random_integers(0, len(frame_lookup) - 1)\n filename, file_length, frame_no = frame_lookup[frame]\n # Check that there is a contiguous block of frames starting at\n # frame_no that is at least as long as our desired cube length.\n if file_length - frame_no < patch_length:\n continue\n _, video_height, video_width = file_lookup[filename][:3]\n # The last row and column in which a patch could \"start\" to still\n # fall within frame.\n last_row = video_height - patch_height\n last_col = video_width - patch_width\n row = rng.random_integers(0, last_row)\n col = rng.random_integers(0, last_col)\n patch_slice = (slice(frame_no, frame_no + patch_length),\n slice(row, row + patch_height),\n slice(col, col + patch_width))\n done += 1\n yield filename, patch_slice\n", "path": "pylearn2/utils/video.py"}]}
| 1,937 | 171 |
gh_patches_debug_32647 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1257 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add statistics information for individual checks (especially time of execution)
I would love to have an option to add some statistics to be displayed for each check. This could be very useful for running pre-commit in CI. I had a case recently of a check that run accidentally a bit longer than expected (it was building unnecessary docker image) and I have not realised that it is taking longer - because we had other checks after and I could only see the total execution time.
Also some other useful information that we might see is how many parallel processes were run and how many files were passed as parameters. This might be really useful especially for people who do not understand that pre-commit runs in parallel by default - this can have some undesireable side effects if you forget to set "require_serial" to True when you need. And seeing that in the output of CI might immediately show that something is wrong. For now the output is a bit of "black-box".
An option to see some additional information (`--add-statistics`) might be super-useful.
Example output I imagine (maybe a bit better aligned):
```
Check if image build is needed...........................................Passed [23.5s, 1 process, no-files]
Add licence for all JINJA template files.................................Passed [2.25s, 8 processes, 48 files]
Check Shell scripts syntax correctness...................................Passed [6.00s, 8 processes, 10 files]
Lint dockerfile..........................................................Passed [2.20s, 3 processes, 3 files]
Run mypy.................................................................Passed [12.00s, 8 processes, 1456 files]
Run pylint for main sources.............................................Skipped
Run pylint for tests....................................................Skipped
Run flake8...............................................................Passed [24.05s, 8 processes, 1456 files]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/run.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import os
5 import re
6 import subprocess
7
8 from identify.identify import tags_from_path
9
10 from pre_commit import color
11 from pre_commit import git
12 from pre_commit import output
13 from pre_commit.clientlib import load_config
14 from pre_commit.output import get_hook_message
15 from pre_commit.repository import all_hooks
16 from pre_commit.repository import install_hook_envs
17 from pre_commit.staged_files_only import staged_files_only
18 from pre_commit.util import cmd_output_b
19 from pre_commit.util import noop_context
20
21
22 logger = logging.getLogger('pre_commit')
23
24
25 def filter_by_include_exclude(names, include, exclude):
26 include_re, exclude_re = re.compile(include), re.compile(exclude)
27 return [
28 filename for filename in names
29 if include_re.search(filename)
30 if not exclude_re.search(filename)
31 ]
32
33
34 class Classifier(object):
35 def __init__(self, filenames):
36 # on windows we normalize all filenames to use forward slashes
37 # this makes it easier to filter using the `files:` regex
38 # this also makes improperly quoted shell-based hooks work better
39 # see #1173
40 if os.altsep == '/' and os.sep == '\\':
41 filenames = (f.replace(os.sep, os.altsep) for f in filenames)
42 self.filenames = [f for f in filenames if os.path.lexists(f)]
43 self._types_cache = {}
44
45 def _types_for_file(self, filename):
46 try:
47 return self._types_cache[filename]
48 except KeyError:
49 ret = self._types_cache[filename] = tags_from_path(filename)
50 return ret
51
52 def by_types(self, names, types, exclude_types):
53 types, exclude_types = frozenset(types), frozenset(exclude_types)
54 ret = []
55 for filename in names:
56 tags = self._types_for_file(filename)
57 if tags >= types and not tags & exclude_types:
58 ret.append(filename)
59 return ret
60
61 def filenames_for_hook(self, hook):
62 names = self.filenames
63 names = filter_by_include_exclude(names, hook.files, hook.exclude)
64 names = self.by_types(names, hook.types, hook.exclude_types)
65 return names
66
67
68 def _get_skips(environ):
69 skips = environ.get('SKIP', '')
70 return {skip.strip() for skip in skips.split(',') if skip.strip()}
71
72
73 SKIPPED = 'Skipped'
74 NO_FILES = '(no files to check)'
75
76
77 def _subtle_line(s, use_color):
78 output.write_line(color.format_color(s, color.SUBTLE, use_color))
79
80
81 def _run_single_hook(classifier, hook, skips, cols, verbose, use_color):
82 filenames = classifier.filenames_for_hook(hook)
83
84 if hook.language == 'pcre':
85 logger.warning(
86 '`{}` (from {}) uses the deprecated pcre language.\n'
87 'The pcre language is scheduled for removal in pre-commit 2.x.\n'
88 'The pygrep language is a more portable (and usually drop-in) '
89 'replacement.'.format(hook.id, hook.src),
90 )
91
92 if hook.id in skips or hook.alias in skips:
93 output.write(
94 get_hook_message(
95 hook.name,
96 end_msg=SKIPPED,
97 end_color=color.YELLOW,
98 use_color=use_color,
99 cols=cols,
100 ),
101 )
102 retcode = 0
103 files_modified = False
104 out = b''
105 elif not filenames and not hook.always_run:
106 output.write(
107 get_hook_message(
108 hook.name,
109 postfix=NO_FILES,
110 end_msg=SKIPPED,
111 end_color=color.TURQUOISE,
112 use_color=use_color,
113 cols=cols,
114 ),
115 )
116 retcode = 0
117 files_modified = False
118 out = b''
119 else:
120 # print hook and dots first in case the hook takes a while to run
121 output.write(get_hook_message(hook.name, end_len=6, cols=cols))
122
123 diff_cmd = ('git', 'diff', '--no-ext-diff')
124 diff_before = cmd_output_b(*diff_cmd, retcode=None)
125 filenames = tuple(filenames) if hook.pass_filenames else ()
126 retcode, out = hook.run(filenames, use_color)
127 diff_after = cmd_output_b(*diff_cmd, retcode=None)
128
129 # if the hook makes changes, fail the commit
130 files_modified = diff_before != diff_after
131
132 if retcode or files_modified:
133 print_color = color.RED
134 status = 'Failed'
135 else:
136 print_color = color.GREEN
137 status = 'Passed'
138
139 output.write_line(color.format_color(status, print_color, use_color))
140
141 if verbose or hook.verbose or retcode or files_modified:
142 _subtle_line('- hook id: {}'.format(hook.id), use_color)
143
144 if retcode:
145 _subtle_line('- exit code: {}'.format(retcode), use_color)
146
147 # Print a message if failing due to file modifications
148 if files_modified:
149 _subtle_line('- files were modified by this hook', use_color)
150
151 if out.strip():
152 output.write_line()
153 output.write_line(out.strip(), logfile_name=hook.log_file)
154 output.write_line()
155
156 return files_modified or bool(retcode)
157
158
159 def _compute_cols(hooks):
160 """Compute the number of columns to display hook messages. The widest
161 that will be displayed is in the no files skipped case:
162
163 Hook name...(no files to check) Skipped
164 """
165 if hooks:
166 name_len = max(len(hook.name) for hook in hooks)
167 else:
168 name_len = 0
169
170 cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)
171 return max(cols, 80)
172
173
174 def _all_filenames(args):
175 if args.origin and args.source:
176 return git.get_changed_files(args.origin, args.source)
177 elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:
178 return (args.commit_msg_filename,)
179 elif args.files:
180 return args.files
181 elif args.all_files:
182 return git.get_all_files()
183 elif git.is_in_merge_conflict():
184 return git.get_conflicted_files()
185 else:
186 return git.get_staged_files()
187
188
189 def _run_hooks(config, hooks, args, environ):
190 """Actually run the hooks."""
191 skips = _get_skips(environ)
192 cols = _compute_cols(hooks)
193 filenames = _all_filenames(args)
194 filenames = filter_by_include_exclude(
195 filenames, config['files'], config['exclude'],
196 )
197 classifier = Classifier(filenames)
198 retval = 0
199 for hook in hooks:
200 retval |= _run_single_hook(
201 classifier, hook, skips, cols,
202 verbose=args.verbose, use_color=args.color,
203 )
204 if retval and config['fail_fast']:
205 break
206 if retval and args.show_diff_on_failure and git.has_diff():
207 if args.all_files:
208 output.write_line(
209 'pre-commit hook(s) made changes.\n'
210 'If you are seeing this message in CI, '
211 'reproduce locally with: `pre-commit run --all-files`.\n'
212 'To run `pre-commit` as part of git workflow, use '
213 '`pre-commit install`.',
214 )
215 output.write_line('All changes made by hooks:')
216 # args.color is a boolean.
217 # See user_color function in color.py
218 subprocess.call((
219 'git', '--no-pager', 'diff', '--no-ext-diff',
220 '--color={}'.format({True: 'always', False: 'never'}[args.color]),
221 ))
222
223 return retval
224
225
226 def _has_unmerged_paths():
227 _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')
228 return bool(stdout.strip())
229
230
231 def _has_unstaged_config(config_file):
232 retcode, _, _ = cmd_output_b(
233 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,
234 retcode=None,
235 )
236 # be explicit, other git errors don't mean it has an unstaged config.
237 return retcode == 1
238
239
240 def run(config_file, store, args, environ=os.environ):
241 no_stash = args.all_files or bool(args.files)
242
243 # Check if we have unresolved merge conflict files and fail fast.
244 if _has_unmerged_paths():
245 logger.error('Unmerged files. Resolve before committing.')
246 return 1
247 if bool(args.source) != bool(args.origin):
248 logger.error('Specify both --origin and --source.')
249 return 1
250 if _has_unstaged_config(config_file) and not no_stash:
251 logger.error(
252 'Your pre-commit configuration is unstaged.\n'
253 '`git add {}` to fix this.'.format(config_file),
254 )
255 return 1
256
257 # Expose origin / source as environment variables for hooks to consume
258 if args.origin and args.source:
259 environ['PRE_COMMIT_ORIGIN'] = args.origin
260 environ['PRE_COMMIT_SOURCE'] = args.source
261
262 if no_stash:
263 ctx = noop_context()
264 else:
265 ctx = staged_files_only(store.directory)
266
267 with ctx:
268 config = load_config(config_file)
269 hooks = [
270 hook
271 for hook in all_hooks(config, store)
272 if not args.hook or hook.id == args.hook or hook.alias == args.hook
273 if args.hook_stage in hook.stages
274 ]
275
276 if args.hook and not hooks:
277 output.write_line(
278 'No hook with id `{}` in stage `{}`'.format(
279 args.hook, args.hook_stage,
280 ),
281 )
282 return 1
283
284 install_hook_envs(hooks, store)
285
286 return _run_hooks(config, hooks, args, environ)
287
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py
--- a/pre_commit/commands/run.py
+++ b/pre_commit/commands/run.py
@@ -4,6 +4,7 @@
import os
import re
import subprocess
+import time
from identify.identify import tags_from_path
@@ -99,6 +100,7 @@
cols=cols,
),
)
+ duration = None
retcode = 0
files_modified = False
out = b''
@@ -113,6 +115,7 @@
cols=cols,
),
)
+ duration = None
retcode = 0
files_modified = False
out = b''
@@ -123,7 +126,9 @@
diff_cmd = ('git', 'diff', '--no-ext-diff')
diff_before = cmd_output_b(*diff_cmd, retcode=None)
filenames = tuple(filenames) if hook.pass_filenames else ()
+ time_before = time.time()
retcode, out = hook.run(filenames, use_color)
+ duration = round(time.time() - time_before, 2) or 0
diff_after = cmd_output_b(*diff_cmd, retcode=None)
# if the hook makes changes, fail the commit
@@ -141,6 +146,9 @@
if verbose or hook.verbose or retcode or files_modified:
_subtle_line('- hook id: {}'.format(hook.id), use_color)
+ if (verbose or hook.verbose) and duration is not None:
+ _subtle_line('- duration: {}s'.format(duration), use_color)
+
if retcode:
_subtle_line('- exit code: {}'.format(retcode), use_color)
|
{"golden_diff": "diff --git a/pre_commit/commands/run.py b/pre_commit/commands/run.py\n--- a/pre_commit/commands/run.py\n+++ b/pre_commit/commands/run.py\n@@ -4,6 +4,7 @@\n import os\n import re\n import subprocess\n+import time\n \n from identify.identify import tags_from_path\n \n@@ -99,6 +100,7 @@\n cols=cols,\n ),\n )\n+ duration = None\n retcode = 0\n files_modified = False\n out = b''\n@@ -113,6 +115,7 @@\n cols=cols,\n ),\n )\n+ duration = None\n retcode = 0\n files_modified = False\n out = b''\n@@ -123,7 +126,9 @@\n diff_cmd = ('git', 'diff', '--no-ext-diff')\n diff_before = cmd_output_b(*diff_cmd, retcode=None)\n filenames = tuple(filenames) if hook.pass_filenames else ()\n+ time_before = time.time()\n retcode, out = hook.run(filenames, use_color)\n+ duration = round(time.time() - time_before, 2) or 0\n diff_after = cmd_output_b(*diff_cmd, retcode=None)\n \n # if the hook makes changes, fail the commit\n@@ -141,6 +146,9 @@\n if verbose or hook.verbose or retcode or files_modified:\n _subtle_line('- hook id: {}'.format(hook.id), use_color)\n \n+ if (verbose or hook.verbose) and duration is not None:\n+ _subtle_line('- duration: {}s'.format(duration), use_color)\n+\n if retcode:\n _subtle_line('- exit code: {}'.format(retcode), use_color)\n", "issue": "Add statistics information for individual checks (especially time of execution)\nI would love to have an option to add some statistics to be displayed for each check. This could be very useful for running pre-commit in CI. I had a case recently of a check that run accidentally a bit longer than expected (it was building unnecessary docker image) and I have not realised that it is taking longer - because we had other checks after and I could only see the total execution time.\r\n\r\nAlso some other useful information that we might see is how many parallel processes were run and how many files were passed as parameters. This might be really useful especially for people who do not understand that pre-commit runs in parallel by default - this can have some undesireable side effects if you forget to set \"require_serial\" to True when you need. And seeing that in the output of CI might immediately show that something is wrong. 
For now the output is a bit of \"black-box\".\r\n\r\nAn option to see some additional information (`--add-statistics`) might be super-useful.\r\n\r\nExample output I imagine (maybe a bit better aligned):\r\n\r\n```\r\nCheck if image build is needed...........................................Passed [23.5s, 1 process, no-files] \r\nAdd licence for all JINJA template files.................................Passed [2.25s, 8 processes, 48 files]\r\nCheck Shell scripts syntax correctness...................................Passed [6.00s, 8 processes, 10 files]\r\nLint dockerfile..........................................................Passed [2.20s, 3 processes, 3 files]\r\nRun mypy.................................................................Passed [12.00s, 8 processes, 1456 files]\r\nRun pylint for main sources.............................................Skipped \r\nRun pylint for tests....................................................Skipped\r\nRun flake8...............................................................Passed [24.05s, 8 processes, 1456 files]\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport subprocess\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef filter_by_include_exclude(names, include, exclude):\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in names\n if include_re.search(filename)\n if not exclude_re.search(filename)\n ]\n\n\nclass Classifier(object):\n def __init__(self, filenames):\n # on windows we normalize all filenames to use forward slashes\n # this makes it easier to filter using the `files:` regex\n # this also makes improperly quoted shell-based hooks work better\n # see #1173\n if os.altsep == '/' and os.sep == '\\\\':\n filenames = (f.replace(os.sep, os.altsep) for f in filenames)\n self.filenames = [f for f in filenames if os.path.lexists(f)]\n self._types_cache = {}\n\n def _types_for_file(self, filename):\n try:\n return self._types_cache[filename]\n except KeyError:\n ret = self._types_cache[filename] = tags_from_path(filename)\n return ret\n\n def by_types(self, names, types, exclude_types):\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in names:\n tags = self._types_for_file(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return ret\n\n def filenames_for_hook(self, hook):\n names = self.filenames\n names = filter_by_include_exclude(names, hook.files, hook.exclude)\n names = self.by_types(names, hook.types, hook.exclude_types)\n return names\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _subtle_line(s, use_color):\n output.write_line(color.format_color(s, color.SUBTLE, use_color))\n\n\ndef _run_single_hook(classifier, hook, skips, cols, verbose, use_color):\n filenames = classifier.filenames_for_hook(hook)\n\n if 
hook.language == 'pcre':\n logger.warning(\n '`{}` (from {}) uses the deprecated pcre language.\\n'\n 'The pcre language is scheduled for removal in pre-commit 2.x.\\n'\n 'The pygrep language is a more portable (and usually drop-in) '\n 'replacement.'.format(hook.id, hook.src),\n )\n\n if hook.id in skips or hook.alias in skips:\n output.write(\n get_hook_message(\n hook.name,\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=use_color,\n cols=cols,\n ),\n )\n retcode = 0\n files_modified = False\n out = b''\n elif not filenames and not hook.always_run:\n output.write(\n get_hook_message(\n hook.name,\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=use_color,\n cols=cols,\n ),\n )\n retcode = 0\n files_modified = False\n out = b''\n else:\n # print hook and dots first in case the hook takes a while to run\n output.write(get_hook_message(hook.name, end_len=6, cols=cols))\n\n diff_cmd = ('git', 'diff', '--no-ext-diff')\n diff_before = cmd_output_b(*diff_cmd, retcode=None)\n filenames = tuple(filenames) if hook.pass_filenames else ()\n retcode, out = hook.run(filenames, use_color)\n diff_after = cmd_output_b(*diff_cmd, retcode=None)\n\n # if the hook makes changes, fail the commit\n files_modified = diff_before != diff_after\n\n if retcode or files_modified:\n print_color = color.RED\n status = 'Failed'\n else:\n print_color = color.GREEN\n status = 'Passed'\n\n output.write_line(color.format_color(status, print_color, use_color))\n\n if verbose or hook.verbose or retcode or files_modified:\n _subtle_line('- hook id: {}'.format(hook.id), use_color)\n\n if retcode:\n _subtle_line('- exit code: {}'.format(retcode), use_color)\n\n # Print a message if failing due to file modifications\n if files_modified:\n _subtle_line('- files were modified by this hook', use_color)\n\n if out.strip():\n output.write_line()\n output.write_line(out.strip(), logfile_name=hook.log_file)\n output.write_line()\n\n return files_modified or bool(retcode)\n\n\ndef _compute_cols(hooks):\n \"\"\"Compute the number of columns to display hook messages. 
The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(hook.name) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args):\n if args.origin and args.source:\n return git.get_changed_files(args.origin, args.source)\n elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:\n return (args.commit_msg_filename,)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(config, hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols(hooks)\n filenames = _all_filenames(args)\n filenames = filter_by_include_exclude(\n filenames, config['files'], config['exclude'],\n )\n classifier = Classifier(filenames)\n retval = 0\n for hook in hooks:\n retval |= _run_single_hook(\n classifier, hook, skips, cols,\n verbose=args.verbose, use_color=args.color,\n )\n if retval and config['fail_fast']:\n break\n if retval and args.show_diff_on_failure and git.has_diff():\n if args.all_files:\n output.write_line(\n 'pre-commit hook(s) made changes.\\n'\n 'If you are seeing this message in CI, '\n 'reproduce locally with: `pre-commit run --all-files`.\\n'\n 'To run `pre-commit` as part of git workflow, use '\n '`pre-commit install`.',\n )\n output.write_line('All changes made by hooks:')\n # args.color is a boolean.\n # See user_color function in color.py\n subprocess.call((\n 'git', '--no-pager', 'diff', '--no-ext-diff',\n '--color={}'.format({True: 'always', False: 'never'}[args.color]),\n ))\n\n return retval\n\n\ndef _has_unmerged_paths():\n _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(config_file):\n retcode, _, _ = cmd_output_b(\n 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(config_file, store, args, environ=os.environ):\n no_stash = args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(config_file) and not no_stash:\n logger.error(\n 'Your pre-commit configuration is unstaged.\\n'\n '`git add {}` to fix this.'.format(config_file),\n )\n return 1\n\n # Expose origin / source as environment variables for hooks to consume\n if args.origin and args.source:\n environ['PRE_COMMIT_ORIGIN'] = args.origin\n environ['PRE_COMMIT_SOURCE'] = args.source\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(store.directory)\n\n with ctx:\n config = load_config(config_file)\n hooks = [\n hook\n for hook in all_hooks(config, store)\n if not args.hook or hook.id == args.hook or hook.alias == args.hook\n if args.hook_stage in hook.stages\n ]\n\n if args.hook and not hooks:\n output.write_line(\n 'No hook with id `{}` in stage `{}`'.format(\n args.hook, args.hook_stage,\n ),\n )\n return 1\n\n install_hook_envs(hooks, store)\n\n return _run_hooks(config, hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport subprocess\nimport time\n\nfrom identify.identify import tags_from_path\n\nfrom pre_commit import color\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.output import get_hook_message\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.staged_files_only import staged_files_only\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import noop_context\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef filter_by_include_exclude(names, include, exclude):\n include_re, exclude_re = re.compile(include), re.compile(exclude)\n return [\n filename for filename in names\n if include_re.search(filename)\n if not exclude_re.search(filename)\n ]\n\n\nclass Classifier(object):\n def __init__(self, filenames):\n # on windows we normalize all filenames to use forward slashes\n # this makes it easier to filter using the `files:` regex\n # this also makes improperly quoted shell-based hooks work better\n # see #1173\n if os.altsep == '/' and os.sep == '\\\\':\n filenames = (f.replace(os.sep, os.altsep) for f in filenames)\n self.filenames = [f for f in filenames if os.path.lexists(f)]\n self._types_cache = {}\n\n def _types_for_file(self, filename):\n try:\n return self._types_cache[filename]\n except KeyError:\n ret = self._types_cache[filename] = tags_from_path(filename)\n return ret\n\n def by_types(self, names, types, exclude_types):\n types, exclude_types = frozenset(types), frozenset(exclude_types)\n ret = []\n for filename in names:\n tags = self._types_for_file(filename)\n if tags >= types and not tags & exclude_types:\n ret.append(filename)\n return ret\n\n def filenames_for_hook(self, hook):\n names = self.filenames\n names = filter_by_include_exclude(names, hook.files, hook.exclude)\n names = self.by_types(names, hook.types, hook.exclude_types)\n return names\n\n\ndef _get_skips(environ):\n skips = environ.get('SKIP', '')\n return {skip.strip() for skip in skips.split(',') if skip.strip()}\n\n\nSKIPPED = 'Skipped'\nNO_FILES = '(no files to check)'\n\n\ndef _subtle_line(s, use_color):\n output.write_line(color.format_color(s, color.SUBTLE, use_color))\n\n\ndef _run_single_hook(classifier, hook, skips, cols, verbose, 
use_color):\n filenames = classifier.filenames_for_hook(hook)\n\n if hook.language == 'pcre':\n logger.warning(\n '`{}` (from {}) uses the deprecated pcre language.\\n'\n 'The pcre language is scheduled for removal in pre-commit 2.x.\\n'\n 'The pygrep language is a more portable (and usually drop-in) '\n 'replacement.'.format(hook.id, hook.src),\n )\n\n if hook.id in skips or hook.alias in skips:\n output.write(\n get_hook_message(\n hook.name,\n end_msg=SKIPPED,\n end_color=color.YELLOW,\n use_color=use_color,\n cols=cols,\n ),\n )\n duration = None\n retcode = 0\n files_modified = False\n out = b''\n elif not filenames and not hook.always_run:\n output.write(\n get_hook_message(\n hook.name,\n postfix=NO_FILES,\n end_msg=SKIPPED,\n end_color=color.TURQUOISE,\n use_color=use_color,\n cols=cols,\n ),\n )\n duration = None\n retcode = 0\n files_modified = False\n out = b''\n else:\n # print hook and dots first in case the hook takes a while to run\n output.write(get_hook_message(hook.name, end_len=6, cols=cols))\n\n diff_cmd = ('git', 'diff', '--no-ext-diff')\n diff_before = cmd_output_b(*diff_cmd, retcode=None)\n filenames = tuple(filenames) if hook.pass_filenames else ()\n time_before = time.time()\n retcode, out = hook.run(filenames, use_color)\n duration = round(time.time() - time_before, 2) or 0\n diff_after = cmd_output_b(*diff_cmd, retcode=None)\n\n # if the hook makes changes, fail the commit\n files_modified = diff_before != diff_after\n\n if retcode or files_modified:\n print_color = color.RED\n status = 'Failed'\n else:\n print_color = color.GREEN\n status = 'Passed'\n\n output.write_line(color.format_color(status, print_color, use_color))\n\n if verbose or hook.verbose or retcode or files_modified:\n _subtle_line('- hook id: {}'.format(hook.id), use_color)\n\n if (verbose or hook.verbose) and duration is not None:\n _subtle_line('- duration: {}s'.format(duration), use_color)\n\n if retcode:\n _subtle_line('- exit code: {}'.format(retcode), use_color)\n\n # Print a message if failing due to file modifications\n if files_modified:\n _subtle_line('- files were modified by this hook', use_color)\n\n if out.strip():\n output.write_line()\n output.write_line(out.strip(), logfile_name=hook.log_file)\n output.write_line()\n\n return files_modified or bool(retcode)\n\n\ndef _compute_cols(hooks):\n \"\"\"Compute the number of columns to display hook messages. 
The widest\n that will be displayed is in the no files skipped case:\n\n Hook name...(no files to check) Skipped\n \"\"\"\n if hooks:\n name_len = max(len(hook.name) for hook in hooks)\n else:\n name_len = 0\n\n cols = name_len + 3 + len(NO_FILES) + 1 + len(SKIPPED)\n return max(cols, 80)\n\n\ndef _all_filenames(args):\n if args.origin and args.source:\n return git.get_changed_files(args.origin, args.source)\n elif args.hook_stage in {'prepare-commit-msg', 'commit-msg'}:\n return (args.commit_msg_filename,)\n elif args.files:\n return args.files\n elif args.all_files:\n return git.get_all_files()\n elif git.is_in_merge_conflict():\n return git.get_conflicted_files()\n else:\n return git.get_staged_files()\n\n\ndef _run_hooks(config, hooks, args, environ):\n \"\"\"Actually run the hooks.\"\"\"\n skips = _get_skips(environ)\n cols = _compute_cols(hooks)\n filenames = _all_filenames(args)\n filenames = filter_by_include_exclude(\n filenames, config['files'], config['exclude'],\n )\n classifier = Classifier(filenames)\n retval = 0\n for hook in hooks:\n retval |= _run_single_hook(\n classifier, hook, skips, cols,\n verbose=args.verbose, use_color=args.color,\n )\n if retval and config['fail_fast']:\n break\n if retval and args.show_diff_on_failure and git.has_diff():\n if args.all_files:\n output.write_line(\n 'pre-commit hook(s) made changes.\\n'\n 'If you are seeing this message in CI, '\n 'reproduce locally with: `pre-commit run --all-files`.\\n'\n 'To run `pre-commit` as part of git workflow, use '\n '`pre-commit install`.',\n )\n output.write_line('All changes made by hooks:')\n # args.color is a boolean.\n # See user_color function in color.py\n subprocess.call((\n 'git', '--no-pager', 'diff', '--no-ext-diff',\n '--color={}'.format({True: 'always', False: 'never'}[args.color]),\n ))\n\n return retval\n\n\ndef _has_unmerged_paths():\n _, stdout, _ = cmd_output_b('git', 'ls-files', '--unmerged')\n return bool(stdout.strip())\n\n\ndef _has_unstaged_config(config_file):\n retcode, _, _ = cmd_output_b(\n 'git', 'diff', '--no-ext-diff', '--exit-code', config_file,\n retcode=None,\n )\n # be explicit, other git errors don't mean it has an unstaged config.\n return retcode == 1\n\n\ndef run(config_file, store, args, environ=os.environ):\n no_stash = args.all_files or bool(args.files)\n\n # Check if we have unresolved merge conflict files and fail fast.\n if _has_unmerged_paths():\n logger.error('Unmerged files. 
Resolve before committing.')\n return 1\n if bool(args.source) != bool(args.origin):\n logger.error('Specify both --origin and --source.')\n return 1\n if _has_unstaged_config(config_file) and not no_stash:\n logger.error(\n 'Your pre-commit configuration is unstaged.\\n'\n '`git add {}` to fix this.'.format(config_file),\n )\n return 1\n\n # Expose origin / source as environment variables for hooks to consume\n if args.origin and args.source:\n environ['PRE_COMMIT_ORIGIN'] = args.origin\n environ['PRE_COMMIT_SOURCE'] = args.source\n\n if no_stash:\n ctx = noop_context()\n else:\n ctx = staged_files_only(store.directory)\n\n with ctx:\n config = load_config(config_file)\n hooks = [\n hook\n for hook in all_hooks(config, store)\n if not args.hook or hook.id == args.hook or hook.alias == args.hook\n if args.hook_stage in hook.stages\n ]\n\n if args.hook and not hooks:\n output.write_line(\n 'No hook with id `{}` in stage `{}`'.format(\n args.hook, args.hook_stage,\n ),\n )\n return 1\n\n install_hook_envs(hooks, store)\n\n return _run_hooks(config, hooks, args, environ)\n", "path": "pre_commit/commands/run.py"}]}
| 3,545 | 390 |
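As an aside on the pre-commit `run.py` source embedded in the record above: the `SKIP` handling and the include/exclude filename filtering are easy to demonstrate in isolation. The snippet below is a simplified standalone re-implementation for illustration only; it is not the pre-commit package itself, and the example regexes and values are made up.

```python
import re

def get_skips(environ):
    # Comma-separated hook ids from the SKIP variable, whitespace stripped, empties dropped
    skips = environ.get('SKIP', '')
    return {skip.strip() for skip in skips.split(',') if skip.strip()}

def filter_by_include_exclude(names, include, exclude):
    # Keep names matching the `include` regex and not matching the `exclude` regex
    include_re, exclude_re = re.compile(include), re.compile(exclude)
    return [n for n in names if include_re.search(n) and not exclude_re.search(n)]

print(get_skips({'SKIP': 'flake8, trailing-whitespace,,'}))   # {'flake8', 'trailing-whitespace'}
print(filter_by_include_exclude(['a.py', 'b.txt', 'docs/c.py'], r'\.py$', r'^docs/'))  # ['a.py']
```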
gh_patches_debug_10483 | rasdani/github-patches | git_diff | piskvorky__gensim-2689 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LeveshteinSimilarityIndex fails when called from SparseTermSimilarityMatrix
<!--
**IMPORTANT**:
- Use the [Gensim mailing list](https://groups.google.com/forum/#!forum/gensim) to ask general or usage questions. Github issues are only for bug reports.
- Check [Recipes&FAQ](https://github.com/RaRe-Technologies/gensim/wiki/Recipes-&-FAQ) first for common answers.
Github bug reports that do not include relevant information and context will be closed without an answer. Thanks!
-->
#### Problem description
When trying to build a SparseTermSimilarityMatrix using Levenshtein it fails.
#### Steps/code/corpus to reproduce
```python
index = LevenshteinSimilarityIndex(dictionary)
SparseTermSimilarityMatrix(index, dictionary) # <- fails here
```
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-59-c16b89564835> in <module>
----> 1 similarity_matrix1 = SparseTermSimilarityMatrix(similarity_index1, dictionary)
~/.local/share/virtualenvs/pdftagger-LHy_2RHk/lib/python3.6/site-packages/gensim/similarities/termsim.py in __init__(self, source, dictionary, tfidf, symmetric, positive_definite, nonzero_limit, dtype)
234 for term, similarity in index.most_similar(t1, topn=num_rows)
235 if term in dictionary.token2id
--> 236 ] if num_rows > 0 else []
237
238 if tfidf is None:
~/.local/share/virtualenvs/pdftagger-LHy_2RHk/lib/python3.6/site-packages/gensim/similarities/levenshtein.py in most_similar(self, t1, topn)
151 if similarity > 0
152 )
--> 153 return islice(most_similar, topn)
ValueError: Stop argument for islice() must be None or an integer: 0 <= x <= sys.maxsize.
```
#### Versions
Linux-4.18.0-25-generic-x86_64-with-debian-buster-sid
Python 3.6.9 (default, Nov 18 2019, 15:20:23)
[GCC 8.3.0]
NumPy 1.17.4
SciPy 1.3.3
gensim 3.8.1
FAST_VERSION 1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/similarities/levenshtein.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) 2018 Vit Novotny <[email protected]>
5 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
6
7 """
8 This module provides a namespace for functions that use the Levenshtein distance.
9 """
10
11 from itertools import islice
12 import logging
13 from math import floor
14
15 from gensim.similarities.termsim import TermSimilarityIndex
16
17 logger = logging.getLogger(__name__)
18
19
20 def levdist(t1, t2, max_distance=float("inf")):
21 """Get the Levenshtein distance between two terms.
22
23 Return the Levenshtein distance between two terms. The distance is a
24 number between <1.0, inf>, higher is less similar.
25
26 Parameters
27 ----------
28 t1 : {bytes, str, unicode}
29 The first compared term.
30 t2 : {bytes, str, unicode}
31 The second compared term.
32 max_distance : {int, float}, optional
33 If you don't care about distances larger than a known threshold, a more
34 efficient code path can be taken. For terms that are clearly "too far
35 apart", we will not compute the distance exactly, but we will return
36 `max(len(t1), len(t2))` more quickly, meaning "more than
37 `max_distance`".
38 Default: always compute distance exactly, no threshold clipping.
39
40 Returns
41 -------
42 int
43 The Levenshtein distance between `t1` and `t2`.
44
45 """
46 import Levenshtein
47
48 distance = Levenshtein.distance(t1, t2)
49 if distance > max_distance:
50 return max(len(t1), len(t2))
51 return distance
52
53
54 def levsim(t1, t2, alpha=1.8, beta=5.0, min_similarity=0.0):
55 """Get the Levenshtein similarity between two terms.
56
57 Return the Levenshtein similarity between two terms. The similarity is a
58 number between <0.0, 1.0>, higher is more similar.
59
60 Parameters
61 ----------
62 t1 : {bytes, str, unicode}
63 The first compared term.
64 t2 : {bytes, str, unicode}
65 The second compared term.
66 alpha : float, optional
67 The multiplicative factor alpha defined by Charlet and Damnati (2017).
68 beta : float, optional
69 The exponential factor beta defined by Charlet and Damnati (2017).
70 min_similarity : {int, float}, optional
71 If you don't care about similarities smaller than a known threshold, a
72 more efficient code path can be taken. For terms that are clearly "too
73 far apart", we will not compute the distance exactly, but we will
74 return zero more quickly, meaning "less than `min_similarity`".
75 Default: always compute similarity exactly, no threshold clipping.
76
77 Returns
78 -------
79 float
80 The Levenshtein similarity between `t1` and `t2`.
81
82 Notes
83 -----
84 This notion of Levenshtein similarity was first defined in section 2.2 of
85 `Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3:
86 Soft-Cosine Semantic Similarity between Questions for Community Question
87 Answering", 2017 <http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`_.
88
89 """
90 assert alpha >= 0
91 assert beta >= 0
92
93 max_lengths = max(len(t1), len(t2))
94 if max_lengths == 0:
95 return 1.0
96
97 min_similarity = float(max(min(min_similarity, 1.0), 0.0))
98 max_distance = int(floor(max_lengths * (1 - (min_similarity / alpha) ** (1 / beta))))
99 distance = levdist(t1, t2, max_distance)
100 similarity = alpha * (1 - distance * 1.0 / max_lengths)**beta
101 return similarity
102
103
104 class LevenshteinSimilarityIndex(TermSimilarityIndex):
105 """
106 Computes Levenshtein similarities between terms and retrieves most similar
107 terms for a given term.
108
109 Notes
110 -----
111 This is a naive implementation that iteratively computes pointwise Levenshtein similarities
112 between individual terms. Using this implementation to compute the similarity of all terms in
113 real-world dictionaries such as the English Wikipedia will take years.
114
115 Parameters
116 ----------
117 dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
118 A dictionary that specifies the considered terms.
119 alpha : float, optional
120 The multiplicative factor alpha defined by Charlet and Damnati (2017).
121 beta : float, optional
122 The exponential factor beta defined by Charlet and Damnati (2017).
123 threshold : float, optional
124 Only terms more similar than `threshold` are considered when retrieving
125 the most similar terms for a given term.
126
127 See Also
128 --------
129 :func:`gensim.similarities.levenshtein.levsim`
130 The Levenshtein similarity.
131 :class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
132 Build a term similarity matrix and compute the Soft Cosine Measure.
133
134 """
135 def __init__(self, dictionary, alpha=1.8, beta=5.0, threshold=0.0):
136 self.dictionary = dictionary
137 self.alpha = alpha
138 self.beta = beta
139 self.threshold = threshold
140 super(LevenshteinSimilarityIndex, self).__init__()
141
142 def most_similar(self, t1, topn=10):
143 similarities = (
144 (levsim(t1, t2, self.alpha, self.beta, self.threshold), t2)
145 for t2 in self.dictionary.values()
146 if t1 != t2
147 )
148 most_similar = (
149 (t2, similarity)
150 for (similarity, t2) in sorted(similarities, reverse=True)
151 if similarity > 0
152 )
153 return islice(most_similar, topn)
154
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gensim/similarities/levenshtein.py b/gensim/similarities/levenshtein.py
--- a/gensim/similarities/levenshtein.py
+++ b/gensim/similarities/levenshtein.py
@@ -8,7 +8,7 @@
This module provides a namespace for functions that use the Levenshtein distance.
"""
-from itertools import islice
+import itertools
import logging
from math import floor
@@ -150,4 +150,4 @@
for (similarity, t2) in sorted(similarities, reverse=True)
if similarity > 0
)
- return islice(most_similar, topn)
+ return itertools.islice(most_similar, int(topn))
|
{"golden_diff": "diff --git a/gensim/similarities/levenshtein.py b/gensim/similarities/levenshtein.py\n--- a/gensim/similarities/levenshtein.py\n+++ b/gensim/similarities/levenshtein.py\n@@ -8,7 +8,7 @@\n This module provides a namespace for functions that use the Levenshtein distance.\n \"\"\"\n \n-from itertools import islice\n+import itertools\n import logging\n from math import floor\n \n@@ -150,4 +150,4 @@\n for (similarity, t2) in sorted(similarities, reverse=True)\n if similarity > 0\n )\n- return islice(most_similar, topn)\n+ return itertools.islice(most_similar, int(topn))\n", "issue": "LeveshteinSimilarityIndex fails when called from SparseTermSimilarityMatrix\n<!--\r\n**IMPORTANT**:\r\n\r\n- Use the [Gensim mailing list](https://groups.google.com/forum/#!forum/gensim) to ask general or usage questions. Github issues are only for bug reports.\r\n- Check [Recipes&FAQ](https://github.com/RaRe-Technologies/gensim/wiki/Recipes-&-FAQ) first for common answers.\r\n\r\nGithub bug reports that do not include relevant information and context will be closed without an answer. Thanks!\r\n-->\r\n\r\n#### Problem description\r\n\r\nWhen trying to build a SparseTermSimilarityMatrix using Levenshtein it fails.\r\n\r\n#### Steps/code/corpus to reproduce\r\n\r\n```python\r\nindex = LevenshteinSimilarityIndex(dictionary)\r\nSparseTermSimilarityMatrix(index, dictionary) # <- fails here\r\n```\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-59-c16b89564835> in <module>\r\n----> 1 similarity_matrix1 = SparseTermSimilarityMatrix(similarity_index1, dictionary)\r\n\r\n~/.local/share/virtualenvs/pdftagger-LHy_2RHk/lib/python3.6/site-packages/gensim/similarities/termsim.py in __init__(self, source, dictionary, tfidf, symmetric, positive_definite, nonzero_limit, dtype)\r\n 234 for term, similarity in index.most_similar(t1, topn=num_rows)\r\n 235 if term in dictionary.token2id\r\n--> 236 ] if num_rows > 0 else []\r\n 237 \r\n 238 if tfidf is None:\r\n\r\n~/.local/share/virtualenvs/pdftagger-LHy_2RHk/lib/python3.6/site-packages/gensim/similarities/levenshtein.py in most_similar(self, t1, topn)\r\n 151 if similarity > 0\r\n 152 )\r\n--> 153 return islice(most_similar, topn)\r\n\r\nValueError: Stop argument for islice() must be None or an integer: 0 <= x <= sys.maxsize.\r\n```\r\n\r\n#### Versions\r\n\r\nLinux-4.18.0-25-generic-x86_64-with-debian-buster-sid\r\nPython 3.6.9 (default, Nov 18 2019, 15:20:23) \r\n[GCC 8.3.0]\r\nNumPy 1.17.4\r\nSciPy 1.3.3\r\ngensim 3.8.1\r\nFAST_VERSION 1\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018 Vit Novotny <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module provides a namespace for functions that use the Levenshtein distance.\n\"\"\"\n\nfrom itertools import islice\nimport logging\nfrom math import floor\n\nfrom gensim.similarities.termsim import TermSimilarityIndex\n\nlogger = logging.getLogger(__name__)\n\n\ndef levdist(t1, t2, max_distance=float(\"inf\")):\n \"\"\"Get the Levenshtein distance between two terms.\n\n Return the Levenshtein distance between two terms. 
The distance is a\n number between <1.0, inf>, higher is less similar.\n\n Parameters\n ----------\n t1 : {bytes, str, unicode}\n The first compared term.\n t2 : {bytes, str, unicode}\n The second compared term.\n max_distance : {int, float}, optional\n If you don't care about distances larger than a known threshold, a more\n efficient code path can be taken. For terms that are clearly \"too far\n apart\", we will not compute the distance exactly, but we will return\n `max(len(t1), len(t2))` more quickly, meaning \"more than\n `max_distance`\".\n Default: always compute distance exactly, no threshold clipping.\n\n Returns\n -------\n int\n The Levenshtein distance between `t1` and `t2`.\n\n \"\"\"\n import Levenshtein\n\n distance = Levenshtein.distance(t1, t2)\n if distance > max_distance:\n return max(len(t1), len(t2))\n return distance\n\n\ndef levsim(t1, t2, alpha=1.8, beta=5.0, min_similarity=0.0):\n \"\"\"Get the Levenshtein similarity between two terms.\n\n Return the Levenshtein similarity between two terms. The similarity is a\n number between <0.0, 1.0>, higher is more similar.\n\n Parameters\n ----------\n t1 : {bytes, str, unicode}\n The first compared term.\n t2 : {bytes, str, unicode}\n The second compared term.\n alpha : float, optional\n The multiplicative factor alpha defined by Charlet and Damnati (2017).\n beta : float, optional\n The exponential factor beta defined by Charlet and Damnati (2017).\n min_similarity : {int, float}, optional\n If you don't care about similarities smaller than a known threshold, a\n more efficient code path can be taken. For terms that are clearly \"too\n far apart\", we will not compute the distance exactly, but we will\n return zero more quickly, meaning \"less than `min_similarity`\".\n Default: always compute similarity exactly, no threshold clipping.\n\n Returns\n -------\n float\n The Levenshtein similarity between `t1` and `t2`.\n\n Notes\n -----\n This notion of Levenshtein similarity was first defined in section 2.2 of\n `Delphine Charlet and Geraldine Damnati, \"SimBow at SemEval-2017 Task 3:\n Soft-Cosine Semantic Similarity between Questions for Community Question\n Answering\", 2017 <http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`_.\n\n \"\"\"\n assert alpha >= 0\n assert beta >= 0\n\n max_lengths = max(len(t1), len(t2))\n if max_lengths == 0:\n return 1.0\n\n min_similarity = float(max(min(min_similarity, 1.0), 0.0))\n max_distance = int(floor(max_lengths * (1 - (min_similarity / alpha) ** (1 / beta))))\n distance = levdist(t1, t2, max_distance)\n similarity = alpha * (1 - distance * 1.0 / max_lengths)**beta\n return similarity\n\n\nclass LevenshteinSimilarityIndex(TermSimilarityIndex):\n \"\"\"\n Computes Levenshtein similarities between terms and retrieves most similar\n terms for a given term.\n\n Notes\n -----\n This is a naive implementation that iteratively computes pointwise Levenshtein similarities\n between individual terms. 
Using this implementation to compute the similarity of all terms in\n real-world dictionaries such as the English Wikipedia will take years.\n\n Parameters\n ----------\n dictionary : :class:`~gensim.corpora.dictionary.Dictionary`\n A dictionary that specifies the considered terms.\n alpha : float, optional\n The multiplicative factor alpha defined by Charlet and Damnati (2017).\n beta : float, optional\n The exponential factor beta defined by Charlet and Damnati (2017).\n threshold : float, optional\n Only terms more similar than `threshold` are considered when retrieving\n the most similar terms for a given term.\n\n See Also\n --------\n :func:`gensim.similarities.levenshtein.levsim`\n The Levenshtein similarity.\n :class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`\n Build a term similarity matrix and compute the Soft Cosine Measure.\n\n \"\"\"\n def __init__(self, dictionary, alpha=1.8, beta=5.0, threshold=0.0):\n self.dictionary = dictionary\n self.alpha = alpha\n self.beta = beta\n self.threshold = threshold\n super(LevenshteinSimilarityIndex, self).__init__()\n\n def most_similar(self, t1, topn=10):\n similarities = (\n (levsim(t1, t2, self.alpha, self.beta, self.threshold), t2)\n for t2 in self.dictionary.values()\n if t1 != t2\n )\n most_similar = (\n (t2, similarity)\n for (similarity, t2) in sorted(similarities, reverse=True)\n if similarity > 0\n )\n return islice(most_similar, topn)\n", "path": "gensim/similarities/levenshtein.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018 Vit Novotny <[email protected]>\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\n\"\"\"\nThis module provides a namespace for functions that use the Levenshtein distance.\n\"\"\"\n\nimport itertools\nimport logging\nfrom math import floor\n\nfrom gensim.similarities.termsim import TermSimilarityIndex\n\nlogger = logging.getLogger(__name__)\n\n\ndef levdist(t1, t2, max_distance=float(\"inf\")):\n \"\"\"Get the Levenshtein distance between two terms.\n\n Return the Levenshtein distance between two terms. The distance is a\n number between <1.0, inf>, higher is less similar.\n\n Parameters\n ----------\n t1 : {bytes, str, unicode}\n The first compared term.\n t2 : {bytes, str, unicode}\n The second compared term.\n max_distance : {int, float}, optional\n If you don't care about distances larger than a known threshold, a more\n efficient code path can be taken. For terms that are clearly \"too far\n apart\", we will not compute the distance exactly, but we will return\n `max(len(t1), len(t2))` more quickly, meaning \"more than\n `max_distance`\".\n Default: always compute distance exactly, no threshold clipping.\n\n Returns\n -------\n int\n The Levenshtein distance between `t1` and `t2`.\n\n \"\"\"\n import Levenshtein\n\n distance = Levenshtein.distance(t1, t2)\n if distance > max_distance:\n return max(len(t1), len(t2))\n return distance\n\n\ndef levsim(t1, t2, alpha=1.8, beta=5.0, min_similarity=0.0):\n \"\"\"Get the Levenshtein similarity between two terms.\n\n Return the Levenshtein similarity between two terms. 
The similarity is a\n number between <0.0, 1.0>, higher is more similar.\n\n Parameters\n ----------\n t1 : {bytes, str, unicode}\n The first compared term.\n t2 : {bytes, str, unicode}\n The second compared term.\n alpha : float, optional\n The multiplicative factor alpha defined by Charlet and Damnati (2017).\n beta : float, optional\n The exponential factor beta defined by Charlet and Damnati (2017).\n min_similarity : {int, float}, optional\n If you don't care about similarities smaller than a known threshold, a\n more efficient code path can be taken. For terms that are clearly \"too\n far apart\", we will not compute the distance exactly, but we will\n return zero more quickly, meaning \"less than `min_similarity`\".\n Default: always compute similarity exactly, no threshold clipping.\n\n Returns\n -------\n float\n The Levenshtein similarity between `t1` and `t2`.\n\n Notes\n -----\n This notion of Levenshtein similarity was first defined in section 2.2 of\n `Delphine Charlet and Geraldine Damnati, \"SimBow at SemEval-2017 Task 3:\n Soft-Cosine Semantic Similarity between Questions for Community Question\n Answering\", 2017 <http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`_.\n\n \"\"\"\n assert alpha >= 0\n assert beta >= 0\n\n max_lengths = max(len(t1), len(t2))\n if max_lengths == 0:\n return 1.0\n\n min_similarity = float(max(min(min_similarity, 1.0), 0.0))\n max_distance = int(floor(max_lengths * (1 - (min_similarity / alpha) ** (1 / beta))))\n distance = levdist(t1, t2, max_distance)\n similarity = alpha * (1 - distance * 1.0 / max_lengths)**beta\n return similarity\n\n\nclass LevenshteinSimilarityIndex(TermSimilarityIndex):\n \"\"\"\n Computes Levenshtein similarities between terms and retrieves most similar\n terms for a given term.\n\n Notes\n -----\n This is a naive implementation that iteratively computes pointwise Levenshtein similarities\n between individual terms. Using this implementation to compute the similarity of all terms in\n real-world dictionaries such as the English Wikipedia will take years.\n\n Parameters\n ----------\n dictionary : :class:`~gensim.corpora.dictionary.Dictionary`\n A dictionary that specifies the considered terms.\n alpha : float, optional\n The multiplicative factor alpha defined by Charlet and Damnati (2017).\n beta : float, optional\n The exponential factor beta defined by Charlet and Damnati (2017).\n threshold : float, optional\n Only terms more similar than `threshold` are considered when retrieving\n the most similar terms for a given term.\n\n See Also\n --------\n :func:`gensim.similarities.levenshtein.levsim`\n The Levenshtein similarity.\n :class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`\n Build a term similarity matrix and compute the Soft Cosine Measure.\n\n \"\"\"\n def __init__(self, dictionary, alpha=1.8, beta=5.0, threshold=0.0):\n self.dictionary = dictionary\n self.alpha = alpha\n self.beta = beta\n self.threshold = threshold\n super(LevenshteinSimilarityIndex, self).__init__()\n\n def most_similar(self, t1, topn=10):\n similarities = (\n (levsim(t1, t2, self.alpha, self.beta, self.threshold), t2)\n for t2 in self.dictionary.values()\n if t1 != t2\n )\n most_similar = (\n (t2, similarity)\n for (similarity, t2) in sorted(similarities, reverse=True)\n if similarity > 0\n )\n return itertools.islice(most_similar, int(topn))\n", "path": "gensim/similarities/levenshtein.py"}]}
| 2,560 | 175 |
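To make the record above more concrete: the traceback fails inside `islice` because its stop argument must be `None` or an integer, and the `topn` forwarded by `SparseTermSimilarityMatrix` arrives as a non-integer number; the golden diff simply coerces it with `int(topn)`. A minimal, self-contained reproduction of that contrast (illustrative only, no gensim required):

```python
from itertools import islice

pairs = [('term%d' % i, 1.0 - i / 10.0) for i in range(10)]

topn = 10.0  # a float, as in the reported traceback
try:
    list(islice(iter(pairs), topn))
except ValueError as exc:
    print('unpatched behaviour:', exc)

# The fix applied in the golden diff: coerce before slicing.
print('patched behaviour:', list(islice(iter(pairs), int(topn)))[:3])
```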
gh_patches_debug_31805 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-4356 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NHL.COM URL Failure...
URL:
http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802
youtube-dl.py -v http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802
[debug] System config: []
[debug] User config: []
[debug] Command-line args: ['-v', 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665']
[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252
[debug] youtube-dl version 2014.12.01
[debug] Python version 2.7.5 - Windows-7-6.1.7601-SP1
[debug] exe versions: ffmpeg N-40824-, rtmpdump 2.4
[debug] Proxy map: {}
[nhl.com] 58665: Downloading JSON metadata
[nhl.com] 58665: Extracting information
[nhl.com] 58665: Downloading final video url
[debug] Invoking downloader on 'http://mapleleafs.cdnllnwnl.neulion.net/s/mapleleafs/vod/flv/LWEB-100128-CLASSICGI6.flv?eid=58653&pid=58665&gid=3028?eid=58653&pid=58665&gid=3028&pt=1&ip=199.85.73.12&e
=1417456874&h=181627b3cec444da4267f94da6b83915'
ERROR: unable to download video data: HTTP Error 404: Not Found
Traceback (most recent call last):
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\YoutubeDL.py", line 1087, in process_info
success = dl(filename, info_dict)
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\YoutubeDL.py", line 1063, in dl
return fd.download(name, info)
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\downloader\common.py", line 294, in download
return self.real_download(filename, info_dict)
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\downloader\http.py", line 66, in real_download
data = self.ydl.urlopen(request)
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\YoutubeDL.py", line 1321, in urlopen
return self._opener.open(req, timeout=self._socket_timeout)
File "C:\Python27\lib\urllib2.py", line 410, in open
response = meth(req, response)
File "C:\Python27\lib\urllib2.py", line 523, in http_response
'http', request, response, code, msg, hdrs)
File "C:\Python27\lib\urllib2.py", line 448, in error
return self._call_chain(_args)
File "C:\Python27\lib\urllib2.py", line 382, in _call_chain
result = func(_args)
File "C:\Python27\lib\urllib2.py", line 531, in http_error_default
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
HTTPError: HTTP Error 404: Not Found
'catid' is not recognized as an internal or external command,
operable program or batch file.
Thanks in advance..
Ringo
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/nhl.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import re
4 import json
5
6 from .common import InfoExtractor
7 from ..compat import (
8 compat_urlparse,
9 compat_urllib_parse,
10 )
11 from ..utils import (
12 unified_strdate,
13 )
14
15
16 class NHLBaseInfoExtractor(InfoExtractor):
17 @staticmethod
18 def _fix_json(json_string):
19 return json_string.replace('\\\'', '\'')
20
21 def _extract_video(self, info):
22 video_id = info['id']
23 self.report_extraction(video_id)
24
25 initial_video_url = info['publishPoint']
26 if info['formats'] == '1':
27 data = compat_urllib_parse.urlencode({
28 'type': 'fvod',
29 'path': initial_video_url.replace('.mp4', '_sd.mp4'),
30 })
31 path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
32 path_doc = self._download_xml(
33 path_url, video_id, 'Downloading final video url')
34 video_url = path_doc.find('path').text
35 else:
36 video_url = initial_video_url
37
38 join = compat_urlparse.urljoin
39 return {
40 'id': video_id,
41 'title': info['name'],
42 'url': video_url,
43 'description': info['description'],
44 'duration': int(info['duration']),
45 'thumbnail': join(join(video_url, '/u/'), info['bigImage']),
46 'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),
47 }
48
49
50 class NHLIE(NHLBaseInfoExtractor):
51 IE_NAME = 'nhl.com'
52 _VALID_URL = r'https?://video(?P<team>\.[^.]*)?\.nhl\.com/videocenter/console(?:\?(?:.*?[?&])?)id=(?P<id>[0-9a-z-]+)'
53
54 _TESTS = [{
55 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',
56 'md5': 'db704a4ea09e8d3988c85e36cc892d09',
57 'info_dict': {
58 'id': '453614',
59 'ext': 'mp4',
60 'title': 'Quick clip: Weise 4-3 goal vs Flames',
61 'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',
62 'duration': 18,
63 'upload_date': '20131006',
64 },
65 }, {
66 'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',
67 'md5': 'd22e82bc592f52d37d24b03531ee9696',
68 'info_dict': {
69 'id': '2014020024-628-h',
70 'ext': 'mp4',
71 'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',
72 'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',
73 'duration': 0,
74 'upload_date': '20141011',
75 },
76 }, {
77 'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
78 'only_matching': True,
79 }]
80
81 def _real_extract(self, url):
82 mobj = re.match(self._VALID_URL, url)
83 video_id = mobj.group('id')
84 json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id
85 data = self._download_json(
86 json_url, video_id, transform_source=self._fix_json)
87 return self._extract_video(data[0])
88
89
90 class NHLVideocenterIE(NHLBaseInfoExtractor):
91 IE_NAME = 'nhl.com:videocenter'
92 IE_DESC = 'NHL videocenter category'
93 _VALID_URL = r'https?://video\.(?P<team>[^.]*)\.nhl\.com/videocenter/(console\?.*?catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'
94 _TEST = {
95 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',
96 'info_dict': {
97 'id': '999',
98 'title': 'Highlights',
99 },
100 'playlist_count': 12,
101 }
102
103 def _real_extract(self, url):
104 mobj = re.match(self._VALID_URL, url)
105 team = mobj.group('team')
106 webpage = self._download_webpage(url, team)
107 cat_id = self._search_regex(
108 [r'var defaultCatId = "(.+?)";',
109 r'{statusIndex:0,index:0,.*?id:(.*?),'],
110 webpage, 'category id')
111 playlist_title = self._html_search_regex(
112 r'tab0"[^>]*?>(.*?)</td>',
113 webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
114
115 data = compat_urllib_parse.urlencode({
116 'cid': cat_id,
117 # This is the default value
118 'count': 12,
119 'ptrs': 3,
120 'format': 'json',
121 })
122 path = '/videocenter/servlets/browse?' + data
123 request_url = compat_urlparse.urljoin(url, path)
124 response = self._download_webpage(request_url, playlist_title)
125 response = self._fix_json(response)
126 if not response.strip():
127 self._downloader.report_warning('Got an empty reponse, trying '
128 'adding the "newvideos" parameter')
129 response = self._download_webpage(request_url + '&newvideos=true',
130 playlist_title)
131 response = self._fix_json(response)
132 videos = json.loads(response)
133
134 return {
135 '_type': 'playlist',
136 'title': playlist_title,
137 'id': cat_id,
138 'entries': [self._extract_video(v) for v in videos],
139 }
140
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py
--- a/youtube_dl/extractor/nhl.py
+++ b/youtube_dl/extractor/nhl.py
@@ -7,6 +7,7 @@
from ..compat import (
compat_urlparse,
compat_urllib_parse,
+ compat_urllib_parse_urlparse
)
from ..utils import (
unified_strdate,
@@ -24,9 +25,13 @@
initial_video_url = info['publishPoint']
if info['formats'] == '1':
+ parsed_url = compat_urllib_parse_urlparse(initial_video_url)
+ path = parsed_url.path
+ extension_index = path.rfind('.')
+ path = path[:extension_index] + '_sd' + path[extension_index:]
data = compat_urllib_parse.urlencode({
'type': 'fvod',
- 'path': initial_video_url.replace('.mp4', '_sd.mp4'),
+ 'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
})
path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data
path_doc = self._download_xml(
@@ -73,6 +78,17 @@
'duration': 0,
'upload_date': '20141011',
},
+ }, {
+ 'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',
+ 'md5': 'c78fc64ea01777e426cfc202b746c825',
+ 'info_dict': {
+ 'id': '58665',
+ 'ext': 'flv',
+ 'title': 'Classic Game In Six - April 22, 1979',
+ 'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',
+ 'duration': 400,
+ 'upload_date': '20100129'
+ },
}, {
'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',
'only_matching': True,
|
{"golden_diff": "diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py\n--- a/youtube_dl/extractor/nhl.py\n+++ b/youtube_dl/extractor/nhl.py\n@@ -7,6 +7,7 @@\n from ..compat import (\n compat_urlparse,\n compat_urllib_parse,\n+ compat_urllib_parse_urlparse\n )\n from ..utils import (\n unified_strdate,\n@@ -24,9 +25,13 @@\n \n initial_video_url = info['publishPoint']\n if info['formats'] == '1':\n+ parsed_url = compat_urllib_parse_urlparse(initial_video_url)\n+ path = parsed_url.path\n+ extension_index = path.rfind('.')\n+ path = path[:extension_index] + '_sd' + path[extension_index:]\n data = compat_urllib_parse.urlencode({\n 'type': 'fvod',\n- 'path': initial_video_url.replace('.mp4', '_sd.mp4'),\n+ 'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])\n })\n path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data\n path_doc = self._download_xml(\n@@ -73,6 +78,17 @@\n 'duration': 0,\n 'upload_date': '20141011',\n },\n+ }, {\n+ 'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',\n+ 'md5': 'c78fc64ea01777e426cfc202b746c825',\n+ 'info_dict': {\n+ 'id': '58665',\n+ 'ext': 'flv',\n+ 'title': 'Classic Game In Six - April 22, 1979',\n+ 'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',\n+ 'duration': 400,\n+ 'upload_date': '20100129'\n+ },\n }, {\n 'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',\n 'only_matching': True,\n", "issue": "NHL.COM URL Failure...\nURL:\nhttp://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802 \n\nyoutube-dl.py -v http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: ['-v', 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665']\n[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252\n[debug] youtube-dl version 2014.12.01\n[debug] Python version 2.7.5 - Windows-7-6.1.7601-SP1\n[debug] exe versions: ffmpeg N-40824-, rtmpdump 2.4\n[debug] Proxy map: {}\n[nhl.com] 58665: Downloading JSON metadata\n[nhl.com] 58665: Extracting information\n[nhl.com] 58665: Downloading final video url\n[debug] Invoking downloader on 'http://mapleleafs.cdnllnwnl.neulion.net/s/mapleleafs/vod/flv/LWEB-100128-CLASSICGI6.flv?eid=58653&pid=58665&gid=3028?eid=58653&pid=58665&gid=3028&pt=1&ip=199.85.73.12&e\n=1417456874&h=181627b3cec444da4267f94da6b83915'\nERROR: unable to download video data: HTTP Error 404: Not Found\nTraceback (most recent call last):\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\YoutubeDL.py\", line 1087, in process_info\n success = dl(filename, info_dict)\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\YoutubeDL.py\", line 1063, in dl\n return fd.download(name, info)\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\downloader\\common.py\", line 294, in download\n return self.real_download(filename, info_dict)\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\downloader\\http.py\", line 66, in real_download\n data = self.ydl.urlopen(request)\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\YoutubeDL.py\", line 1321, in urlopen\n return self._opener.open(req, timeout=self._socket_timeout)\n File \"C:\\Python27\\lib\\urllib2.py\", line 410, in open\n response = meth(req, response)\n File \"C:\\Python27\\lib\\urllib2.py\", line 523, in http_response\n 'http', request, response, code, msg, 
hdrs)\n File \"C:\\Python27\\lib\\urllib2.py\", line 448, in error\n return self._call_chain(_args)\n File \"C:\\Python27\\lib\\urllib2.py\", line 382, in _call_chain\n result = func(_args)\n File \"C:\\Python27\\lib\\urllib2.py\", line 531, in http_error_default\n raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)\nHTTPError: HTTP Error 404: Not Found\n\n'catid' is not recognized as an internal or external command,\noperable program or batch file.\n\nThanks in advance..\nRingo\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\nimport json\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urlparse,\n compat_urllib_parse,\n)\nfrom ..utils import (\n unified_strdate,\n)\n\n\nclass NHLBaseInfoExtractor(InfoExtractor):\n @staticmethod\n def _fix_json(json_string):\n return json_string.replace('\\\\\\'', '\\'')\n\n def _extract_video(self, info):\n video_id = info['id']\n self.report_extraction(video_id)\n\n initial_video_url = info['publishPoint']\n if info['formats'] == '1':\n data = compat_urllib_parse.urlencode({\n 'type': 'fvod',\n 'path': initial_video_url.replace('.mp4', '_sd.mp4'),\n })\n path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' + data\n path_doc = self._download_xml(\n path_url, video_id, 'Downloading final video url')\n video_url = path_doc.find('path').text\n else:\n video_url = initial_video_url\n\n join = compat_urlparse.urljoin\n return {\n 'id': video_id,\n 'title': info['name'],\n 'url': video_url,\n 'description': info['description'],\n 'duration': int(info['duration']),\n 'thumbnail': join(join(video_url, '/u/'), info['bigImage']),\n 'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),\n }\n\n\nclass NHLIE(NHLBaseInfoExtractor):\n IE_NAME = 'nhl.com'\n _VALID_URL = r'https?://video(?P<team>\\.[^.]*)?\\.nhl\\.com/videocenter/console(?:\\?(?:.*?[?&])?)id=(?P<id>[0-9a-z-]+)'\n\n _TESTS = [{\n 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',\n 'md5': 'db704a4ea09e8d3988c85e36cc892d09',\n 'info_dict': {\n 'id': '453614',\n 'ext': 'mp4',\n 'title': 'Quick clip: Weise 4-3 goal vs Flames',\n 'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',\n 'duration': 18,\n 'upload_date': '20131006',\n },\n }, {\n 'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',\n 'md5': 'd22e82bc592f52d37d24b03531ee9696',\n 'info_dict': {\n 'id': '2014020024-628-h',\n 'ext': 'mp4',\n 'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',\n 'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',\n 'duration': 0,\n 'upload_date': '20141011',\n },\n }, {\n 'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id\n data = self._download_json(\n json_url, video_id, transform_source=self._fix_json)\n return self._extract_video(data[0])\n\n\nclass NHLVideocenterIE(NHLBaseInfoExtractor):\n IE_NAME = 'nhl.com:videocenter'\n IE_DESC = 'NHL videocenter category'\n _VALID_URL = r'https?://video\\.(?P<team>[^.]*)\\.nhl\\.com/videocenter/(console\\?.*?catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'\n _TEST = {\n 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',\n 'info_dict': {\n 'id': '999',\n 'title': 'Highlights',\n },\n 'playlist_count': 
12,\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n team = mobj.group('team')\n webpage = self._download_webpage(url, team)\n cat_id = self._search_regex(\n [r'var defaultCatId = \"(.+?)\";',\n r'{statusIndex:0,index:0,.*?id:(.*?),'],\n webpage, 'category id')\n playlist_title = self._html_search_regex(\n r'tab0\"[^>]*?>(.*?)</td>',\n webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()\n\n data = compat_urllib_parse.urlencode({\n 'cid': cat_id,\n # This is the default value\n 'count': 12,\n 'ptrs': 3,\n 'format': 'json',\n })\n path = '/videocenter/servlets/browse?' + data\n request_url = compat_urlparse.urljoin(url, path)\n response = self._download_webpage(request_url, playlist_title)\n response = self._fix_json(response)\n if not response.strip():\n self._downloader.report_warning('Got an empty reponse, trying '\n 'adding the \"newvideos\" parameter')\n response = self._download_webpage(request_url + '&newvideos=true',\n playlist_title)\n response = self._fix_json(response)\n videos = json.loads(response)\n\n return {\n '_type': 'playlist',\n 'title': playlist_title,\n 'id': cat_id,\n 'entries': [self._extract_video(v) for v in videos],\n }\n", "path": "youtube_dl/extractor/nhl.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport re\nimport json\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_urlparse,\n compat_urllib_parse,\n compat_urllib_parse_urlparse\n)\nfrom ..utils import (\n unified_strdate,\n)\n\n\nclass NHLBaseInfoExtractor(InfoExtractor):\n @staticmethod\n def _fix_json(json_string):\n return json_string.replace('\\\\\\'', '\\'')\n\n def _extract_video(self, info):\n video_id = info['id']\n self.report_extraction(video_id)\n\n initial_video_url = info['publishPoint']\n if info['formats'] == '1':\n parsed_url = compat_urllib_parse_urlparse(initial_video_url)\n path = parsed_url.path\n extension_index = path.rfind('.')\n path = path[:extension_index] + '_sd' + path[extension_index:]\n data = compat_urllib_parse.urlencode({\n 'type': 'fvod',\n 'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])\n })\n path_url = 'http://video.nhl.com/videocenter/servlets/encryptvideopath?' 
+ data\n path_doc = self._download_xml(\n path_url, video_id, 'Downloading final video url')\n video_url = path_doc.find('path').text\n else:\n video_url = initial_video_url\n\n join = compat_urlparse.urljoin\n return {\n 'id': video_id,\n 'title': info['name'],\n 'url': video_url,\n 'description': info['description'],\n 'duration': int(info['duration']),\n 'thumbnail': join(join(video_url, '/u/'), info['bigImage']),\n 'upload_date': unified_strdate(info['releaseDate'].split('.')[0]),\n }\n\n\nclass NHLIE(NHLBaseInfoExtractor):\n IE_NAME = 'nhl.com'\n _VALID_URL = r'https?://video(?P<team>\\.[^.]*)?\\.nhl\\.com/videocenter/console(?:\\?(?:.*?[?&])?)id=(?P<id>[0-9a-z-]+)'\n\n _TESTS = [{\n 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=6?id=453614',\n 'md5': 'db704a4ea09e8d3988c85e36cc892d09',\n 'info_dict': {\n 'id': '453614',\n 'ext': 'mp4',\n 'title': 'Quick clip: Weise 4-3 goal vs Flames',\n 'description': 'Dale Weise scores his first of the season to put the Canucks up 4-3.',\n 'duration': 18,\n 'upload_date': '20131006',\n },\n }, {\n 'url': 'http://video.nhl.com/videocenter/console?id=2014020024-628-h',\n 'md5': 'd22e82bc592f52d37d24b03531ee9696',\n 'info_dict': {\n 'id': '2014020024-628-h',\n 'ext': 'mp4',\n 'title': 'Alex Galchenyuk Goal on Ray Emery (14:40/3rd)',\n 'description': 'Home broadcast - Montreal Canadiens at Philadelphia Flyers - October 11, 2014',\n 'duration': 0,\n 'upload_date': '20141011',\n },\n }, {\n 'url': 'http://video.mapleleafs.nhl.com/videocenter/console?id=58665&catid=802',\n 'md5': 'c78fc64ea01777e426cfc202b746c825',\n 'info_dict': {\n 'id': '58665',\n 'ext': 'flv',\n 'title': 'Classic Game In Six - April 22, 1979',\n 'description': 'It was the last playoff game for the Leafs in the decade, and the last time the Leafs and Habs played in the playoffs. Great game, not a great ending.',\n 'duration': 400,\n 'upload_date': '20100129'\n },\n }, {\n 'url': 'http://video.flames.nhl.com/videocenter/console?id=630616',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n json_url = 'http://video.nhl.com/videocenter/servlets/playlist?ids=%s&format=json' % video_id\n data = self._download_json(\n json_url, video_id, transform_source=self._fix_json)\n return self._extract_video(data[0])\n\n\nclass NHLVideocenterIE(NHLBaseInfoExtractor):\n IE_NAME = 'nhl.com:videocenter'\n IE_DESC = 'NHL videocenter category'\n _VALID_URL = r'https?://video\\.(?P<team>[^.]*)\\.nhl\\.com/videocenter/(console\\?.*?catid=(?P<catid>[0-9]+)(?![&?]id=).*?)?$'\n _TEST = {\n 'url': 'http://video.canucks.nhl.com/videocenter/console?catid=999',\n 'info_dict': {\n 'id': '999',\n 'title': 'Highlights',\n },\n 'playlist_count': 12,\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n team = mobj.group('team')\n webpage = self._download_webpage(url, team)\n cat_id = self._search_regex(\n [r'var defaultCatId = \"(.+?)\";',\n r'{statusIndex:0,index:0,.*?id:(.*?),'],\n webpage, 'category id')\n playlist_title = self._html_search_regex(\n r'tab0\"[^>]*?>(.*?)</td>',\n webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()\n\n data = compat_urllib_parse.urlencode({\n 'cid': cat_id,\n # This is the default value\n 'count': 12,\n 'ptrs': 3,\n 'format': 'json',\n })\n path = '/videocenter/servlets/browse?' 
+ data\n request_url = compat_urlparse.urljoin(url, path)\n response = self._download_webpage(request_url, playlist_title)\n response = self._fix_json(response)\n if not response.strip():\n self._downloader.report_warning('Got an empty reponse, trying '\n 'adding the \"newvideos\" parameter')\n response = self._download_webpage(request_url + '&newvideos=true',\n playlist_title)\n response = self._fix_json(response)\n videos = json.loads(response)\n\n return {\n '_type': 'playlist',\n 'title': playlist_title,\n 'id': cat_id,\n 'entries': [self._extract_video(v) for v in videos],\n }\n", "path": "youtube_dl/extractor/nhl.py"}]}
| 2,873 | 548 |
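The patch in the record above replaces a blunt `.replace('.mp4', '_sd.mp4')` with URL parsing: `_sd` is inserted before whatever extension the path has (the failing clip was an `.flv`), and the URL is rebuilt so the query string survives intact. A standalone sketch of that transformation using only the standard library (the extractor itself goes through youtube-dl's `compat_*` aliases):

```python
from urllib.parse import urlparse, urlunparse

def sd_variant(url):
    # Insert '_sd' before the file extension of the URL path, keep query/fragment as-is
    parsed = urlparse(url)
    path = parsed.path
    dot = path.rfind('.')
    path = path[:dot] + '_sd' + path[dot:]
    return urlunparse(parsed[:2] + (path,) + parsed[3:])

print(sd_variant('http://example.neulion.net/s/vod/flv/CLASSICGI6.flv?eid=58653&pid=58665'))
# -> http://example.neulion.net/s/vod/flv/CLASSICGI6_sd.flv?eid=58653&pid=58665
```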
gh_patches_debug_30818 | rasdani/github-patches | git_diff | PennyLaneAI__pennylane-4663 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `qml.data.load()` fails when using 'full' parameter value
### Expected behavior
`qml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='2x2')`
Should return the Heisenberg dataset corresponding to the `open` periodicity, `rectangular` lattice, and `2x2` layout.
### Actual behavior
`qml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='2x2')`
Raises `ValueError: layout value of '2x2' is not available. Available values are ['1x16', '1x4', '1x8']`
### Additional information
_No response_
### Source code
```shell
qml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='1x4') # works
qml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='2x2') # fails
```
### Tracebacks
_No response_
### System information
```shell
Name: PennyLane
Version: 0.31.0.dev0
Summary: PennyLane is a Python quantum machine learning library by Xanadu Inc.
Home-page: https://github.com/XanaduAI/pennylane
Author:
Author-email:
License: Apache License 2.0
Location: .venv/lib/python3.8/site-packages
Editable project location:
Requires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml
Required-by: PennyLane-Lightning
Platform info: Linux-5.15.0-73-generic-x86_64-with-glibc2.29
Python version: 3.8.10
Numpy version: 1.23.5
Scipy version: 1.10.1
Installed devices:
- default.gaussian (PennyLane-0.31.0.dev0)
- default.mixed (PennyLane-0.31.0.dev0)
- default.qubit (PennyLane-0.31.0.dev0)
- default.qubit.autograd (PennyLane-0.31.0.dev0)
- default.qubit.jax (PennyLane-0.31.0.dev0)
- default.qubit.tf (PennyLane-0.31.0.dev0)
- default.qubit.torch (PennyLane-0.31.0.dev0)
- default.qutrit (PennyLane-0.31.0.dev0)
- null.qubit (PennyLane-0.31.0.dev0)
- lightning.qubit (PennyLane-Lightning-0.30.0)
```
### Existing GitHub issues
- [X] I have searched existing GitHub issues to make sure the issue does not already exist.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pennylane/data/data_manager/foldermap.py`
Content:
```
1 # Copyright 2018-2023 Xanadu Quantum Technologies Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """
15 Contains ``FolderMapView`` for reading the ``foldermap.json`` file in the
16 datasets bucket.
17 """
18
19
20 import typing
21 from collections.abc import Mapping
22 from pathlib import PurePosixPath
23 from typing import Any, List, Literal, Optional, Tuple, Union
24
25 from .params import Description, ParamArg, ParamVal
26
27
28 # Type for a dataset path, relative to the foldermap.json file
29 class DataPath(PurePosixPath):
30 """Type for Dataset Path, relative to the foldermap.json file."""
31
32 def __repr__(self) -> str:
33 return repr(str(self))
34
35
36 class FolderMapView(typing.Mapping[str, Union["FolderMapView", DataPath]]):
37 """Provides a read-only view of the ``foldermap.json`` file in
38 the datasets bucket. The folder map is a nested mapping of
39 dataset parameters to their path, relative to the ``foldermap.json``
40 file.
41
42 A dictionary in the folder map can optionally specify a default
43 paramater using the '__default' key. This view hides that
44 key, and allows the default parameter to be accessed.
45
46 For example, the underlying foldermap data will look like
47 this:
48
49 {
50 "__params": {
51 "qchem": ["molname", "basis", "bondlength"]
52 },
53 "qchem": {
54 "O2": {
55 "__default": "STO-3G",
56 "STO-3G": {
57 "__default": "0.5",
58 "0.5": "qchem/O2/STO-3G/0.5.h5",
59 "0.6": "qchem/O2/STO-3G/0.6.h5"
60 }
61 },
62 "H2": {
63 "__default": "STO-3G",
64 "STO-3G": {
65 "__default": "0.7",
66 "0.7": "qchem/H2/STO-3G/0.7.h5"
67 }
68 }
69 },
70 }
71
72 When accessed through ``FolderMapView``, the '__default' and '__params'
73 keys will be hidden.
74 """
75
76 __PRIVATE_KEYS = {"__default", "__params"}
77
78 def __init__(self, __curr_level: typing.Mapping[str, Any]) -> None:
79 """Initialize the mapping.
80
81 Args:
82 __data_struct: The top level foldermap
83 """
84 self.__curr_level = __curr_level
85
86 def get_default_key(self) -> Optional[str]:
87 """Get the default key for this level of the foldermap.
88 Raises a ValueError if it does not have a default.
89 """
90 return self.__curr_level.get("__default")
91
92 def find(
93 self,
94 data_name: str,
95 missing_default: Optional[ParamArg] = ParamArg.DEFAULT,
96 **params: Union[typing.Iterable[ParamVal], ParamArg],
97 ) -> List[Tuple[Description, DataPath]]:
98 """Returns a 2-tuple of dataset description and paths, for each dataset that
99 matches ``params``."""
100
101 try:
102 data_names_to_params = self.__curr_level["__params"]
103 except KeyError as exc:
104 raise RuntimeError("Can only call find() from top level of foldermap") from exc
105
106 try:
107 param_names: List[str] = data_names_to_params[data_name]
108 except KeyError as exc:
109 raise ValueError(f"No datasets with data name: '{data_name}'") from exc
110
111 curr: List[Tuple[Description, Union[FolderMapView, DataPath]]] = [
112 (Description(()), self[data_name])
113 ]
114 todo: List[Tuple[Description, Union[FolderMapView, DataPath]]] = []
115 done: List[Tuple[Description, DataPath]] = []
116
117 for param_name in param_names:
118 param_arg = params.get(param_name, missing_default)
119
120 while curr:
121 curr_description, curr_level = curr.pop()
122 if param_arg == ParamArg.FULL:
123 next_params = curr_level
124 elif param_arg == ParamArg.DEFAULT:
125 default = curr_level.get_default_key()
126 if default is None:
127 raise ValueError(f"No default available for parameter '{param_name}'")
128 next_params = (default,)
129 elif isinstance(param_arg, str):
130 next_params = (param_arg,)
131 else:
132 next_params = param_arg
133
134 try:
135 todo.extend(
136 (
137 Description((*curr_description.items(), (param_name, next_param))),
138 curr_level[next_param],
139 )
140 for next_param in next_params
141 )
142 except KeyError as exc:
143 raise ValueError(
144 f"{param_name} '{exc.args[0]}' is not available. Available values are: {list(curr_level)}"
145 ) from exc
146
147 curr, todo = todo, curr
148
149 done.extend(curr)
150
151 return done
152
153 def __getitem__(
154 self, __key: Union[str, Literal[ParamArg.DEFAULT]]
155 ) -> Union["FolderMapView", DataPath]:
156 """Gets the item with key. If key is ``ParamArg.DEFAULT``, return the
157 item under the default parameter, or raise a ``ValueError`` if no
158 default exists."""
159 if __key in self.__PRIVATE_KEYS:
160 raise KeyError(__key)
161
162 if __key == ParamArg.DEFAULT:
163 default = self.get_default_key()
164 if default is None:
165 raise ValueError("No default available")
166 return self[default]
167
168 elem = self.__curr_level[__key]
169 if isinstance(elem, Mapping):
170 return FolderMapView(elem)
171
172 return DataPath(elem)
173
174 def __iter__(self) -> typing.Iterator[str]:
175 return (key for key in self.__curr_level.keys() if key not in self.__PRIVATE_KEYS)
176
177 def keys(self) -> typing.FrozenSet[str]:
178 return frozenset(iter(self))
179
180 def __len__(self) -> int:
181 return sum(1 for _ in self.__iter__())
182
183 def __repr__(self) -> str:
184 return repr(dict(self))
185
186 def __str__(self) -> str:
187 return str(dict(self))
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pennylane/data/data_manager/foldermap.py b/pennylane/data/data_manager/foldermap.py
--- a/pennylane/data/data_manager/foldermap.py
+++ b/pennylane/data/data_manager/foldermap.py
@@ -119,6 +119,7 @@
while curr:
curr_description, curr_level = curr.pop()
+
if param_arg == ParamArg.FULL:
next_params = curr_level
elif param_arg == ParamArg.DEFAULT:
@@ -131,18 +132,29 @@
else:
next_params = param_arg
- try:
- todo.extend(
+ for next_param in next_params:
+ try:
+ fmap_next = curr_level[next_param]
+ except KeyError:
+ continue
+
+ todo.append(
(
Description((*curr_description.items(), (param_name, next_param))),
- curr_level[next_param],
+ fmap_next,
)
- for next_param in next_params
)
- except KeyError as exc:
- raise ValueError(
- f"{param_name} '{exc.args[0]}' is not available. Available values are: {list(curr_level)}"
- ) from exc
+
+ if len(todo) == 0:
+ # None of the parameters matched
+ param_arg_repr = (
+ repr([param_arg])
+ if isinstance(param_arg, (str, ParamArg))
+ else repr(list(param_arg))
+ )
+ raise ValueError(
+ f"{param_name} value(s) {param_arg_repr} are not available. Available values are: {list(curr_level)}"
+ )
curr, todo = todo, curr
|
{"golden_diff": "diff --git a/pennylane/data/data_manager/foldermap.py b/pennylane/data/data_manager/foldermap.py\n--- a/pennylane/data/data_manager/foldermap.py\n+++ b/pennylane/data/data_manager/foldermap.py\n@@ -119,6 +119,7 @@\n \n while curr:\n curr_description, curr_level = curr.pop()\n+\n if param_arg == ParamArg.FULL:\n next_params = curr_level\n elif param_arg == ParamArg.DEFAULT:\n@@ -131,18 +132,29 @@\n else:\n next_params = param_arg\n \n- try:\n- todo.extend(\n+ for next_param in next_params:\n+ try:\n+ fmap_next = curr_level[next_param]\n+ except KeyError:\n+ continue\n+\n+ todo.append(\n (\n Description((*curr_description.items(), (param_name, next_param))),\n- curr_level[next_param],\n+ fmap_next,\n )\n- for next_param in next_params\n )\n- except KeyError as exc:\n- raise ValueError(\n- f\"{param_name} '{exc.args[0]}' is not available. Available values are: {list(curr_level)}\"\n- ) from exc\n+\n+ if len(todo) == 0:\n+ # None of the parameters matched\n+ param_arg_repr = (\n+ repr([param_arg])\n+ if isinstance(param_arg, (str, ParamArg))\n+ else repr(list(param_arg))\n+ )\n+ raise ValueError(\n+ f\"{param_name} value(s) {param_arg_repr} are not available. Available values are: {list(curr_level)}\"\n+ )\n \n curr, todo = todo, curr\n", "issue": "[BUG] `qml.data.load()` fails when using 'full' parameter value\n### Expected behavior\r\n\r\n`qml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='2x2')`\r\nShould return the Heisenberg dataset corresponding to the `open` periodicity, `rectangular` lattice, and `2x2` layout.\r\n\r\n### Actual behavior\r\n\r\n`qml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='2x2')`\r\n\r\nRaises `ValueError: layout value of '2x2' is not available. 
Available values are ['1x16', '1x4', '1x8']`\r\n\r\n### Additional information\r\n\r\n_No response_\r\n\r\n### Source code\r\n\r\n```shell\r\nqml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='1x4') # works\r\n\r\nqml.data.load('qspin',sysname='Heisenberg',periodicity='open',lattice='full',layout='2x2') # fails\r\n```\r\n\r\n\r\n### Tracebacks\r\n\r\n_No response_\r\n\r\n### System information\r\n\r\n```shell\r\nName: PennyLane\r\nVersion: 0.31.0.dev0\r\nSummary: PennyLane is a Python quantum machine learning library by Xanadu Inc.\r\nHome-page: https://github.com/XanaduAI/pennylane\r\nAuthor: \r\nAuthor-email: \r\nLicense: Apache License 2.0\r\nLocation: .venv/lib/python3.8/site-packages\r\nEditable project location: \r\nRequires: appdirs, autograd, autoray, cachetools, networkx, numpy, pennylane-lightning, requests, rustworkx, scipy, semantic-version, toml\r\nRequired-by: PennyLane-Lightning\r\n\r\nPlatform info: Linux-5.15.0-73-generic-x86_64-with-glibc2.29\r\nPython version: 3.8.10\r\nNumpy version: 1.23.5\r\nScipy version: 1.10.1\r\nInstalled devices:\r\n- default.gaussian (PennyLane-0.31.0.dev0)\r\n- default.mixed (PennyLane-0.31.0.dev0)\r\n- default.qubit (PennyLane-0.31.0.dev0)\r\n- default.qubit.autograd (PennyLane-0.31.0.dev0)\r\n- default.qubit.jax (PennyLane-0.31.0.dev0)\r\n- default.qubit.tf (PennyLane-0.31.0.dev0)\r\n- default.qubit.torch (PennyLane-0.31.0.dev0)\r\n- default.qutrit (PennyLane-0.31.0.dev0)\r\n- null.qubit (PennyLane-0.31.0.dev0)\r\n- lightning.qubit (PennyLane-Lightning-0.30.0)\r\n```\r\n\r\n\r\n### Existing GitHub issues\r\n\r\n- [X] I have searched existing GitHub issues to make sure the issue does not already exist.\n", "before_files": [{"content": "# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nContains ``FolderMapView`` for reading the ``foldermap.json`` file in the\ndatasets bucket.\n\"\"\"\n\n\nimport typing\nfrom collections.abc import Mapping\nfrom pathlib import PurePosixPath\nfrom typing import Any, List, Literal, Optional, Tuple, Union\n\nfrom .params import Description, ParamArg, ParamVal\n\n\n# Type for a dataset path, relative to the foldermap.json file\nclass DataPath(PurePosixPath):\n \"\"\"Type for Dataset Path, relative to the foldermap.json file.\"\"\"\n\n def __repr__(self) -> str:\n return repr(str(self))\n\n\nclass FolderMapView(typing.Mapping[str, Union[\"FolderMapView\", DataPath]]):\n \"\"\"Provides a read-only view of the ``foldermap.json`` file in\n the datasets bucket. The folder map is a nested mapping of\n dataset parameters to their path, relative to the ``foldermap.json``\n file.\n\n A dictionary in the folder map can optionally specify a default\n paramater using the '__default' key. 
This view hides that\n key, and allows the default parameter to be accessed.\n\n For example, the underlying foldermap data will look like\n this:\n\n {\n \"__params\": {\n \"qchem\": [\"molname\", \"basis\", \"bondlength\"]\n },\n \"qchem\": {\n \"O2\": {\n \"__default\": \"STO-3G\",\n \"STO-3G\": {\n \"__default\": \"0.5\",\n \"0.5\": \"qchem/O2/STO-3G/0.5.h5\",\n \"0.6\": \"qchem/O2/STO-3G/0.6.h5\"\n }\n },\n \"H2\": {\n \"__default\": \"STO-3G\",\n \"STO-3G\": {\n \"__default\": \"0.7\",\n \"0.7\": \"qchem/H2/STO-3G/0.7.h5\"\n }\n }\n },\n }\n\n When accessed through ``FolderMapView``, the '__default' and '__params'\n keys will be hidden.\n \"\"\"\n\n __PRIVATE_KEYS = {\"__default\", \"__params\"}\n\n def __init__(self, __curr_level: typing.Mapping[str, Any]) -> None:\n \"\"\"Initialize the mapping.\n\n Args:\n __data_struct: The top level foldermap\n \"\"\"\n self.__curr_level = __curr_level\n\n def get_default_key(self) -> Optional[str]:\n \"\"\"Get the default key for this level of the foldermap.\n Raises a ValueError if it does not have a default.\n \"\"\"\n return self.__curr_level.get(\"__default\")\n\n def find(\n self,\n data_name: str,\n missing_default: Optional[ParamArg] = ParamArg.DEFAULT,\n **params: Union[typing.Iterable[ParamVal], ParamArg],\n ) -> List[Tuple[Description, DataPath]]:\n \"\"\"Returns a 2-tuple of dataset description and paths, for each dataset that\n matches ``params``.\"\"\"\n\n try:\n data_names_to_params = self.__curr_level[\"__params\"]\n except KeyError as exc:\n raise RuntimeError(\"Can only call find() from top level of foldermap\") from exc\n\n try:\n param_names: List[str] = data_names_to_params[data_name]\n except KeyError as exc:\n raise ValueError(f\"No datasets with data name: '{data_name}'\") from exc\n\n curr: List[Tuple[Description, Union[FolderMapView, DataPath]]] = [\n (Description(()), self[data_name])\n ]\n todo: List[Tuple[Description, Union[FolderMapView, DataPath]]] = []\n done: List[Tuple[Description, DataPath]] = []\n\n for param_name in param_names:\n param_arg = params.get(param_name, missing_default)\n\n while curr:\n curr_description, curr_level = curr.pop()\n if param_arg == ParamArg.FULL:\n next_params = curr_level\n elif param_arg == ParamArg.DEFAULT:\n default = curr_level.get_default_key()\n if default is None:\n raise ValueError(f\"No default available for parameter '{param_name}'\")\n next_params = (default,)\n elif isinstance(param_arg, str):\n next_params = (param_arg,)\n else:\n next_params = param_arg\n\n try:\n todo.extend(\n (\n Description((*curr_description.items(), (param_name, next_param))),\n curr_level[next_param],\n )\n for next_param in next_params\n )\n except KeyError as exc:\n raise ValueError(\n f\"{param_name} '{exc.args[0]}' is not available. Available values are: {list(curr_level)}\"\n ) from exc\n\n curr, todo = todo, curr\n\n done.extend(curr)\n\n return done\n\n def __getitem__(\n self, __key: Union[str, Literal[ParamArg.DEFAULT]]\n ) -> Union[\"FolderMapView\", DataPath]:\n \"\"\"Gets the item with key. 
If key is ``ParamArg.DEFAULT``, return the\n item under the default parameter, or raise a ``ValueError`` if no\n default exists.\"\"\"\n if __key in self.__PRIVATE_KEYS:\n raise KeyError(__key)\n\n if __key == ParamArg.DEFAULT:\n default = self.get_default_key()\n if default is None:\n raise ValueError(\"No default available\")\n return self[default]\n\n elem = self.__curr_level[__key]\n if isinstance(elem, Mapping):\n return FolderMapView(elem)\n\n return DataPath(elem)\n\n def __iter__(self) -> typing.Iterator[str]:\n return (key for key in self.__curr_level.keys() if key not in self.__PRIVATE_KEYS)\n\n def keys(self) -> typing.FrozenSet[str]:\n return frozenset(iter(self))\n\n def __len__(self) -> int:\n return sum(1 for _ in self.__iter__())\n\n def __repr__(self) -> str:\n return repr(dict(self))\n\n def __str__(self) -> str:\n return str(dict(self))\n", "path": "pennylane/data/data_manager/foldermap.py"}], "after_files": [{"content": "# Copyright 2018-2023 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nContains ``FolderMapView`` for reading the ``foldermap.json`` file in the\ndatasets bucket.\n\"\"\"\n\n\nimport typing\nfrom collections.abc import Mapping\nfrom pathlib import PurePosixPath\nfrom typing import Any, List, Literal, Optional, Tuple, Union\n\nfrom .params import Description, ParamArg, ParamVal\n\n\n# Type for a dataset path, relative to the foldermap.json file\nclass DataPath(PurePosixPath):\n \"\"\"Type for Dataset Path, relative to the foldermap.json file.\"\"\"\n\n def __repr__(self) -> str:\n return repr(str(self))\n\n\nclass FolderMapView(typing.Mapping[str, Union[\"FolderMapView\", DataPath]]):\n \"\"\"Provides a read-only view of the ``foldermap.json`` file in\n the datasets bucket. The folder map is a nested mapping of\n dataset parameters to their path, relative to the ``foldermap.json``\n file.\n\n A dictionary in the folder map can optionally specify a default\n paramater using the '__default' key. 
This view hides that\n key, and allows the default parameter to be accessed.\n\n For example, the underlying foldermap data will look like\n this:\n\n {\n \"__params\": {\n \"qchem\": [\"molname\", \"basis\", \"bondlength\"]\n },\n \"qchem\": {\n \"O2\": {\n \"__default\": \"STO-3G\",\n \"STO-3G\": {\n \"__default\": \"0.5\",\n \"0.5\": \"qchem/O2/STO-3G/0.5.h5\",\n \"0.6\": \"qchem/O2/STO-3G/0.6.h5\"\n }\n },\n \"H2\": {\n \"__default\": \"STO-3G\",\n \"STO-3G\": {\n \"__default\": \"0.7\",\n \"0.7\": \"qchem/H2/STO-3G/0.7.h5\"\n }\n }\n },\n }\n\n When accessed through ``FolderMapView``, the '__default' and '__params'\n keys will be hidden.\n \"\"\"\n\n __PRIVATE_KEYS = {\"__default\", \"__params\"}\n\n def __init__(self, __curr_level: typing.Mapping[str, Any]) -> None:\n \"\"\"Initialize the mapping.\n\n Args:\n __data_struct: The top level foldermap\n \"\"\"\n self.__curr_level = __curr_level\n\n def get_default_key(self) -> Optional[str]:\n \"\"\"Get the default key for this level of the foldermap.\n Raises a ValueError if it does not have a default.\n \"\"\"\n return self.__curr_level.get(\"__default\")\n\n def find(\n self,\n data_name: str,\n missing_default: Optional[ParamArg] = ParamArg.DEFAULT,\n **params: Union[typing.Iterable[ParamVal], ParamArg],\n ) -> List[Tuple[Description, DataPath]]:\n \"\"\"Returns a 2-tuple of dataset description and paths, for each dataset that\n matches ``params``.\"\"\"\n\n try:\n data_names_to_params = self.__curr_level[\"__params\"]\n except KeyError as exc:\n raise RuntimeError(\"Can only call find() from top level of foldermap\") from exc\n\n try:\n param_names: List[str] = data_names_to_params[data_name]\n except KeyError as exc:\n raise ValueError(f\"No datasets with data name: '{data_name}'\") from exc\n\n curr: List[Tuple[Description, Union[FolderMapView, DataPath]]] = [\n (Description(()), self[data_name])\n ]\n todo: List[Tuple[Description, Union[FolderMapView, DataPath]]] = []\n done: List[Tuple[Description, DataPath]] = []\n\n for param_name in param_names:\n param_arg = params.get(param_name, missing_default)\n\n while curr:\n curr_description, curr_level = curr.pop()\n\n if param_arg == ParamArg.FULL:\n next_params = curr_level\n elif param_arg == ParamArg.DEFAULT:\n default = curr_level.get_default_key()\n if default is None:\n raise ValueError(f\"No default available for parameter '{param_name}'\")\n next_params = (default,)\n elif isinstance(param_arg, str):\n next_params = (param_arg,)\n else:\n next_params = param_arg\n\n for next_param in next_params:\n try:\n fmap_next = curr_level[next_param]\n except KeyError:\n continue\n\n todo.append(\n (\n Description((*curr_description.items(), (param_name, next_param))),\n fmap_next,\n )\n )\n\n if len(todo) == 0:\n # None of the parameters matched\n param_arg_repr = (\n repr([param_arg])\n if isinstance(param_arg, (str, ParamArg))\n else repr(list(param_arg))\n )\n raise ValueError(\n f\"{param_name} value(s) {param_arg_repr} are not available. Available values are: {list(curr_level)}\"\n )\n\n curr, todo = todo, curr\n\n done.extend(curr)\n\n return done\n\n def __getitem__(\n self, __key: Union[str, Literal[ParamArg.DEFAULT]]\n ) -> Union[\"FolderMapView\", DataPath]:\n \"\"\"Gets the item with key. 
If key is ``ParamArg.DEFAULT``, return the\n item under the default parameter, or raise a ``ValueError`` if no\n default exists.\"\"\"\n if __key in self.__PRIVATE_KEYS:\n raise KeyError(__key)\n\n if __key == ParamArg.DEFAULT:\n default = self.get_default_key()\n if default is None:\n raise ValueError(\"No default available\")\n return self[default]\n\n elem = self.__curr_level[__key]\n if isinstance(elem, Mapping):\n return FolderMapView(elem)\n\n return DataPath(elem)\n\n def __iter__(self) -> typing.Iterator[str]:\n return (key for key in self.__curr_level.keys() if key not in self.__PRIVATE_KEYS)\n\n def keys(self) -> typing.FrozenSet[str]:\n return frozenset(iter(self))\n\n def __len__(self) -> int:\n return sum(1 for _ in self.__iter__())\n\n def __repr__(self) -> str:\n return repr(dict(self))\n\n def __str__(self) -> str:\n return str(dict(self))\n", "path": "pennylane/data/data_manager/foldermap.py"}]}
| 2,851 | 375 |
gh_patches_debug_36019
|
rasdani/github-patches
|
git_diff
|
pytorch__TensorRT-905
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
✨[Feature] Enable debug logging with a context
**Is your feature request related to a problem? Please describe.**
<!--A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]-->
Right now it seems like users either don't know how to enable debug logging or simply don't enable it. We can probably add some syntax to make this easier.
**Describe the solution you'd like**
<!--A clear and concise description of what you want to happen.-->
I would love to see something like:
```py
import torch_tensorrt as torchtrt
with torchtrt.debug:
torchtrt.ts.compile(....)
```
under the hood this would be equivalent to:
```py
import torch_tensorrt as torchtrt
torchtrt.logging.set_reportable_log_level(torchtrt.logging.Level.Debug)
torchtrt.ts.compile(....)
torchtrt.logging.set_reportable_log_level(torchtrt.logging.Level.Error)
```
**Describe alternatives you've considered**
<!--A clear and concise description of any alternative solutions or features you've considered.-->
**Additional context**
<!--Add any other context or screenshots about the feature request here.-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/torch_tensorrt/logging.py`
Content:
```
1 from enum import Enum
2 from torch_tensorrt._C import _get_logging_prefix, _set_logging_prefix, \
3 _get_reportable_log_level, _set_reportable_log_level, \
4 _get_is_colored_output_on, _set_is_colored_output_on, \
5 _log, LogLevel
6
7
8 class Level(Enum):
9 """Enum to set the minimum required logging level to print a message to stdout
10 """
11 InternalError = LogLevel.INTERNAL_ERROR
12 Error = LogLevel.ERROR
13 Warning = LogLevel.WARNING
14 Info = LogLevel.INFO
15 Debug = LogLevel.DEBUG
16 Graph = LogLevel.GRAPH
17
18 @staticmethod
19 def _to_internal_level(external) -> LogLevel:
20 if external == Level.InternalError:
21 return LogLevel.INTERNAL_ERROR
22 if external == Level.Error:
23 return LogLevel.ERROR
24 if external == Level.Warning:
25 return LogLevel.WARNING
26 if external == Level.Info:
27 return LogLevel.INFO
28 if external == Level.Debug:
29 return LogLevel.DEBUG
30 if external == Level.Graph:
31 return LogLevel.GRAPH
32
33
34 def get_logging_prefix() -> str:
35 """Get the prefix set for logging messages
36
37 Returns:
38 str: Prefix used for logger
39 """
40 return _get_logging_prefix()
41
42
43 def set_logging_prefix(prefix: str):
44 """Set the prefix used when logging messages
45
46 Args:
47 prefix (str): Prefix to use for logging messages
48 """
49 _set_logging_prefix(prefix)
50
51
52 def get_reportable_log_level() -> Level:
53 """Get the level required for a message to be printed in the log
54
55 Returns:
56 torch_tensorrt.logging.Level: The enum representing the level required to print
57 """
58 return Level(_get_reportable_log_level())
59
60
61 def set_reportable_log_level(level: Level):
62 """Set the level required for a message to be printed to the log
63
64 Args:
65 level (torch_tensorrt.logging.Level): The enum representing the level required to print
66 """
67 _set_reportable_log_level(Level._to_internal_level(level))
68
69
70 def get_is_colored_output_on() -> bool:
71 """Get if colored output is enabled for logging
72
73 Returns:
74 bool: If colored output is one
75 """
76 return _get_is_colored_output_on()
77
78
79 def set_is_colored_output_on(colored_output_on: bool):
80 """Enable or disable color in the log output
81
82 Args:
83 colored_output_on (bool): If colored output should be enabled or not
84 """
85 _set_is_colored_output_on(colored_output_on)
86
87
88 def log(level: Level, msg: str):
89 """Add a new message to the log
90
91 Adds a new message to the log at a specified level. The message
92 will only get printed out if Level > reportable_log_level
93
94 Args:
95 level (torch_tensorrt.logging.Level): Severity of the message
96 msg (str): Actual message text
97 """
98 _log(Level._to_internal_level(level), msg)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/py/torch_tensorrt/logging.py b/py/torch_tensorrt/logging.py
--- a/py/torch_tensorrt/logging.py
+++ b/py/torch_tensorrt/logging.py
@@ -96,3 +96,113 @@
msg (str): Actual message text
"""
_log(Level._to_internal_level(level), msg)
+
+ InternalError = LogLevel.INTERNAL_ERROR
+ Error = LogLevel.ERROR
+ Warning = LogLevel.WARNING
+ Info = LogLevel.INFO
+ Debug = LogLevel.DEBUG
+ Graph = LogLevel.GRAPH
+
+
+class internal_errors:
+ """Context-manager to limit displayed log messages to just internal errors
+
+ Example::
+
+ with torch_tensorrt.logging.internal_errors():
+ outputs = model_torchtrt(inputs)
+ """
+
+ def __enter__(self):
+ self.external_lvl = get_reportable_log_level()
+ set_reportable_log_level(Level.InternalError)
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ set_reportable_log_level(self.external_lvl)
+
+
+class errors:
+ """Context-manager to limit displayed log messages to just errors and above
+
+ Example::
+
+ with torch_tensorrt.logging.errors():
+ outputs = model_torchtrt(inputs)
+ """
+
+ def __enter__(self):
+ self.external_lvl = get_reportable_log_level()
+ set_reportable_log_level(Level.Error)
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ set_reportable_log_level(self.external_lvl)
+
+
+class warnings:
+ """Context-manager to limit displayed log messages to just warnings and above
+
+ Example::
+
+ with torch_tensorrt.logging.warnings():
+ model_trt = torch_tensorrt.compile(model, **spec)
+ """
+
+ def __enter__(self):
+ self.external_lvl = get_reportable_log_level()
+ set_reportable_log_level(Level.Warning)
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ set_reportable_log_level(self.external_lvl)
+
+
+class info:
+ """Context-manager to display all info and greater severity messages
+
+ Example::
+
+ with torch_tensorrt.logging.info():
+ model_trt = torch_tensorrt.compile(model, **spec)
+ """
+
+ def __enter__(self):
+ self.external_lvl = get_reportable_log_level()
+ set_reportable_log_level(Level.Info)
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ set_reportable_log_level(self.external_lvl)
+
+
+class debug:
+ """Context-manager to display full debug information through the logger
+
+ Example::
+
+ with torch_tensorrt.logging.debug():
+ model_trt = torch_tensorrt.compile(model, **spec)
+ """
+
+ def __enter__(self):
+ self.external_lvl = get_reportable_log_level()
+ set_reportable_log_level(Level.Debug)
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ set_reportable_log_level(self.external_lvl)
+
+
+class graphs:
+ """Context-manager to display the results of intermediate lowering passes
+ as well as full debug information through the logger
+
+ Example::
+
+ with torch_tensorrt.logging.graphs():
+ model_trt = torch_tensorrt.compile(model, **spec)
+ """
+
+ def __enter__(self):
+ self.external_lvl = get_reportable_log_level()
+ set_reportable_log_level(Level.Graph)
+
+ def __exit__(self, exc_type, exc_value, exc_tb):
+ set_reportable_log_level(self.external_lvl)
|
{"golden_diff": "diff --git a/py/torch_tensorrt/logging.py b/py/torch_tensorrt/logging.py\n--- a/py/torch_tensorrt/logging.py\n+++ b/py/torch_tensorrt/logging.py\n@@ -96,3 +96,113 @@\n msg (str): Actual message text\n \"\"\"\n _log(Level._to_internal_level(level), msg)\n+\n+ InternalError = LogLevel.INTERNAL_ERROR\n+ Error = LogLevel.ERROR\n+ Warning = LogLevel.WARNING\n+ Info = LogLevel.INFO\n+ Debug = LogLevel.DEBUG\n+ Graph = LogLevel.GRAPH\n+\n+\n+class internal_errors:\n+ \"\"\"Context-manager to limit displayed log messages to just internal errors\n+\n+ Example::\n+\n+ with torch_tensorrt.logging.internal_errors():\n+ outputs = model_torchtrt(inputs)\n+ \"\"\"\n+\n+ def __enter__(self):\n+ self.external_lvl = get_reportable_log_level()\n+ set_reportable_log_level(Level.InternalError)\n+\n+ def __exit__(self, exc_type, exc_value, exc_tb):\n+ set_reportable_log_level(self.external_lvl)\n+\n+\n+class errors:\n+ \"\"\"Context-manager to limit displayed log messages to just errors and above\n+\n+ Example::\n+\n+ with torch_tensorrt.logging.errors():\n+ outputs = model_torchtrt(inputs)\n+ \"\"\"\n+\n+ def __enter__(self):\n+ self.external_lvl = get_reportable_log_level()\n+ set_reportable_log_level(Level.Error)\n+\n+ def __exit__(self, exc_type, exc_value, exc_tb):\n+ set_reportable_log_level(self.external_lvl)\n+\n+\n+class warnings:\n+ \"\"\"Context-manager to limit displayed log messages to just warnings and above\n+\n+ Example::\n+\n+ with torch_tensorrt.logging.warnings():\n+ model_trt = torch_tensorrt.compile(model, **spec)\n+ \"\"\"\n+\n+ def __enter__(self):\n+ self.external_lvl = get_reportable_log_level()\n+ set_reportable_log_level(Level.Warning)\n+\n+ def __exit__(self, exc_type, exc_value, exc_tb):\n+ set_reportable_log_level(self.external_lvl)\n+\n+\n+class info:\n+ \"\"\"Context-manager to display all info and greater severity messages\n+\n+ Example::\n+\n+ with torch_tensorrt.logging.info():\n+ model_trt = torch_tensorrt.compile(model, **spec)\n+ \"\"\"\n+\n+ def __enter__(self):\n+ self.external_lvl = get_reportable_log_level()\n+ set_reportable_log_level(Level.Info)\n+\n+ def __exit__(self, exc_type, exc_value, exc_tb):\n+ set_reportable_log_level(self.external_lvl)\n+\n+\n+class debug:\n+ \"\"\"Context-manager to display full debug information through the logger\n+\n+ Example::\n+\n+ with torch_tensorrt.logging.debug():\n+ model_trt = torch_tensorrt.compile(model, **spec)\n+ \"\"\"\n+\n+ def __enter__(self):\n+ self.external_lvl = get_reportable_log_level()\n+ set_reportable_log_level(Level.Debug)\n+\n+ def __exit__(self, exc_type, exc_value, exc_tb):\n+ set_reportable_log_level(self.external_lvl)\n+\n+\n+class graphs:\n+ \"\"\"Context-manager to display the results of intermediate lowering passes\n+ as well as full debug information through the logger\n+\n+ Example::\n+\n+ with torch_tensorrt.logging.graphs():\n+ model_trt = torch_tensorrt.compile(model, **spec)\n+ \"\"\"\n+\n+ def __enter__(self):\n+ self.external_lvl = get_reportable_log_level()\n+ set_reportable_log_level(Level.Graph)\n+\n+ def __exit__(self, exc_type, exc_value, exc_tb):\n+ set_reportable_log_level(self.external_lvl)\n", "issue": "\u2728[Feature] Enable debug logging with a context \n**Is your feature request related to a problem? Please describe.**\r\n<!--A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]-->\r\n\r\nRight now seems like users don't know how or don't enable debug logging. 
We can probably add some syntax to make this easier.\r\n\r\n**Describe the solution you'd like**\r\n<!--A clear and concise description of what you want to happen.-->\r\n\r\nI would love to see something like: \r\n\r\n```py\r\nimport torch_tensorrt as torchtrt \r\n\r\nwith torchtrt.debug:\r\n torchtrt.ts.compile(....)\r\n\r\n```\r\n\r\nunder the hood this would be equivalent to:\r\n\r\n ```py\r\nimport torch_tensorrt as torchtrt \r\n\r\ntorchtrt.logging.set_reportable_log_level(torchtrt.logging.Level.Debug)\r\ntorchtrt.ts.compile(....)\r\ntorchtrt.logging.set_reportable_log_level(torchtrt.logging.Level.Error)\r\n```\r\n\r\n**Describe alternatives you've considered**\r\n<!--A clear and concise description of any alternative solutions or features you've considered.-->\r\n\r\n**Additional context**\r\n<!--Add any other context or screenshots about the feature request here.-->\r\n\n", "before_files": [{"content": "from enum import Enum\nfrom torch_tensorrt._C import _get_logging_prefix, _set_logging_prefix, \\\n _get_reportable_log_level, _set_reportable_log_level, \\\n _get_is_colored_output_on, _set_is_colored_output_on, \\\n _log, LogLevel\n\n\nclass Level(Enum):\n \"\"\"Enum to set the minimum required logging level to print a message to stdout\n \"\"\"\n InternalError = LogLevel.INTERNAL_ERROR\n Error = LogLevel.ERROR\n Warning = LogLevel.WARNING\n Info = LogLevel.INFO\n Debug = LogLevel.DEBUG\n Graph = LogLevel.GRAPH\n\n @staticmethod\n def _to_internal_level(external) -> LogLevel:\n if external == Level.InternalError:\n return LogLevel.INTERNAL_ERROR\n if external == Level.Error:\n return LogLevel.ERROR\n if external == Level.Warning:\n return LogLevel.WARNING\n if external == Level.Info:\n return LogLevel.INFO\n if external == Level.Debug:\n return LogLevel.DEBUG\n if external == Level.Graph:\n return LogLevel.GRAPH\n\n\ndef get_logging_prefix() -> str:\n \"\"\"Get the prefix set for logging messages\n\n Returns:\n str: Prefix used for logger\n \"\"\"\n return _get_logging_prefix()\n\n\ndef set_logging_prefix(prefix: str):\n \"\"\"Set the prefix used when logging messages\n\n Args:\n prefix (str): Prefix to use for logging messages\n \"\"\"\n _set_logging_prefix(prefix)\n\n\ndef get_reportable_log_level() -> Level:\n \"\"\"Get the level required for a message to be printed in the log\n\n Returns:\n torch_tensorrt.logging.Level: The enum representing the level required to print\n \"\"\"\n return Level(_get_reportable_log_level())\n\n\ndef set_reportable_log_level(level: Level):\n \"\"\"Set the level required for a message to be printed to the log\n\n Args:\n level (torch_tensorrt.logging.Level): The enum representing the level required to print\n \"\"\"\n _set_reportable_log_level(Level._to_internal_level(level))\n\n\ndef get_is_colored_output_on() -> bool:\n \"\"\"Get if colored output is enabled for logging\n\n Returns:\n bool: If colored output is one\n \"\"\"\n return _get_is_colored_output_on()\n\n\ndef set_is_colored_output_on(colored_output_on: bool):\n \"\"\"Enable or disable color in the log output\n\n Args:\n colored_output_on (bool): If colored output should be enabled or not\n \"\"\"\n _set_is_colored_output_on(colored_output_on)\n\n\ndef log(level: Level, msg: str):\n \"\"\"Add a new message to the log\n\n Adds a new message to the log at a specified level. 
The message\n will only get printed out if Level > reportable_log_level\n\n Args:\n level (torch_tensorrt.logging.Level): Severity of the message\n msg (str): Actual message text\n \"\"\"\n _log(Level._to_internal_level(level), msg)\n", "path": "py/torch_tensorrt/logging.py"}], "after_files": [{"content": "from enum import Enum\nfrom torch_tensorrt._C import _get_logging_prefix, _set_logging_prefix, \\\n _get_reportable_log_level, _set_reportable_log_level, \\\n _get_is_colored_output_on, _set_is_colored_output_on, \\\n _log, LogLevel\n\n\nclass Level(Enum):\n \"\"\"Enum to set the minimum required logging level to print a message to stdout\n \"\"\"\n InternalError = LogLevel.INTERNAL_ERROR\n Error = LogLevel.ERROR\n Warning = LogLevel.WARNING\n Info = LogLevel.INFO\n Debug = LogLevel.DEBUG\n Graph = LogLevel.GRAPH\n\n @staticmethod\n def _to_internal_level(external) -> LogLevel:\n if external == Level.InternalError:\n return LogLevel.INTERNAL_ERROR\n if external == Level.Error:\n return LogLevel.ERROR\n if external == Level.Warning:\n return LogLevel.WARNING\n if external == Level.Info:\n return LogLevel.INFO\n if external == Level.Debug:\n return LogLevel.DEBUG\n if external == Level.Graph:\n return LogLevel.GRAPH\n\n\ndef get_logging_prefix() -> str:\n \"\"\"Get the prefix set for logging messages\n\n Returns:\n str: Prefix used for logger\n \"\"\"\n return _get_logging_prefix()\n\n\ndef set_logging_prefix(prefix: str):\n \"\"\"Set the prefix used when logging messages\n\n Args:\n prefix (str): Prefix to use for logging messages\n \"\"\"\n _set_logging_prefix(prefix)\n\n\ndef get_reportable_log_level() -> Level:\n \"\"\"Get the level required for a message to be printed in the log\n\n Returns:\n torch_tensorrt.logging.Level: The enum representing the level required to print\n \"\"\"\n return Level(_get_reportable_log_level())\n\n\ndef set_reportable_log_level(level: Level):\n \"\"\"Set the level required for a message to be printed to the log\n\n Args:\n level (torch_tensorrt.logging.Level): The enum representing the level required to print\n \"\"\"\n _set_reportable_log_level(Level._to_internal_level(level))\n\n\ndef get_is_colored_output_on() -> bool:\n \"\"\"Get if colored output is enabled for logging\n\n Returns:\n bool: If colored output is one\n \"\"\"\n return _get_is_colored_output_on()\n\n\ndef set_is_colored_output_on(colored_output_on: bool):\n \"\"\"Enable or disable color in the log output\n\n Args:\n colored_output_on (bool): If colored output should be enabled or not\n \"\"\"\n _set_is_colored_output_on(colored_output_on)\n\n\ndef log(level: Level, msg: str):\n \"\"\"Add a new message to the log\n\n Adds a new message to the log at a specified level. 
The message\n will only get printed out if Level > reportable_log_level\n\n Args:\n level (torch_tensorrt.logging.Level): Severity of the message\n msg (str): Actual message text\n \"\"\"\n _log(Level._to_internal_level(level), msg)\n\n InternalError = LogLevel.INTERNAL_ERROR\n Error = LogLevel.ERROR\n Warning = LogLevel.WARNING\n Info = LogLevel.INFO\n Debug = LogLevel.DEBUG\n Graph = LogLevel.GRAPH\n\n\nclass internal_errors:\n \"\"\"Context-manager to limit displayed log messages to just internal errors\n\n Example::\n\n with torch_tensorrt.logging.internal_errors():\n outputs = model_torchtrt(inputs)\n \"\"\"\n\n def __enter__(self):\n self.external_lvl = get_reportable_log_level()\n set_reportable_log_level(Level.InternalError)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n set_reportable_log_level(self.external_lvl)\n\n\nclass errors:\n \"\"\"Context-manager to limit displayed log messages to just errors and above\n\n Example::\n\n with torch_tensorrt.logging.errors():\n outputs = model_torchtrt(inputs)\n \"\"\"\n\n def __enter__(self):\n self.external_lvl = get_reportable_log_level()\n set_reportable_log_level(Level.Error)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n set_reportable_log_level(self.external_lvl)\n\n\nclass warnings:\n \"\"\"Context-manager to limit displayed log messages to just warnings and above\n\n Example::\n\n with torch_tensorrt.logging.warnings():\n model_trt = torch_tensorrt.compile(model, **spec)\n \"\"\"\n\n def __enter__(self):\n self.external_lvl = get_reportable_log_level()\n set_reportable_log_level(Level.Warning)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n set_reportable_log_level(self.external_lvl)\n\n\nclass info:\n \"\"\"Context-manager to display all info and greater severity messages\n\n Example::\n\n with torch_tensorrt.logging.info():\n model_trt = torch_tensorrt.compile(model, **spec)\n \"\"\"\n\n def __enter__(self):\n self.external_lvl = get_reportable_log_level()\n set_reportable_log_level(Level.Info)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n set_reportable_log_level(self.external_lvl)\n\n\nclass debug:\n \"\"\"Context-manager to display full debug information through the logger\n\n Example::\n\n with torch_tensorrt.logging.debug():\n model_trt = torch_tensorrt.compile(model, **spec)\n \"\"\"\n\n def __enter__(self):\n self.external_lvl = get_reportable_log_level()\n set_reportable_log_level(Level.Debug)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n set_reportable_log_level(self.external_lvl)\n\n\nclass graphs:\n \"\"\"Context-manager to display the results of intermediate lowering passes\n as well as full debug information through the logger\n\n Example::\n\n with torch_tensorrt.logging.graphs():\n model_trt = torch_tensorrt.compile(model, **spec)\n \"\"\"\n\n def __enter__(self):\n self.external_lvl = get_reportable_log_level()\n set_reportable_log_level(Level.Graph)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n set_reportable_log_level(self.external_lvl)\n", "path": "py/torch_tensorrt/logging.py"}]}
| 1,330 | 816 |
gh_patches_debug_20562
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-13464
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pants package does not build missing docker images if previous build was cached.
**Describe the bug**
Pants' caching of build targets does not take into consideration that the final target does not exist.
Take this example: https://www.pantsbuild.org/v2.8/docs/docker#example
```
$ ./pants package src/docker/hw/Dockerfile
[...]
18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex
18:07:31.83 [INFO] Completed: Building docker image helloworld:latest
18:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
helloworld latest abcdefabcdef 6 seconds ago 420MB
$ docker rmi helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
$ ./pants package src/docker/hw/Dockerfile
19:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
```
If you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.
**Pants version**
2.8rc1
**OS**
Linux
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/docker/util_rules/docker_binary.py`
Content:
```
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7 from typing import Mapping
8
9 from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
10 from pants.engine.fs import Digest
11 from pants.engine.process import (
12 BinaryNotFoundError,
13 BinaryPath,
14 BinaryPathRequest,
15 BinaryPaths,
16 BinaryPathTest,
17 Process,
18 SearchPath,
19 )
20 from pants.engine.rules import Get, collect_rules, rule
21 from pants.util.logging import LogLevel
22 from pants.util.strutil import pluralize
23
24
25 class DockerBinary(BinaryPath):
26 """The `docker` binary."""
27
28 DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
29
30 def build_image(
31 self,
32 tags: tuple[str, ...],
33 digest: Digest,
34 dockerfile: str | None = None,
35 build_args: DockerBuildArgs | None = None,
36 env: Mapping[str, str] | None = None,
37 ) -> Process:
38 args = [self.path, "build"]
39
40 for tag in tags:
41 args.extend(["-t", tag])
42
43 if build_args:
44 for build_arg in build_args:
45 args.extend(["--build-arg", build_arg])
46
47 if dockerfile:
48 args.extend(["-f", dockerfile])
49
50 # Add build context root.
51 args.append(".")
52
53 return Process(
54 argv=tuple(args),
55 description=(
56 f"Building docker image {tags[0]}"
57 + (f" +{pluralize(len(tags)-1, 'additional tag')}." if len(tags) > 1 else ".")
58 ),
59 env=env,
60 input_digest=digest,
61 )
62
63 def push_image(self, tags: tuple[str, ...]) -> Process | None:
64 if not tags:
65 return None
66
67 return Process(
68 argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
69 )
70
71
72 @dataclass(frozen=True)
73 class DockerBinaryRequest:
74 search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH
75
76
77 @rule(desc="Finding the `docker` binary", level=LogLevel.DEBUG)
78 async def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:
79 request = BinaryPathRequest(
80 binary_name="docker",
81 search_path=docker_request.search_path,
82 test=BinaryPathTest(args=["-v"]),
83 )
84 paths = await Get(BinaryPaths, BinaryPathRequest, request)
85 first_path = paths.first_path
86 if not first_path:
87 raise BinaryNotFoundError.from_request(request, rationale="interact with the docker daemon")
88 return DockerBinary(first_path.path, first_path.fingerprint)
89
90
91 @rule
92 async def get_docker() -> DockerBinary:
93 return await Get(DockerBinary, DockerBinaryRequest())
94
95
96 def rules():
97 return collect_rules()
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py
--- a/src/python/pants/backend/docker/util_rules/docker_binary.py
+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py
@@ -15,6 +15,7 @@
BinaryPaths,
BinaryPathTest,
Process,
+ ProcessCacheScope,
SearchPath,
)
from pants.engine.rules import Get, collect_rules, rule
@@ -58,6 +59,7 @@
),
env=env,
input_digest=digest,
+ cache_scope=ProcessCacheScope.PER_SESSION,
)
def push_image(self, tags: tuple[str, ...]) -> Process | None:
@@ -65,7 +67,9 @@
return None
return Process(
- argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
+ argv=(self.path, "push", *tags),
+ cache_scope=ProcessCacheScope.PER_SESSION,
+ description=f"Pushing docker image {tags[0]}",
)
|
{"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py\n--- a/src/python/pants/backend/docker/util_rules/docker_binary.py\n+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py\n@@ -15,6 +15,7 @@\n BinaryPaths,\n BinaryPathTest,\n Process,\n+ ProcessCacheScope,\n SearchPath,\n )\n from pants.engine.rules import Get, collect_rules, rule\n@@ -58,6 +59,7 @@\n ),\n env=env,\n input_digest=digest,\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n )\n \n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n@@ -65,7 +67,9 @@\n return None\n \n return Process(\n- argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n+ argv=(self.path, \"push\", *tags),\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n+ description=f\"Pushing docker image {tags[0]}\",\n )\n", "issue": "pants package does not build missing docker images if previous build was cached.\n**Describe the bug**\r\nPant's caching of build targets does not take into consideration that the final target does not exist.\r\n\r\nTake this example: https://www.pantsbuild.org/v2.8/docs/docker#example\r\n\r\n```\r\n$ ./pants package src/docker/hw/Dockerfile\r\n[...]\r\n18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex\r\n18:07:31.83 [INFO] Completed: Building docker image helloworld:latest\r\n18:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\nhelloworld latest abcdefabcdef 6 seconds ago 420MB\r\n\r\n$ docker rmi helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n\r\n$ ./pants package src/docker/hw/Dockerfile\r\n19:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n```\r\nIf you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.\r\n\r\n**Pants version**\r\n2.8rc1\r\n\r\n**OS**\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) 
> 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n ProcessCacheScope,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n cache_scope=ProcessCacheScope.PER_SESSION,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Pushing docker image {tags[0]}\",\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not 
first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}]}
| 1,400 | 245 |
gh_patches_debug_33920
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-5988
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Please cut a release of Cloud Asset
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `asset/setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20 name = 'google-cloud-cloudasset'
21 description = 'Cloud Asset API API client library'
22 version = '0.1.0'
23 release_status = '3 - Alpha'
24 dependencies = [
25 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',
26 'enum34; python_version < "3.4"',
27 'grpc-google-iam-v1<0.12dev,>=0.11.4',
28 ]
29
30 package_root = os.path.abspath(os.path.dirname(__file__))
31
32 readme_filename = os.path.join(package_root, 'README.rst')
33 with io.open(readme_filename, encoding='utf-8') as readme_file:
34 readme = readme_file.read()
35
36 packages = [
37 package for package in setuptools.find_packages()
38 if package.startswith('google')
39 ]
40
41 namespaces = ['google']
42 if 'google.cloud' in packages:
43 namespaces.append('google.cloud')
44
45 setuptools.setup(
46 name=name,
47 version=version,
48 description=description,
49 long_description=readme,
50 author='Google LLC',
51 author_email='[email protected]',
52 license='Apache 2.0',
53 url='https://github.com/GoogleCloudPlatform/google-cloud-python',
54 classifiers=[
55 release_status,
56 'Intended Audience :: Developers',
57 'License :: OSI Approved :: Apache Software License',
58 'Programming Language :: Python',
59 'Programming Language :: Python :: 2',
60 'Programming Language :: Python :: 2.7',
61 'Programming Language :: Python :: 3',
62 'Programming Language :: Python :: 3.4',
63 'Programming Language :: Python :: 3.5',
64 'Programming Language :: Python :: 3.6',
65 'Operating System :: OS Independent',
66 'Topic :: Internet',
67 ],
68 platforms='Posix; MacOS X; Windows',
69 packages=packages,
70 namespace_packages=namespaces,
71 install_requires=dependencies,
72 include_package_data=True,
73 zip_safe=False,
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/asset/setup.py b/asset/setup.py
--- a/asset/setup.py
+++ b/asset/setup.py
@@ -17,27 +17,38 @@
import setuptools
+# Package metadata.
+
name = 'google-cloud-cloudasset'
description = 'Cloud Asset API API client library'
version = '0.1.0'
-release_status = '3 - Alpha'
+# Should be one of:
+# 'Development Status :: 3 - Alpha'
+# 'Development Status :: 4 - Beta'
+# 'Development Status :: 5 - Production/Stable'
+release_status = 'Development Status :: 3 - Alpha'
dependencies = [
'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',
'enum34; python_version < "3.4"',
'grpc-google-iam-v1<0.12dev,>=0.11.4',
]
+# Setup boilerplate below this line.
+
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, 'README.rst')
with io.open(readme_filename, encoding='utf-8') as readme_file:
readme = readme_file.read()
+# Only include packages under the 'google' namespace. Do not include tests,
+# benchmarks, etc.
packages = [
package for package in setuptools.find_packages()
if package.startswith('google')
]
+# Determine which namespaces are needed.
namespaces = ['google']
if 'google.cloud' in packages:
namespaces.append('google.cloud')
@@ -59,9 +70,9 @@
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent',
'Topic :: Internet',
],
|
{"golden_diff": "diff --git a/asset/setup.py b/asset/setup.py\n--- a/asset/setup.py\n+++ b/asset/setup.py\n@@ -17,27 +17,38 @@\n \n import setuptools\n \n+# Package metadata.\n+\n name = 'google-cloud-cloudasset'\n description = 'Cloud Asset API API client library'\n version = '0.1.0'\n-release_status = '3 - Alpha'\n+# Should be one of:\n+# 'Development Status :: 3 - Alpha'\n+# 'Development Status :: 4 - Beta'\n+# 'Development Status :: 5 - Production/Stable'\n+release_status = 'Development Status :: 3 - Alpha'\n dependencies = [\n 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',\n 'enum34; python_version < \"3.4\"',\n 'grpc-google-iam-v1<0.12dev,>=0.11.4',\n ]\n \n+# Setup boilerplate below this line.\n+\n package_root = os.path.abspath(os.path.dirname(__file__))\n \n readme_filename = os.path.join(package_root, 'README.rst')\n with io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n \n+# Only include packages under the 'google' namespace. Do not include tests,\n+# benchmarks, etc.\n packages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')\n ]\n \n+# Determine which namespaces are needed.\n namespaces = ['google']\n if 'google.cloud' in packages:\n namespaces.append('google.cloud')\n@@ -59,9 +70,9 @@\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n", "issue": "Please cut a release of Cloud Asset\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\nname = 'google-cloud-cloudasset'\ndescription = 'Cloud Asset API API client library'\nversion = '0.1.0'\nrelease_status = '3 - Alpha'\ndependencies = [\n 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',\n 'enum34; python_version < \"3.4\"',\n 'grpc-google-iam-v1<0.12dev,>=0.11.4',\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')\n]\n\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 
'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "asset/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n# Package metadata.\n\nname = 'google-cloud-cloudasset'\ndescription = 'Cloud Asset API API client library'\nversion = '0.1.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 3 - Alpha'\ndependencies = [\n 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',\n 'enum34; python_version < \"3.4\"',\n 'grpc-google-iam-v1<0.12dev,>=0.11.4',\n]\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')\n]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "asset/setup.py"}]}
| 966 | 444 |
gh_patches_debug_6963
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-1193
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wandb logger doesn't upload saved model checkpoint for final epoch
## 🐛 Bug
When training a model on the TPU and using the wandb logger, the checkpoint for the last epoch trained doesn't get uploaded to wandb.
### To Reproduce
Colab notebook: https://colab.research.google.com/drive/1oPaRWGZcz6YEol012xFADN42LV-jowtT
--- END ISSUE ---
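For orientation, a minimal reproduction sketch of the reported setup (not taken from the linked notebook; `MyLightningModule`, the project name, and the checkpoint path are placeholders, and a configured wandb login plus a TPU runtime are assumed):

```python
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger

# Placeholder LightningModule; per the report any small model shows the symptom.
model = MyLightningModule()

wandb_logger = WandbLogger(project="demo")                     # assumes `wandb login` was run
checkpoint_cb = ModelCheckpoint(filepath="checkpoints/{epoch}")

trainer = pl.Trainer(
    logger=wandb_logger,
    checkpoint_callback=checkpoint_cb,
    max_epochs=2,
    num_tpu_cores=8,   # assumption: TPU flag as spelled in the 0.7.x-era API
)
trainer.fit(model)
# Observed per the issue: the checkpoint written for the final epoch is missing
# from the wandb run's uploaded files.
```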
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/loggers/wandb.py`
Content:
```
1 r"""
2
3 .. _wandb:
4
5 WandbLogger
6 -------------
7 """
8 import os
9 from argparse import Namespace
10 from typing import Optional, List, Dict, Union, Any
11
12 import torch.nn as nn
13
14 try:
15 import wandb
16 from wandb.wandb_run import Run
17 except ImportError: # pragma: no-cover
18 raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover
19 ' install it with `pip install wandb`.')
20
21 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only
22
23
24 class WandbLogger(LightningLoggerBase):
25 """
26 Logger for `W&B <https://www.wandb.com/>`_.
27
28 Args:
29 name (str): display name for the run.
30 save_dir (str): path where data is saved.
31 offline (bool): run offline (data can be streamed later to wandb servers).
32 id or version (str): sets the version, mainly used to resume a previous run.
33 anonymous (bool): enables or explicitly disables anonymous logging.
34 project (str): the name of the project to which this run will belong.
35 tags (list of str): tags associated with this run.
36
37 Example
38 --------
39 .. code-block:: python
40
41 from pytorch_lightning.loggers import WandbLogger
42 from pytorch_lightning import Trainer
43
44 wandb_logger = WandbLogger()
45 trainer = Trainer(logger=wandb_logger)
46 """
47
48 def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None,
49 offline: bool = False, id: Optional[str] = None, anonymous: bool = False,
50 version: Optional[str] = None, project: Optional[str] = None,
51 tags: Optional[List[str]] = None, experiment=None, entity=None):
52 super().__init__()
53 self._name = name
54 self._save_dir = save_dir
55 self._anonymous = 'allow' if anonymous else None
56 self._id = version or id
57 self._tags = tags
58 self._project = project
59 self._experiment = experiment
60 self._offline = offline
61 self._entity = entity
62
63 def __getstate__(self):
64 state = self.__dict__.copy()
65 # cannot be pickled
66 state['_experiment'] = None
67 # args needed to reload correct experiment
68 state['_id'] = self.experiment.id
69 return state
70
71 @property
72 def experiment(self) -> Run:
73 r"""
74
75 Actual wandb object. To use wandb features do the following.
76
77 Example::
78
79 self.logger.experiment.some_wandb_function()
80
81 """
82 if self._experiment is None:
83 if self._offline:
84 os.environ['WANDB_MODE'] = 'dryrun'
85 self._experiment = wandb.init(
86 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
87 id=self._id, resume='allow', tags=self._tags, entity=self._entity)
88 return self._experiment
89
90 def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
91 wandb.watch(model, log=log, log_freq=log_freq)
92
93 @rank_zero_only
94 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
95 params = self._convert_params(params)
96 self.experiment.config.update(params)
97
98 @rank_zero_only
99 def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
100 if step is not None:
101 metrics['global_step'] = step
102 self.experiment.log(metrics)
103
104 @rank_zero_only
105 def finalize(self, status: str = 'success') -> None:
106 try:
107 exit_code = 0 if status == 'success' else 1
108 wandb.join(exit_code)
109 except TypeError:
110 wandb.join()
111
112 @property
113 def name(self) -> str:
114 return self.experiment.project_name()
115
116 @property
117 def version(self) -> str:
118 return self.experiment.id
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py
--- a/pytorch_lightning/loggers/wandb.py
+++ b/pytorch_lightning/loggers/wandb.py
@@ -101,14 +101,6 @@
metrics['global_step'] = step
self.experiment.log(metrics)
- @rank_zero_only
- def finalize(self, status: str = 'success') -> None:
- try:
- exit_code = 0 if status == 'success' else 1
- wandb.join(exit_code)
- except TypeError:
- wandb.join()
-
@property
def name(self) -> str:
return self.experiment.project_name()
|
{"golden_diff": "diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py\n--- a/pytorch_lightning/loggers/wandb.py\n+++ b/pytorch_lightning/loggers/wandb.py\n@@ -101,14 +101,6 @@\n metrics['global_step'] = step\n self.experiment.log(metrics)\n \n- @rank_zero_only\n- def finalize(self, status: str = 'success') -> None:\n- try:\n- exit_code = 0 if status == 'success' else 1\n- wandb.join(exit_code)\n- except TypeError:\n- wandb.join()\n-\n @property\n def name(self) -> str:\n return self.experiment.project_name()\n", "issue": "Wandb logger doesn't upload saved model checkpoint for final epoch\n## \ud83d\udc1b Bug\r\n\r\nWhen training a model on the TPU and using the wandb logger, the checkpoint for the last epoch trained doesn't get uploaded to wandb.\r\n\r\n### To Reproduce\r\n\r\nColab notebook: https://colab.research.google.com/drive/1oPaRWGZcz6YEol012xFADN42LV-jowtT\n", "before_files": [{"content": "r\"\"\"\n\n.. _wandb:\n\nWandbLogger\n-------------\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, List, Dict, Union, Any\n\nimport torch.nn as nn\n\ntry:\n import wandb\n from wandb.wandb_run import Run\nexcept ImportError: # pragma: no-cover\n raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover\n ' install it with `pip install wandb`.')\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Logger for `W&B <https://www.wandb.com/>`_.\n\n Args:\n name (str): display name for the run.\n save_dir (str): path where data is saved.\n offline (bool): run offline (data can be streamed later to wandb servers).\n id or version (str): sets the version, mainly used to resume a previous run.\n anonymous (bool): enables or explicitly disables anonymous logging.\n project (str): the name of the project to which this run will belong.\n tags (list of str): tags associated with this run.\n\n Example\n --------\n .. code-block:: python\n\n from pytorch_lightning.loggers import WandbLogger\n from pytorch_lightning import Trainer\n\n wandb_logger = WandbLogger()\n trainer = Trainer(logger=wandb_logger)\n \"\"\"\n\n def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None,\n offline: bool = False, id: Optional[str] = None, anonymous: bool = False,\n version: Optional[str] = None, project: Optional[str] = None,\n tags: Optional[List[str]] = None, experiment=None, entity=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = 'allow' if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = experiment\n self._offline = offline\n self._entity = entity\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # cannot be pickled\n state['_experiment'] = None\n # args needed to reload correct experiment\n state['_id'] = self.experiment.id\n return state\n\n @property\n def experiment(self) -> Run:\n r\"\"\"\n\n Actual wandb object. 
To use wandb features do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n id=self._id, resume='allow', tags=self._tags, entity=self._entity)\n return self._experiment\n\n def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):\n wandb.watch(model, log=log, log_freq=log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n self.experiment.config.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n if step is not None:\n metrics['global_step'] = step\n self.experiment.log(metrics)\n\n @rank_zero_only\n def finalize(self, status: str = 'success') -> None:\n try:\n exit_code = 0 if status == 'success' else 1\n wandb.join(exit_code)\n except TypeError:\n wandb.join()\n\n @property\n def name(self) -> str:\n return self.experiment.project_name()\n\n @property\n def version(self) -> str:\n return self.experiment.id\n", "path": "pytorch_lightning/loggers/wandb.py"}], "after_files": [{"content": "r\"\"\"\n\n.. _wandb:\n\nWandbLogger\n-------------\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, List, Dict, Union, Any\n\nimport torch.nn as nn\n\ntry:\n import wandb\n from wandb.wandb_run import Run\nexcept ImportError: # pragma: no-cover\n raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover\n ' install it with `pip install wandb`.')\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Logger for `W&B <https://www.wandb.com/>`_.\n\n Args:\n name (str): display name for the run.\n save_dir (str): path where data is saved.\n offline (bool): run offline (data can be streamed later to wandb servers).\n id or version (str): sets the version, mainly used to resume a previous run.\n anonymous (bool): enables or explicitly disables anonymous logging.\n project (str): the name of the project to which this run will belong.\n tags (list of str): tags associated with this run.\n\n Example\n --------\n .. code-block:: python\n\n from pytorch_lightning.loggers import WandbLogger\n from pytorch_lightning import Trainer\n\n wandb_logger = WandbLogger()\n trainer = Trainer(logger=wandb_logger)\n \"\"\"\n\n def __init__(self, name: Optional[str] = None, save_dir: Optional[str] = None,\n offline: bool = False, id: Optional[str] = None, anonymous: bool = False,\n version: Optional[str] = None, project: Optional[str] = None,\n tags: Optional[List[str]] = None, experiment=None, entity=None):\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = 'allow' if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = experiment\n self._offline = offline\n self._entity = entity\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # cannot be pickled\n state['_experiment'] = None\n # args needed to reload correct experiment\n state['_id'] = self.experiment.id\n return state\n\n @property\n def experiment(self) -> Run:\n r\"\"\"\n\n Actual wandb object. 
To use wandb features do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n id=self._id, resume='allow', tags=self._tags, entity=self._entity)\n return self._experiment\n\n def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):\n wandb.watch(model, log=log, log_freq=log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n self.experiment.config.update(params)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n if step is not None:\n metrics['global_step'] = step\n self.experiment.log(metrics)\n\n @property\n def name(self) -> str:\n return self.experiment.project_name()\n\n @property\n def version(self) -> str:\n return self.experiment.id\n", "path": "pytorch_lightning/loggers/wandb.py"}]}
| 1,522 | 168 |
gh_patches_debug_36338
|
rasdani/github-patches
|
git_diff
|
kedro-org__kedro-3199
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `--conf-source` option to `kedro ipython`
## Description
Right now we can pass `env` and `params` as options with `kedro ipython` / `%reload_kedro`. Add `--conf-source` option as well.
## Context
User question from slack :
> Speaking of kedro ipython :
Is there a way to specify /override the conf_source ? :slightly_smiling_face:
> My problem comes from the fact that we have a single pipeline / repo for all our clients.
To better enforce “tenant isolation”
I have organized conf and data as follow:
conf
└── client_A
├── base
└── local
└── client_B
├── base
└── local
data
├── client_A
│ └── 01_raw
│ └── 02_intermediate
│ └── ...
└── client_B
│ └── 01_raw
│ └── 02_intermediate
│ └── ...conf
I did so because I liked the idea of not being able to run the pipeline without being explicit about which client it should be done for…
(the structure above makes it so that kedro run without --conf-source will raise an error)
Another reason I did so (if I recall correctly) was to avoid having “duplicate keys” conflicts in the different yaml files across clients…
## Possible Implementation
Changes to be made in `kedro/ipython/__init__.py`
PR for when `--conf-source` was added to `kedro run` - https://github.com/kedro-org/kedro/pull/2117
--- END ISSUE ---
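For illustration, the call pattern the request implies, assuming the new option is wired through exactly like `--env` and `--params` (the flag spelling mirrors `kedro run --conf-source`, and the `conf/client_A` path is the example from the issue, not a real project):

```python
# From a shell, once supported (hypothetical until the option lands):
#   kedro ipython --conf-source=conf/client_A
#
# Inside an already running IPython session, via the line magic:
#   %reload_kedro . --env local --conf-source conf/client_A
#
# Equivalent direct call against the underlying helper, assuming a
# `conf_source` keyword is added alongside `env` and `extra_params`:
from kedro.ipython import reload_kedro

reload_kedro(path=".", env="local", conf_source="conf/client_A")
```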
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kedro/ipython/__init__.py`
Content:
```
1 """
2 This script creates an IPython extension to load Kedro-related variables in
3 local scope.
4 """
5 from __future__ import annotations
6
7 import logging
8 import sys
9 from pathlib import Path
10 from typing import Any
11
12 from IPython import get_ipython
13 from IPython.core.magic import needs_local_scope, register_line_magic
14 from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
15
16 from kedro.framework.cli import load_entry_points
17 from kedro.framework.cli.project import PARAMS_ARG_HELP
18 from kedro.framework.cli.utils import ENV_HELP, _split_params
19 from kedro.framework.project import (
20 LOGGING, # noqa
21 configure_project,
22 pipelines,
23 )
24 from kedro.framework.session import KedroSession
25 from kedro.framework.startup import _is_project, bootstrap_project
26
27 logger = logging.getLogger(__name__)
28
29
30 def load_ipython_extension(ipython):
31 """
32 Main entry point when %load_ext kedro.ipython is executed, either manually or
33 automatically through `kedro ipython` or `kedro jupyter lab/notebook`.
34 IPython will look for this function specifically.
35 See https://ipython.readthedocs.io/en/stable/config/extensions/index.html
36 """
37 ipython.register_magic_function(magic_reload_kedro, magic_name="reload_kedro")
38
39 if _find_kedro_project(Path.cwd()) is None:
40 logger.warning(
41 "Kedro extension was registered but couldn't find a Kedro project. "
42 "Make sure you run '%reload_kedro <project_root>'."
43 )
44 return
45
46 reload_kedro()
47
48
49 @needs_local_scope
50 @magic_arguments()
51 @argument(
52 "path",
53 type=str,
54 help=(
55 "Path to the project root directory. If not given, use the previously set"
56 "project root."
57 ),
58 nargs="?",
59 default=None,
60 )
61 @argument("-e", "--env", type=str, default=None, help=ENV_HELP)
62 @argument(
63 "--params",
64 type=lambda value: _split_params(None, None, value),
65 default=None,
66 help=PARAMS_ARG_HELP,
67 )
68 def magic_reload_kedro(line: str, local_ns: dict[str, Any] = None):
69 """
70 The `%reload_kedro` IPython line magic.
71 See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic # noqa: line-too-long
72 for more.
73 """
74 args = parse_argstring(magic_reload_kedro, line)
75 reload_kedro(args.path, args.env, args.params, local_ns)
76
77
78 def reload_kedro(
79 path: str = None,
80 env: str = None,
81 extra_params: dict[str, Any] = None,
82 local_namespace: dict[str, Any] | None = None,
83 ) -> None: # pragma: no cover
84 """Function that underlies the %reload_kedro Line magic. This should not be imported
85 or run directly but instead invoked through %reload_kedro."""
86
87 project_path = _resolve_project_path(path, local_namespace)
88
89 metadata = bootstrap_project(project_path)
90 _remove_cached_modules(metadata.package_name)
91 configure_project(metadata.package_name)
92
93 session = KedroSession.create(
94 metadata.package_name, project_path, env=env, extra_params=extra_params
95 )
96 context = session.load_context()
97 catalog = context.catalog
98
99 get_ipython().push(
100 variables={
101 "context": context,
102 "catalog": catalog,
103 "session": session,
104 "pipelines": pipelines,
105 }
106 )
107
108 logger.info("Kedro project %s", str(metadata.project_name))
109 logger.info(
110 "Defined global variable 'context', 'session', 'catalog' and 'pipelines'"
111 )
112
113 for line_magic in load_entry_points("line_magic"):
114 register_line_magic(needs_local_scope(line_magic))
115 logger.info("Registered line magic '%s'", line_magic.__name__) # type: ignore
116
117
118 def _resolve_project_path(
119 path: str | None = None, local_namespace: dict[str, Any] | None = None
120 ) -> Path:
121 """
122 Resolve the project path to use with reload_kedro, updating or adding it
123 (in-place) to the local ipython Namespace (``local_namespace``) if necessary.
124
125 Arguments:
126 path: the path to use as a string object
127 local_namespace: Namespace with local variables of the scope where the line
128 magic is invoked in a dict.
129 """
130 if path:
131 project_path = Path(path).expanduser().resolve()
132 else:
133 if local_namespace and "context" in local_namespace:
134 # noqa: protected-access
135 project_path = local_namespace["context"]._project_path
136 else:
137 project_path = _find_kedro_project(Path.cwd())
138 if project_path:
139 logger.info(
140 "Resolved project path as: %s.\nTo set a different path, run "
141 "'%%reload_kedro <project_root>'",
142 project_path,
143 )
144
145 # noqa: protected-access
146 if (
147 project_path
148 and local_namespace
149 and "context" in local_namespace
150 and project_path != local_namespace["context"]._project_path
151 ):
152 logger.info("Updating path to Kedro project: %s...", project_path)
153
154 return project_path
155
156
157 def _remove_cached_modules(package_name): # pragma: no cover
158 to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]
159 # `del` is used instead of `reload()` because: If the new version of a module does not
160 # define a name that was defined by the old version, the old definition remains.
161 for module in to_remove:
162 del sys.modules[module]
163
164
165 def _find_kedro_project(current_dir: Path): # pragma: no cover
166 while current_dir != current_dir.parent:
167 if _is_project(current_dir):
168 return current_dir
169 current_dir = current_dir.parent
170
171 return None
172
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kedro/ipython/__init__.py b/kedro/ipython/__init__.py
--- a/kedro/ipython/__init__.py
+++ b/kedro/ipython/__init__.py
@@ -14,7 +14,7 @@
from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring
from kedro.framework.cli import load_entry_points
-from kedro.framework.cli.project import PARAMS_ARG_HELP
+from kedro.framework.cli.project import CONF_SOURCE_HELP, PARAMS_ARG_HELP
from kedro.framework.cli.utils import ENV_HELP, _split_params
from kedro.framework.project import (
LOGGING, # noqa
@@ -65,14 +65,17 @@
default=None,
help=PARAMS_ARG_HELP,
)
-def magic_reload_kedro(line: str, local_ns: dict[str, Any] = None):
+@argument("--conf-source", type=str, default=None, help=CONF_SOURCE_HELP)
+def magic_reload_kedro(
+ line: str, local_ns: dict[str, Any] = None, conf_source: str = None
+):
"""
The `%reload_kedro` IPython line magic.
See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic # noqa: line-too-long
for more.
"""
args = parse_argstring(magic_reload_kedro, line)
- reload_kedro(args.path, args.env, args.params, local_ns)
+ reload_kedro(args.path, args.env, args.params, local_ns, args.conf_source)
def reload_kedro(
@@ -80,6 +83,7 @@
env: str = None,
extra_params: dict[str, Any] = None,
local_namespace: dict[str, Any] | None = None,
+ conf_source: str = None,
) -> None: # pragma: no cover
"""Function that underlies the %reload_kedro Line magic. This should not be imported
or run directly but instead invoked through %reload_kedro."""
@@ -91,7 +95,11 @@
configure_project(metadata.package_name)
session = KedroSession.create(
- metadata.package_name, project_path, env=env, extra_params=extra_params
+ metadata.package_name,
+ project_path,
+ env=env,
+ extra_params=extra_params,
+ conf_source=conf_source,
)
context = session.load_context()
catalog = context.catalog
|
{"golden_diff": "diff --git a/kedro/ipython/__init__.py b/kedro/ipython/__init__.py\n--- a/kedro/ipython/__init__.py\n+++ b/kedro/ipython/__init__.py\n@@ -14,7 +14,7 @@\n from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n \n from kedro.framework.cli import load_entry_points\n-from kedro.framework.cli.project import PARAMS_ARG_HELP\n+from kedro.framework.cli.project import CONF_SOURCE_HELP, PARAMS_ARG_HELP\n from kedro.framework.cli.utils import ENV_HELP, _split_params\n from kedro.framework.project import (\n LOGGING, # noqa\n@@ -65,14 +65,17 @@\n default=None,\n help=PARAMS_ARG_HELP,\n )\n-def magic_reload_kedro(line: str, local_ns: dict[str, Any] = None):\n+@argument(\"--conf-source\", type=str, default=None, help=CONF_SOURCE_HELP)\n+def magic_reload_kedro(\n+ line: str, local_ns: dict[str, Any] = None, conf_source: str = None\n+):\n \"\"\"\n The `%reload_kedro` IPython line magic.\n See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic # noqa: line-too-long\n for more.\n \"\"\"\n args = parse_argstring(magic_reload_kedro, line)\n- reload_kedro(args.path, args.env, args.params, local_ns)\n+ reload_kedro(args.path, args.env, args.params, local_ns, args.conf_source)\n \n \n def reload_kedro(\n@@ -80,6 +83,7 @@\n env: str = None,\n extra_params: dict[str, Any] = None,\n local_namespace: dict[str, Any] | None = None,\n+ conf_source: str = None,\n ) -> None: # pragma: no cover\n \"\"\"Function that underlies the %reload_kedro Line magic. This should not be imported\n or run directly but instead invoked through %reload_kedro.\"\"\"\n@@ -91,7 +95,11 @@\n configure_project(metadata.package_name)\n \n session = KedroSession.create(\n- metadata.package_name, project_path, env=env, extra_params=extra_params\n+ metadata.package_name,\n+ project_path,\n+ env=env,\n+ extra_params=extra_params,\n+ conf_source=conf_source,\n )\n context = session.load_context()\n catalog = context.catalog\n", "issue": "Add `--conf-source` option to `kedro ipython`\n## Description\r\nRight now we can pass `env` and `params` as options with `kedro ipython` / `%reload_kedro`. Add `--conf-source` option as well.\r\n\r\n## Context\r\nUser question from slack : \r\n\r\n> Speaking of kedro ipython :\r\nIs there a way to specify /override the conf_source ? 
:slightly_smiling_face:\r\n\r\n> My problem comes from the fact that we have a single pipeline / repo for all our clients.\r\nTo better enforce \u201ctenant isolation\u201d\r\nI have organized conf and data as follow:\r\nconf\r\n\u2514\u2500\u2500 client_A\r\n \u251c\u2500\u2500 base\r\n \u2514\u2500\u2500 local\r\n\u2514\u2500\u2500 client_B\r\n \u251c\u2500\u2500 base\r\n \u2514\u2500\u2500 local\r\ndata\r\n\u251c\u2500\u2500 client_A\r\n\u2502 \u2514\u2500\u2500 01_raw\r\n\u2502 \u2514\u2500\u2500 02_intermediate\r\n\u2502 \u2514\u2500\u2500 ...\r\n\u2514\u2500\u2500 client_B\r\n\u2502 \u2514\u2500\u2500 01_raw\r\n\u2502 \u2514\u2500\u2500 02_intermediate\r\n\u2502 \u2514\u2500\u2500 ...conf\r\nI did so because I liked the idea of not being able to run the pipeline without being explicit about which client it should be done for\u2026\r\n(the structure above makes it so that kedro run without --conf-source will raise an error)\r\nAnother reason I did so (if I recall correctly) was to avoid having \u201cduplicate keys\u201d conflicts in the different yaml files across clients\u2026 \r\n\r\n## Possible Implementation\r\nChanges to be made in `kedro/ipython/__init__.py`\r\nPR for when `--conf-source` was added to `kedro run` - https://github.com/kedro-org/kedro/pull/2117\r\n\n", "before_files": [{"content": "\"\"\"\nThis script creates an IPython extension to load Kedro-related variables in\nlocal scope.\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any\n\nfrom IPython import get_ipython\nfrom IPython.core.magic import needs_local_scope, register_line_magic\nfrom IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n\nfrom kedro.framework.cli import load_entry_points\nfrom kedro.framework.cli.project import PARAMS_ARG_HELP\nfrom kedro.framework.cli.utils import ENV_HELP, _split_params\nfrom kedro.framework.project import (\n LOGGING, # noqa\n configure_project,\n pipelines,\n)\nfrom kedro.framework.session import KedroSession\nfrom kedro.framework.startup import _is_project, bootstrap_project\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_ipython_extension(ipython):\n \"\"\"\n Main entry point when %load_ext kedro.ipython is executed, either manually or\n automatically through `kedro ipython` or `kedro jupyter lab/notebook`.\n IPython will look for this function specifically.\n See https://ipython.readthedocs.io/en/stable/config/extensions/index.html\n \"\"\"\n ipython.register_magic_function(magic_reload_kedro, magic_name=\"reload_kedro\")\n\n if _find_kedro_project(Path.cwd()) is None:\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. \"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n return\n\n reload_kedro()\n\n\n@needs_local_scope\n@magic_arguments()\n@argument(\n \"path\",\n type=str,\n help=(\n \"Path to the project root directory. 
If not given, use the previously set\"\n \"project root.\"\n ),\n nargs=\"?\",\n default=None,\n)\n@argument(\"-e\", \"--env\", type=str, default=None, help=ENV_HELP)\n@argument(\n \"--params\",\n type=lambda value: _split_params(None, None, value),\n default=None,\n help=PARAMS_ARG_HELP,\n)\ndef magic_reload_kedro(line: str, local_ns: dict[str, Any] = None):\n \"\"\"\n The `%reload_kedro` IPython line magic.\n See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic # noqa: line-too-long\n for more.\n \"\"\"\n args = parse_argstring(magic_reload_kedro, line)\n reload_kedro(args.path, args.env, args.params, local_ns)\n\n\ndef reload_kedro(\n path: str = None,\n env: str = None,\n extra_params: dict[str, Any] = None,\n local_namespace: dict[str, Any] | None = None,\n) -> None: # pragma: no cover\n \"\"\"Function that underlies the %reload_kedro Line magic. This should not be imported\n or run directly but instead invoked through %reload_kedro.\"\"\"\n\n project_path = _resolve_project_path(path, local_namespace)\n\n metadata = bootstrap_project(project_path)\n _remove_cached_modules(metadata.package_name)\n configure_project(metadata.package_name)\n\n session = KedroSession.create(\n metadata.package_name, project_path, env=env, extra_params=extra_params\n )\n context = session.load_context()\n catalog = context.catalog\n\n get_ipython().push(\n variables={\n \"context\": context,\n \"catalog\": catalog,\n \"session\": session,\n \"pipelines\": pipelines,\n }\n )\n\n logger.info(\"Kedro project %s\", str(metadata.project_name))\n logger.info(\n \"Defined global variable 'context', 'session', 'catalog' and 'pipelines'\"\n )\n\n for line_magic in load_entry_points(\"line_magic\"):\n register_line_magic(needs_local_scope(line_magic))\n logger.info(\"Registered line magic '%s'\", line_magic.__name__) # type: ignore\n\n\ndef _resolve_project_path(\n path: str | None = None, local_namespace: dict[str, Any] | None = None\n) -> Path:\n \"\"\"\n Resolve the project path to use with reload_kedro, updating or adding it\n (in-place) to the local ipython Namespace (``local_namespace``) if necessary.\n\n Arguments:\n path: the path to use as a string object\n local_namespace: Namespace with local variables of the scope where the line\n magic is invoked in a dict.\n \"\"\"\n if path:\n project_path = Path(path).expanduser().resolve()\n else:\n if local_namespace and \"context\" in local_namespace:\n # noqa: protected-access\n project_path = local_namespace[\"context\"]._project_path\n else:\n project_path = _find_kedro_project(Path.cwd())\n if project_path:\n logger.info(\n \"Resolved project path as: %s.\\nTo set a different path, run \"\n \"'%%reload_kedro <project_root>'\",\n project_path,\n )\n\n # noqa: protected-access\n if (\n project_path\n and local_namespace\n and \"context\" in local_namespace\n and project_path != local_namespace[\"context\"]._project_path\n ):\n logger.info(\"Updating path to Kedro project: %s...\", project_path)\n\n return project_path\n\n\ndef _remove_cached_modules(package_name): # pragma: no cover\n to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]\n # `del` is used instead of `reload()` because: If the new version of a module does not\n # define a name that was defined by the old version, the old definition remains.\n for module in to_remove:\n del sys.modules[module]\n\n\ndef _find_kedro_project(current_dir: Path): # pragma: no cover\n while current_dir != current_dir.parent:\n if 
_is_project(current_dir):\n return current_dir\n current_dir = current_dir.parent\n\n return None\n", "path": "kedro/ipython/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis script creates an IPython extension to load Kedro-related variables in\nlocal scope.\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any\n\nfrom IPython import get_ipython\nfrom IPython.core.magic import needs_local_scope, register_line_magic\nfrom IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n\nfrom kedro.framework.cli import load_entry_points\nfrom kedro.framework.cli.project import CONF_SOURCE_HELP, PARAMS_ARG_HELP\nfrom kedro.framework.cli.utils import ENV_HELP, _split_params\nfrom kedro.framework.project import (\n LOGGING, # noqa\n configure_project,\n pipelines,\n)\nfrom kedro.framework.session import KedroSession\nfrom kedro.framework.startup import _is_project, bootstrap_project\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_ipython_extension(ipython):\n \"\"\"\n Main entry point when %load_ext kedro.ipython is executed, either manually or\n automatically through `kedro ipython` or `kedro jupyter lab/notebook`.\n IPython will look for this function specifically.\n See https://ipython.readthedocs.io/en/stable/config/extensions/index.html\n \"\"\"\n ipython.register_magic_function(magic_reload_kedro, magic_name=\"reload_kedro\")\n\n if _find_kedro_project(Path.cwd()) is None:\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. \"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n return\n\n reload_kedro()\n\n\n@needs_local_scope\n@magic_arguments()\n@argument(\n \"path\",\n type=str,\n help=(\n \"Path to the project root directory. If not given, use the previously set\"\n \"project root.\"\n ),\n nargs=\"?\",\n default=None,\n)\n@argument(\"-e\", \"--env\", type=str, default=None, help=ENV_HELP)\n@argument(\n \"--params\",\n type=lambda value: _split_params(None, None, value),\n default=None,\n help=PARAMS_ARG_HELP,\n)\n@argument(\"--conf-source\", type=str, default=None, help=CONF_SOURCE_HELP)\ndef magic_reload_kedro(\n line: str, local_ns: dict[str, Any] = None, conf_source: str = None\n):\n \"\"\"\n The `%reload_kedro` IPython line magic.\n See https://kedro.readthedocs.io/en/stable/notebooks_and_ipython/kedro_and_notebooks.html#reload-kedro-line-magic # noqa: line-too-long\n for more.\n \"\"\"\n args = parse_argstring(magic_reload_kedro, line)\n reload_kedro(args.path, args.env, args.params, local_ns, args.conf_source)\n\n\ndef reload_kedro(\n path: str = None,\n env: str = None,\n extra_params: dict[str, Any] = None,\n local_namespace: dict[str, Any] | None = None,\n conf_source: str = None,\n) -> None: # pragma: no cover\n \"\"\"Function that underlies the %reload_kedro Line magic. 
This should not be imported\n or run directly but instead invoked through %reload_kedro.\"\"\"\n\n project_path = _resolve_project_path(path, local_namespace)\n\n metadata = bootstrap_project(project_path)\n _remove_cached_modules(metadata.package_name)\n configure_project(metadata.package_name)\n\n session = KedroSession.create(\n metadata.package_name,\n project_path,\n env=env,\n extra_params=extra_params,\n conf_source=conf_source,\n )\n context = session.load_context()\n catalog = context.catalog\n\n get_ipython().push(\n variables={\n \"context\": context,\n \"catalog\": catalog,\n \"session\": session,\n \"pipelines\": pipelines,\n }\n )\n\n logger.info(\"Kedro project %s\", str(metadata.project_name))\n logger.info(\n \"Defined global variable 'context', 'session', 'catalog' and 'pipelines'\"\n )\n\n for line_magic in load_entry_points(\"line_magic\"):\n register_line_magic(needs_local_scope(line_magic))\n logger.info(\"Registered line magic '%s'\", line_magic.__name__) # type: ignore\n\n\ndef _resolve_project_path(\n path: str | None = None, local_namespace: dict[str, Any] | None = None\n) -> Path:\n \"\"\"\n Resolve the project path to use with reload_kedro, updating or adding it\n (in-place) to the local ipython Namespace (``local_namespace``) if necessary.\n\n Arguments:\n path: the path to use as a string object\n local_namespace: Namespace with local variables of the scope where the line\n magic is invoked in a dict.\n \"\"\"\n if path:\n project_path = Path(path).expanduser().resolve()\n else:\n if local_namespace and \"context\" in local_namespace:\n # noqa: protected-access\n project_path = local_namespace[\"context\"]._project_path\n else:\n project_path = _find_kedro_project(Path.cwd())\n if project_path:\n logger.info(\n \"Resolved project path as: %s.\\nTo set a different path, run \"\n \"'%%reload_kedro <project_root>'\",\n project_path,\n )\n\n # noqa: protected-access\n if (\n project_path\n and local_namespace\n and \"context\" in local_namespace\n and project_path != local_namespace[\"context\"]._project_path\n ):\n logger.info(\"Updating path to Kedro project: %s...\", project_path)\n\n return project_path\n\n\ndef _remove_cached_modules(package_name): # pragma: no cover\n to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]\n # `del` is used instead of `reload()` because: If the new version of a module does not\n # define a name that was defined by the old version, the old definition remains.\n for module in to_remove:\n del sys.modules[module]\n\n\ndef _find_kedro_project(current_dir: Path): # pragma: no cover\n while current_dir != current_dir.parent:\n if _is_project(current_dir):\n return current_dir\n current_dir = current_dir.parent\n\n return None\n", "path": "kedro/ipython/__init__.py"}]}
| 2,355 | 570 |
gh_patches_debug_35487
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-3129
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider jiffylube is broken
During the global build at 2021-09-01-14-42-16, spider **jiffylube** failed with **0 features** and **49 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/jiffylube.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/jiffylube.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/jiffylube.geojson))
--- END ISSUE ---
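A quick way to narrow a "0 features" run down is to call the endpoint the spider builds its requests from (URL template taken from the spider shown below; the expected response shape is an assumption, so this only checks whether the API still answers with JSON):

```python
import requests

# Same per-state URL template the spider uses; "AL" is an arbitrary example state.
url = "https://www.jiffylube.com/api/locations?state=AL"
resp = requests.get(url, headers={"Accept": "application/json"})

print(resp.status_code)
# If the endpoint has moved or now returns an error/HTML page, the body below
# will not be a JSON list of stores, which would explain the empty output.
print(resp.text[:500])
```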
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/jiffylube.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3
4 import scrapy
5
6 from locations.items import GeojsonPointItem
7 from locations.hours import OpeningHours
8
9
10 STATES = [
11 'AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FM', 'FL',
12 'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MH',
13 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM',
14 'NY', 'NC', 'ND', 'MP', 'OH', 'OK', 'OR', 'PW', 'PA', 'PR', 'RI', 'SC',
15 'SD', 'TN', 'TX', 'UT', 'VT', 'VI', 'VA', 'WA', 'WV', 'WI', 'WY'
16 ]
17
18 DAY_MAPPING = {
19 'Monday': 'Mo',
20 'Tuesday': 'Tu',
21 'Wednesday': 'We',
22 'Thursday': 'Th',
23 'Friday': 'Fr',
24 'Saturday': 'Sa',
25 'Sunday': 'Su'
26 }
27
28 class JiffyLubeSpider(scrapy.Spider):
29 name = "jiffylube"
30 item_attributes = {'brand': "Jiffy Lube"}
31 allowed_domains = ["www.jiffylube.com"]
32
33 def start_requests(self):
34 template = 'https://www.jiffylube.com/api/locations?state={state}'
35
36 headers = {
37 'Accept': 'application/json',
38 }
39
40 for state in STATES:
41 yield scrapy.http.FormRequest(
42 url=template.format(state=state),
43 method='GET',
44 headers=headers,
45 callback=self.parse
46 )
47 def parse(self, response):
48 jsonresponse = json.loads(response.body_as_unicode())
49
50 for stores in jsonresponse:
51 store = json.dumps(stores)
52 store_data = json.loads(store)
53
54 properties = {
55 'name': store_data["nickname"],
56 'ref': store_data["id"],
57 'addr_full': store_data["address"],
58 'city': store_data["city"],
59 'state': store_data["state"],
60 'postcode': store_data["postal_code"].strip(),
61 'country': store_data["country"],
62 'phone': store_data["phone_main"],
63 'lat': float(store_data["coordinates"]["latitude"]),
64 'lon': float(store_data["coordinates"]["longitude"]),
65 'website': "https://www.jiffylube.com{}".format(store_data["_links"]["_self"])
66 }
67
68 hours = store_data["hours_schema"]
69
70 if hours:
71 properties['opening_hours'] = self.process_hours(hours)
72
73 yield GeojsonPointItem(**properties)
74
75 def process_hours(self, hours):
76 opening_hours = OpeningHours()
77
78 for hour in hours:
79 day = hour["name"]
80 open_time = hour["time_open"]
81 close_time = hour["time_close"]
82
83 opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time,
84 time_format='%H:%M')
85 return opening_hours.as_opening_hours()
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/jiffylube.py b/locations/spiders/jiffylube.py
--- a/locations/spiders/jiffylube.py
+++ b/locations/spiders/jiffylube.py
@@ -29,30 +29,27 @@
name = "jiffylube"
item_attributes = {'brand': "Jiffy Lube"}
allowed_domains = ["www.jiffylube.com"]
+ start_urls = (
+ 'https://www.jiffylube.com/api/locations',
+ )
- def start_requests(self):
- template = 'https://www.jiffylube.com/api/locations?state={state}'
- headers = {
- 'Accept': 'application/json',
- }
-
- for state in STATES:
- yield scrapy.http.FormRequest(
- url=template.format(state=state),
- method='GET',
- headers=headers,
- callback=self.parse
- )
def parse(self, response):
- jsonresponse = json.loads(response.body_as_unicode())
+ stores = json.loads(response.text)
+
+
+ for store in stores:
+ store_url = "https://www.jiffylube.com/api" + store["_links"]["_self"]
+ yield scrapy.Request(
+ store_url,
+ callback=self.parse_store
+ )
- for stores in jsonresponse:
- store = json.dumps(stores)
- store_data = json.loads(store)
+
+ def parse_store(self, response):
+ store_data = json.loads(response.text)
properties = {
- 'name': store_data["nickname"],
'ref': store_data["id"],
'addr_full': store_data["address"],
'city': store_data["city"],
@@ -64,22 +61,5 @@
'lon': float(store_data["coordinates"]["longitude"]),
'website': "https://www.jiffylube.com{}".format(store_data["_links"]["_self"])
}
-
- hours = store_data["hours_schema"]
-
- if hours:
- properties['opening_hours'] = self.process_hours(hours)
-
+
yield GeojsonPointItem(**properties)
-
- def process_hours(self, hours):
- opening_hours = OpeningHours()
-
- for hour in hours:
- day = hour["name"]
- open_time = hour["time_open"]
- close_time = hour["time_close"]
-
- opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time,
- time_format='%H:%M')
- return opening_hours.as_opening_hours()
\ No newline at end of file
|
{"golden_diff": "diff --git a/locations/spiders/jiffylube.py b/locations/spiders/jiffylube.py\n--- a/locations/spiders/jiffylube.py\n+++ b/locations/spiders/jiffylube.py\n@@ -29,30 +29,27 @@\n name = \"jiffylube\"\n item_attributes = {'brand': \"Jiffy Lube\"}\n allowed_domains = [\"www.jiffylube.com\"]\n+ start_urls = (\n+ 'https://www.jiffylube.com/api/locations',\n+ )\n \n- def start_requests(self):\n- template = 'https://www.jiffylube.com/api/locations?state={state}'\n \n- headers = {\n- 'Accept': 'application/json',\n- }\n-\n- for state in STATES:\n- yield scrapy.http.FormRequest(\n- url=template.format(state=state),\n- method='GET',\n- headers=headers,\n- callback=self.parse\n- )\n def parse(self, response):\n- jsonresponse = json.loads(response.body_as_unicode())\n+ stores = json.loads(response.text)\n+ \n+\n+ for store in stores:\n+ store_url = \"https://www.jiffylube.com/api\" + store[\"_links\"][\"_self\"]\n+ yield scrapy.Request(\n+ store_url,\n+ callback=self.parse_store\n+ )\n \n- for stores in jsonresponse:\n- store = json.dumps(stores)\n- store_data = json.loads(store)\n+\n+ def parse_store(self, response):\n+ store_data = json.loads(response.text)\n \n properties = {\n- 'name': store_data[\"nickname\"],\n 'ref': store_data[\"id\"],\n 'addr_full': store_data[\"address\"],\n 'city': store_data[\"city\"],\n@@ -64,22 +61,5 @@\n 'lon': float(store_data[\"coordinates\"][\"longitude\"]),\n 'website': \"https://www.jiffylube.com{}\".format(store_data[\"_links\"][\"_self\"])\n }\n-\n- hours = store_data[\"hours_schema\"]\n-\n- if hours:\n- properties['opening_hours'] = self.process_hours(hours)\n-\n+ \n yield GeojsonPointItem(**properties)\n-\n- def process_hours(self, hours):\n- opening_hours = OpeningHours()\n-\n- for hour in hours:\n- day = hour[\"name\"]\n- open_time = hour[\"time_open\"]\n- close_time = hour[\"time_close\"]\n-\n- opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time,\n- time_format='%H:%M')\n- return opening_hours.as_opening_hours()\n\\ No newline at end of file\n", "issue": "Spider jiffylube is broken\nDuring the global build at 2021-09-01-14-42-16, spider **jiffylube** failed with **0 features** and **49 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/logs/jiffylube.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/jiffylube.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-01-14-42-16/output/jiffylube.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nSTATES = [\n 'AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FM', 'FL',\n 'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MH',\n 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM',\n 'NY', 'NC', 'ND', 'MP', 'OH', 'OK', 'OR', 'PW', 'PA', 'PR', 'RI', 'SC',\n 'SD', 'TN', 'TX', 'UT', 'VT', 'VI', 'VA', 'WA', 'WV', 'WI', 'WY'\n]\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\nclass JiffyLubeSpider(scrapy.Spider):\n name = \"jiffylube\"\n item_attributes = {'brand': \"Jiffy Lube\"}\n allowed_domains = [\"www.jiffylube.com\"]\n\n def start_requests(self):\n template = 'https://www.jiffylube.com/api/locations?state={state}'\n\n headers = {\n 'Accept': 
'application/json',\n }\n\n for state in STATES:\n yield scrapy.http.FormRequest(\n url=template.format(state=state),\n method='GET',\n headers=headers,\n callback=self.parse\n )\n def parse(self, response):\n jsonresponse = json.loads(response.body_as_unicode())\n\n for stores in jsonresponse:\n store = json.dumps(stores)\n store_data = json.loads(store)\n\n properties = {\n 'name': store_data[\"nickname\"],\n 'ref': store_data[\"id\"],\n 'addr_full': store_data[\"address\"],\n 'city': store_data[\"city\"],\n 'state': store_data[\"state\"],\n 'postcode': store_data[\"postal_code\"].strip(),\n 'country': store_data[\"country\"],\n 'phone': store_data[\"phone_main\"],\n 'lat': float(store_data[\"coordinates\"][\"latitude\"]),\n 'lon': float(store_data[\"coordinates\"][\"longitude\"]),\n 'website': \"https://www.jiffylube.com{}\".format(store_data[\"_links\"][\"_self\"])\n }\n\n hours = store_data[\"hours_schema\"]\n\n if hours:\n properties['opening_hours'] = self.process_hours(hours)\n\n yield GeojsonPointItem(**properties)\n\n def process_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n day = hour[\"name\"]\n open_time = hour[\"time_open\"]\n close_time = hour[\"time_close\"]\n\n opening_hours.add_range(day=DAY_MAPPING[day], open_time=open_time, close_time=close_time,\n time_format='%H:%M')\n return opening_hours.as_opening_hours()", "path": "locations/spiders/jiffylube.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nSTATES = [\n 'AL', 'AK', 'AS', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE', 'DC', 'FM', 'FL',\n 'GA', 'GU', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS', 'KY', 'LA', 'ME', 'MH',\n 'MD', 'MA', 'MI', 'MN', 'MS', 'MO', 'MT', 'NE', 'NV', 'NH', 'NJ', 'NM',\n 'NY', 'NC', 'ND', 'MP', 'OH', 'OK', 'OR', 'PW', 'PA', 'PR', 'RI', 'SC',\n 'SD', 'TN', 'TX', 'UT', 'VT', 'VI', 'VA', 'WA', 'WV', 'WI', 'WY'\n]\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\nclass JiffyLubeSpider(scrapy.Spider):\n name = \"jiffylube\"\n item_attributes = {'brand': \"Jiffy Lube\"}\n allowed_domains = [\"www.jiffylube.com\"]\n start_urls = (\n 'https://www.jiffylube.com/api/locations',\n )\n\n\n def parse(self, response):\n stores = json.loads(response.text)\n \n\n for store in stores:\n store_url = \"https://www.jiffylube.com/api\" + store[\"_links\"][\"_self\"]\n yield scrapy.Request(\n store_url,\n callback=self.parse_store\n )\n\n\n def parse_store(self, response):\n store_data = json.loads(response.text)\n\n properties = {\n 'ref': store_data[\"id\"],\n 'addr_full': store_data[\"address\"],\n 'city': store_data[\"city\"],\n 'state': store_data[\"state\"],\n 'postcode': store_data[\"postal_code\"].strip(),\n 'country': store_data[\"country\"],\n 'phone': store_data[\"phone_main\"],\n 'lat': float(store_data[\"coordinates\"][\"latitude\"]),\n 'lon': float(store_data[\"coordinates\"][\"longitude\"]),\n 'website': \"https://www.jiffylube.com{}\".format(store_data[\"_links\"][\"_self\"])\n }\n \n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/jiffylube.py"}]}
| 1,318 | 586 |
gh_patches_debug_11677
|
rasdani/github-patches
|
git_diff
|
pyqtgraph__pyqtgraph-1166
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pg.mkQApp generates warning
### Short description
`pg.mkQApp()` generates warning `QSettings::value: Empty key passed`
### Code to reproduce
```python
import pyqtgraph as pg
pg.mkQApp()
```
### Expected behavior
No output
### Real behavior
Output to console:
```
QSettings::value: Empty key passed
QSettings::value: Empty key passed
```
### Tested environment(s)
* PyQtGraph version: Latest commit from `develop` ( 6f4048596b080f71eaec544555159cb7c40c075f )
* Qt Python binding: PyQt5 5.14.1 Qt 5.14.1
* Python version: 3.8.2
* NumPy version: 1.18.2
* Operating system: Linux 5.5.7-1-MANJARO x86_64 19.0.2 Kyria
* Installation method: Executed from source package
### Additional context
This is because `mkQApp` calls `QApplication([])`, which according to its documentation should never be called without arguments: See warning in https://doc.qt.io/qt-5/qapplication.html#QApplication
--- END ISSUE ---
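For reference, a construction pattern that satisfies the Qt warning cited above (a sketch only, not necessarily the fix pyqtgraph adopted; the fallback argv value is arbitrary):

```python
import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication.instance()
if app is None:
    # Qt derives the application name from argv[0]; with an empty list it appears
    # to query QSettings with empty keys, producing the warning in the report.
    # Passing sys.argv (or a non-empty dummy argv) avoids the empty-argument case.
    app = QtWidgets.QApplication(sys.argv or ["pyqtgraph"])
```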
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/Qt.py`
Content:
```
1 """
2 This module exists to smooth out some of the differences between PySide and PyQt4:
3
4 * Automatically import either PyQt4 or PySide depending on availability
5 * Allow to import QtCore/QtGui pyqtgraph.Qt without specifying which Qt wrapper
6 you want to use.
7 * Declare QtCore.Signal, .Slot in PyQt4
8 * Declare loadUiType function for Pyside
9
10 """
11
12 import os, sys, re, time
13
14 from .python2_3 import asUnicode
15
16 PYSIDE = 'PySide'
17 PYSIDE2 = 'PySide2'
18 PYQT4 = 'PyQt4'
19 PYQT5 = 'PyQt5'
20
21 QT_LIB = os.getenv('PYQTGRAPH_QT_LIB')
22
23 ## Automatically determine which Qt package to use (unless specified by
24 ## environment variable).
25 ## This is done by first checking to see whether one of the libraries
26 ## is already imported. If not, then attempt to import PyQt4, then PySide.
27 if QT_LIB is None:
28 libOrder = [PYQT4, PYSIDE, PYQT5, PYSIDE2]
29
30 for lib in libOrder:
31 if lib in sys.modules:
32 QT_LIB = lib
33 break
34
35 if QT_LIB is None:
36 for lib in libOrder:
37 try:
38 __import__(lib)
39 QT_LIB = lib
40 break
41 except ImportError:
42 pass
43
44 if QT_LIB is None:
45 raise Exception("PyQtGraph requires one of PyQt4, PyQt5, PySide or PySide2; none of these packages could be imported.")
46
47
48 class FailedImport(object):
49 """Used to defer ImportErrors until we are sure the module is needed.
50 """
51 def __init__(self, err):
52 self.err = err
53
54 def __getattr__(self, attr):
55 raise self.err
56
57
58 def _isQObjectAlive(obj):
59 """An approximation of PyQt's isQObjectAlive().
60 """
61 try:
62 if hasattr(obj, 'parent'):
63 obj.parent()
64 elif hasattr(obj, 'parentItem'):
65 obj.parentItem()
66 else:
67 raise Exception("Cannot determine whether Qt object %s is still alive." % obj)
68 except RuntimeError:
69 return False
70 else:
71 return True
72
73
74 # Make a loadUiType function like PyQt has
75
76 # Credit:
77 # http://stackoverflow.com/questions/4442286/python-code-genration-with-pyside-uic/14195313#14195313
78
79 class _StringIO(object):
80 """Alternative to built-in StringIO needed to circumvent unicode/ascii issues"""
81 def __init__(self):
82 self.data = []
83
84 def write(self, data):
85 self.data.append(data)
86
87 def getvalue(self):
88 return ''.join(map(asUnicode, self.data)).encode('utf8')
89
90
91 def _loadUiType(uiFile):
92 """
93 PySide lacks a "loadUiType" command like PyQt4's, so we have to convert
94 the ui file to py code in-memory first and then execute it in a
95 special frame to retrieve the form_class.
96
97 from stackoverflow: http://stackoverflow.com/a/14195313/3781327
98
99 seems like this might also be a legitimate solution, but I'm not sure
100 how to make PyQt4 and pyside look the same...
101 http://stackoverflow.com/a/8717832
102 """
103
104 if QT_LIB == "PYSIDE":
105 import pysideuic
106 else:
107 import pyside2uic as pysideuic
108 import xml.etree.ElementTree as xml
109
110 parsed = xml.parse(uiFile)
111 widget_class = parsed.find('widget').get('class')
112 form_class = parsed.find('class').text
113
114 with open(uiFile, 'r') as f:
115 o = _StringIO()
116 frame = {}
117
118 pysideuic.compileUi(f, o, indent=0)
119 pyc = compile(o.getvalue(), '<string>', 'exec')
120 exec(pyc, frame)
121
122 #Fetch the base_class and form class based on their type in the xml from designer
123 form_class = frame['Ui_%s'%form_class]
124 base_class = eval('QtGui.%s'%widget_class)
125
126 return form_class, base_class
127
128
129 if QT_LIB == PYSIDE:
130 from PySide import QtGui, QtCore
131
132 try:
133 from PySide import QtOpenGL
134 except ImportError as err:
135 QtOpenGL = FailedImport(err)
136 try:
137 from PySide import QtSvg
138 except ImportError as err:
139 QtSvg = FailedImport(err)
140
141 try:
142 from PySide import QtTest
143 except ImportError as err:
144 QtTest = FailedImport(err)
145
146 try:
147 from PySide import shiboken
148 isQObjectAlive = shiboken.isValid
149 except ImportError:
150 # use approximate version
151 isQObjectAlive = _isQObjectAlive
152
153 import PySide
154 VERSION_INFO = 'PySide ' + PySide.__version__ + ' Qt ' + QtCore.__version__
155
156 elif QT_LIB == PYQT4:
157 from PyQt4 import QtGui, QtCore, uic
158 try:
159 from PyQt4 import QtSvg
160 except ImportError as err:
161 QtSvg = FailedImport(err)
162 try:
163 from PyQt4 import QtOpenGL
164 except ImportError as err:
165 QtOpenGL = FailedImport(err)
166 try:
167 from PyQt4 import QtTest
168 except ImportError as err:
169 QtTest = FailedImport(err)
170
171 VERSION_INFO = 'PyQt4 ' + QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR
172
173 elif QT_LIB == PYQT5:
174 # We're using PyQt5 which has a different structure so we're going to use a shim to
175 # recreate the Qt4 structure for Qt5
176 from PyQt5 import QtGui, QtCore, QtWidgets, uic
177
178 # PyQt5, starting in v5.5, calls qAbort when an exception is raised inside
179 # a slot. To maintain backward compatibility (and sanity for interactive
180 # users), we install a global exception hook to override this behavior.
181 ver = QtCore.PYQT_VERSION_STR.split('.')
182 if int(ver[1]) >= 5:
183 if sys.excepthook == sys.__excepthook__:
184 sys_excepthook = sys.excepthook
185 def pyqt5_qabort_override(*args, **kwds):
186 return sys_excepthook(*args, **kwds)
187 sys.excepthook = pyqt5_qabort_override
188
189 try:
190 from PyQt5 import QtSvg
191 except ImportError as err:
192 QtSvg = FailedImport(err)
193 try:
194 from PyQt5 import QtOpenGL
195 except ImportError as err:
196 QtOpenGL = FailedImport(err)
197 try:
198 from PyQt5 import QtTest
199 QtTest.QTest.qWaitForWindowShown = QtTest.QTest.qWaitForWindowExposed
200 except ImportError as err:
201 QtTest = FailedImport(err)
202
203 VERSION_INFO = 'PyQt5 ' + QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR
204
205 elif QT_LIB == PYSIDE2:
206 from PySide2 import QtGui, QtCore, QtWidgets
207
208 try:
209 from PySide2 import QtSvg
210 except ImportError as err:
211 QtSvg = FailedImport(err)
212 try:
213 from PySide2 import QtOpenGL
214 except ImportError as err:
215 QtOpenGL = FailedImport(err)
216 try:
217 from PySide2 import QtTest
218 QtTest.QTest.qWaitForWindowShown = QtTest.QTest.qWaitForWindowExposed
219 except ImportError as err:
220 QtTest = FailedImport(err)
221
222 try:
223 import shiboken2
224 isQObjectAlive = shiboken2.isValid
225 except ImportError:
226 # use approximate version
227 isQObjectAlive = _isQObjectAlive
228 import PySide2
229 VERSION_INFO = 'PySide2 ' + PySide2.__version__ + ' Qt ' + QtCore.__version__
230
231 else:
232 raise ValueError("Invalid Qt lib '%s'" % QT_LIB)
233
234
235 # common to PyQt5 and PySide2
236 if QT_LIB in [PYQT5, PYSIDE2]:
237 # We're using Qt5 which has a different structure so we're going to use a shim to
238 # recreate the Qt4 structure
239
240 __QGraphicsItem_scale = QtWidgets.QGraphicsItem.scale
241
242 def scale(self, *args):
243 if args:
244 sx, sy = args
245 tr = self.transform()
246 tr.scale(sx, sy)
247 self.setTransform(tr)
248 else:
249 return __QGraphicsItem_scale(self)
250
251 QtWidgets.QGraphicsItem.scale = scale
252
253 def rotate(self, angle):
254 tr = self.transform()
255 tr.rotate(angle)
256 self.setTransform(tr)
257 QtWidgets.QGraphicsItem.rotate = rotate
258
259 def translate(self, dx, dy):
260 tr = self.transform()
261 tr.translate(dx, dy)
262 self.setTransform(tr)
263 QtWidgets.QGraphicsItem.translate = translate
264
265 def setMargin(self, i):
266 self.setContentsMargins(i, i, i, i)
267 QtWidgets.QGridLayout.setMargin = setMargin
268
269 def setResizeMode(self, *args):
270 self.setSectionResizeMode(*args)
271 QtWidgets.QHeaderView.setResizeMode = setResizeMode
272
273
274 QtGui.QApplication = QtWidgets.QApplication
275 QtGui.QGraphicsScene = QtWidgets.QGraphicsScene
276 QtGui.QGraphicsObject = QtWidgets.QGraphicsObject
277 QtGui.QGraphicsWidget = QtWidgets.QGraphicsWidget
278
279 QtGui.QApplication.setGraphicsSystem = None
280
281 # Import all QtWidgets objects into QtGui
282 for o in dir(QtWidgets):
283 if o.startswith('Q'):
284 setattr(QtGui, o, getattr(QtWidgets,o) )
285
286
287 # Common to PySide and PySide2
288 if QT_LIB in [PYSIDE, PYSIDE2]:
289 QtVersion = QtCore.__version__
290 loadUiType = _loadUiType
291
292 # PySide does not implement qWait
293 if not isinstance(QtTest, FailedImport):
294 if not hasattr(QtTest.QTest, 'qWait'):
295 @staticmethod
296 def qWait(msec):
297 start = time.time()
298 QtGui.QApplication.processEvents()
299 while time.time() < start + msec * 0.001:
300 QtGui.QApplication.processEvents()
301 QtTest.QTest.qWait = qWait
302
303
304 # Common to PyQt4 and 5
305 if QT_LIB in [PYQT4, PYQT5]:
306 QtVersion = QtCore.QT_VERSION_STR
307
308 import sip
309 def isQObjectAlive(obj):
310 return not sip.isdeleted(obj)
311
312 loadUiType = uic.loadUiType
313
314 QtCore.Signal = QtCore.pyqtSignal
315
316
317 # USE_XXX variables are deprecated
318 USE_PYSIDE = QT_LIB == PYSIDE
319 USE_PYQT4 = QT_LIB == PYQT4
320 USE_PYQT5 = QT_LIB == PYQT5
321
322
323 ## Make sure we have Qt >= 4.7
324 versionReq = [4, 7]
325 m = re.match(r'(\d+)\.(\d+).*', QtVersion)
326 if m is not None and list(map(int, m.groups())) < versionReq:
327 print(list(map(int, m.groups())))
328 raise Exception('pyqtgraph requires Qt version >= %d.%d (your version is %s)' % (versionReq[0], versionReq[1], QtVersion))
329
330
331 QAPP = None
332 def mkQApp():
333 global QAPP
334 QAPP = QtGui.QApplication.instance()
335 if QAPP is None:
336 QAPP = QtGui.QApplication([])
337 return QAPP
338
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pyqtgraph/Qt.py b/pyqtgraph/Qt.py
--- a/pyqtgraph/Qt.py
+++ b/pyqtgraph/Qt.py
@@ -329,9 +329,18 @@
QAPP = None
-def mkQApp():
- global QAPP
+def mkQApp(name="pyqtgraph", qt_args=[]):
+ """
+ Creates new QApplication or returns current instance if existing.
+
+ ============== =================================================================================
+ **Arguments:**
+ name Application name, passed to Qt
+ qt_args Array of command line arguments passed to Qt
+ ============== =================================================================================
+ """
+ global QAPP
QAPP = QtGui.QApplication.instance()
if QAPP is None:
- QAPP = QtGui.QApplication([])
+ QAPP = QtGui.QApplication([name] + qt_args)
return QAPP
|
{"golden_diff": "diff --git a/pyqtgraph/Qt.py b/pyqtgraph/Qt.py\n--- a/pyqtgraph/Qt.py\n+++ b/pyqtgraph/Qt.py\n@@ -329,9 +329,18 @@\n \n \n QAPP = None\n-def mkQApp():\n- global QAPP \n+def mkQApp(name=\"pyqtgraph\", qt_args=[]):\n+ \"\"\"\n+ Creates new QApplication or returns current instance if existing.\n+ \n+ ============== =================================================================================\n+ **Arguments:**\n+ name Application name, passed to Qt\n+ qt_args Array of command line arguments passed to Qt\n+ ============== =================================================================================\n+ \"\"\"\n+ global QAPP\n QAPP = QtGui.QApplication.instance()\n if QAPP is None:\n- QAPP = QtGui.QApplication([])\n+ QAPP = QtGui.QApplication([name] + qt_args)\n return QAPP\n", "issue": "pg.mkQApp generates warning\n<!-- In the following, please describe your issue in detail! -->\r\n<!-- If some of the sections do not apply, just remove them. -->\r\n\r\n### Short description\r\n`pg.mkQApp()` generates warning `QSettings::value: Empty key passed`\r\n\r\n### Code to reproduce\r\n```python\r\nimport pyqtgraph as pg\r\npg.mkQApp()\r\n```\r\n\r\n### Expected behavior\r\nNo output\r\n\r\n### Real behavior\r\n<!-- What happens? -->\r\nOutput to console:\r\n```\r\nQSettings::value: Empty key passed\r\nQSettings::value: Empty key passed\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: Latest commit from `develop` ( 6f4048596b080f71eaec544555159cb7c40c075f )\r\n * Qt Python binding: PyQt5 5.14.1 Qt 5.14.1\r\n * Python version: 3.8.2\r\n * NumPy version: 1.18.2\r\n * Operating system: Linux 5.5.7-1-MANJARO x86_64 19.0.2 Kyria\r\n * Installation method: Executed from source package\r\n\r\n### Additional context\r\nThis is because `mkQApp` calls `QApplication([])`, which according to its documentation should never be called without arguments: See warning in https://doc.qt.io/qt-5/qapplication.html#QApplication\n", "before_files": [{"content": "\"\"\"\nThis module exists to smooth out some of the differences between PySide and PyQt4:\n\n* Automatically import either PyQt4 or PySide depending on availability\n* Allow to import QtCore/QtGui pyqtgraph.Qt without specifying which Qt wrapper\n you want to use.\n* Declare QtCore.Signal, .Slot in PyQt4\n* Declare loadUiType function for Pyside\n\n\"\"\"\n\nimport os, sys, re, time\n\nfrom .python2_3 import asUnicode\n\nPYSIDE = 'PySide'\nPYSIDE2 = 'PySide2'\nPYQT4 = 'PyQt4'\nPYQT5 = 'PyQt5'\n\nQT_LIB = os.getenv('PYQTGRAPH_QT_LIB')\n\n## Automatically determine which Qt package to use (unless specified by\n## environment variable).\n## This is done by first checking to see whether one of the libraries\n## is already imported. 
If not, then attempt to import PyQt4, then PySide.\nif QT_LIB is None:\n libOrder = [PYQT4, PYSIDE, PYQT5, PYSIDE2]\n\n for lib in libOrder:\n if lib in sys.modules:\n QT_LIB = lib\n break\n\nif QT_LIB is None:\n for lib in libOrder:\n try:\n __import__(lib)\n QT_LIB = lib\n break\n except ImportError:\n pass\n\nif QT_LIB is None:\n raise Exception(\"PyQtGraph requires one of PyQt4, PyQt5, PySide or PySide2; none of these packages could be imported.\")\n\n\nclass FailedImport(object):\n \"\"\"Used to defer ImportErrors until we are sure the module is needed.\n \"\"\"\n def __init__(self, err):\n self.err = err\n \n def __getattr__(self, attr):\n raise self.err\n\n\ndef _isQObjectAlive(obj):\n \"\"\"An approximation of PyQt's isQObjectAlive().\n \"\"\"\n try:\n if hasattr(obj, 'parent'):\n obj.parent()\n elif hasattr(obj, 'parentItem'):\n obj.parentItem()\n else:\n raise Exception(\"Cannot determine whether Qt object %s is still alive.\" % obj)\n except RuntimeError:\n return False\n else:\n return True\n\n\n# Make a loadUiType function like PyQt has\n\n# Credit:\n# http://stackoverflow.com/questions/4442286/python-code-genration-with-pyside-uic/14195313#14195313\n\nclass _StringIO(object):\n \"\"\"Alternative to built-in StringIO needed to circumvent unicode/ascii issues\"\"\"\n def __init__(self):\n self.data = []\n \n def write(self, data):\n self.data.append(data)\n \n def getvalue(self):\n return ''.join(map(asUnicode, self.data)).encode('utf8')\n\n \ndef _loadUiType(uiFile):\n \"\"\"\n PySide lacks a \"loadUiType\" command like PyQt4's, so we have to convert\n the ui file to py code in-memory first and then execute it in a\n special frame to retrieve the form_class.\n\n from stackoverflow: http://stackoverflow.com/a/14195313/3781327\n\n seems like this might also be a legitimate solution, but I'm not sure\n how to make PyQt4 and pyside look the same...\n http://stackoverflow.com/a/8717832\n \"\"\"\n\n if QT_LIB == \"PYSIDE\":\n import pysideuic\n else:\n import pyside2uic as pysideuic\n import xml.etree.ElementTree as xml\n\n parsed = xml.parse(uiFile)\n widget_class = parsed.find('widget').get('class')\n form_class = parsed.find('class').text\n \n with open(uiFile, 'r') as f:\n o = _StringIO()\n frame = {}\n\n pysideuic.compileUi(f, o, indent=0)\n pyc = compile(o.getvalue(), '<string>', 'exec')\n exec(pyc, frame)\n\n #Fetch the base_class and form class based on their type in the xml from designer\n form_class = frame['Ui_%s'%form_class]\n base_class = eval('QtGui.%s'%widget_class)\n\n return form_class, base_class\n\n\nif QT_LIB == PYSIDE:\n from PySide import QtGui, QtCore\n\n try:\n from PySide import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PySide import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n\n try:\n from PySide import QtTest\n except ImportError as err:\n QtTest = FailedImport(err)\n \n try:\n from PySide import shiboken\n isQObjectAlive = shiboken.isValid\n except ImportError:\n # use approximate version\n isQObjectAlive = _isQObjectAlive\n \n import PySide\n VERSION_INFO = 'PySide ' + PySide.__version__ + ' Qt ' + QtCore.__version__\n \nelif QT_LIB == PYQT4:\n from PyQt4 import QtGui, QtCore, uic\n try:\n from PyQt4 import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n try:\n from PyQt4 import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PyQt4 import QtTest\n except ImportError as err:\n QtTest = FailedImport(err)\n\n VERSION_INFO = 'PyQt4 ' + 
QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR\n\nelif QT_LIB == PYQT5:\n # We're using PyQt5 which has a different structure so we're going to use a shim to\n # recreate the Qt4 structure for Qt5\n from PyQt5 import QtGui, QtCore, QtWidgets, uic\n \n # PyQt5, starting in v5.5, calls qAbort when an exception is raised inside\n # a slot. To maintain backward compatibility (and sanity for interactive\n # users), we install a global exception hook to override this behavior.\n ver = QtCore.PYQT_VERSION_STR.split('.')\n if int(ver[1]) >= 5:\n if sys.excepthook == sys.__excepthook__:\n sys_excepthook = sys.excepthook\n def pyqt5_qabort_override(*args, **kwds):\n return sys_excepthook(*args, **kwds)\n sys.excepthook = pyqt5_qabort_override\n \n try:\n from PyQt5 import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n try:\n from PyQt5 import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PyQt5 import QtTest\n QtTest.QTest.qWaitForWindowShown = QtTest.QTest.qWaitForWindowExposed\n except ImportError as err:\n QtTest = FailedImport(err)\n\n VERSION_INFO = 'PyQt5 ' + QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR\n\nelif QT_LIB == PYSIDE2:\n from PySide2 import QtGui, QtCore, QtWidgets\n \n try:\n from PySide2 import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n try:\n from PySide2 import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PySide2 import QtTest\n QtTest.QTest.qWaitForWindowShown = QtTest.QTest.qWaitForWindowExposed\n except ImportError as err:\n QtTest = FailedImport(err)\n\n try:\n import shiboken2\n isQObjectAlive = shiboken2.isValid\n except ImportError:\n # use approximate version\n isQObjectAlive = _isQObjectAlive \n import PySide2\n VERSION_INFO = 'PySide2 ' + PySide2.__version__ + ' Qt ' + QtCore.__version__\n\nelse:\n raise ValueError(\"Invalid Qt lib '%s'\" % QT_LIB)\n\n\n# common to PyQt5 and PySide2\nif QT_LIB in [PYQT5, PYSIDE2]:\n # We're using Qt5 which has a different structure so we're going to use a shim to\n # recreate the Qt4 structure\n \n __QGraphicsItem_scale = QtWidgets.QGraphicsItem.scale\n\n def scale(self, *args):\n if args:\n sx, sy = args\n tr = self.transform()\n tr.scale(sx, sy)\n self.setTransform(tr)\n else:\n return __QGraphicsItem_scale(self)\n\n QtWidgets.QGraphicsItem.scale = scale\n\n def rotate(self, angle):\n tr = self.transform()\n tr.rotate(angle)\n self.setTransform(tr)\n QtWidgets.QGraphicsItem.rotate = rotate\n\n def translate(self, dx, dy):\n tr = self.transform()\n tr.translate(dx, dy)\n self.setTransform(tr)\n QtWidgets.QGraphicsItem.translate = translate\n\n def setMargin(self, i):\n self.setContentsMargins(i, i, i, i)\n QtWidgets.QGridLayout.setMargin = setMargin\n\n def setResizeMode(self, *args):\n self.setSectionResizeMode(*args)\n QtWidgets.QHeaderView.setResizeMode = setResizeMode\n\n \n QtGui.QApplication = QtWidgets.QApplication\n QtGui.QGraphicsScene = QtWidgets.QGraphicsScene\n QtGui.QGraphicsObject = QtWidgets.QGraphicsObject\n QtGui.QGraphicsWidget = QtWidgets.QGraphicsWidget\n\n QtGui.QApplication.setGraphicsSystem = None\n \n # Import all QtWidgets objects into QtGui\n for o in dir(QtWidgets):\n if o.startswith('Q'):\n setattr(QtGui, o, getattr(QtWidgets,o) )\n \n\n# Common to PySide and PySide2\nif QT_LIB in [PYSIDE, PYSIDE2]:\n QtVersion = QtCore.__version__\n loadUiType = _loadUiType\n \n # PySide does not implement qWait\n if not isinstance(QtTest, FailedImport):\n if not hasattr(QtTest.QTest, 
'qWait'):\n @staticmethod\n def qWait(msec):\n start = time.time()\n QtGui.QApplication.processEvents()\n while time.time() < start + msec * 0.001:\n QtGui.QApplication.processEvents()\n QtTest.QTest.qWait = qWait\n\n\n# Common to PyQt4 and 5\nif QT_LIB in [PYQT4, PYQT5]:\n QtVersion = QtCore.QT_VERSION_STR\n \n import sip\n def isQObjectAlive(obj):\n return not sip.isdeleted(obj)\n \n loadUiType = uic.loadUiType\n\n QtCore.Signal = QtCore.pyqtSignal\n \n\n# USE_XXX variables are deprecated\nUSE_PYSIDE = QT_LIB == PYSIDE\nUSE_PYQT4 = QT_LIB == PYQT4\nUSE_PYQT5 = QT_LIB == PYQT5\n\n \n## Make sure we have Qt >= 4.7\nversionReq = [4, 7]\nm = re.match(r'(\\d+)\\.(\\d+).*', QtVersion)\nif m is not None and list(map(int, m.groups())) < versionReq:\n print(list(map(int, m.groups())))\n raise Exception('pyqtgraph requires Qt version >= %d.%d (your version is %s)' % (versionReq[0], versionReq[1], QtVersion))\n\n\nQAPP = None\ndef mkQApp():\n global QAPP \n QAPP = QtGui.QApplication.instance()\n if QAPP is None:\n QAPP = QtGui.QApplication([])\n return QAPP\n", "path": "pyqtgraph/Qt.py"}], "after_files": [{"content": "\"\"\"\nThis module exists to smooth out some of the differences between PySide and PyQt4:\n\n* Automatically import either PyQt4 or PySide depending on availability\n* Allow to import QtCore/QtGui pyqtgraph.Qt without specifying which Qt wrapper\n you want to use.\n* Declare QtCore.Signal, .Slot in PyQt4\n* Declare loadUiType function for Pyside\n\n\"\"\"\n\nimport os, sys, re, time\n\nfrom .python2_3 import asUnicode\n\nPYSIDE = 'PySide'\nPYSIDE2 = 'PySide2'\nPYQT4 = 'PyQt4'\nPYQT5 = 'PyQt5'\n\nQT_LIB = os.getenv('PYQTGRAPH_QT_LIB')\n\n## Automatically determine which Qt package to use (unless specified by\n## environment variable).\n## This is done by first checking to see whether one of the libraries\n## is already imported. 
If not, then attempt to import PyQt4, then PySide.\nif QT_LIB is None:\n libOrder = [PYQT4, PYSIDE, PYQT5, PYSIDE2]\n\n for lib in libOrder:\n if lib in sys.modules:\n QT_LIB = lib\n break\n\nif QT_LIB is None:\n for lib in libOrder:\n try:\n __import__(lib)\n QT_LIB = lib\n break\n except ImportError:\n pass\n\nif QT_LIB is None:\n raise Exception(\"PyQtGraph requires one of PyQt4, PyQt5, PySide or PySide2; none of these packages could be imported.\")\n\n\nclass FailedImport(object):\n \"\"\"Used to defer ImportErrors until we are sure the module is needed.\n \"\"\"\n def __init__(self, err):\n self.err = err\n \n def __getattr__(self, attr):\n raise self.err\n\n\ndef _isQObjectAlive(obj):\n \"\"\"An approximation of PyQt's isQObjectAlive().\n \"\"\"\n try:\n if hasattr(obj, 'parent'):\n obj.parent()\n elif hasattr(obj, 'parentItem'):\n obj.parentItem()\n else:\n raise Exception(\"Cannot determine whether Qt object %s is still alive.\" % obj)\n except RuntimeError:\n return False\n else:\n return True\n\n\n# Make a loadUiType function like PyQt has\n\n# Credit:\n# http://stackoverflow.com/questions/4442286/python-code-genration-with-pyside-uic/14195313#14195313\n\nclass _StringIO(object):\n \"\"\"Alternative to built-in StringIO needed to circumvent unicode/ascii issues\"\"\"\n def __init__(self):\n self.data = []\n \n def write(self, data):\n self.data.append(data)\n \n def getvalue(self):\n return ''.join(map(asUnicode, self.data)).encode('utf8')\n\n \ndef _loadUiType(uiFile):\n \"\"\"\n PySide lacks a \"loadUiType\" command like PyQt4's, so we have to convert\n the ui file to py code in-memory first and then execute it in a\n special frame to retrieve the form_class.\n\n from stackoverflow: http://stackoverflow.com/a/14195313/3781327\n\n seems like this might also be a legitimate solution, but I'm not sure\n how to make PyQt4 and pyside look the same...\n http://stackoverflow.com/a/8717832\n \"\"\"\n\n if QT_LIB == \"PYSIDE\":\n import pysideuic\n else:\n import pyside2uic as pysideuic\n import xml.etree.ElementTree as xml\n\n parsed = xml.parse(uiFile)\n widget_class = parsed.find('widget').get('class')\n form_class = parsed.find('class').text\n \n with open(uiFile, 'r') as f:\n o = _StringIO()\n frame = {}\n\n pysideuic.compileUi(f, o, indent=0)\n pyc = compile(o.getvalue(), '<string>', 'exec')\n exec(pyc, frame)\n\n #Fetch the base_class and form class based on their type in the xml from designer\n form_class = frame['Ui_%s'%form_class]\n base_class = eval('QtGui.%s'%widget_class)\n\n return form_class, base_class\n\n\nif QT_LIB == PYSIDE:\n from PySide import QtGui, QtCore\n\n try:\n from PySide import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PySide import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n\n try:\n from PySide import QtTest\n except ImportError as err:\n QtTest = FailedImport(err)\n \n try:\n from PySide import shiboken\n isQObjectAlive = shiboken.isValid\n except ImportError:\n # use approximate version\n isQObjectAlive = _isQObjectAlive\n \n import PySide\n VERSION_INFO = 'PySide ' + PySide.__version__ + ' Qt ' + QtCore.__version__\n \nelif QT_LIB == PYQT4:\n from PyQt4 import QtGui, QtCore, uic\n try:\n from PyQt4 import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n try:\n from PyQt4 import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PyQt4 import QtTest\n except ImportError as err:\n QtTest = FailedImport(err)\n\n VERSION_INFO = 'PyQt4 ' + 
QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR\n\nelif QT_LIB == PYQT5:\n # We're using PyQt5 which has a different structure so we're going to use a shim to\n # recreate the Qt4 structure for Qt5\n from PyQt5 import QtGui, QtCore, QtWidgets, uic\n \n # PyQt5, starting in v5.5, calls qAbort when an exception is raised inside\n # a slot. To maintain backward compatibility (and sanity for interactive\n # users), we install a global exception hook to override this behavior.\n ver = QtCore.PYQT_VERSION_STR.split('.')\n if int(ver[1]) >= 5:\n if sys.excepthook == sys.__excepthook__:\n sys_excepthook = sys.excepthook\n def pyqt5_qabort_override(*args, **kwds):\n return sys_excepthook(*args, **kwds)\n sys.excepthook = pyqt5_qabort_override\n \n try:\n from PyQt5 import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n try:\n from PyQt5 import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PyQt5 import QtTest\n QtTest.QTest.qWaitForWindowShown = QtTest.QTest.qWaitForWindowExposed\n except ImportError as err:\n QtTest = FailedImport(err)\n\n VERSION_INFO = 'PyQt5 ' + QtCore.PYQT_VERSION_STR + ' Qt ' + QtCore.QT_VERSION_STR\n\nelif QT_LIB == PYSIDE2:\n from PySide2 import QtGui, QtCore, QtWidgets\n \n try:\n from PySide2 import QtSvg\n except ImportError as err:\n QtSvg = FailedImport(err)\n try:\n from PySide2 import QtOpenGL\n except ImportError as err:\n QtOpenGL = FailedImport(err)\n try:\n from PySide2 import QtTest\n QtTest.QTest.qWaitForWindowShown = QtTest.QTest.qWaitForWindowExposed\n except ImportError as err:\n QtTest = FailedImport(err)\n\n try:\n import shiboken2\n isQObjectAlive = shiboken2.isValid\n except ImportError:\n # use approximate version\n isQObjectAlive = _isQObjectAlive \n import PySide2\n VERSION_INFO = 'PySide2 ' + PySide2.__version__ + ' Qt ' + QtCore.__version__\n\nelse:\n raise ValueError(\"Invalid Qt lib '%s'\" % QT_LIB)\n\n\n# common to PyQt5 and PySide2\nif QT_LIB in [PYQT5, PYSIDE2]:\n # We're using Qt5 which has a different structure so we're going to use a shim to\n # recreate the Qt4 structure\n \n __QGraphicsItem_scale = QtWidgets.QGraphicsItem.scale\n\n def scale(self, *args):\n if args:\n sx, sy = args\n tr = self.transform()\n tr.scale(sx, sy)\n self.setTransform(tr)\n else:\n return __QGraphicsItem_scale(self)\n\n QtWidgets.QGraphicsItem.scale = scale\n\n def rotate(self, angle):\n tr = self.transform()\n tr.rotate(angle)\n self.setTransform(tr)\n QtWidgets.QGraphicsItem.rotate = rotate\n\n def translate(self, dx, dy):\n tr = self.transform()\n tr.translate(dx, dy)\n self.setTransform(tr)\n QtWidgets.QGraphicsItem.translate = translate\n\n def setMargin(self, i):\n self.setContentsMargins(i, i, i, i)\n QtWidgets.QGridLayout.setMargin = setMargin\n\n def setResizeMode(self, *args):\n self.setSectionResizeMode(*args)\n QtWidgets.QHeaderView.setResizeMode = setResizeMode\n\n \n QtGui.QApplication = QtWidgets.QApplication\n QtGui.QGraphicsScene = QtWidgets.QGraphicsScene\n QtGui.QGraphicsObject = QtWidgets.QGraphicsObject\n QtGui.QGraphicsWidget = QtWidgets.QGraphicsWidget\n\n QtGui.QApplication.setGraphicsSystem = None\n \n # Import all QtWidgets objects into QtGui\n for o in dir(QtWidgets):\n if o.startswith('Q'):\n setattr(QtGui, o, getattr(QtWidgets,o) )\n \n\n# Common to PySide and PySide2\nif QT_LIB in [PYSIDE, PYSIDE2]:\n QtVersion = QtCore.__version__\n loadUiType = _loadUiType\n \n # PySide does not implement qWait\n if not isinstance(QtTest, FailedImport):\n if not hasattr(QtTest.QTest, 
'qWait'):\n @staticmethod\n def qWait(msec):\n start = time.time()\n QtGui.QApplication.processEvents()\n while time.time() < start + msec * 0.001:\n QtGui.QApplication.processEvents()\n QtTest.QTest.qWait = qWait\n\n\n# Common to PyQt4 and 5\nif QT_LIB in [PYQT4, PYQT5]:\n QtVersion = QtCore.QT_VERSION_STR\n \n import sip\n def isQObjectAlive(obj):\n return not sip.isdeleted(obj)\n \n loadUiType = uic.loadUiType\n\n QtCore.Signal = QtCore.pyqtSignal\n \n\n# USE_XXX variables are deprecated\nUSE_PYSIDE = QT_LIB == PYSIDE\nUSE_PYQT4 = QT_LIB == PYQT4\nUSE_PYQT5 = QT_LIB == PYQT5\n\n \n## Make sure we have Qt >= 4.7\nversionReq = [4, 7]\nm = re.match(r'(\\d+)\\.(\\d+).*', QtVersion)\nif m is not None and list(map(int, m.groups())) < versionReq:\n print(list(map(int, m.groups())))\n raise Exception('pyqtgraph requires Qt version >= %d.%d (your version is %s)' % (versionReq[0], versionReq[1], QtVersion))\n\n\nQAPP = None\ndef mkQApp(name=\"pyqtgraph\", qt_args=[]):\n \"\"\"\n Creates new QApplication or returns current instance if existing.\n \n ============== =================================================================================\n **Arguments:**\n name Application name, passed to Qt\n qt_args Array of command line arguments passed to Qt\n ============== =================================================================================\n \"\"\"\n global QAPP\n QAPP = QtGui.QApplication.instance()\n if QAPP is None:\n QAPP = QtGui.QApplication([name] + qt_args)\n return QAPP\n", "path": "pyqtgraph/Qt.py"}]}
| 4,024 | 199 |
gh_patches_debug_36148
|
rasdani/github-patches
|
git_diff
|
gratipay__gratipay.com-2917
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
use uuid instead of username for Bitbucket
**Update:** Bitbucket recently rolled out `uuid`s for their user API, so we can switch to that instead of dropping them. See [below](https://github.com/gratipay/gratipay.com/issues/1945#issuecomment-62242306).
---
In the long run we're considering dropping all authentication via OAuth and only allowing password authentication (#1052). In the short run we need to drop Bitbucket as an auth method, because Bitbucket gives us no immutable user id, so if someone deletes their Bitbucket account, an attacker could create a Bitbucket account with their username and take over their Gittip account. @bruceadams demonstrated this here: https://github.com/gittip/www.gittip.com/issues/1807#issuecomment-33314863.
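As a rough illustration of why the switch is possible, here is a sketch against the Bitbucket 2.0 user endpoint as it behaved at the time (the username `some-user` is a placeholder, and the exact response fields are assumed):
```python
import requests

# The 2.0 API returns an immutable `uuid` alongside the mutable `username`,
# so accounts can be keyed on the uuid and survive renames or delete/re-register attacks.
info = requests.get("https://api.bitbucket.org/2.0/users/some-user").json()
print(info.get("username"), info.get("uuid"))
```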
<bountysource-plugin>
---
Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1387030-drop-bitbucket-as-auth-method?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).
</bountysource-plugin>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/elsewhere/__init__.py`
Content:
```
1 """This subpackage contains functionality for working with accounts elsewhere.
2 """
3 from __future__ import division, print_function, unicode_literals
4
5 from collections import OrderedDict
6 from datetime import datetime
7 import hashlib
8 import json
9 import logging
10 from urllib import quote
11 import xml.etree.ElementTree as ET
12
13 from aspen import log, Response
14 from aspen.utils import to_age, utc
15 from requests_oauthlib import OAuth1Session, OAuth2Session
16
17 from gratipay.elsewhere._extractors import not_available
18
19
20 ACTIONS = {'opt-in', 'connect', 'lock', 'unlock'}
21 PLATFORMS = 'facebook google bitbucket bountysource github openstreetmap twitter venmo'.split()
22
23
24 class UnknownAccountElsewhere(Exception): pass
25
26
27 class PlatformRegistry(object):
28 """Registry of platforms we support connecting to Gratipay accounts.
29 """
30 def __init__(self, platforms):
31 self.__dict__ = OrderedDict((p.name, p) for p in platforms)
32
33 def __contains__(self, platform):
34 return platform.name in self.__dict__
35
36 def __iter__(self):
37 return iter(self.__dict__.values())
38
39
40 class UserInfo(object):
41 """A simple container for a user's info.
42
43 Accessing a non-existing attribute returns `None`.
44 """
45
46 def __init__(self, **kw):
47 self.__dict__.update(kw)
48
49 def __getattr__(self, key):
50 return self.__dict__.get(key, None)
51
52 def __setattr__(self, key, value):
53 if value is None:
54 self.__dict__.pop(key, None)
55 else:
56 self.__dict__[key] = value
57
58
59 class Platform(object):
60
61 allows_team_connect = False
62
63 # "x" stands for "extract"
64 x_user_info = not_available
65 x_user_id = not_available
66 x_user_name = not_available
67 x_display_name = not_available
68 x_email = not_available
69 x_gravatar_id = not_available
70 x_avatar_url = not_available
71 x_is_team = not_available
72
73 required_attrs = ( 'account_url'
74 , 'display_name'
75 , 'name'
76 )
77
78 def __init__(self, api_key, api_secret, callback_url, api_url=None, auth_url=None):
79 self.api_key = api_key
80 self.api_secret = api_secret
81 self.callback_url = callback_url
82 if api_url:
83 self.api_url = api_url
84 if auth_url:
85 self.auth_url = auth_url
86 elif not getattr(self, 'auth_url', None):
87 self.auth_url = self.api_url
88
89 # Determine the appropriate response parser using `self.api_format`
90 api_format = getattr(self, 'api_format', None)
91 if api_format == 'json':
92 self.api_parser = lambda r: r.json()
93 elif api_format == 'xml':
94 self.api_parser = lambda r: ET.fromstring(r.content)
95 elif api_format:
96 raise ValueError('unknown API format: '+str(api_format))
97
98 # Make sure the subclass was implemented properly.
99 missing_attrs = [a for a in self.required_attrs if not hasattr(self, a)]
100 if missing_attrs:
101 msg = "The class %s is missing these required attributes: %s"
102 msg %= self.__class__.__name__, ', '.join(missing_attrs)
103 raise AttributeError(msg)
104
105 def api_get(self, path, sess=None, **kw):
106 """
107 Given a `path` (e.g. /users/foo), this function sends a GET request to
108 the platform's API (e.g. https://api.github.com/users/foo).
109
110 The response is returned, after checking its status code and ratelimit
111 headers.
112 """
113 if not sess:
114 sess = self.get_auth_session()
115 response = sess.get(self.api_url+path, **kw)
116
117 # Check status
118 status = response.status_code
119 if status == 404:
120 raise Response(404)
121 elif status != 200:
122 log('{} api responded with {}:\n{}'.format(self.name, status, response.text)
123 , level=logging.ERROR)
124 raise Response(500, '{} lookup failed with {}'.format(self.name, status))
125
126 # Check ratelimit headers
127 prefix = getattr(self, 'ratelimit_headers_prefix', None)
128 if prefix:
129 limit = response.headers[prefix+'limit']
130 remaining = response.headers[prefix+'remaining']
131 reset = response.headers[prefix+'reset']
132 try:
133 limit, remaining, reset = int(limit), int(remaining), int(reset)
134 except (TypeError, ValueError):
135 d = dict(limit=limit, remaining=remaining, reset=reset)
136 log('Got weird rate headers from %s: %s' % (self.name, d))
137 else:
138 percent_remaining = remaining/limit
139 if percent_remaining < 0.5:
140 reset = to_age(datetime.fromtimestamp(reset, tz=utc))
141 log_msg = (
142 '{0} API: {1:.1%} of ratelimit has been consumed, '
143 '{2} requests remaining, resets {3}.'
144 ).format(self.name, 1 - percent_remaining, remaining, reset)
145 log_lvl = logging.WARNING
146 if percent_remaining < 0.2:
147 log_lvl = logging.ERROR
148 elif percent_remaining < 0.05:
149 log_lvl = logging.CRITICAL
150 log(log_msg, log_lvl)
151
152 return response
153
154 def extract_user_info(self, info):
155 """
156 Given a user_info object of variable type (depending on the platform),
157 extract the relevant information by calling the platform's extractors
158 (`x_user_name`, `x_user_id`, etc).
159
160 Returns a `UserInfo`. The `user_id` attribute is guaranteed to have a
161 unique non-empty value.
162 """
163 r = UserInfo(platform=self.name)
164 info = self.x_user_info(r, info, info)
165 r.user_name = self.x_user_name(r, info, None)
166 if self.x_user_id.__func__ is not_available:
167 r.user_id = r.user_name
168 else:
169 r.user_id = self.x_user_id(r, info)
170 assert r.user_id is not None
171 r.user_id = unicode(r.user_id)
172 assert len(r.user_id) > 0
173 r.display_name = self.x_display_name(r, info, None)
174 r.email = self.x_email(r, info, None)
175 r.avatar_url = self.x_avatar_url(r, info, None)
176 if not r.avatar_url:
177 gravatar_id = self.x_gravatar_id(r, info, None)
178 if r.email and not gravatar_id:
179 gravatar_id = hashlib.md5(r.email.strip().lower()).hexdigest()
180 if gravatar_id:
181 r.avatar_url = 'https://secure.gravatar.com/avatar/'+gravatar_id
182 r.is_team = self.x_is_team(r, info, False)
183 r.extra_info = info
184 return r
185
186 def get_team_members(self, team_name, page_url=None):
187 """Given a team_name on the platform, return the team's membership list
188 from the API.
189 """
190 default_url = self.api_team_members_path.format(user_name=quote(team_name))
191 r = self.api_get(page_url or default_url)
192 members, count, pages_urls = self.api_paginator(r, self.api_parser(r))
193 members = [self.extract_user_info(m) for m in members]
194 return members, count, pages_urls
195
196 def get_user_info(self, user_name, sess=None):
197 """Given a user_name on the platform, get the user's info from the API.
198 """
199 try:
200 path = self.api_user_info_path.format(user_name=quote(user_name))
201 except KeyError:
202 raise Response(404)
203 info = self.api_parser(self.api_get(path, sess=sess))
204 return self.extract_user_info(info)
205
206 def get_user_self_info(self, sess):
207 """Get the authenticated user's info from the API.
208 """
209 r = self.api_get(self.api_user_self_info_path, sess=sess)
210 info = self.extract_user_info(self.api_parser(r))
211 token = getattr(sess, 'token', None)
212 if token:
213 info.token = json.dumps(token)
214 return info
215
216
217 class PlatformOAuth1(Platform):
218
219 request_token_path = '/oauth/request_token'
220 authorize_path = '/oauth/authorize'
221 access_token_path = '/oauth/access_token'
222
223 def get_auth_session(self, token=None):
224 args = ()
225 if token:
226 args = (token['token'], token['token_secret'])
227 return OAuth1Session(self.api_key, self.api_secret, *args,
228 callback_uri=self.callback_url)
229
230 def get_auth_url(self, **kw):
231 sess = self.get_auth_session()
232 r = sess.fetch_request_token(self.auth_url+self.request_token_path)
233 url = sess.authorization_url(self.auth_url+self.authorize_path)
234 return url, r['oauth_token'], r['oauth_token_secret']
235
236 def get_query_id(self, querystring):
237 return querystring['oauth_token']
238
239 def handle_auth_callback(self, url, token, token_secret):
240 sess = self.get_auth_session(dict(token=token, token_secret=token_secret))
241 sess.parse_authorization_response(url)
242 r = sess.fetch_access_token(self.auth_url+self.access_token_path)
243 sess.token = dict(token=r['oauth_token'],
244 token_secret=r['oauth_token_secret'])
245 return sess
246
247
248 class PlatformOAuth2(Platform):
249
250 oauth_default_scope = None
251 oauth_email_scope = None
252 oauth_payment_scope = None
253
254 def get_auth_session(self, state=None, token=None, token_updater=None):
255 return OAuth2Session(self.api_key, state=state, token=token,
256 token_updater=token_updater,
257 redirect_uri=self.callback_url,
258 scope=self.oauth_default_scope)
259
260 def get_auth_url(self, **kw):
261 sess = self.get_auth_session()
262 url, state = sess.authorization_url(self.auth_url)
263 return url, state, ''
264
265 def get_query_id(self, querystring):
266 return querystring['state']
267
268 def handle_auth_callback(self, url, state, unused_arg):
269 sess = self.get_auth_session(state=state)
270 sess.fetch_token(self.access_token_url,
271 client_secret=self.api_secret,
272 authorization_response=url)
273 return sess
274
```
Path: `gratipay/elsewhere/bitbucket.py`
Content:
```
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from gratipay.elsewhere import PlatformOAuth1
4 from gratipay.elsewhere._extractors import any_key, key, not_available
5 from gratipay.elsewhere._paginators import keys_paginator
6
7
8 class Bitbucket(PlatformOAuth1):
9
10 # Platform attributes
11 name = 'bitbucket'
12 display_name = 'Bitbucket'
13 account_url = 'https://bitbucket.org/{user_name}'
14
15 # Auth attributes
16 auth_url = 'https://bitbucket.org/api/1.0'
17 authorize_path = '/oauth/authenticate'
18
19 # API attributes
20 api_format = 'json'
21 api_paginator = keys_paginator(prev='previous')
22 api_url = 'https://bitbucket.org/api'
23 api_user_info_path = '/1.0/users/{user_name}'
24 api_user_self_info_path = '/1.0/user'
25 api_team_members_path = '/2.0/teams/{user_name}/members'
26
27 # User info extractors
28 x_user_info = key('user')
29 x_user_id = not_available # No immutable id. :-/
30 x_user_name = key('username')
31 x_display_name = key('display_name')
32 x_email = not_available
33 x_avatar_url = any_key('avatar', ('links', 'avatar', 'href'))
34 x_is_team = key('is_team')
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gratipay/elsewhere/__init__.py b/gratipay/elsewhere/__init__.py
--- a/gratipay/elsewhere/__init__.py
+++ b/gratipay/elsewhere/__init__.py
@@ -117,7 +117,7 @@
# Check status
status = response.status_code
if status == 404:
- raise Response(404)
+ raise Response(404, response.text)
elif status != 200:
log('{} api responded with {}:\n{}'.format(self.name, status, response.text)
, level=logging.ERROR)
diff --git a/gratipay/elsewhere/bitbucket.py b/gratipay/elsewhere/bitbucket.py
--- a/gratipay/elsewhere/bitbucket.py
+++ b/gratipay/elsewhere/bitbucket.py
@@ -1,5 +1,6 @@
from __future__ import absolute_import, division, print_function, unicode_literals
+from aspen import Response
from gratipay.elsewhere import PlatformOAuth1
from gratipay.elsewhere._extractors import any_key, key, not_available
from gratipay.elsewhere._paginators import keys_paginator
@@ -20,15 +21,31 @@
api_format = 'json'
api_paginator = keys_paginator(prev='previous')
api_url = 'https://bitbucket.org/api'
- api_user_info_path = '/1.0/users/{user_name}'
- api_user_self_info_path = '/1.0/user'
+ api_user_info_path = '/2.0/users/{user_name}'
+ api_user_self_info_path = '/2.0/user'
api_team_members_path = '/2.0/teams/{user_name}/members'
# User info extractors
x_user_info = key('user')
- x_user_id = not_available # No immutable id. :-/
+ x_user_id = key('uuid')
x_user_name = key('username')
x_display_name = key('display_name')
x_email = not_available
x_avatar_url = any_key('avatar', ('links', 'avatar', 'href'))
- x_is_team = key('is_team')
+ x_is_team = key('type', lambda v: v == 'team')
+
+ def api_get(self, path, sess=None, **kw):
+ """Extend to manually retry /users/pypy as /teams/pypy.
+
+ Bitbucket gives us a 404 where a 30x would be more helpful.
+
+ """
+ try:
+ return PlatformOAuth1.api_get(self, path, sess, **kw)
+ except Response, response:
+ if response.code == 404 and ' is a team account' in response.body:
+ assert path.startswith('/2.0/users/')
+ path = '/2.0/teams/' + path[11:]
+ return PlatformOAuth1.api_get(self, path, sess, **kw)
+ else:
+ raise
|
{"golden_diff": "diff --git a/gratipay/elsewhere/__init__.py b/gratipay/elsewhere/__init__.py\n--- a/gratipay/elsewhere/__init__.py\n+++ b/gratipay/elsewhere/__init__.py\n@@ -117,7 +117,7 @@\n # Check status\n status = response.status_code\n if status == 404:\n- raise Response(404)\n+ raise Response(404, response.text)\n elif status != 200:\n log('{} api responded with {}:\\n{}'.format(self.name, status, response.text)\n , level=logging.ERROR)\ndiff --git a/gratipay/elsewhere/bitbucket.py b/gratipay/elsewhere/bitbucket.py\n--- a/gratipay/elsewhere/bitbucket.py\n+++ b/gratipay/elsewhere/bitbucket.py\n@@ -1,5 +1,6 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+from aspen import Response\n from gratipay.elsewhere import PlatformOAuth1\n from gratipay.elsewhere._extractors import any_key, key, not_available\n from gratipay.elsewhere._paginators import keys_paginator\n@@ -20,15 +21,31 @@\n api_format = 'json'\n api_paginator = keys_paginator(prev='previous')\n api_url = 'https://bitbucket.org/api'\n- api_user_info_path = '/1.0/users/{user_name}'\n- api_user_self_info_path = '/1.0/user'\n+ api_user_info_path = '/2.0/users/{user_name}'\n+ api_user_self_info_path = '/2.0/user'\n api_team_members_path = '/2.0/teams/{user_name}/members'\n \n # User info extractors\n x_user_info = key('user')\n- x_user_id = not_available # No immutable id. :-/\n+ x_user_id = key('uuid')\n x_user_name = key('username')\n x_display_name = key('display_name')\n x_email = not_available\n x_avatar_url = any_key('avatar', ('links', 'avatar', 'href'))\n- x_is_team = key('is_team')\n+ x_is_team = key('type', lambda v: v == 'team')\n+\n+ def api_get(self, path, sess=None, **kw):\n+ \"\"\"Extend to manually retry /users/pypy as /teams/pypy.\n+\n+ Bitbucket gives us a 404 where a 30x would be more helpful.\n+\n+ \"\"\"\n+ try:\n+ return PlatformOAuth1.api_get(self, path, sess, **kw)\n+ except Response, response:\n+ if response.code == 404 and ' is a team account' in response.body:\n+ assert path.startswith('/2.0/users/')\n+ path = '/2.0/teams/' + path[11:]\n+ return PlatformOAuth1.api_get(self, path, sess, **kw)\n+ else:\n+ raise\n", "issue": "use uuid instead of username for Bitbucket\n**Update:** Bitbucket recently rolled out `uuid`s for their user API, so we can switch to that instead of dropping them. See [below](https://github.com/gratipay/gratipay.com/issues/1945#issuecomment-62242306).\n\n---\n\nIn the long run we're considering dropping all authentication via OAuth and only allowing password authentication (#1052). In the short run we need to drop Bitbucket as an auth method, because Bitbucket gives us no immutable user id, so if someone deletes their Bitbucket account, an attacker could create a Bitbucket account with their username and take over their Gittip account. @bruceadams demonstrated this here: https://github.com/gittip/www.gittip.com/issues/1807#issuecomment-33314863.\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? 
**[Place a bounty on it!](https://www.bountysource.com/issues/1387030-drop-bitbucket-as-auth-method?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "\"\"\"This subpackage contains functionality for working with accounts elsewhere.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport hashlib\nimport json\nimport logging\nfrom urllib import quote\nimport xml.etree.ElementTree as ET\n\nfrom aspen import log, Response\nfrom aspen.utils import to_age, utc\nfrom requests_oauthlib import OAuth1Session, OAuth2Session\n\nfrom gratipay.elsewhere._extractors import not_available\n\n\nACTIONS = {'opt-in', 'connect', 'lock', 'unlock'}\nPLATFORMS = 'facebook google bitbucket bountysource github openstreetmap twitter venmo'.split()\n\n\nclass UnknownAccountElsewhere(Exception): pass\n\n\nclass PlatformRegistry(object):\n \"\"\"Registry of platforms we support connecting to Gratipay accounts.\n \"\"\"\n def __init__(self, platforms):\n self.__dict__ = OrderedDict((p.name, p) for p in platforms)\n\n def __contains__(self, platform):\n return platform.name in self.__dict__\n\n def __iter__(self):\n return iter(self.__dict__.values())\n\n\nclass UserInfo(object):\n \"\"\"A simple container for a user's info.\n\n Accessing a non-existing attribute returns `None`.\n \"\"\"\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n def __getattr__(self, key):\n return self.__dict__.get(key, None)\n\n def __setattr__(self, key, value):\n if value is None:\n self.__dict__.pop(key, None)\n else:\n self.__dict__[key] = value\n\n\nclass Platform(object):\n\n allows_team_connect = False\n\n # \"x\" stands for \"extract\"\n x_user_info = not_available\n x_user_id = not_available\n x_user_name = not_available\n x_display_name = not_available\n x_email = not_available\n x_gravatar_id = not_available\n x_avatar_url = not_available\n x_is_team = not_available\n\n required_attrs = ( 'account_url'\n , 'display_name'\n , 'name'\n )\n\n def __init__(self, api_key, api_secret, callback_url, api_url=None, auth_url=None):\n self.api_key = api_key\n self.api_secret = api_secret\n self.callback_url = callback_url\n if api_url:\n self.api_url = api_url\n if auth_url:\n self.auth_url = auth_url\n elif not getattr(self, 'auth_url', None):\n self.auth_url = self.api_url\n\n # Determine the appropriate response parser using `self.api_format`\n api_format = getattr(self, 'api_format', None)\n if api_format == 'json':\n self.api_parser = lambda r: r.json()\n elif api_format == 'xml':\n self.api_parser = lambda r: ET.fromstring(r.content)\n elif api_format:\n raise ValueError('unknown API format: '+str(api_format))\n\n # Make sure the subclass was implemented properly.\n missing_attrs = [a for a in self.required_attrs if not hasattr(self, a)]\n if missing_attrs:\n msg = \"The class %s is missing these required attributes: %s\"\n msg %= self.__class__.__name__, ', '.join(missing_attrs)\n raise AttributeError(msg)\n\n def api_get(self, path, sess=None, **kw):\n \"\"\"\n Given a `path` (e.g. /users/foo), this function sends a GET request to\n the platform's API (e.g. 
https://api.github.com/users/foo).\n\n The response is returned, after checking its status code and ratelimit\n headers.\n \"\"\"\n if not sess:\n sess = self.get_auth_session()\n response = sess.get(self.api_url+path, **kw)\n\n # Check status\n status = response.status_code\n if status == 404:\n raise Response(404)\n elif status != 200:\n log('{} api responded with {}:\\n{}'.format(self.name, status, response.text)\n , level=logging.ERROR)\n raise Response(500, '{} lookup failed with {}'.format(self.name, status))\n\n # Check ratelimit headers\n prefix = getattr(self, 'ratelimit_headers_prefix', None)\n if prefix:\n limit = response.headers[prefix+'limit']\n remaining = response.headers[prefix+'remaining']\n reset = response.headers[prefix+'reset']\n try:\n limit, remaining, reset = int(limit), int(remaining), int(reset)\n except (TypeError, ValueError):\n d = dict(limit=limit, remaining=remaining, reset=reset)\n log('Got weird rate headers from %s: %s' % (self.name, d))\n else:\n percent_remaining = remaining/limit\n if percent_remaining < 0.5:\n reset = to_age(datetime.fromtimestamp(reset, tz=utc))\n log_msg = (\n '{0} API: {1:.1%} of ratelimit has been consumed, '\n '{2} requests remaining, resets {3}.'\n ).format(self.name, 1 - percent_remaining, remaining, reset)\n log_lvl = logging.WARNING\n if percent_remaining < 0.2:\n log_lvl = logging.ERROR\n elif percent_remaining < 0.05:\n log_lvl = logging.CRITICAL\n log(log_msg, log_lvl)\n\n return response\n\n def extract_user_info(self, info):\n \"\"\"\n Given a user_info object of variable type (depending on the platform),\n extract the relevant information by calling the platform's extractors\n (`x_user_name`, `x_user_id`, etc).\n\n Returns a `UserInfo`. The `user_id` attribute is guaranteed to have a\n unique non-empty value.\n \"\"\"\n r = UserInfo(platform=self.name)\n info = self.x_user_info(r, info, info)\n r.user_name = self.x_user_name(r, info, None)\n if self.x_user_id.__func__ is not_available:\n r.user_id = r.user_name\n else:\n r.user_id = self.x_user_id(r, info)\n assert r.user_id is not None\n r.user_id = unicode(r.user_id)\n assert len(r.user_id) > 0\n r.display_name = self.x_display_name(r, info, None)\n r.email = self.x_email(r, info, None)\n r.avatar_url = self.x_avatar_url(r, info, None)\n if not r.avatar_url:\n gravatar_id = self.x_gravatar_id(r, info, None)\n if r.email and not gravatar_id:\n gravatar_id = hashlib.md5(r.email.strip().lower()).hexdigest()\n if gravatar_id:\n r.avatar_url = 'https://secure.gravatar.com/avatar/'+gravatar_id\n r.is_team = self.x_is_team(r, info, False)\n r.extra_info = info\n return r\n\n def get_team_members(self, team_name, page_url=None):\n \"\"\"Given a team_name on the platform, return the team's membership list\n from the API.\n \"\"\"\n default_url = self.api_team_members_path.format(user_name=quote(team_name))\n r = self.api_get(page_url or default_url)\n members, count, pages_urls = self.api_paginator(r, self.api_parser(r))\n members = [self.extract_user_info(m) for m in members]\n return members, count, pages_urls\n\n def get_user_info(self, user_name, sess=None):\n \"\"\"Given a user_name on the platform, get the user's info from the API.\n \"\"\"\n try:\n path = self.api_user_info_path.format(user_name=quote(user_name))\n except KeyError:\n raise Response(404)\n info = self.api_parser(self.api_get(path, sess=sess))\n return self.extract_user_info(info)\n\n def get_user_self_info(self, sess):\n \"\"\"Get the authenticated user's info from the API.\n \"\"\"\n r = 
self.api_get(self.api_user_self_info_path, sess=sess)\n info = self.extract_user_info(self.api_parser(r))\n token = getattr(sess, 'token', None)\n if token:\n info.token = json.dumps(token)\n return info\n\n\nclass PlatformOAuth1(Platform):\n\n request_token_path = '/oauth/request_token'\n authorize_path = '/oauth/authorize'\n access_token_path = '/oauth/access_token'\n\n def get_auth_session(self, token=None):\n args = ()\n if token:\n args = (token['token'], token['token_secret'])\n return OAuth1Session(self.api_key, self.api_secret, *args,\n callback_uri=self.callback_url)\n\n def get_auth_url(self, **kw):\n sess = self.get_auth_session()\n r = sess.fetch_request_token(self.auth_url+self.request_token_path)\n url = sess.authorization_url(self.auth_url+self.authorize_path)\n return url, r['oauth_token'], r['oauth_token_secret']\n\n def get_query_id(self, querystring):\n return querystring['oauth_token']\n\n def handle_auth_callback(self, url, token, token_secret):\n sess = self.get_auth_session(dict(token=token, token_secret=token_secret))\n sess.parse_authorization_response(url)\n r = sess.fetch_access_token(self.auth_url+self.access_token_path)\n sess.token = dict(token=r['oauth_token'],\n token_secret=r['oauth_token_secret'])\n return sess\n\n\nclass PlatformOAuth2(Platform):\n\n oauth_default_scope = None\n oauth_email_scope = None\n oauth_payment_scope = None\n\n def get_auth_session(self, state=None, token=None, token_updater=None):\n return OAuth2Session(self.api_key, state=state, token=token,\n token_updater=token_updater,\n redirect_uri=self.callback_url,\n scope=self.oauth_default_scope)\n\n def get_auth_url(self, **kw):\n sess = self.get_auth_session()\n url, state = sess.authorization_url(self.auth_url)\n return url, state, ''\n\n def get_query_id(self, querystring):\n return querystring['state']\n\n def handle_auth_callback(self, url, state, unused_arg):\n sess = self.get_auth_session(state=state)\n sess.fetch_token(self.access_token_url,\n client_secret=self.api_secret,\n authorization_response=url)\n return sess\n", "path": "gratipay/elsewhere/__init__.py"}, {"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth1\nfrom gratipay.elsewhere._extractors import any_key, key, not_available\nfrom gratipay.elsewhere._paginators import keys_paginator\n\n\nclass Bitbucket(PlatformOAuth1):\n\n # Platform attributes\n name = 'bitbucket'\n display_name = 'Bitbucket'\n account_url = 'https://bitbucket.org/{user_name}'\n\n # Auth attributes\n auth_url = 'https://bitbucket.org/api/1.0'\n authorize_path = '/oauth/authenticate'\n\n # API attributes\n api_format = 'json'\n api_paginator = keys_paginator(prev='previous')\n api_url = 'https://bitbucket.org/api'\n api_user_info_path = '/1.0/users/{user_name}'\n api_user_self_info_path = '/1.0/user'\n api_team_members_path = '/2.0/teams/{user_name}/members'\n\n # User info extractors\n x_user_info = key('user')\n x_user_id = not_available # No immutable id. 
:-/\n x_user_name = key('username')\n x_display_name = key('display_name')\n x_email = not_available\n x_avatar_url = any_key('avatar', ('links', 'avatar', 'href'))\n x_is_team = key('is_team')\n", "path": "gratipay/elsewhere/bitbucket.py"}], "after_files": [{"content": "\"\"\"This subpackage contains functionality for working with accounts elsewhere.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport hashlib\nimport json\nimport logging\nfrom urllib import quote\nimport xml.etree.ElementTree as ET\n\nfrom aspen import log, Response\nfrom aspen.utils import to_age, utc\nfrom requests_oauthlib import OAuth1Session, OAuth2Session\n\nfrom gratipay.elsewhere._extractors import not_available\n\n\nACTIONS = {'opt-in', 'connect', 'lock', 'unlock'}\nPLATFORMS = 'facebook google bitbucket bountysource github openstreetmap twitter venmo'.split()\n\n\nclass UnknownAccountElsewhere(Exception): pass\n\n\nclass PlatformRegistry(object):\n \"\"\"Registry of platforms we support connecting to Gratipay accounts.\n \"\"\"\n def __init__(self, platforms):\n self.__dict__ = OrderedDict((p.name, p) for p in platforms)\n\n def __contains__(self, platform):\n return platform.name in self.__dict__\n\n def __iter__(self):\n return iter(self.__dict__.values())\n\n\nclass UserInfo(object):\n \"\"\"A simple container for a user's info.\n\n Accessing a non-existing attribute returns `None`.\n \"\"\"\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n def __getattr__(self, key):\n return self.__dict__.get(key, None)\n\n def __setattr__(self, key, value):\n if value is None:\n self.__dict__.pop(key, None)\n else:\n self.__dict__[key] = value\n\n\nclass Platform(object):\n\n allows_team_connect = False\n\n # \"x\" stands for \"extract\"\n x_user_info = not_available\n x_user_id = not_available\n x_user_name = not_available\n x_display_name = not_available\n x_email = not_available\n x_gravatar_id = not_available\n x_avatar_url = not_available\n x_is_team = not_available\n\n required_attrs = ( 'account_url'\n , 'display_name'\n , 'name'\n )\n\n def __init__(self, api_key, api_secret, callback_url, api_url=None, auth_url=None):\n self.api_key = api_key\n self.api_secret = api_secret\n self.callback_url = callback_url\n if api_url:\n self.api_url = api_url\n if auth_url:\n self.auth_url = auth_url\n elif not getattr(self, 'auth_url', None):\n self.auth_url = self.api_url\n\n # Determine the appropriate response parser using `self.api_format`\n api_format = getattr(self, 'api_format', None)\n if api_format == 'json':\n self.api_parser = lambda r: r.json()\n elif api_format == 'xml':\n self.api_parser = lambda r: ET.fromstring(r.content)\n elif api_format:\n raise ValueError('unknown API format: '+str(api_format))\n\n # Make sure the subclass was implemented properly.\n missing_attrs = [a for a in self.required_attrs if not hasattr(self, a)]\n if missing_attrs:\n msg = \"The class %s is missing these required attributes: %s\"\n msg %= self.__class__.__name__, ', '.join(missing_attrs)\n raise AttributeError(msg)\n\n def api_get(self, path, sess=None, **kw):\n \"\"\"\n Given a `path` (e.g. /users/foo), this function sends a GET request to\n the platform's API (e.g. 
https://api.github.com/users/foo).\n\n The response is returned, after checking its status code and ratelimit\n headers.\n \"\"\"\n if not sess:\n sess = self.get_auth_session()\n response = sess.get(self.api_url+path, **kw)\n\n # Check status\n status = response.status_code\n if status == 404:\n raise Response(404, response.text)\n elif status != 200:\n log('{} api responded with {}:\\n{}'.format(self.name, status, response.text)\n , level=logging.ERROR)\n raise Response(500, '{} lookup failed with {}'.format(self.name, status))\n\n # Check ratelimit headers\n prefix = getattr(self, 'ratelimit_headers_prefix', None)\n if prefix:\n limit = response.headers[prefix+'limit']\n remaining = response.headers[prefix+'remaining']\n reset = response.headers[prefix+'reset']\n try:\n limit, remaining, reset = int(limit), int(remaining), int(reset)\n except (TypeError, ValueError):\n d = dict(limit=limit, remaining=remaining, reset=reset)\n log('Got weird rate headers from %s: %s' % (self.name, d))\n else:\n percent_remaining = remaining/limit\n if percent_remaining < 0.5:\n reset = to_age(datetime.fromtimestamp(reset, tz=utc))\n log_msg = (\n '{0} API: {1:.1%} of ratelimit has been consumed, '\n '{2} requests remaining, resets {3}.'\n ).format(self.name, 1 - percent_remaining, remaining, reset)\n log_lvl = logging.WARNING\n if percent_remaining < 0.2:\n log_lvl = logging.ERROR\n elif percent_remaining < 0.05:\n log_lvl = logging.CRITICAL\n log(log_msg, log_lvl)\n\n return response\n\n def extract_user_info(self, info):\n \"\"\"\n Given a user_info object of variable type (depending on the platform),\n extract the relevant information by calling the platform's extractors\n (`x_user_name`, `x_user_id`, etc).\n\n Returns a `UserInfo`. The `user_id` attribute is guaranteed to have a\n unique non-empty value.\n \"\"\"\n r = UserInfo(platform=self.name)\n info = self.x_user_info(r, info, info)\n r.user_name = self.x_user_name(r, info, None)\n if self.x_user_id.__func__ is not_available:\n r.user_id = r.user_name\n else:\n r.user_id = self.x_user_id(r, info)\n assert r.user_id is not None\n r.user_id = unicode(r.user_id)\n assert len(r.user_id) > 0\n r.display_name = self.x_display_name(r, info, None)\n r.email = self.x_email(r, info, None)\n r.avatar_url = self.x_avatar_url(r, info, None)\n if not r.avatar_url:\n gravatar_id = self.x_gravatar_id(r, info, None)\n if r.email and not gravatar_id:\n gravatar_id = hashlib.md5(r.email.strip().lower()).hexdigest()\n if gravatar_id:\n r.avatar_url = 'https://secure.gravatar.com/avatar/'+gravatar_id\n r.is_team = self.x_is_team(r, info, False)\n r.extra_info = info\n return r\n\n def get_team_members(self, team_name, page_url=None):\n \"\"\"Given a team_name on the platform, return the team's membership list\n from the API.\n \"\"\"\n default_url = self.api_team_members_path.format(user_name=quote(team_name))\n r = self.api_get(page_url or default_url)\n members, count, pages_urls = self.api_paginator(r, self.api_parser(r))\n members = [self.extract_user_info(m) for m in members]\n return members, count, pages_urls\n\n def get_user_info(self, user_name, sess=None):\n \"\"\"Given a user_name on the platform, get the user's info from the API.\n \"\"\"\n try:\n path = self.api_user_info_path.format(user_name=quote(user_name))\n except KeyError:\n raise Response(404)\n info = self.api_parser(self.api_get(path, sess=sess))\n return self.extract_user_info(info)\n\n def get_user_self_info(self, sess):\n \"\"\"Get the authenticated user's info from the API.\n \"\"\"\n r = 
self.api_get(self.api_user_self_info_path, sess=sess)\n info = self.extract_user_info(self.api_parser(r))\n token = getattr(sess, 'token', None)\n if token:\n info.token = json.dumps(token)\n return info\n\n\nclass PlatformOAuth1(Platform):\n\n request_token_path = '/oauth/request_token'\n authorize_path = '/oauth/authorize'\n access_token_path = '/oauth/access_token'\n\n def get_auth_session(self, token=None):\n args = ()\n if token:\n args = (token['token'], token['token_secret'])\n return OAuth1Session(self.api_key, self.api_secret, *args,\n callback_uri=self.callback_url)\n\n def get_auth_url(self, **kw):\n sess = self.get_auth_session()\n r = sess.fetch_request_token(self.auth_url+self.request_token_path)\n url = sess.authorization_url(self.auth_url+self.authorize_path)\n return url, r['oauth_token'], r['oauth_token_secret']\n\n def get_query_id(self, querystring):\n return querystring['oauth_token']\n\n def handle_auth_callback(self, url, token, token_secret):\n sess = self.get_auth_session(dict(token=token, token_secret=token_secret))\n sess.parse_authorization_response(url)\n r = sess.fetch_access_token(self.auth_url+self.access_token_path)\n sess.token = dict(token=r['oauth_token'],\n token_secret=r['oauth_token_secret'])\n return sess\n\n\nclass PlatformOAuth2(Platform):\n\n oauth_default_scope = None\n oauth_email_scope = None\n oauth_payment_scope = None\n\n def get_auth_session(self, state=None, token=None, token_updater=None):\n return OAuth2Session(self.api_key, state=state, token=token,\n token_updater=token_updater,\n redirect_uri=self.callback_url,\n scope=self.oauth_default_scope)\n\n def get_auth_url(self, **kw):\n sess = self.get_auth_session()\n url, state = sess.authorization_url(self.auth_url)\n return url, state, ''\n\n def get_query_id(self, querystring):\n return querystring['state']\n\n def handle_auth_callback(self, url, state, unused_arg):\n sess = self.get_auth_session(state=state)\n sess.fetch_token(self.access_token_url,\n client_secret=self.api_secret,\n authorization_response=url)\n return sess\n", "path": "gratipay/elsewhere/__init__.py"}, {"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom aspen import Response\nfrom gratipay.elsewhere import PlatformOAuth1\nfrom gratipay.elsewhere._extractors import any_key, key, not_available\nfrom gratipay.elsewhere._paginators import keys_paginator\n\n\nclass Bitbucket(PlatformOAuth1):\n\n # Platform attributes\n name = 'bitbucket'\n display_name = 'Bitbucket'\n account_url = 'https://bitbucket.org/{user_name}'\n\n # Auth attributes\n auth_url = 'https://bitbucket.org/api/1.0'\n authorize_path = '/oauth/authenticate'\n\n # API attributes\n api_format = 'json'\n api_paginator = keys_paginator(prev='previous')\n api_url = 'https://bitbucket.org/api'\n api_user_info_path = '/2.0/users/{user_name}'\n api_user_self_info_path = '/2.0/user'\n api_team_members_path = '/2.0/teams/{user_name}/members'\n\n # User info extractors\n x_user_info = key('user')\n x_user_id = key('uuid')\n x_user_name = key('username')\n x_display_name = key('display_name')\n x_email = not_available\n x_avatar_url = any_key('avatar', ('links', 'avatar', 'href'))\n x_is_team = key('type', lambda v: v == 'team')\n\n def api_get(self, path, sess=None, **kw):\n \"\"\"Extend to manually retry /users/pypy as /teams/pypy.\n\n Bitbucket gives us a 404 where a 30x would be more helpful.\n\n \"\"\"\n try:\n return PlatformOAuth1.api_get(self, path, sess, **kw)\n except Response, response:\n if response.code 
== 404 and ' is a team account' in response.body:\n assert path.startswith('/2.0/users/')\n path = '/2.0/teams/' + path[11:]\n return PlatformOAuth1.api_get(self, path, sess, **kw)\n else:\n raise\n", "path": "gratipay/elsewhere/bitbucket.py"}]}
| 3,916 | 669 |
gh_patches_debug_1933 | rasdani/github-patches | git_diff | spack__spack-5099 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
spack find : always prompt 0 installed packages
On a clean `develop` checkout :
```
$ git clone https://github.com/LLNL/spack.git
Cloning into 'spack'...
remote: Counting objects: 25613, done.
remote: Compressing objects: 100% (42/42), done.
remote: Total 25613 (delta 12), reused 3 (delta 3), pack-reused 25557
Receiving objects: 100% (25613/25613), 6.65 MiB | 6.46 MiB/s, done.
Resolving deltas: 100% (13031/13031), done.
Checking connectivity... done.
$ cd spack
$ . share/spack/setup-env.sh
$ spack compilers
==> Available compilers
-- gcc ----------------------------------------------------------
[email protected]
$ spack install zlib
==> Installing zlib
==> Trying to fetch from file:///home/mculpo/production/spack-mirror/zlib/zlib-1.2.8.tar.gz
######################################################################## 100,0%
==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix/zlib-1.2.8.tar.gz
==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix
==> No patches needed for zlib
==> Building zlib
==> Successfully installed zlib
Fetch: 0.01s. Build: 3.69s. Total: 3.70s.
[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix
$ spack find
==> 0 installed packages.
$ spack install szip
==> Installing szip
==> Trying to fetch from file:///home/mculpo/production/spack-mirror/szip/szip-2.1.tar.gz
######################################################################## 100,0%
==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq/szip-2.1.tar.gz
==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq
==> No patches needed for szip
==> Building szip
==> Successfully installed szip
Fetch: 0.01s. Build: 8.09s. Total: 8.10s.
[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq
$ spack find
==> 0 installed packages.
```
The db seems to be written correctly :
```
database:
installs:
d6pdl6xvnvap6ihrqcqtgvweghbszmix:
explicit: true
installed: true
path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix
ref_count: 0
spec:
zlib:
arch: linux-x86_64
compiler:
name: gcc
version: '4.8'
dependencies: {}
namespace: builtin
parameters:
cflags: []
cppflags: []
cxxflags: []
fflags: []
ldflags: []
ldlibs: []
version: 1.2.8
esfmhl54wbdb7nnnip6y6jbxlbmxs2jq:
explicit: true
installed: true
path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq
ref_count: 0
spec:
szip:
arch: linux-x86_64
compiler:
name: gcc
version: '4.8'
dependencies: {}
namespace: builtin
parameters:
cflags: []
cppflags: []
cxxflags: []
fflags: []
ldflags: []
ldlibs: []
version: '2.1'
version: 0.9.1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/h5z-zfp/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class H5zZfp(MakefilePackage):
29 """A highly flexible floating point and integer compression plugin for the
30 HDF5 library using ZFP compression."""
31
32 homepage = "http://h5z-zfp.readthedocs.io/en/latest"
33 url = "https://github.com/LLNL/H5Z-ZFP"
34
35 version('develop', git='https://github.com/LLNL/H5Z-ZFP.git', tag='master')
36 version('0.7.0', git='https://github.com/LLNL/H5Z-ZFP.git', commit='58ac811')
37
38 variant('fortran', default=True, description='Enable Fortran support')
39
40 depends_on('hdf5')
41 # depends_on('zfp bsws=8')
42 depends_on('zfp')
43
44 @property
45 def make_defs(self):
46 make_defs = [
47 'PREFIX=%s' % prefix,
48 'CC=%s' % spack_cc,
49 'HDF5_HOME=%s' % self.spec['hdf5'].prefix,
50 'ZFP_HOME=%s' % self.spec['zfp'].prefix]
51
52 if '+fortran' in self.spec and spack_fc:
53 make_defs += ['FC=%s' % spack_fc]
54
55 return make_defs
56
57 @property
58 def build_targets(self):
59 targets = ['all']
60 return self.make_defs + targets
61
62 @property
63 def install_targets(self):
64 make_args = ['install']
65 return make_args + self.make_defs
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/var/spack/repos/builtin/packages/h5z-zfp/package.py b/var/spack/repos/builtin/packages/h5z-zfp/package.py
--- a/var/spack/repos/builtin/packages/h5z-zfp/package.py
+++ b/var/spack/repos/builtin/packages/h5z-zfp/package.py
@@ -38,8 +38,7 @@
variant('fortran', default=True, description='Enable Fortran support')
depends_on('hdf5')
-# depends_on('zfp bsws=8')
- depends_on('zfp')
+ depends_on('zfp bsws=8')
@property
def make_defs(self):
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/h5z-zfp/package.py b/var/spack/repos/builtin/packages/h5z-zfp/package.py\n--- a/var/spack/repos/builtin/packages/h5z-zfp/package.py\n+++ b/var/spack/repos/builtin/packages/h5z-zfp/package.py\n@@ -38,8 +38,7 @@\n variant('fortran', default=True, description='Enable Fortran support')\n \n depends_on('hdf5')\n-# depends_on('zfp bsws=8')\n- depends_on('zfp')\n+ depends_on('zfp bsws=8')\n \n @property\n def make_defs(self):\n", "issue": "spack find : always prompt 0 installed packages\nOn a clean `develop` checkout : \n\n```\n$ git clone https://github.com/LLNL/spack.git\nCloning into 'spack'...\nremote: Counting objects: 25613, done.\nremote: Compressing objects: 100% (42/42), done.\nremote: Total 25613 (delta 12), reused 3 (delta 3), pack-reused 25557\nReceiving objects: 100% (25613/25613), 6.65 MiB | 6.46 MiB/s, done.\nResolving deltas: 100% (13031/13031), done.\nChecking connectivity... done.\n\n$ cd spack\n$ . share/spack/setup-env.sh \n$ spack compilers\n==> Available compilers\n-- gcc ----------------------------------------------------------\[email protected]\n\n$ spack install zlib\n==> Installing zlib\n==> Trying to fetch from file:///home/mculpo/production/spack-mirror/zlib/zlib-1.2.8.tar.gz\n######################################################################## 100,0%\n==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix/zlib-1.2.8.tar.gz\n==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix\n==> No patches needed for zlib\n==> Building zlib\n==> Successfully installed zlib\n Fetch: 0.01s. Build: 3.69s. Total: 3.70s.\n[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix\n\n$ spack find\n==> 0 installed packages.\n\n$ spack install szip\n==> Installing szip\n==> Trying to fetch from file:///home/mculpo/production/spack-mirror/szip/szip-2.1.tar.gz\n######################################################################## 100,0%\n==> Staging archive: /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq/szip-2.1.tar.gz\n==> Created stage in /home/mculpo/tmp/spack/var/spack/stage/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq\n==> No patches needed for szip\n==> Building szip\n==> Successfully installed szip\n Fetch: 0.01s. Build: 8.09s. 
Total: 8.10s.\n[+] /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq\n\n$ spack find \n==> 0 installed packages.\n```\n\nThe db seems to be written correctly : \n\n```\ndatabase:\n installs:\n d6pdl6xvnvap6ihrqcqtgvweghbszmix:\n explicit: true\n installed: true\n path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/zlib-1.2.8-d6pdl6xvnvap6ihrqcqtgvweghbszmix\n ref_count: 0\n spec:\n zlib:\n arch: linux-x86_64\n compiler:\n name: gcc\n version: '4.8'\n dependencies: {}\n namespace: builtin\n parameters:\n cflags: []\n cppflags: []\n cxxflags: []\n fflags: []\n ldflags: []\n ldlibs: []\n version: 1.2.8\n esfmhl54wbdb7nnnip6y6jbxlbmxs2jq:\n explicit: true\n installed: true\n path: /home/mculpo/tmp/spack/opt/spack/linux-x86_64/gcc-4.8/szip-2.1-esfmhl54wbdb7nnnip6y6jbxlbmxs2jq\n ref_count: 0\n spec:\n szip:\n arch: linux-x86_64\n compiler:\n name: gcc\n version: '4.8'\n dependencies: {}\n namespace: builtin\n parameters:\n cflags: []\n cppflags: []\n cxxflags: []\n fflags: []\n ldflags: []\n ldlibs: []\n version: '2.1'\n version: 0.9.1\n```\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass H5zZfp(MakefilePackage):\n \"\"\"A highly flexible floating point and integer compression plugin for the\n HDF5 library using ZFP compression.\"\"\"\n\n homepage = \"http://h5z-zfp.readthedocs.io/en/latest\"\n url = \"https://github.com/LLNL/H5Z-ZFP\"\n\n version('develop', git='https://github.com/LLNL/H5Z-ZFP.git', tag='master')\n version('0.7.0', git='https://github.com/LLNL/H5Z-ZFP.git', commit='58ac811')\n\n variant('fortran', default=True, description='Enable Fortran support')\n\n depends_on('hdf5')\n# depends_on('zfp bsws=8')\n depends_on('zfp')\n\n @property\n def make_defs(self):\n make_defs = [\n 'PREFIX=%s' % prefix,\n 'CC=%s' % spack_cc,\n 'HDF5_HOME=%s' % self.spec['hdf5'].prefix,\n 'ZFP_HOME=%s' % self.spec['zfp'].prefix]\n\n if '+fortran' in self.spec and spack_fc:\n make_defs += ['FC=%s' % spack_fc]\n\n return make_defs\n\n @property\n def build_targets(self):\n targets = ['all']\n return self.make_defs + targets\n\n @property\n def install_targets(self):\n make_args = ['install']\n return make_args + self.make_defs\n", "path": "var/spack/repos/builtin/packages/h5z-zfp/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass H5zZfp(MakefilePackage):\n \"\"\"A highly flexible floating point and integer compression plugin for the\n HDF5 library using ZFP compression.\"\"\"\n\n homepage = \"http://h5z-zfp.readthedocs.io/en/latest\"\n url = \"https://github.com/LLNL/H5Z-ZFP\"\n\n version('develop', git='https://github.com/LLNL/H5Z-ZFP.git', tag='master')\n version('0.7.0', git='https://github.com/LLNL/H5Z-ZFP.git', commit='58ac811')\n\n variant('fortran', default=True, description='Enable Fortran support')\n\n depends_on('hdf5')\n depends_on('zfp bsws=8')\n\n @property\n def make_defs(self):\n make_defs = [\n 'PREFIX=%s' % prefix,\n 'CC=%s' % spack_cc,\n 'HDF5_HOME=%s' % self.spec['hdf5'].prefix,\n 'ZFP_HOME=%s' % self.spec['zfp'].prefix]\n\n if '+fortran' in self.spec and spack_fc:\n make_defs += ['FC=%s' % spack_fc]\n\n return make_defs\n\n @property\n def build_targets(self):\n targets = ['all']\n return self.make_defs + targets\n\n @property\n def install_targets(self):\n make_args = ['install']\n return make_args + self.make_defs\n", "path": "var/spack/repos/builtin/packages/h5z-zfp/package.py"}]}
| 2,142 | 151 |
gh_patches_debug_31028 | rasdani/github-patches | git_diff | pretix__pretix-346 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Log old email when changing an order's email address
… because otherwise it's completely lost.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretix/control/logdisplay.py`
Content:
```
1 import json
2 from decimal import Decimal
3
4 from django.dispatch import receiver
5 from django.utils import formats
6 from django.utils.translation import ugettext_lazy as _
7
8 from pretix.base.models import Event, ItemVariation, LogEntry
9 from pretix.base.signals import logentry_display
10
11
12 def _display_order_changed(event: Event, logentry: LogEntry):
13 data = json.loads(logentry.data)
14
15 text = _('The order has been changed:')
16 if logentry.action_type == 'pretix.event.order.changed.item':
17 old_item = str(event.items.get(pk=data['old_item']))
18 if data['old_variation']:
19 old_item += ' - ' + str(event.itemvariations.get(pk=data['old_variation']))
20 new_item = str(event.items.get(pk=data['new_item']))
21 if data['new_variation']:
22 new_item += ' - ' + str(event.itemvariations.get(pk=data['new_variation']))
23 return text + ' ' + _('{old_item} ({old_price} {currency}) changed to {new_item} ({new_price} {currency}).').format(
24 old_item=old_item, new_item=new_item,
25 old_price=formats.localize(Decimal(data['old_price'])),
26 new_price=formats.localize(Decimal(data['new_price'])),
27 currency=event.currency
28 )
29 elif logentry.action_type == 'pretix.event.order.changed.price':
30 return text + ' ' + _('Price of a position changed from {old_price} {currency} to {new_price} {currency}.').format(
31 old_price=formats.localize(Decimal(data['old_price'])),
32 new_price=formats.localize(Decimal(data['new_price'])),
33 currency=event.currency
34 )
35 elif logentry.action_type == 'pretix.event.order.changed.cancel':
36 old_item = str(event.items.get(pk=data['old_item']))
37 if data['old_variation']:
38 old_item += ' - ' + str(ItemVariation.objects.get(pk=data['old_variation']))
39 return text + ' ' + _('{old_item} ({old_price} {currency}) removed.').format(
40 old_item=old_item,
41 old_price=formats.localize(Decimal(data['old_price'])),
42 currency=event.currency
43 )
44
45
46 @receiver(signal=logentry_display, dispatch_uid="pretixcontrol_logentry_display")
47 def pretixcontrol_logentry_display(sender: Event, logentry: LogEntry, **kwargs):
48 plains = {
49 'pretix.event.order.modified': _('The order details have been modified.'),
50 'pretix.event.order.unpaid': _('The order has been marked as unpaid.'),
51 'pretix.event.order.resend': _('The link to the order detail page has been resent to the user.'),
52 'pretix.event.order.expirychanged': _('The order\'s expiry date has been changed.'),
53 'pretix.event.order.expired': _('The order has been marked as expired.'),
54 'pretix.event.order.paid': _('The order has been marked as paid.'),
55 'pretix.event.order.refunded': _('The order has been refunded.'),
56 'pretix.event.order.canceled': _('The order has been canceled.'),
57 'pretix.event.order.placed': _('The order has been created.'),
58 'pretix.event.order.invoice.generated': _('The invoice has been generated.'),
59 'pretix.event.order.invoice.regenerated': _('The invoice has been regenerated.'),
60 'pretix.event.order.invoice.reissued': _('The invoice has been reissued.'),
61 'pretix.event.order.comment': _('The order\'s internal comment has been updated.'),
62 'pretix.event.order.contact.changed': _('The email address has been changed.'),
63 'pretix.event.order.payment.changed': _('The payment method has been changed.'),
64 'pretix.event.order.expire_warning_sent': _('An email has been sent with a warning that the order is about to expire.'),
65 'pretix.user.settings.2fa.enabled': _('Two-factor authentication has been enabled.'),
66 'pretix.user.settings.2fa.disabled': _('Two-factor authentication has been disabled.'),
67 'pretix.user.settings.2fa.regenemergency': _('Your two-factor emergency codes have been regenerated.'),
68 'pretix.control.auth.user.forgot_password.mail_sent': _('Password reset mail sent.'),
69 'pretix.control.auth.user.forgot_password.recovered': _('The password has been reset.')
70
71 }
72 if logentry.action_type in plains:
73 return plains[logentry.action_type]
74
75 if logentry.action_type.startswith('pretix.event.order.changed'):
76 return _display_order_changed(sender, logentry)
77
78 if logentry.action_type == 'pretix.user.settings.2fa.device.added':
79 data = json.loads(logentry.data)
80 return _('A new two-factor authentication device "{name}" has been added to your account.').format(
81 name=data['name']
82 )
83 if logentry.action_type == 'pretix.user.settings.2fa.device.deleted':
84 data = json.loads(logentry.data)
85 return _('The two-factor authentication device "{name}" has been removed from your account.').format(
86 name=data['name']
87 )
88 if logentry.action_type == 'pretix.user.settings.changed':
89 data = json.loads(logentry.data)
90 text = str(_('Your account settings have been changed.'))
91 if 'email' in data:
92 text = text + ' ' + str(_('Your email address has been changed to {email}.').format(email=data['email']))
93 if 'new_pw' in data:
94 text = text + ' ' + str(_('Your password has been changed.'))
95 return text
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pretix/control/logdisplay.py b/src/pretix/control/logdisplay.py
--- a/src/pretix/control/logdisplay.py
+++ b/src/pretix/control/logdisplay.py
@@ -59,7 +59,6 @@
'pretix.event.order.invoice.regenerated': _('The invoice has been regenerated.'),
'pretix.event.order.invoice.reissued': _('The invoice has been reissued.'),
'pretix.event.order.comment': _('The order\'s internal comment has been updated.'),
- 'pretix.event.order.contact.changed': _('The email address has been changed.'),
'pretix.event.order.payment.changed': _('The payment method has been changed.'),
'pretix.event.order.expire_warning_sent': _('An email has been sent with a warning that the order is about to expire.'),
'pretix.user.settings.2fa.enabled': _('Two-factor authentication has been enabled.'),
@@ -75,6 +74,13 @@
if logentry.action_type.startswith('pretix.event.order.changed'):
return _display_order_changed(sender, logentry)
+ if logentry.action_type.startswith('pretix.event.order.contact.changed'):
+ data = json.loads(logentry.data)
+ return _('The email address has been changed from "{old}" to "{new}".').format(
+ old=data['old_email'],
+ new=data['new_email'],
+ )
+
if logentry.action_type == 'pretix.user.settings.2fa.device.added':
data = json.loads(logentry.data)
return _('A new two-factor authentication device "{name}" has been added to your account.').format(
|
{"golden_diff": "diff --git a/src/pretix/control/logdisplay.py b/src/pretix/control/logdisplay.py\n--- a/src/pretix/control/logdisplay.py\n+++ b/src/pretix/control/logdisplay.py\n@@ -59,7 +59,6 @@\n 'pretix.event.order.invoice.regenerated': _('The invoice has been regenerated.'),\n 'pretix.event.order.invoice.reissued': _('The invoice has been reissued.'),\n 'pretix.event.order.comment': _('The order\\'s internal comment has been updated.'),\n- 'pretix.event.order.contact.changed': _('The email address has been changed.'),\n 'pretix.event.order.payment.changed': _('The payment method has been changed.'),\n 'pretix.event.order.expire_warning_sent': _('An email has been sent with a warning that the order is about to expire.'),\n 'pretix.user.settings.2fa.enabled': _('Two-factor authentication has been enabled.'),\n@@ -75,6 +74,13 @@\n if logentry.action_type.startswith('pretix.event.order.changed'):\n return _display_order_changed(sender, logentry)\n \n+ if logentry.action_type.startswith('pretix.event.order.contact.changed'):\n+ data = json.loads(logentry.data)\n+ return _('The email address has been changed from \"{old}\" to \"{new}\".').format(\n+ old=data['old_email'],\n+ new=data['new_email'],\n+ )\n+\n if logentry.action_type == 'pretix.user.settings.2fa.device.added':\n data = json.loads(logentry.data)\n return _('A new two-factor authentication device \"{name}\" has been added to your account.').format(\n", "issue": "Log old email when changing an order's email address\n\u2026 because otherwise it's completely lost.\n", "before_files": [{"content": "import json\nfrom decimal import Decimal\n\nfrom django.dispatch import receiver\nfrom django.utils import formats\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pretix.base.models import Event, ItemVariation, LogEntry\nfrom pretix.base.signals import logentry_display\n\n\ndef _display_order_changed(event: Event, logentry: LogEntry):\n data = json.loads(logentry.data)\n\n text = _('The order has been changed:')\n if logentry.action_type == 'pretix.event.order.changed.item':\n old_item = str(event.items.get(pk=data['old_item']))\n if data['old_variation']:\n old_item += ' - ' + str(event.itemvariations.get(pk=data['old_variation']))\n new_item = str(event.items.get(pk=data['new_item']))\n if data['new_variation']:\n new_item += ' - ' + str(event.itemvariations.get(pk=data['new_variation']))\n return text + ' ' + _('{old_item} ({old_price} {currency}) changed to {new_item} ({new_price} {currency}).').format(\n old_item=old_item, new_item=new_item,\n old_price=formats.localize(Decimal(data['old_price'])),\n new_price=formats.localize(Decimal(data['new_price'])),\n currency=event.currency\n )\n elif logentry.action_type == 'pretix.event.order.changed.price':\n return text + ' ' + _('Price of a position changed from {old_price} {currency} to {new_price} {currency}.').format(\n old_price=formats.localize(Decimal(data['old_price'])),\n new_price=formats.localize(Decimal(data['new_price'])),\n currency=event.currency\n )\n elif logentry.action_type == 'pretix.event.order.changed.cancel':\n old_item = str(event.items.get(pk=data['old_item']))\n if data['old_variation']:\n old_item += ' - ' + str(ItemVariation.objects.get(pk=data['old_variation']))\n return text + ' ' + _('{old_item} ({old_price} {currency}) removed.').format(\n old_item=old_item,\n old_price=formats.localize(Decimal(data['old_price'])),\n currency=event.currency\n )\n\n\n@receiver(signal=logentry_display, dispatch_uid=\"pretixcontrol_logentry_display\")\ndef 
pretixcontrol_logentry_display(sender: Event, logentry: LogEntry, **kwargs):\n plains = {\n 'pretix.event.order.modified': _('The order details have been modified.'),\n 'pretix.event.order.unpaid': _('The order has been marked as unpaid.'),\n 'pretix.event.order.resend': _('The link to the order detail page has been resent to the user.'),\n 'pretix.event.order.expirychanged': _('The order\\'s expiry date has been changed.'),\n 'pretix.event.order.expired': _('The order has been marked as expired.'),\n 'pretix.event.order.paid': _('The order has been marked as paid.'),\n 'pretix.event.order.refunded': _('The order has been refunded.'),\n 'pretix.event.order.canceled': _('The order has been canceled.'),\n 'pretix.event.order.placed': _('The order has been created.'),\n 'pretix.event.order.invoice.generated': _('The invoice has been generated.'),\n 'pretix.event.order.invoice.regenerated': _('The invoice has been regenerated.'),\n 'pretix.event.order.invoice.reissued': _('The invoice has been reissued.'),\n 'pretix.event.order.comment': _('The order\\'s internal comment has been updated.'),\n 'pretix.event.order.contact.changed': _('The email address has been changed.'),\n 'pretix.event.order.payment.changed': _('The payment method has been changed.'),\n 'pretix.event.order.expire_warning_sent': _('An email has been sent with a warning that the order is about to expire.'),\n 'pretix.user.settings.2fa.enabled': _('Two-factor authentication has been enabled.'),\n 'pretix.user.settings.2fa.disabled': _('Two-factor authentication has been disabled.'),\n 'pretix.user.settings.2fa.regenemergency': _('Your two-factor emergency codes have been regenerated.'),\n 'pretix.control.auth.user.forgot_password.mail_sent': _('Password reset mail sent.'),\n 'pretix.control.auth.user.forgot_password.recovered': _('The password has been reset.')\n\n }\n if logentry.action_type in plains:\n return plains[logentry.action_type]\n\n if logentry.action_type.startswith('pretix.event.order.changed'):\n return _display_order_changed(sender, logentry)\n\n if logentry.action_type == 'pretix.user.settings.2fa.device.added':\n data = json.loads(logentry.data)\n return _('A new two-factor authentication device \"{name}\" has been added to your account.').format(\n name=data['name']\n )\n if logentry.action_type == 'pretix.user.settings.2fa.device.deleted':\n data = json.loads(logentry.data)\n return _('The two-factor authentication device \"{name}\" has been removed from your account.').format(\n name=data['name']\n )\n if logentry.action_type == 'pretix.user.settings.changed':\n data = json.loads(logentry.data)\n text = str(_('Your account settings have been changed.'))\n if 'email' in data:\n text = text + ' ' + str(_('Your email address has been changed to {email}.').format(email=data['email']))\n if 'new_pw' in data:\n text = text + ' ' + str(_('Your password has been changed.'))\n return text\n", "path": "src/pretix/control/logdisplay.py"}], "after_files": [{"content": "import json\nfrom decimal import Decimal\n\nfrom django.dispatch import receiver\nfrom django.utils import formats\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pretix.base.models import Event, ItemVariation, LogEntry\nfrom pretix.base.signals import logentry_display\n\n\ndef _display_order_changed(event: Event, logentry: LogEntry):\n data = json.loads(logentry.data)\n\n text = _('The order has been changed:')\n if logentry.action_type == 'pretix.event.order.changed.item':\n old_item = str(event.items.get(pk=data['old_item']))\n if 
data['old_variation']:\n old_item += ' - ' + str(event.itemvariations.get(pk=data['old_variation']))\n new_item = str(event.items.get(pk=data['new_item']))\n if data['new_variation']:\n new_item += ' - ' + str(event.itemvariations.get(pk=data['new_variation']))\n return text + ' ' + _('{old_item} ({old_price} {currency}) changed to {new_item} ({new_price} {currency}).').format(\n old_item=old_item, new_item=new_item,\n old_price=formats.localize(Decimal(data['old_price'])),\n new_price=formats.localize(Decimal(data['new_price'])),\n currency=event.currency\n )\n elif logentry.action_type == 'pretix.event.order.changed.price':\n return text + ' ' + _('Price of a position changed from {old_price} {currency} to {new_price} {currency}.').format(\n old_price=formats.localize(Decimal(data['old_price'])),\n new_price=formats.localize(Decimal(data['new_price'])),\n currency=event.currency\n )\n elif logentry.action_type == 'pretix.event.order.changed.cancel':\n old_item = str(event.items.get(pk=data['old_item']))\n if data['old_variation']:\n old_item += ' - ' + str(ItemVariation.objects.get(pk=data['old_variation']))\n return text + ' ' + _('{old_item} ({old_price} {currency}) removed.').format(\n old_item=old_item,\n old_price=formats.localize(Decimal(data['old_price'])),\n currency=event.currency\n )\n\n\n@receiver(signal=logentry_display, dispatch_uid=\"pretixcontrol_logentry_display\")\ndef pretixcontrol_logentry_display(sender: Event, logentry: LogEntry, **kwargs):\n plains = {\n 'pretix.event.order.modified': _('The order details have been modified.'),\n 'pretix.event.order.unpaid': _('The order has been marked as unpaid.'),\n 'pretix.event.order.resend': _('The link to the order detail page has been resent to the user.'),\n 'pretix.event.order.expirychanged': _('The order\\'s expiry date has been changed.'),\n 'pretix.event.order.expired': _('The order has been marked as expired.'),\n 'pretix.event.order.paid': _('The order has been marked as paid.'),\n 'pretix.event.order.refunded': _('The order has been refunded.'),\n 'pretix.event.order.canceled': _('The order has been canceled.'),\n 'pretix.event.order.placed': _('The order has been created.'),\n 'pretix.event.order.invoice.generated': _('The invoice has been generated.'),\n 'pretix.event.order.invoice.regenerated': _('The invoice has been regenerated.'),\n 'pretix.event.order.invoice.reissued': _('The invoice has been reissued.'),\n 'pretix.event.order.comment': _('The order\\'s internal comment has been updated.'),\n 'pretix.event.order.payment.changed': _('The payment method has been changed.'),\n 'pretix.event.order.expire_warning_sent': _('An email has been sent with a warning that the order is about to expire.'),\n 'pretix.user.settings.2fa.enabled': _('Two-factor authentication has been enabled.'),\n 'pretix.user.settings.2fa.disabled': _('Two-factor authentication has been disabled.'),\n 'pretix.user.settings.2fa.regenemergency': _('Your two-factor emergency codes have been regenerated.'),\n 'pretix.control.auth.user.forgot_password.mail_sent': _('Password reset mail sent.'),\n 'pretix.control.auth.user.forgot_password.recovered': _('The password has been reset.')\n\n }\n if logentry.action_type in plains:\n return plains[logentry.action_type]\n\n if logentry.action_type.startswith('pretix.event.order.changed'):\n return _display_order_changed(sender, logentry)\n\n if logentry.action_type.startswith('pretix.event.order.contact.changed'):\n data = json.loads(logentry.data)\n return _('The email address has been changed from 
\"{old}\" to \"{new}\".').format(\n old=data['old_email'],\n new=data['new_email'],\n )\n\n if logentry.action_type == 'pretix.user.settings.2fa.device.added':\n data = json.loads(logentry.data)\n return _('A new two-factor authentication device \"{name}\" has been added to your account.').format(\n name=data['name']\n )\n if logentry.action_type == 'pretix.user.settings.2fa.device.deleted':\n data = json.loads(logentry.data)\n return _('The two-factor authentication device \"{name}\" has been removed from your account.').format(\n name=data['name']\n )\n if logentry.action_type == 'pretix.user.settings.changed':\n data = json.loads(logentry.data)\n text = str(_('Your account settings have been changed.'))\n if 'email' in data:\n text = text + ' ' + str(_('Your email address has been changed to {email}.').format(email=data['email']))\n if 'new_pw' in data:\n text = text + ' ' + str(_('Your password has been changed.'))\n return text\n", "path": "src/pretix/control/logdisplay.py"}]}
| 1,634 | 348 |
gh_patches_debug_58561 | rasdani/github-patches | git_diff | codespell-project__codespell-86 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
codespell.py does nothng if [fileN] is not specified
Previously running `codespell` without file parameter starts the check. Now `codespell.py` does nothing. The behavior should stay the same as before - if file/dir argument is not specefied then current directory should be used as a default parameter.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/codespell.py`
Content:
```
1 #!/usr/bin/env python
2
3 import sys
4
5 if __name__ == '__main__':
6 import codespell_lib
7 sys.exit(codespell_lib.main(*sys.argv))
8
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bin/codespell.py b/bin/codespell.py
--- a/bin/codespell.py
+++ b/bin/codespell.py
@@ -4,4 +4,4 @@
if __name__ == '__main__':
import codespell_lib
- sys.exit(codespell_lib.main(*sys.argv))
+ sys.exit(codespell_lib.main(*sys.argv[1:]))
|
{"golden_diff": "diff --git a/bin/codespell.py b/bin/codespell.py\n--- a/bin/codespell.py\n+++ b/bin/codespell.py\n@@ -4,4 +4,4 @@\n \n if __name__ == '__main__':\n import codespell_lib\n- sys.exit(codespell_lib.main(*sys.argv))\n+ sys.exit(codespell_lib.main(*sys.argv[1:]))\n", "issue": "codespell.py does nothng if [fileN] is not specified\nPreviously running `codespell` without file parameter starts the check. Now `codespell.py` does nothing. The behavior should stay the same as before - if file/dir argument is not specefied then current directory should be used as a default parameter.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport sys\n\nif __name__ == '__main__':\n import codespell_lib\n sys.exit(codespell_lib.main(*sys.argv))\n", "path": "bin/codespell.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport sys\n\nif __name__ == '__main__':\n import codespell_lib\n sys.exit(codespell_lib.main(*sys.argv[1:]))\n", "path": "bin/codespell.py"}]}
| 372 | 86 |
gh_patches_debug_6744 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3626 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_DOCKER_10 mistakes quoted absolute paths for relative paths
**Describe the issue**
CKV_DOCKER_10 mistakes quoted absolute paths for relative paths.
**Examples**
```
cat << EOF > Dockerfile
FROM alpine:3.16
WORKDIR "/app"
EOF
checkov --check CKV_DOCKER_10 --file Dockerfile
```

**Version (please complete the following information):**
2.1.258
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/dockerfile/checks/WorkdirIsAbsolute.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4
5 from checkov.common.models.enums import CheckCategories, CheckResult
6 from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
7
8 ISABSOLUTE = re.compile("(^/[A-Za-z0-9-_+]*)|(^[A-Za-z0-9-_+]:\\\\.*)|(^\\$[{}A-Za-z0-9-_+].*)")
9
10
11 class WorkdirIsAbsolute(BaseDockerfileCheck):
12 def __init__(self) -> None:
13 """
14 For clarity and reliability, you should always use absolute paths for your WORKDIR.
15 """
16 name = "Ensure that WORKDIR values are absolute paths"
17 id = "CKV_DOCKER_10"
18 supported_instructions = ("WORKDIR",)
19 categories = (CheckCategories.CONVENTION,)
20 super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)
21
22 def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:
23 workdirs = []
24 for workdir in conf:
25 path = workdir["value"]
26 if not re.match(ISABSOLUTE, path):
27 workdirs.append(workdir)
28
29 if workdirs:
30 return CheckResult.FAILED, workdirs
31
32 return CheckResult.PASSED, None
33
34
35 check = WorkdirIsAbsolute()
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py
+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py
@@ -5,7 +5,7 @@
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck
-ISABSOLUTE = re.compile("(^/[A-Za-z0-9-_+]*)|(^[A-Za-z0-9-_+]:\\\\.*)|(^\\$[{}A-Za-z0-9-_+].*)")
+ISABSOLUTE = re.compile("^\"?((/[A-Za-z0-9-_+]*)|([A-Za-z0-9-_+]:\\\\.*)|(\\$[{}A-Za-z0-9-_+].*))")
class WorkdirIsAbsolute(BaseDockerfileCheck):
|
{"golden_diff": "diff --git a/checkov/dockerfile/checks/WorkdirIsAbsolute.py b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n--- a/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n+++ b/checkov/dockerfile/checks/WorkdirIsAbsolute.py\n@@ -5,7 +5,7 @@\n from checkov.common.models.enums import CheckCategories, CheckResult\n from checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n \n-ISABSOLUTE = re.compile(\"(^/[A-Za-z0-9-_+]*)|(^[A-Za-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-Za-z0-9-_+].*)\")\n+ISABSOLUTE = re.compile(\"^\\\"?((/[A-Za-z0-9-_+]*)|([A-Za-z0-9-_+]:\\\\\\\\.*)|(\\\\$[{}A-Za-z0-9-_+].*))\")\n \n \n class WorkdirIsAbsolute(BaseDockerfileCheck):\n", "issue": "CKV_DOCKER_10 mistakes quoted absolute paths for relative paths\n**Describe the issue**\r\nCKV_DOCKER_10 mistakes quoted absolute paths for relative paths.\r\n\r\n**Examples**\r\n```\r\ncat << EOF > Dockerfile\r\nFROM alpine:3.16\r\nWORKDIR \"/app\"\r\nEOF\r\n\r\ncheckov --check CKV_DOCKER_10 --file Dockerfile\r\n```\r\n\r\n\r\n\r\n**Version (please complete the following information):**\r\n2.1.258\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"(^/[A-Za-z0-9-_+]*)|(^[A-Za-z0-9-_+]:\\\\\\\\.*)|(^\\\\$[{}A-Za-z0-9-_+].*)\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = (\"WORKDIR\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n workdirs = []\n for workdir in conf:\n path = workdir[\"value\"]\n if not re.match(ISABSOLUTE, path):\n workdirs.append(workdir)\n\n if workdirs:\n return CheckResult.FAILED, workdirs\n\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": "checkov/dockerfile/checks/WorkdirIsAbsolute.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.dockerfile.base_dockerfile_check import BaseDockerfileCheck\n\nISABSOLUTE = re.compile(\"^\\\"?((/[A-Za-z0-9-_+]*)|([A-Za-z0-9-_+]:\\\\\\\\.*)|(\\\\$[{}A-Za-z0-9-_+].*))\")\n\n\nclass WorkdirIsAbsolute(BaseDockerfileCheck):\n def __init__(self) -> None:\n \"\"\"\n For clarity and reliability, you should always use absolute paths for your WORKDIR.\n \"\"\"\n name = \"Ensure that WORKDIR values are absolute paths\"\n id = \"CKV_DOCKER_10\"\n supported_instructions = (\"WORKDIR\",)\n categories = (CheckCategories.CONVENTION,)\n super().__init__(name=name, id=id, categories=categories, supported_instructions=supported_instructions)\n\n def scan_entity_conf(self, conf: list[dict[str, int | str]]) -> tuple[CheckResult, list[dict[str, int | str]] | None]:\n workdirs = []\n for workdir in conf:\n path = workdir[\"value\"]\n if not re.match(ISABSOLUTE, path):\n workdirs.append(workdir)\n\n if workdirs:\n return CheckResult.FAILED, workdirs\n\n return CheckResult.PASSED, None\n\n\ncheck = WorkdirIsAbsolute()\n", "path": 
"checkov/dockerfile/checks/WorkdirIsAbsolute.py"}]}
| 810 | 210 |
gh_patches_debug_11182 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-24512 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Prefer ffmpeg HLS decoder for v.redd.it
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.11.15*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.11.15**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
### Description of your *issue*, suggested solution and other information
Site v.reddit.com should prefer ffmpeg HLS downloader instead of native. Using the native downloader results in some video corruption and artifacting, e.g. visible within the first few seconds of this source: `youtube-dl https://v.redd.it/poqkxthgcpxz`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/downloader/hls.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import re
4 import binascii
5 try:
6 from Crypto.Cipher import AES
7 can_decrypt_frag = True
8 except ImportError:
9 can_decrypt_frag = False
10
11 from .fragment import FragmentFD
12 from .external import FFmpegFD
13
14 from ..compat import (
15 compat_urllib_error,
16 compat_urlparse,
17 compat_struct_pack,
18 )
19 from ..utils import (
20 parse_m3u8_attributes,
21 update_url_query,
22 )
23
24
25 class HlsFD(FragmentFD):
26 """ A limited implementation that does not require ffmpeg """
27
28 FD_NAME = 'hlsnative'
29
30 @staticmethod
31 def can_download(manifest, info_dict):
32 UNSUPPORTED_FEATURES = (
33 r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]
34 # r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]
35
36 # Live streams heuristic does not always work (e.g. geo restricted to Germany
37 # http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)
38 # r'#EXT-X-MEDIA-SEQUENCE:(?!0$)', # live streams [3]
39
40 # This heuristic also is not correct since segments may not be appended as well.
41 # Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite
42 # no segments will definitely be appended to the end of the playlist.
43 # r'#EXT-X-PLAYLIST-TYPE:EVENT', # media segments may be appended to the end of
44 # # event media playlists [4]
45
46 # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4
47 # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2
48 # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2
49 # 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5
50 )
51 check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]
52 is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest
53 check_results.append(can_decrypt_frag or not is_aes128_enc)
54 check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))
55 check_results.append(not info_dict.get('is_live'))
56 return all(check_results)
57
58 def real_download(self, filename, info_dict):
59 man_url = info_dict['url']
60 self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
61
62 urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
63 man_url = urlh.geturl()
64 s = urlh.read().decode('utf-8', 'ignore')
65
66 if not self.can_download(s, info_dict):
67 if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):
68 self.report_error('pycrypto not found. Please install it.')
69 return False
70 self.report_warning(
71 'hlsnative has detected features it does not support, '
72 'extraction will be delegated to ffmpeg')
73 fd = FFmpegFD(self.ydl, self.params)
74 for ph in self._progress_hooks:
75 fd.add_progress_hook(ph)
76 return fd.real_download(filename, info_dict)
77
78 def is_ad_fragment_start(s):
79 return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
80 or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
81
82 def is_ad_fragment_end(s):
83 return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
84 or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
85
86 media_frags = 0
87 ad_frags = 0
88 ad_frag_next = False
89 for line in s.splitlines():
90 line = line.strip()
91 if not line:
92 continue
93 if line.startswith('#'):
94 if is_ad_fragment_start(line):
95 ad_frag_next = True
96 elif is_ad_fragment_end(line):
97 ad_frag_next = False
98 continue
99 if ad_frag_next:
100 ad_frags += 1
101 continue
102 media_frags += 1
103
104 ctx = {
105 'filename': filename,
106 'total_frags': media_frags,
107 'ad_frags': ad_frags,
108 }
109
110 self._prepare_and_start_frag_download(ctx)
111
112 fragment_retries = self.params.get('fragment_retries', 0)
113 skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
114 test = self.params.get('test', False)
115
116 extra_query = None
117 extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
118 if extra_param_to_segment_url:
119 extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
120 i = 0
121 media_sequence = 0
122 decrypt_info = {'METHOD': 'NONE'}
123 byte_range = {}
124 frag_index = 0
125 ad_frag_next = False
126 for line in s.splitlines():
127 line = line.strip()
128 if line:
129 if not line.startswith('#'):
130 if ad_frag_next:
131 continue
132 frag_index += 1
133 if frag_index <= ctx['fragment_index']:
134 continue
135 frag_url = (
136 line
137 if re.match(r'^https?://', line)
138 else compat_urlparse.urljoin(man_url, line))
139 if extra_query:
140 frag_url = update_url_query(frag_url, extra_query)
141 count = 0
142 headers = info_dict.get('http_headers', {})
143 if byte_range:
144 headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'])
145 while count <= fragment_retries:
146 try:
147 success, frag_content = self._download_fragment(
148 ctx, frag_url, info_dict, headers)
149 if not success:
150 return False
151 break
152 except compat_urllib_error.HTTPError as err:
153 # Unavailable (possibly temporary) fragments may be served.
154 # First we try to retry then either skip or abort.
155 # See https://github.com/ytdl-org/youtube-dl/issues/10165,
156 # https://github.com/ytdl-org/youtube-dl/issues/10448).
157 count += 1
158 if count <= fragment_retries:
159 self.report_retry_fragment(err, frag_index, count, fragment_retries)
160 if count > fragment_retries:
161 if skip_unavailable_fragments:
162 i += 1
163 media_sequence += 1
164 self.report_skip_fragment(frag_index)
165 continue
166 self.report_error(
167 'giving up after %s fragment retries' % fragment_retries)
168 return False
169 if decrypt_info['METHOD'] == 'AES-128':
170 iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)
171 decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(
172 self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()
173 frag_content = AES.new(
174 decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)
175 self._append_fragment(ctx, frag_content)
176 # We only download the first fragment during the test
177 if test:
178 break
179 i += 1
180 media_sequence += 1
181 elif line.startswith('#EXT-X-KEY'):
182 decrypt_url = decrypt_info.get('URI')
183 decrypt_info = parse_m3u8_attributes(line[11:])
184 if decrypt_info['METHOD'] == 'AES-128':
185 if 'IV' in decrypt_info:
186 decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
187 if not re.match(r'^https?://', decrypt_info['URI']):
188 decrypt_info['URI'] = compat_urlparse.urljoin(
189 man_url, decrypt_info['URI'])
190 if extra_query:
191 decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
192 if decrypt_url != decrypt_info['URI']:
193 decrypt_info['KEY'] = None
194 elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):
195 media_sequence = int(line[22:])
196 elif line.startswith('#EXT-X-BYTERANGE'):
197 splitted_byte_range = line[17:].split('@')
198 sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
199 byte_range = {
200 'start': sub_range_start,
201 'end': sub_range_start + int(splitted_byte_range[0]),
202 }
203 elif is_ad_fragment_start(line):
204 ad_frag_next = True
205 elif is_ad_fragment_end(line):
206 ad_frag_next = False
207
208 self._finish_frag_download(ctx)
209
210 return True
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py
--- a/youtube_dl/downloader/hls.py
+++ b/youtube_dl/downloader/hls.py
@@ -141,7 +141,7 @@
count = 0
headers = info_dict.get('http_headers', {})
if byte_range:
- headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'])
+ headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
while count <= fragment_retries:
try:
success, frag_content = self._download_fragment(
|
{"golden_diff": "diff --git a/youtube_dl/downloader/hls.py b/youtube_dl/downloader/hls.py\n--- a/youtube_dl/downloader/hls.py\n+++ b/youtube_dl/downloader/hls.py\n@@ -141,7 +141,7 @@\n count = 0\n headers = info_dict.get('http_headers', {})\n if byte_range:\n- headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'])\n+ headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)\n while count <= fragment_retries:\n try:\n success, frag_content = self._download_fragment(\n", "issue": "Prefer ffmpeg HLS decoder for v.redd.it\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.11.15*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.11.15**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nSite v.reddit.com should prefer ffmpeg HLS downloader instead of native. Using the native downloader results in some video corruption and artifacting, e.g. visible within the first few seconds of this source: `youtube-dl https://v.redd.it/poqkxthgcpxz`\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\nimport binascii\ntry:\n from Crypto.Cipher import AES\n can_decrypt_frag = True\nexcept ImportError:\n can_decrypt_frag = False\n\nfrom .fragment import FragmentFD\nfrom .external import FFmpegFD\n\nfrom ..compat import (\n compat_urllib_error,\n compat_urlparse,\n compat_struct_pack,\n)\nfrom ..utils import (\n parse_m3u8_attributes,\n update_url_query,\n)\n\n\nclass HlsFD(FragmentFD):\n \"\"\" A limited implementation that does not require ffmpeg \"\"\"\n\n FD_NAME = 'hlsnative'\n\n @staticmethod\n def can_download(manifest, info_dict):\n UNSUPPORTED_FEATURES = (\n r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]\n # r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]\n\n # Live streams heuristic does not always work (e.g. geo restricted to Germany\n # http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)\n # r'#EXT-X-MEDIA-SEQUENCE:(?!0$)', # live streams [3]\n\n # This heuristic also is not correct since segments may not be appended as well.\n # Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite\n # no segments will definitely be appended to the end of the playlist.\n # r'#EXT-X-PLAYLIST-TYPE:EVENT', # media segments may be appended to the end of\n # # event media playlists [4]\n\n # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4\n # 2. 
https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2\n # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2\n # 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5\n )\n check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]\n is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest\n check_results.append(can_decrypt_frag or not is_aes128_enc)\n check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))\n check_results.append(not info_dict.get('is_live'))\n return all(check_results)\n\n def real_download(self, filename, info_dict):\n man_url = info_dict['url']\n self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)\n\n urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))\n man_url = urlh.geturl()\n s = urlh.read().decode('utf-8', 'ignore')\n\n if not self.can_download(s, info_dict):\n if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):\n self.report_error('pycrypto not found. Please install it.')\n return False\n self.report_warning(\n 'hlsnative has detected features it does not support, '\n 'extraction will be delegated to ffmpeg')\n fd = FFmpegFD(self.ydl, self.params)\n for ph in self._progress_hooks:\n fd.add_progress_hook(ph)\n return fd.real_download(filename, info_dict)\n\n def is_ad_fragment_start(s):\n return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s\n or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))\n\n def is_ad_fragment_end(s):\n return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s\n or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))\n\n media_frags = 0\n ad_frags = 0\n ad_frag_next = False\n for line in s.splitlines():\n line = line.strip()\n if not line:\n continue\n if line.startswith('#'):\n if is_ad_fragment_start(line):\n ad_frag_next = True\n elif is_ad_fragment_end(line):\n ad_frag_next = False\n continue\n if ad_frag_next:\n ad_frags += 1\n continue\n media_frags += 1\n\n ctx = {\n 'filename': filename,\n 'total_frags': media_frags,\n 'ad_frags': ad_frags,\n }\n\n self._prepare_and_start_frag_download(ctx)\n\n fragment_retries = self.params.get('fragment_retries', 0)\n skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)\n test = self.params.get('test', False)\n\n extra_query = None\n extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')\n if extra_param_to_segment_url:\n extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)\n i = 0\n media_sequence = 0\n decrypt_info = {'METHOD': 'NONE'}\n byte_range = {}\n frag_index = 0\n ad_frag_next = False\n for line in s.splitlines():\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if ad_frag_next:\n continue\n frag_index += 1\n if frag_index <= ctx['fragment_index']:\n continue\n frag_url = (\n line\n if re.match(r'^https?://', line)\n else compat_urlparse.urljoin(man_url, line))\n if extra_query:\n frag_url = update_url_query(frag_url, extra_query)\n count = 0\n headers = info_dict.get('http_headers', {})\n if byte_range:\n headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'])\n while count <= fragment_retries:\n try:\n success, frag_content = self._download_fragment(\n ctx, frag_url, info_dict, headers)\n if not success:\n return False\n break\n except compat_urllib_error.HTTPError as err:\n # Unavailable (possibly temporary) fragments may be served.\n # First we try to retry then 
either skip or abort.\n # See https://github.com/ytdl-org/youtube-dl/issues/10165,\n # https://github.com/ytdl-org/youtube-dl/issues/10448).\n count += 1\n if count <= fragment_retries:\n self.report_retry_fragment(err, frag_index, count, fragment_retries)\n if count > fragment_retries:\n if skip_unavailable_fragments:\n i += 1\n media_sequence += 1\n self.report_skip_fragment(frag_index)\n continue\n self.report_error(\n 'giving up after %s fragment retries' % fragment_retries)\n return False\n if decrypt_info['METHOD'] == 'AES-128':\n iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)\n decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(\n self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()\n frag_content = AES.new(\n decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)\n self._append_fragment(ctx, frag_content)\n # We only download the first fragment during the test\n if test:\n break\n i += 1\n media_sequence += 1\n elif line.startswith('#EXT-X-KEY'):\n decrypt_url = decrypt_info.get('URI')\n decrypt_info = parse_m3u8_attributes(line[11:])\n if decrypt_info['METHOD'] == 'AES-128':\n if 'IV' in decrypt_info:\n decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))\n if not re.match(r'^https?://', decrypt_info['URI']):\n decrypt_info['URI'] = compat_urlparse.urljoin(\n man_url, decrypt_info['URI'])\n if extra_query:\n decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)\n if decrypt_url != decrypt_info['URI']:\n decrypt_info['KEY'] = None\n elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):\n media_sequence = int(line[22:])\n elif line.startswith('#EXT-X-BYTERANGE'):\n splitted_byte_range = line[17:].split('@')\n sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']\n byte_range = {\n 'start': sub_range_start,\n 'end': sub_range_start + int(splitted_byte_range[0]),\n }\n elif is_ad_fragment_start(line):\n ad_frag_next = True\n elif is_ad_fragment_end(line):\n ad_frag_next = False\n\n self._finish_frag_download(ctx)\n\n return True\n", "path": "youtube_dl/downloader/hls.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport re\nimport binascii\ntry:\n from Crypto.Cipher import AES\n can_decrypt_frag = True\nexcept ImportError:\n can_decrypt_frag = False\n\nfrom .fragment import FragmentFD\nfrom .external import FFmpegFD\n\nfrom ..compat import (\n compat_urllib_error,\n compat_urlparse,\n compat_struct_pack,\n)\nfrom ..utils import (\n parse_m3u8_attributes,\n update_url_query,\n)\n\n\nclass HlsFD(FragmentFD):\n \"\"\" A limited implementation that does not require ffmpeg \"\"\"\n\n FD_NAME = 'hlsnative'\n\n @staticmethod\n def can_download(manifest, info_dict):\n UNSUPPORTED_FEATURES = (\n r'#EXT-X-KEY:METHOD=(?!NONE|AES-128)', # encrypted streams [1]\n # r'#EXT-X-BYTERANGE', # playlists composed of byte ranges of media files [2]\n\n # Live streams heuristic does not always work (e.g. 
geo restricted to Germany\n # http://hls-geo.daserste.de/i/videoportal/Film/c_620000/622873/format,716451,716457,716450,716458,716459,.mp4.csmil/index_4_av.m3u8?null=0)\n # r'#EXT-X-MEDIA-SEQUENCE:(?!0$)', # live streams [3]\n\n # This heuristic also is not correct since segments may not be appended as well.\n # Twitch vods of finished streams have EXT-X-PLAYLIST-TYPE:EVENT despite\n # no segments will definitely be appended to the end of the playlist.\n # r'#EXT-X-PLAYLIST-TYPE:EVENT', # media segments may be appended to the end of\n # # event media playlists [4]\n\n # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.4\n # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.2.2\n # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.2\n # 4. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.5\n )\n check_results = [not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES]\n is_aes128_enc = '#EXT-X-KEY:METHOD=AES-128' in manifest\n check_results.append(can_decrypt_frag or not is_aes128_enc)\n check_results.append(not (is_aes128_enc and r'#EXT-X-BYTERANGE' in manifest))\n check_results.append(not info_dict.get('is_live'))\n return all(check_results)\n\n def real_download(self, filename, info_dict):\n man_url = info_dict['url']\n self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)\n\n urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))\n man_url = urlh.geturl()\n s = urlh.read().decode('utf-8', 'ignore')\n\n if not self.can_download(s, info_dict):\n if info_dict.get('extra_param_to_segment_url') or info_dict.get('_decryption_key_url'):\n self.report_error('pycrypto not found. Please install it.')\n return False\n self.report_warning(\n 'hlsnative has detected features it does not support, '\n 'extraction will be delegated to ffmpeg')\n fd = FFmpegFD(self.ydl, self.params)\n for ph in self._progress_hooks:\n fd.add_progress_hook(ph)\n return fd.real_download(filename, info_dict)\n\n def is_ad_fragment_start(s):\n return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s\n or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))\n\n def is_ad_fragment_end(s):\n return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s\n or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))\n\n media_frags = 0\n ad_frags = 0\n ad_frag_next = False\n for line in s.splitlines():\n line = line.strip()\n if not line:\n continue\n if line.startswith('#'):\n if is_ad_fragment_start(line):\n ad_frag_next = True\n elif is_ad_fragment_end(line):\n ad_frag_next = False\n continue\n if ad_frag_next:\n ad_frags += 1\n continue\n media_frags += 1\n\n ctx = {\n 'filename': filename,\n 'total_frags': media_frags,\n 'ad_frags': ad_frags,\n }\n\n self._prepare_and_start_frag_download(ctx)\n\n fragment_retries = self.params.get('fragment_retries', 0)\n skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)\n test = self.params.get('test', False)\n\n extra_query = None\n extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')\n if extra_param_to_segment_url:\n extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)\n i = 0\n media_sequence = 0\n decrypt_info = {'METHOD': 'NONE'}\n byte_range = {}\n frag_index = 0\n ad_frag_next = False\n for line in s.splitlines():\n line = line.strip()\n if line:\n if not line.startswith('#'):\n if ad_frag_next:\n continue\n frag_index += 1\n if frag_index <= 
ctx['fragment_index']:\n continue\n frag_url = (\n line\n if re.match(r'^https?://', line)\n else compat_urlparse.urljoin(man_url, line))\n if extra_query:\n frag_url = update_url_query(frag_url, extra_query)\n count = 0\n headers = info_dict.get('http_headers', {})\n if byte_range:\n headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)\n while count <= fragment_retries:\n try:\n success, frag_content = self._download_fragment(\n ctx, frag_url, info_dict, headers)\n if not success:\n return False\n break\n except compat_urllib_error.HTTPError as err:\n # Unavailable (possibly temporary) fragments may be served.\n # First we try to retry then either skip or abort.\n # See https://github.com/ytdl-org/youtube-dl/issues/10165,\n # https://github.com/ytdl-org/youtube-dl/issues/10448).\n count += 1\n if count <= fragment_retries:\n self.report_retry_fragment(err, frag_index, count, fragment_retries)\n if count > fragment_retries:\n if skip_unavailable_fragments:\n i += 1\n media_sequence += 1\n self.report_skip_fragment(frag_index)\n continue\n self.report_error(\n 'giving up after %s fragment retries' % fragment_retries)\n return False\n if decrypt_info['METHOD'] == 'AES-128':\n iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', media_sequence)\n decrypt_info['KEY'] = decrypt_info.get('KEY') or self.ydl.urlopen(\n self._prepare_url(info_dict, info_dict.get('_decryption_key_url') or decrypt_info['URI'])).read()\n frag_content = AES.new(\n decrypt_info['KEY'], AES.MODE_CBC, iv).decrypt(frag_content)\n self._append_fragment(ctx, frag_content)\n # We only download the first fragment during the test\n if test:\n break\n i += 1\n media_sequence += 1\n elif line.startswith('#EXT-X-KEY'):\n decrypt_url = decrypt_info.get('URI')\n decrypt_info = parse_m3u8_attributes(line[11:])\n if decrypt_info['METHOD'] == 'AES-128':\n if 'IV' in decrypt_info:\n decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))\n if not re.match(r'^https?://', decrypt_info['URI']):\n decrypt_info['URI'] = compat_urlparse.urljoin(\n man_url, decrypt_info['URI'])\n if extra_query:\n decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)\n if decrypt_url != decrypt_info['URI']:\n decrypt_info['KEY'] = None\n elif line.startswith('#EXT-X-MEDIA-SEQUENCE'):\n media_sequence = int(line[22:])\n elif line.startswith('#EXT-X-BYTERANGE'):\n splitted_byte_range = line[17:].split('@')\n sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']\n byte_range = {\n 'start': sub_range_start,\n 'end': sub_range_start + int(splitted_byte_range[0]),\n }\n elif is_ad_fragment_start(line):\n ad_frag_next = True\n elif is_ad_fragment_end(line):\n ad_frag_next = False\n\n self._finish_frag_download(ctx)\n\n return True\n", "path": "youtube_dl/downloader/hls.py"}]}
| 3,288 | 158 |
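
The one-line fix in the diff above subtracts 1 from `byte_range['end']` because `#EXT-X-BYTERANGE` is given as `length@offset`: the downloader stores an exclusive end (`start + length`), while an HTTP `Range` header names its last byte inclusively, so requesting `bytes=start-end` would fetch one byte too many. A minimal sketch of the arithmetic; the sample length and offset are made-up values, not taken from the record:

```python
# "#EXT-X-BYTERANGE:100@200" means 100 bytes starting at offset 200.
length, offset = 100, 200                                  # assumed sample values
byte_range = {'start': offset, 'end': offset + length}     # 'end' is exclusive (300)
# An HTTP Range header is inclusive of its last byte, hence the "- 1" in the patch.
range_header = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
assert range_header == 'bytes=200-299'                     # exactly 100 bytes
```
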
gh_patches_debug_2508
|
rasdani/github-patches
|
git_diff
|
coala__coala-6088
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Small typo in coalib/output/printers/LogPrinter.py
Should read responsibility instead of reponsibility.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `coalib/output/printers/LogPrinter.py`
Content:
```
1 import traceback
2 import logging
3
4 from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
5 from coalib.processes.communication.LogMessage import LogMessage
6
7
8 class LogPrinterMixin:
9 """
10 Provides access to the logging interfaces (e.g. err, warn, info) by routing
11 them to the log_message method, which should be implemented by descendants
12 of this class.
13 """
14
15 def debug(self, *messages, delimiter=' ', timestamp=None, **kwargs):
16 self.log_message(LogMessage(LOG_LEVEL.DEBUG,
17 *messages,
18 delimiter=delimiter,
19 timestamp=timestamp),
20 **kwargs)
21
22 def info(self, *messages, delimiter=' ', timestamp=None, **kwargs):
23 self.log_message(LogMessage(LOG_LEVEL.INFO,
24 *messages,
25 delimiter=delimiter,
26 timestamp=timestamp),
27 **kwargs)
28
29 def warn(self, *messages, delimiter=' ', timestamp=None, **kwargs):
30 self.log_message(LogMessage(LOG_LEVEL.WARNING,
31 *messages,
32 delimiter=delimiter,
33 timestamp=timestamp),
34 **kwargs)
35
36 def err(self, *messages, delimiter=' ', timestamp=None, **kwargs):
37 self.log_message(LogMessage(LOG_LEVEL.ERROR,
38 *messages,
39 delimiter=delimiter,
40 timestamp=timestamp),
41 **kwargs)
42
43 def log(self, log_level, message, timestamp=None, **kwargs):
44 self.log_message(LogMessage(log_level,
45 message,
46 timestamp=timestamp),
47 **kwargs)
48
49 def log_exception(self,
50 message,
51 exception,
52 log_level=LOG_LEVEL.ERROR,
53 timestamp=None,
54 **kwargs):
55 """
56 If the log_level of the printer is greater than DEBUG, it prints
57 only the message. If it is DEBUG or lower, it shows the message
58 along with the traceback of the exception.
59
60 :param message: The message to print.
61 :param exception: The exception to print.
62 :param log_level: The log_level of this message (not used when
63 logging the traceback. Tracebacks always have
64 a level of DEBUG).
65 :param timestamp: The time at which this log occurred. Defaults to
66 the current time.
67 :param kwargs: Keyword arguments to be passed when logging the
68 message (not used when logging the traceback).
69 """
70 if not isinstance(exception, BaseException):
71 raise TypeError('log_exception can only log derivatives of '
72 'BaseException.')
73
74 traceback_str = '\n'.join(
75 traceback.format_exception(type(exception),
76 exception,
77 exception.__traceback__))
78
79 self.log(log_level, message, timestamp=timestamp, **kwargs)
80 self.log_message(
81 LogMessage(LOG_LEVEL.INFO,
82 'Exception was:' + '\n' + traceback_str,
83 timestamp=timestamp),
84 **kwargs)
85
86 def log_message(self, log_message, **kwargs):
87 """
88 It is your reponsibility to implement this method, if you're using this
89 mixin.
90 """
91 raise NotImplementedError
92
93
94 class LogPrinter(LogPrinterMixin):
95 """
96 This class is deprecated and will be soon removed. To get logger use
97 logging.getLogger(__name__). Make sure that you're getting it when the
98 logging configuration is loaded.
99
100 The LogPrinter class allows to print log messages to an underlying Printer.
101
102 This class is an adapter, means you can create a LogPrinter from every
103 existing Printer instance.
104 """
105
106 def __init__(self,
107 printer=None,
108 log_level=LOG_LEVEL.DEBUG,
109 timestamp_format='%X'):
110 """
111 Creates a new log printer from an existing Printer.
112
113 :param printer: The underlying Printer where log messages
114 shall be written to. If you inherit from
115 LogPrinter, set it to self.
116 :param log_level: The minimum log level, everything below will
117 not be logged.
118 :param timestamp_format: The format string for the
119 datetime.today().strftime(format) method.
120 """
121 self.logger = logging.getLogger()
122
123 self._printer = printer
124 self.log_level = log_level
125 self.timestamp_format = timestamp_format
126
127 @property
128 def log_level(self):
129 """
130 Returns current log_level used in logger.
131 """
132 return self.logger.getEffectiveLevel()
133
134 @log_level.setter
135 def log_level(self, log_level):
136 """
137 Sets log_level for logger.
138 """
139 self.logger.setLevel(log_level)
140
141 @property
142 def printer(self):
143 """
144 Returns the underlying printer where logs are printed to.
145 """
146 return self._printer
147
148 def log_message(self, log_message, **kwargs):
149 if not isinstance(log_message, LogMessage):
150 raise TypeError('log_message should be of type LogMessage.')
151 self.logger.log(log_message.log_level, log_message.message)
152
153 def __getstate__(self):
154 # on Windows there are problems with serializing loggers, so omit it
155 oldict = self.__dict__.copy()
156 del oldict['logger']
157 return oldict
158
159 def __setstate__(self, newdict):
160 self.__dict__.update(newdict)
161 # restore logger by name
162 self.logger = logging.getLogger()
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/coalib/output/printers/LogPrinter.py b/coalib/output/printers/LogPrinter.py
--- a/coalib/output/printers/LogPrinter.py
+++ b/coalib/output/printers/LogPrinter.py
@@ -85,7 +85,7 @@
def log_message(self, log_message, **kwargs):
"""
- It is your reponsibility to implement this method, if you're using this
+ It is your responsibility to implement this method, if you're using this
mixin.
"""
raise NotImplementedError
|
{"golden_diff": "diff --git a/coalib/output/printers/LogPrinter.py b/coalib/output/printers/LogPrinter.py\n--- a/coalib/output/printers/LogPrinter.py\n+++ b/coalib/output/printers/LogPrinter.py\n@@ -85,7 +85,7 @@\n \n def log_message(self, log_message, **kwargs):\n \"\"\"\n- It is your reponsibility to implement this method, if you're using this\n+ It is your responsibility to implement this method, if you're using this\n mixin.\n \"\"\"\n raise NotImplementedError\n", "issue": "Small typo in coalib/output/printers/LogPrinter.py\nShould read responsibility instead of reponsibility.\r\n\n", "before_files": [{"content": "import traceback\nimport logging\n\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL\nfrom coalib.processes.communication.LogMessage import LogMessage\n\n\nclass LogPrinterMixin:\n \"\"\"\n Provides access to the logging interfaces (e.g. err, warn, info) by routing\n them to the log_message method, which should be implemented by descendants\n of this class.\n \"\"\"\n\n def debug(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.DEBUG,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def info(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.INFO,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def warn(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.WARNING,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def err(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.ERROR,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def log(self, log_level, message, timestamp=None, **kwargs):\n self.log_message(LogMessage(log_level,\n message,\n timestamp=timestamp),\n **kwargs)\n\n def log_exception(self,\n message,\n exception,\n log_level=LOG_LEVEL.ERROR,\n timestamp=None,\n **kwargs):\n \"\"\"\n If the log_level of the printer is greater than DEBUG, it prints\n only the message. If it is DEBUG or lower, it shows the message\n along with the traceback of the exception.\n\n :param message: The message to print.\n :param exception: The exception to print.\n :param log_level: The log_level of this message (not used when\n logging the traceback. Tracebacks always have\n a level of DEBUG).\n :param timestamp: The time at which this log occurred. Defaults to\n the current time.\n :param kwargs: Keyword arguments to be passed when logging the\n message (not used when logging the traceback).\n \"\"\"\n if not isinstance(exception, BaseException):\n raise TypeError('log_exception can only log derivatives of '\n 'BaseException.')\n\n traceback_str = '\\n'.join(\n traceback.format_exception(type(exception),\n exception,\n exception.__traceback__))\n\n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n LogMessage(LOG_LEVEL.INFO,\n 'Exception was:' + '\\n' + traceback_str,\n timestamp=timestamp),\n **kwargs)\n\n def log_message(self, log_message, **kwargs):\n \"\"\"\n It is your reponsibility to implement this method, if you're using this\n mixin.\n \"\"\"\n raise NotImplementedError\n\n\nclass LogPrinter(LogPrinterMixin):\n \"\"\"\n This class is deprecated and will be soon removed. To get logger use\n logging.getLogger(__name__). 
Make sure that you're getting it when the\n logging configuration is loaded.\n\n The LogPrinter class allows to print log messages to an underlying Printer.\n\n This class is an adapter, means you can create a LogPrinter from every\n existing Printer instance.\n \"\"\"\n\n def __init__(self,\n printer=None,\n log_level=LOG_LEVEL.DEBUG,\n timestamp_format='%X'):\n \"\"\"\n Creates a new log printer from an existing Printer.\n\n :param printer: The underlying Printer where log messages\n shall be written to. If you inherit from\n LogPrinter, set it to self.\n :param log_level: The minimum log level, everything below will\n not be logged.\n :param timestamp_format: The format string for the\n datetime.today().strftime(format) method.\n \"\"\"\n self.logger = logging.getLogger()\n\n self._printer = printer\n self.log_level = log_level\n self.timestamp_format = timestamp_format\n\n @property\n def log_level(self):\n \"\"\"\n Returns current log_level used in logger.\n \"\"\"\n return self.logger.getEffectiveLevel()\n\n @log_level.setter\n def log_level(self, log_level):\n \"\"\"\n Sets log_level for logger.\n \"\"\"\n self.logger.setLevel(log_level)\n\n @property\n def printer(self):\n \"\"\"\n Returns the underlying printer where logs are printed to.\n \"\"\"\n return self._printer\n\n def log_message(self, log_message, **kwargs):\n if not isinstance(log_message, LogMessage):\n raise TypeError('log_message should be of type LogMessage.')\n self.logger.log(log_message.log_level, log_message.message)\n\n def __getstate__(self):\n # on Windows there are problems with serializing loggers, so omit it\n oldict = self.__dict__.copy()\n del oldict['logger']\n return oldict\n\n def __setstate__(self, newdict):\n self.__dict__.update(newdict)\n # restore logger by name\n self.logger = logging.getLogger()\n", "path": "coalib/output/printers/LogPrinter.py"}], "after_files": [{"content": "import traceback\nimport logging\n\nfrom coalib.output.printers.LOG_LEVEL import LOG_LEVEL\nfrom coalib.processes.communication.LogMessage import LogMessage\n\n\nclass LogPrinterMixin:\n \"\"\"\n Provides access to the logging interfaces (e.g. err, warn, info) by routing\n them to the log_message method, which should be implemented by descendants\n of this class.\n \"\"\"\n\n def debug(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.DEBUG,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def info(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.INFO,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def warn(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.WARNING,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def err(self, *messages, delimiter=' ', timestamp=None, **kwargs):\n self.log_message(LogMessage(LOG_LEVEL.ERROR,\n *messages,\n delimiter=delimiter,\n timestamp=timestamp),\n **kwargs)\n\n def log(self, log_level, message, timestamp=None, **kwargs):\n self.log_message(LogMessage(log_level,\n message,\n timestamp=timestamp),\n **kwargs)\n\n def log_exception(self,\n message,\n exception,\n log_level=LOG_LEVEL.ERROR,\n timestamp=None,\n **kwargs):\n \"\"\"\n If the log_level of the printer is greater than DEBUG, it prints\n only the message. 
If it is DEBUG or lower, it shows the message\n along with the traceback of the exception.\n\n :param message: The message to print.\n :param exception: The exception to print.\n :param log_level: The log_level of this message (not used when\n logging the traceback. Tracebacks always have\n a level of DEBUG).\n :param timestamp: The time at which this log occurred. Defaults to\n the current time.\n :param kwargs: Keyword arguments to be passed when logging the\n message (not used when logging the traceback).\n \"\"\"\n if not isinstance(exception, BaseException):\n raise TypeError('log_exception can only log derivatives of '\n 'BaseException.')\n\n traceback_str = '\\n'.join(\n traceback.format_exception(type(exception),\n exception,\n exception.__traceback__))\n\n self.log(log_level, message, timestamp=timestamp, **kwargs)\n self.log_message(\n LogMessage(LOG_LEVEL.INFO,\n 'Exception was:' + '\\n' + traceback_str,\n timestamp=timestamp),\n **kwargs)\n\n def log_message(self, log_message, **kwargs):\n \"\"\"\n It is your responsibility to implement this method, if you're using this\n mixin.\n \"\"\"\n raise NotImplementedError\n\n\nclass LogPrinter(LogPrinterMixin):\n \"\"\"\n This class is deprecated and will be soon removed. To get logger use\n logging.getLogger(__name__). Make sure that you're getting it when the\n logging configuration is loaded.\n\n The LogPrinter class allows to print log messages to an underlying Printer.\n\n This class is an adapter, means you can create a LogPrinter from every\n existing Printer instance.\n \"\"\"\n\n def __init__(self,\n printer=None,\n log_level=LOG_LEVEL.DEBUG,\n timestamp_format='%X'):\n \"\"\"\n Creates a new log printer from an existing Printer.\n\n :param printer: The underlying Printer where log messages\n shall be written to. If you inherit from\n LogPrinter, set it to self.\n :param log_level: The minimum log level, everything below will\n not be logged.\n :param timestamp_format: The format string for the\n datetime.today().strftime(format) method.\n \"\"\"\n self.logger = logging.getLogger()\n\n self._printer = printer\n self.log_level = log_level\n self.timestamp_format = timestamp_format\n\n @property\n def log_level(self):\n \"\"\"\n Returns current log_level used in logger.\n \"\"\"\n return self.logger.getEffectiveLevel()\n\n @log_level.setter\n def log_level(self, log_level):\n \"\"\"\n Sets log_level for logger.\n \"\"\"\n self.logger.setLevel(log_level)\n\n @property\n def printer(self):\n \"\"\"\n Returns the underlying printer where logs are printed to.\n \"\"\"\n return self._printer\n\n def log_message(self, log_message, **kwargs):\n if not isinstance(log_message, LogMessage):\n raise TypeError('log_message should be of type LogMessage.')\n self.logger.log(log_message.log_level, log_message.message)\n\n def __getstate__(self):\n # on Windows there are problems with serializing loggers, so omit it\n oldict = self.__dict__.copy()\n del oldict['logger']\n return oldict\n\n def __setstate__(self, newdict):\n self.__dict__.update(newdict)\n # restore logger by name\n self.logger = logging.getLogger()\n", "path": "coalib/output/printers/LogPrinter.py"}]}
| 1,759 | 124 |
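
The `LogPrinterMixin` in the record above routes `debug`/`info`/`warn`/`err` through `log_message`, which its docstring leaves to the subclass. A minimal sketch of such a subclass, assuming only what the record itself shows (a `LogMessage` carrying `log_level` and `message` attributes); the class name and the choice of printing to stdout are hypothetical:

```python
from coalib.output.printers.LogPrinter import LogPrinterMixin


class StdoutLogPrinter(LogPrinterMixin):
    """Hypothetical example: satisfies the mixin contract by printing to stdout."""

    def log_message(self, log_message, **kwargs):
        # LogMessage exposes log_level and message, the same attributes
        # LogPrinter.log_message above hands to the stdlib logger.
        print('[{}] {}'.format(log_message.log_level, log_message.message))


printer = StdoutLogPrinter()
printer.warn('example warning')  # dispatched through log_message
```
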
gh_patches_debug_32548
|
rasdani/github-patches
|
git_diff
|
pypa__pip-6313
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pip processes corrupt packages containing using ../ chars placing files outside target directory
- Pip version: tested on pip versions 6.0.8 and 8.0.2,
- Python version: 2.7
- Operating System: Linux / Windows
### Description:
pip processes invalid packages .tar/.zip archives containing "../" directory traversal chars and places package files outside of target directories even after pip complains and throws errors and aborts the install.
Pip processes corrupt packages containing using ../ chars placing files outside target directory
- Pip version: tested on pip versions 6.0.8 and 8.0.2,
- Python version: 2.7
- Operating System: Linux / Windows
### Description:
pip processes invalid packages .tar/.zip archives containing "../" directory traversal chars and places package files outside of target directories even after pip complains and throws errors and aborts the install.
--- END ISSUE ---
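
The traversal described above can be blocked by checking each archive member's resolved path against the extraction root before writing anything; the golden diff further down this record adds an `is_within_directory()` helper of this shape. A minimal standalone sketch (the POSIX-style example paths are assumptions for illustration, not taken from the issue):

```python
import os


def is_within_directory(directory, target):
    # Accept a member only if the extraction root is a path prefix of the
    # member's absolute, normalized location.
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    return os.path.commonprefix([abs_directory, abs_target]) == abs_directory


assert not is_within_directory('/tmp/build', '/tmp/build/../../etc/passwd')
assert is_within_directory('/tmp/build', '/tmp/build/pkg/setup.py')
```
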
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/utils/unpacking.py`
Content:
```
1 """Utilities related archives.
2 """
3
4 # The following comment should be removed at some point in the future.
5 # mypy: strict-optional=False
6
7 from __future__ import absolute_import
8
9 import logging
10 import os
11 import shutil
12 import stat
13 import tarfile
14 import zipfile
15
16 from pip._internal.exceptions import InstallationError
17 from pip._internal.utils.filetypes import (
18 BZ2_EXTENSIONS,
19 TAR_EXTENSIONS,
20 XZ_EXTENSIONS,
21 ZIP_EXTENSIONS,
22 )
23 from pip._internal.utils.misc import ensure_dir
24 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
25
26 if MYPY_CHECK_RUNNING:
27 from typing import Iterable, List, Optional, Text, Union
28
29
30 logger = logging.getLogger(__name__)
31
32
33 SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
34
35 try:
36 import bz2 # noqa
37 SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
38 except ImportError:
39 logger.debug('bz2 module is not available')
40
41 try:
42 # Only for Python 3.3+
43 import lzma # noqa
44 SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
45 except ImportError:
46 logger.debug('lzma module is not available')
47
48
49 def current_umask():
50 """Get the current umask which involves having to set it temporarily."""
51 mask = os.umask(0)
52 os.umask(mask)
53 return mask
54
55
56 def split_leading_dir(path):
57 # type: (Union[str, Text]) -> List[Union[str, Text]]
58 path = path.lstrip('/').lstrip('\\')
59 if (
60 '/' in path and (
61 ('\\' in path and path.find('/') < path.find('\\')) or
62 '\\' not in path
63 )
64 ):
65 return path.split('/', 1)
66 elif '\\' in path:
67 return path.split('\\', 1)
68 else:
69 return [path, '']
70
71
72 def has_leading_dir(paths):
73 # type: (Iterable[Union[str, Text]]) -> bool
74 """Returns true if all the paths have the same leading path name
75 (i.e., everything is in one subdirectory in an archive)"""
76 common_prefix = None
77 for path in paths:
78 prefix, rest = split_leading_dir(path)
79 if not prefix:
80 return False
81 elif common_prefix is None:
82 common_prefix = prefix
83 elif prefix != common_prefix:
84 return False
85 return True
86
87
88 def unzip_file(filename, location, flatten=True):
89 # type: (str, str, bool) -> None
90 """
91 Unzip the file (with path `filename`) to the destination `location`. All
92 files are written based on system defaults and umask (i.e. permissions are
93 not preserved), except that regular file members with any execute
94 permissions (user, group, or world) have "chmod +x" applied after being
95 written. Note that for windows, any execute changes using os.chmod are
96 no-ops per the python docs.
97 """
98 ensure_dir(location)
99 zipfp = open(filename, 'rb')
100 try:
101 zip = zipfile.ZipFile(zipfp, allowZip64=True)
102 leading = has_leading_dir(zip.namelist()) and flatten
103 for info in zip.infolist():
104 name = info.filename
105 fn = name
106 if leading:
107 fn = split_leading_dir(name)[1]
108 fn = os.path.join(location, fn)
109 dir = os.path.dirname(fn)
110 if fn.endswith('/') or fn.endswith('\\'):
111 # A directory
112 ensure_dir(fn)
113 else:
114 ensure_dir(dir)
115 # Don't use read() to avoid allocating an arbitrarily large
116 # chunk of memory for the file's content
117 fp = zip.open(name)
118 try:
119 with open(fn, 'wb') as destfp:
120 shutil.copyfileobj(fp, destfp)
121 finally:
122 fp.close()
123 mode = info.external_attr >> 16
124 # if mode and regular file and any execute permissions for
125 # user/group/world?
126 if mode and stat.S_ISREG(mode) and mode & 0o111:
127 # make dest file have execute for user/group/world
128 # (chmod +x) no-op on windows per python docs
129 os.chmod(fn, (0o777 - current_umask() | 0o111))
130 finally:
131 zipfp.close()
132
133
134 def untar_file(filename, location):
135 # type: (str, str) -> None
136 """
137 Untar the file (with path `filename`) to the destination `location`.
138 All files are written based on system defaults and umask (i.e. permissions
139 are not preserved), except that regular file members with any execute
140 permissions (user, group, or world) have "chmod +x" applied after being
141 written. Note that for windows, any execute changes using os.chmod are
142 no-ops per the python docs.
143 """
144 ensure_dir(location)
145 if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
146 mode = 'r:gz'
147 elif filename.lower().endswith(BZ2_EXTENSIONS):
148 mode = 'r:bz2'
149 elif filename.lower().endswith(XZ_EXTENSIONS):
150 mode = 'r:xz'
151 elif filename.lower().endswith('.tar'):
152 mode = 'r'
153 else:
154 logger.warning(
155 'Cannot determine compression type for file %s', filename,
156 )
157 mode = 'r:*'
158 tar = tarfile.open(filename, mode)
159 try:
160 leading = has_leading_dir([
161 member.name for member in tar.getmembers()
162 ])
163 for member in tar.getmembers():
164 fn = member.name
165 if leading:
166 # https://github.com/python/mypy/issues/1174
167 fn = split_leading_dir(fn)[1] # type: ignore
168 path = os.path.join(location, fn)
169 if member.isdir():
170 ensure_dir(path)
171 elif member.issym():
172 try:
173 # https://github.com/python/typeshed/issues/2673
174 tar._extract_member(member, path) # type: ignore
175 except Exception as exc:
176 # Some corrupt tar files seem to produce this
177 # (specifically bad symlinks)
178 logger.warning(
179 'In the tar file %s the member %s is invalid: %s',
180 filename, member.name, exc,
181 )
182 continue
183 else:
184 try:
185 fp = tar.extractfile(member)
186 except (KeyError, AttributeError) as exc:
187 # Some corrupt tar files seem to produce this
188 # (specifically bad symlinks)
189 logger.warning(
190 'In the tar file %s the member %s is invalid: %s',
191 filename, member.name, exc,
192 )
193 continue
194 ensure_dir(os.path.dirname(path))
195 with open(path, 'wb') as destfp:
196 shutil.copyfileobj(fp, destfp)
197 fp.close()
198 # Update the timestamp (useful for cython compiled files)
199 # https://github.com/python/typeshed/issues/2673
200 tar.utime(member, path) # type: ignore
201 # member have any execute permissions for user/group/world?
202 if member.mode & 0o111:
203 # make dest file have execute for user/group/world
204 # no-op on windows per python docs
205 os.chmod(path, (0o777 - current_umask() | 0o111))
206 finally:
207 tar.close()
208
209
210 def unpack_file(
211 filename, # type: str
212 location, # type: str
213 content_type=None, # type: Optional[str]
214 ):
215 # type: (...) -> None
216 filename = os.path.realpath(filename)
217 if (
218 content_type == 'application/zip' or
219 filename.lower().endswith(ZIP_EXTENSIONS) or
220 zipfile.is_zipfile(filename)
221 ):
222 unzip_file(
223 filename,
224 location,
225 flatten=not filename.endswith('.whl')
226 )
227 elif (
228 content_type == 'application/x-gzip' or
229 tarfile.is_tarfile(filename) or
230 filename.lower().endswith(
231 TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS
232 )
233 ):
234 untar_file(filename, location)
235 else:
236 # FIXME: handle?
237 # FIXME: magic signatures?
238 logger.critical(
239 'Cannot unpack file %s (downloaded from %s, content-type: %s); '
240 'cannot detect archive format',
241 filename, location, content_type,
242 )
243 raise InstallationError(
244 'Cannot determine archive format of {}'.format(location)
245 )
246
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/utils/unpacking.py b/src/pip/_internal/utils/unpacking.py
--- a/src/pip/_internal/utils/unpacking.py
+++ b/src/pip/_internal/utils/unpacking.py
@@ -85,6 +85,18 @@
return True
+def is_within_directory(directory, target):
+ # type: ((Union[str, Text]), (Union[str, Text])) -> bool
+ """
+ Return true if the absolute path of target is within the directory
+ """
+ abs_directory = os.path.abspath(directory)
+ abs_target = os.path.abspath(target)
+
+ prefix = os.path.commonprefix([abs_directory, abs_target])
+ return prefix == abs_directory
+
+
def unzip_file(filename, location, flatten=True):
# type: (str, str, bool) -> None
"""
@@ -107,6 +119,12 @@
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
+ if not is_within_directory(location, fn):
+ message = (
+ 'The zip file ({}) has a file ({}) trying to install '
+ 'outside target directory ({})'
+ )
+ raise InstallationError(message.format(filename, fn, location))
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
@@ -166,6 +184,14 @@
# https://github.com/python/mypy/issues/1174
fn = split_leading_dir(fn)[1] # type: ignore
path = os.path.join(location, fn)
+ if not is_within_directory(location, path):
+ message = (
+ 'The tar file ({}) has a file ({}) trying to install '
+ 'outside target directory ({})'
+ )
+ raise InstallationError(
+ message.format(filename, path, location)
+ )
if member.isdir():
ensure_dir(path)
elif member.issym():
|
{"golden_diff": "diff --git a/src/pip/_internal/utils/unpacking.py b/src/pip/_internal/utils/unpacking.py\n--- a/src/pip/_internal/utils/unpacking.py\n+++ b/src/pip/_internal/utils/unpacking.py\n@@ -85,6 +85,18 @@\n return True\n \n \n+def is_within_directory(directory, target):\n+ # type: ((Union[str, Text]), (Union[str, Text])) -> bool\n+ \"\"\"\n+ Return true if the absolute path of target is within the directory\n+ \"\"\"\n+ abs_directory = os.path.abspath(directory)\n+ abs_target = os.path.abspath(target)\n+\n+ prefix = os.path.commonprefix([abs_directory, abs_target])\n+ return prefix == abs_directory\n+\n+\n def unzip_file(filename, location, flatten=True):\n # type: (str, str, bool) -> None\n \"\"\"\n@@ -107,6 +119,12 @@\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n+ if not is_within_directory(location, fn):\n+ message = (\n+ 'The zip file ({}) has a file ({}) trying to install '\n+ 'outside target directory ({})'\n+ )\n+ raise InstallationError(message.format(filename, fn, location))\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n ensure_dir(fn)\n@@ -166,6 +184,14 @@\n # https://github.com/python/mypy/issues/1174\n fn = split_leading_dir(fn)[1] # type: ignore\n path = os.path.join(location, fn)\n+ if not is_within_directory(location, path):\n+ message = (\n+ 'The tar file ({}) has a file ({}) trying to install '\n+ 'outside target directory ({})'\n+ )\n+ raise InstallationError(\n+ message.format(filename, path, location)\n+ )\n if member.isdir():\n ensure_dir(path)\n elif member.issym():\n", "issue": "Pip processes corrupt packages containing using ../ chars placing files outside target directory\n- Pip version: tested on pip versions 6.0.8 and 8.0.2,\n- Python version: 2.7\n- Operating System: Linux / Windows\n### Description:\n\npip processes invalid packages .tar/.zip archives containing \"../\" directory traversal chars and places package files outside of target directories even after pip complains and throws errors and aborts the install.\n\nPip processes corrupt packages containing using ../ chars placing files outside target directory\n- Pip version: tested on pip versions 6.0.8 and 8.0.2,\n- Python version: 2.7\n- Operating System: Linux / Windows\n### Description:\n\npip processes invalid packages .tar/.zip archives containing \"../\" directory traversal chars and places package files outside of target directories even after pip complains and throws errors and aborts the install.\n\n", "before_files": [{"content": "\"\"\"Utilities related archives.\n\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: strict-optional=False\n\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nimport shutil\nimport stat\nimport tarfile\nimport zipfile\n\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.utils.filetypes import (\n BZ2_EXTENSIONS,\n TAR_EXTENSIONS,\n XZ_EXTENSIONS,\n ZIP_EXTENSIONS,\n)\nfrom pip._internal.utils.misc import ensure_dir\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from typing import Iterable, List, Optional, Text, Union\n\n\nlogger = logging.getLogger(__name__)\n\n\nSUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS\n\ntry:\n import bz2 # noqa\n SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS\nexcept ImportError:\n logger.debug('bz2 module is not available')\n\ntry:\n # Only for Python 3.3+\n import lzma # noqa\n SUPPORTED_EXTENSIONS += XZ_EXTENSIONS\nexcept ImportError:\n 
logger.debug('lzma module is not available')\n\n\ndef current_umask():\n \"\"\"Get the current umask which involves having to set it temporarily.\"\"\"\n mask = os.umask(0)\n os.umask(mask)\n return mask\n\n\ndef split_leading_dir(path):\n # type: (Union[str, Text]) -> List[Union[str, Text]]\n path = path.lstrip('/').lstrip('\\\\')\n if (\n '/' in path and (\n ('\\\\' in path and path.find('/') < path.find('\\\\')) or\n '\\\\' not in path\n )\n ):\n return path.split('/', 1)\n elif '\\\\' in path:\n return path.split('\\\\', 1)\n else:\n return [path, '']\n\n\ndef has_leading_dir(paths):\n # type: (Iterable[Union[str, Text]]) -> bool\n \"\"\"Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)\"\"\"\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True\n\n\ndef unzip_file(filename, location, flatten=True):\n # type: (str, str, bool) -> None\n \"\"\"\n Unzip the file (with path `filename`) to the destination `location`. All\n files are written based on system defaults and umask (i.e. permissions are\n not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp, allowZip64=True)\n leading = has_leading_dir(zip.namelist()) and flatten\n for info in zip.infolist():\n name = info.filename\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n ensure_dir(fn)\n else:\n ensure_dir(dir)\n # Don't use read() to avoid allocating an arbitrarily large\n # chunk of memory for the file's content\n fp = zip.open(name)\n try:\n with open(fn, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n finally:\n fp.close()\n mode = info.external_attr >> 16\n # if mode and regular file and any execute permissions for\n # user/group/world?\n if mode and stat.S_ISREG(mode) and mode & 0o111:\n # make dest file have execute for user/group/world\n # (chmod +x) no-op on windows per python docs\n os.chmod(fn, (0o777 - current_umask() | 0o111))\n finally:\n zipfp.close()\n\n\ndef untar_file(filename, location):\n # type: (str, str) -> None\n \"\"\"\n Untar the file (with path `filename`) to the destination `location`.\n All files are written based on system defaults and umask (i.e. permissions\n are not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. 
Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif filename.lower().endswith(BZ2_EXTENSIONS):\n mode = 'r:bz2'\n elif filename.lower().endswith(XZ_EXTENSIONS):\n mode = 'r:xz'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n logger.warning(\n 'Cannot determine compression type for file %s', filename,\n )\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([\n member.name for member in tar.getmembers()\n ])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n # https://github.com/python/mypy/issues/1174\n fn = split_leading_dir(fn)[1] # type: ignore\n path = os.path.join(location, fn)\n if member.isdir():\n ensure_dir(path)\n elif member.issym():\n try:\n # https://github.com/python/typeshed/issues/2673\n tar._extract_member(member, path) # type: ignore\n except Exception as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError) as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n ensure_dir(os.path.dirname(path))\n with open(path, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n fp.close()\n # Update the timestamp (useful for cython compiled files)\n # https://github.com/python/typeshed/issues/2673\n tar.utime(member, path) # type: ignore\n # member have any execute permissions for user/group/world?\n if member.mode & 0o111:\n # make dest file have execute for user/group/world\n # no-op on windows per python docs\n os.chmod(path, (0o777 - current_umask() | 0o111))\n finally:\n tar.close()\n\n\ndef unpack_file(\n filename, # type: str\n location, # type: str\n content_type=None, # type: Optional[str]\n):\n # type: (...) 
-> None\n filename = os.path.realpath(filename)\n if (\n content_type == 'application/zip' or\n filename.lower().endswith(ZIP_EXTENSIONS) or\n zipfile.is_zipfile(filename)\n ):\n unzip_file(\n filename,\n location,\n flatten=not filename.endswith('.whl')\n )\n elif (\n content_type == 'application/x-gzip' or\n tarfile.is_tarfile(filename) or\n filename.lower().endswith(\n TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS\n )\n ):\n untar_file(filename, location)\n else:\n # FIXME: handle?\n # FIXME: magic signatures?\n logger.critical(\n 'Cannot unpack file %s (downloaded from %s, content-type: %s); '\n 'cannot detect archive format',\n filename, location, content_type,\n )\n raise InstallationError(\n 'Cannot determine archive format of {}'.format(location)\n )\n", "path": "src/pip/_internal/utils/unpacking.py"}], "after_files": [{"content": "\"\"\"Utilities related archives.\n\"\"\"\n\n# The following comment should be removed at some point in the future.\n# mypy: strict-optional=False\n\nfrom __future__ import absolute_import\n\nimport logging\nimport os\nimport shutil\nimport stat\nimport tarfile\nimport zipfile\n\nfrom pip._internal.exceptions import InstallationError\nfrom pip._internal.utils.filetypes import (\n BZ2_EXTENSIONS,\n TAR_EXTENSIONS,\n XZ_EXTENSIONS,\n ZIP_EXTENSIONS,\n)\nfrom pip._internal.utils.misc import ensure_dir\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from typing import Iterable, List, Optional, Text, Union\n\n\nlogger = logging.getLogger(__name__)\n\n\nSUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS\n\ntry:\n import bz2 # noqa\n SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS\nexcept ImportError:\n logger.debug('bz2 module is not available')\n\ntry:\n # Only for Python 3.3+\n import lzma # noqa\n SUPPORTED_EXTENSIONS += XZ_EXTENSIONS\nexcept ImportError:\n logger.debug('lzma module is not available')\n\n\ndef current_umask():\n \"\"\"Get the current umask which involves having to set it temporarily.\"\"\"\n mask = os.umask(0)\n os.umask(mask)\n return mask\n\n\ndef split_leading_dir(path):\n # type: (Union[str, Text]) -> List[Union[str, Text]]\n path = path.lstrip('/').lstrip('\\\\')\n if (\n '/' in path and (\n ('\\\\' in path and path.find('/') < path.find('\\\\')) or\n '\\\\' not in path\n )\n ):\n return path.split('/', 1)\n elif '\\\\' in path:\n return path.split('\\\\', 1)\n else:\n return [path, '']\n\n\ndef has_leading_dir(paths):\n # type: (Iterable[Union[str, Text]]) -> bool\n \"\"\"Returns true if all the paths have the same leading path name\n (i.e., everything is in one subdirectory in an archive)\"\"\"\n common_prefix = None\n for path in paths:\n prefix, rest = split_leading_dir(path)\n if not prefix:\n return False\n elif common_prefix is None:\n common_prefix = prefix\n elif prefix != common_prefix:\n return False\n return True\n\n\ndef is_within_directory(directory, target):\n # type: ((Union[str, Text]), (Union[str, Text])) -> bool\n \"\"\"\n Return true if the absolute path of target is within the directory\n \"\"\"\n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n return prefix == abs_directory\n\n\ndef unzip_file(filename, location, flatten=True):\n # type: (str, str, bool) -> None\n \"\"\"\n Unzip the file (with path `filename`) to the destination `location`. All\n files are written based on system defaults and umask (i.e. 
permissions are\n not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp, allowZip64=True)\n leading = has_leading_dir(zip.namelist()) and flatten\n for info in zip.infolist():\n name = info.filename\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if not is_within_directory(location, fn):\n message = (\n 'The zip file ({}) has a file ({}) trying to install '\n 'outside target directory ({})'\n )\n raise InstallationError(message.format(filename, fn, location))\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n ensure_dir(fn)\n else:\n ensure_dir(dir)\n # Don't use read() to avoid allocating an arbitrarily large\n # chunk of memory for the file's content\n fp = zip.open(name)\n try:\n with open(fn, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n finally:\n fp.close()\n mode = info.external_attr >> 16\n # if mode and regular file and any execute permissions for\n # user/group/world?\n if mode and stat.S_ISREG(mode) and mode & 0o111:\n # make dest file have execute for user/group/world\n # (chmod +x) no-op on windows per python docs\n os.chmod(fn, (0o777 - current_umask() | 0o111))\n finally:\n zipfp.close()\n\n\ndef untar_file(filename, location):\n # type: (str, str) -> None\n \"\"\"\n Untar the file (with path `filename`) to the destination `location`.\n All files are written based on system defaults and umask (i.e. permissions\n are not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. 
Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n \"\"\"\n ensure_dir(location)\n if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):\n mode = 'r:gz'\n elif filename.lower().endswith(BZ2_EXTENSIONS):\n mode = 'r:bz2'\n elif filename.lower().endswith(XZ_EXTENSIONS):\n mode = 'r:xz'\n elif filename.lower().endswith('.tar'):\n mode = 'r'\n else:\n logger.warning(\n 'Cannot determine compression type for file %s', filename,\n )\n mode = 'r:*'\n tar = tarfile.open(filename, mode)\n try:\n leading = has_leading_dir([\n member.name for member in tar.getmembers()\n ])\n for member in tar.getmembers():\n fn = member.name\n if leading:\n # https://github.com/python/mypy/issues/1174\n fn = split_leading_dir(fn)[1] # type: ignore\n path = os.path.join(location, fn)\n if not is_within_directory(location, path):\n message = (\n 'The tar file ({}) has a file ({}) trying to install '\n 'outside target directory ({})'\n )\n raise InstallationError(\n message.format(filename, path, location)\n )\n if member.isdir():\n ensure_dir(path)\n elif member.issym():\n try:\n # https://github.com/python/typeshed/issues/2673\n tar._extract_member(member, path) # type: ignore\n except Exception as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n else:\n try:\n fp = tar.extractfile(member)\n except (KeyError, AttributeError) as exc:\n # Some corrupt tar files seem to produce this\n # (specifically bad symlinks)\n logger.warning(\n 'In the tar file %s the member %s is invalid: %s',\n filename, member.name, exc,\n )\n continue\n ensure_dir(os.path.dirname(path))\n with open(path, 'wb') as destfp:\n shutil.copyfileobj(fp, destfp)\n fp.close()\n # Update the timestamp (useful for cython compiled files)\n # https://github.com/python/typeshed/issues/2673\n tar.utime(member, path) # type: ignore\n # member have any execute permissions for user/group/world?\n if member.mode & 0o111:\n # make dest file have execute for user/group/world\n # no-op on windows per python docs\n os.chmod(path, (0o777 - current_umask() | 0o111))\n finally:\n tar.close()\n\n\ndef unpack_file(\n filename, # type: str\n location, # type: str\n content_type=None, # type: Optional[str]\n):\n # type: (...) -> None\n filename = os.path.realpath(filename)\n if (\n content_type == 'application/zip' or\n filename.lower().endswith(ZIP_EXTENSIONS) or\n zipfile.is_zipfile(filename)\n ):\n unzip_file(\n filename,\n location,\n flatten=not filename.endswith('.whl')\n )\n elif (\n content_type == 'application/x-gzip' or\n tarfile.is_tarfile(filename) or\n filename.lower().endswith(\n TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS\n )\n ):\n untar_file(filename, location)\n else:\n # FIXME: handle?\n # FIXME: magic signatures?\n logger.critical(\n 'Cannot unpack file %s (downloaded from %s, content-type: %s); '\n 'cannot detect archive format',\n filename, location, content_type,\n )\n raise InstallationError(\n 'Cannot determine archive format of {}'.format(location)\n )\n", "path": "src/pip/_internal/utils/unpacking.py"}]}
| 2,949 | 449 |
gh_patches_debug_9459
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-980
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Encoding error: `UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte`
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04
- **Modin installed from**: pip install modin[ray]
- **Modin version**: 0.6.3
- **Python version**: 3.7.3
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
Hello,
I'm trying to use Modin to reduce the memory peak due to the volume of the data, so I replaced pandas with modin.pandas. I tried to do a simple read of a file encoded in 'latin-1' (French). With pandas all goes smoothly, but using Modin I get an encoding error as follows:
`UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte`
The script used (which works fine with pandas but not with Modin):
`caract = pd.read_csv(path, sep="\t", encoding = "ISO-8859-1")`
PS: I tried other encodings with the same result: they work with pandas but not with Modin (backed by Ray): ISO-8859-1, ISO-8859-9, latin-1.
Any solution?
Thanks
### Source code / logs
`RayTaskError: ray_worker (pid=10815, host=ubuntu)
File "pandas/_libs/parsers.pyx", line 1297, in pandas._libs.parsers.TextReader._string_convert
File "pandas/_libs/parsers.pyx", line 1520, in pandas._libs.parsers._string_box_utf8
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte
During handling of the above exception, another exception occurred:
ray_worker (pid=10815, host=ubuntu)
File "/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/modin/engines/ray/task_wrapper.py", line 8, in deploy_ray_func
return func(**args)
File "/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/modin/backends/pandas/parsers.py", line 69, in parse
pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)
File "/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py", line 685, in parser_f
return _read(filepath_or_buffer, kwds)
File "/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py", line 463, in _read
data = parser.read(nrows)
File "/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py", line 1154, in read
ret = self._engine.read(nrows)
File "/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py", line 2059, in read
data = self._reader.read(nrows)
File "pandas/_libs/parsers.pyx", line 881, in pandas._libs.parsers.TextReader.read
File "pandas/_libs/parsers.pyx", line 896, in pandas._libs.parsers.TextReader._read_low_memory
File "pandas/_libs/parsers.pyx", line 973, in pandas._libs.parsers.TextReader._read_rows
File "pandas/_libs/parsers.pyx", line 1105, in pandas._libs.parsers.TextReader._convert_column_data
File "pandas/_libs/parsers.pyx", line 1158, in pandas._libs.parsers.TextReader._convert_tokens
File "pandas/_libs/parsers.pyx", line 1281, in pandas._libs.parsers.TextReader._convert_with_dtype
File "pandas/_libs/parsers.pyx", line 1297, in pandas._libs.parsers.TextReader._string_convert
File "pandas/_libs/parsers.pyx", line 1520, in pandas._libs.parsers._string_box_utf8
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte`
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/backends/pandas/parsers.py`
Content:
```
1 import numpy as np
2 import pandas
3 from pandas.core.dtypes.cast import find_common_type
4 from pandas.core.dtypes.concat import union_categoricals
5 from pandas.io.common import _infer_compression
6 from modin.engines.base.io import FileReader
7 from modin.data_management.utils import split_result_of_axis_func_pandas
8 from modin.error_message import ErrorMessage
9 from io import BytesIO
10
11
12 def _split_result_for_readers(axis, num_splits, df): # pragma: no cover
13 """Splits the DataFrame read into smaller DataFrames and handles all edge cases.
14
15 Args:
16 axis: Which axis to split over.
17 num_splits: The number of splits to create.
18 df: The DataFrame after it has been read.
19
20 Returns:
21 A list of pandas DataFrames.
22 """
23 splits = split_result_of_axis_func_pandas(axis, num_splits, df)
24 if not isinstance(splits, list):
25 splits = [splits]
26 return splits
27
28
29 def find_common_type_cat(types):
30 if all(isinstance(t, pandas.CategoricalDtype) for t in types):
31 if all(t.ordered for t in types):
32 return pandas.CategoricalDtype(
33 np.sort(np.unique([c for t in types for c in t.categories])[0]),
34 ordered=True,
35 )
36 return union_categoricals(
37 [pandas.Categorical([], dtype=t) for t in types],
38 sort_categories=all(t.ordered for t in types),
39 ).dtype
40 else:
41 return find_common_type(types)
42
43
44 class PandasParser(object):
45 @classmethod
46 def get_dtypes(cls, dtypes_ids):
47 return (
48 pandas.concat(cls.materialize(dtypes_ids), axis=1)
49 .apply(lambda row: find_common_type_cat(row.values), axis=1)
50 .squeeze(axis=0)
51 )
52
53 @classmethod
54 def single_worker_read(cls, fname, **kwargs):
55 ErrorMessage.default_to_pandas("Parameters provided")
56 # Use default args for everything
57 pandas_frame = cls.parse(fname, **kwargs)
58 if isinstance(pandas_frame, pandas.io.parsers.TextFileReader):
59 pd_read = pandas_frame.read
60 pandas_frame.read = lambda *args, **kwargs: cls.query_compiler_cls.from_pandas(
61 pd_read(*args, **kwargs), cls.frame_cls
62 )
63 return pandas_frame
64 return cls.query_compiler_cls.from_pandas(pandas_frame, cls.frame_cls)
65
66 infer_compression = _infer_compression
67
68
69 class PandasCSVParser(PandasParser):
70 @staticmethod
71 def parse(fname, **kwargs):
72 num_splits = kwargs.pop("num_splits", None)
73 start = kwargs.pop("start", None)
74 end = kwargs.pop("end", None)
75 index_col = kwargs.get("index_col", None)
76 if start is not None and end is not None:
77 # pop "compression" from kwargs because bio is uncompressed
78 bio = FileReader.file_open(fname, "rb", kwargs.pop("compression", "infer"))
79 if kwargs.pop("encoding", False):
80 header = b"" + bio.readline()
81 else:
82 header = b""
83 bio.seek(start)
84 to_read = header + bio.read(end - start)
85 bio.close()
86 pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)
87 else:
88 # This only happens when we are reading with only one worker (Default)
89 return pandas.read_csv(fname, **kwargs)
90 if index_col is not None:
91 index = pandas_df.index
92 else:
93 # The lengths will become the RangeIndex
94 index = len(pandas_df)
95 return _split_result_for_readers(1, num_splits, pandas_df) + [
96 index,
97 pandas_df.dtypes,
98 ]
99
100
101 class PandasJSONParser(PandasParser):
102 @staticmethod
103 def parse(fname, **kwargs):
104 num_splits = kwargs.pop("num_splits", None)
105 start = kwargs.pop("start", None)
106 end = kwargs.pop("end", None)
107 if start is not None and end is not None:
108 # pop "compression" from kwargs because bio is uncompressed
109 bio = FileReader.file_open(fname, "rb", kwargs.pop("compression", "infer"))
110 bio.seek(start)
111 to_read = b"" + bio.read(end - start)
112 bio.close()
113 columns = kwargs.pop("columns")
114 pandas_df = pandas.read_json(BytesIO(to_read), **kwargs)
115 else:
116 # This only happens when we are reading with only one worker (Default)
117 return pandas.read_json(fname, **kwargs)
118 if not pandas_df.columns.equals(columns):
119 raise NotImplementedError("Columns must be the same across all rows.")
120 partition_columns = pandas_df.columns
121 return _split_result_for_readers(1, num_splits, pandas_df) + [
122 len(pandas_df),
123 pandas_df.dtypes,
124 partition_columns,
125 ]
126
127
128 class PandasParquetParser(PandasParser):
129 @staticmethod
130 def parse(fname, **kwargs):
131 import pyarrow.parquet as pq
132
133 num_splits = kwargs.pop("num_splits", None)
134 columns = kwargs.get("columns", None)
135 if num_splits is None:
136 return pandas.read_parquet(fname, **kwargs)
137 kwargs["use_pandas_metadata"] = True
138 df = pq.read_table(fname, **kwargs).to_pandas()
139 if columns is not None:
140 df = df[columns]
141 # Append the length of the index here to build it externally
142 return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]
143
144
145 class PandasHDFParser(PandasParser): # pragma: no cover
146 @staticmethod
147 def parse(fname, **kwargs):
148 kwargs["key"] = kwargs.pop("_key", None)
149 num_splits = kwargs.pop("num_splits", None)
150 if num_splits is None:
151 return pandas.read_hdf(fname, **kwargs)
152 df = pandas.read_hdf(fname, **kwargs)
153 # Append the length of the index here to build it externally
154 return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]
155
156
157 class PandasFeatherParser(PandasParser):
158 @staticmethod
159 def parse(fname, **kwargs):
160 from pyarrow import feather
161
162 num_splits = kwargs.pop("num_splits", None)
163 if num_splits is None:
164 return pandas.read_feather(fname, **kwargs)
165 df = feather.read_feather(fname, **kwargs)
166 # Append the length of the index here to build it externally
167 return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]
168
169
170 class PandasSQLParser(PandasParser):
171 @staticmethod
172 def parse(sql, con, index_col, **kwargs):
173 num_splits = kwargs.pop("num_splits", None)
174 if num_splits is None:
175 return pandas.read_sql(sql, con, index_col=index_col, **kwargs)
176 df = pandas.read_sql(sql, con, index_col=index_col, **kwargs)
177 if index_col is None:
178 index = len(df)
179 else:
180 index = df.index
181 return _split_result_for_readers(1, num_splits, df) + [index, df.dtypes]
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modin/backends/pandas/parsers.py b/modin/backends/pandas/parsers.py
--- a/modin/backends/pandas/parsers.py
+++ b/modin/backends/pandas/parsers.py
@@ -76,7 +76,7 @@
if start is not None and end is not None:
# pop "compression" from kwargs because bio is uncompressed
bio = FileReader.file_open(fname, "rb", kwargs.pop("compression", "infer"))
- if kwargs.pop("encoding", False):
+ if kwargs.get("encoding", None) is not None:
header = b"" + bio.readline()
else:
header = b""
|
{"golden_diff": "diff --git a/modin/backends/pandas/parsers.py b/modin/backends/pandas/parsers.py\n--- a/modin/backends/pandas/parsers.py\n+++ b/modin/backends/pandas/parsers.py\n@@ -76,7 +76,7 @@\n if start is not None and end is not None:\n # pop \"compression\" from kwargs because bio is uncompressed\n bio = FileReader.file_open(fname, \"rb\", kwargs.pop(\"compression\", \"infer\"))\n- if kwargs.pop(\"encoding\", False):\n+ if kwargs.get(\"encoding\", None) is not None:\n header = b\"\" + bio.readline()\n else:\n header = b\"\"\n", "issue": "Encoding error : `UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte`\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: ubuntu 18.04\r\n- **Modin installed from : pip install modin[ray]\r\n- **Modin version**: 0.6.3\r\n- **Python version**: 3.7.3\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nHello, \r\ni'm trying to use modin to reduce the memory peak due the volum of the data, so i change the pandas with modin.pandas, i try to do a simple read of a file but encoded in 'latin-1' (french) . With pandas all goes smoothly but using modin i got an error of encoding as follow : \r\n`UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte`\r\n\r\nthe script used (which works fine on pandas but not in modin ) : \r\n`caract = pd.read_csv(path, sep=\"\\t\", encoding = \"ISO-8859-1\")`\r\n\r\n\r\nps :: i tried other encoding and the same remark : works on pandas and not on modin (backed by ray) : ISO-8859-1, ISO-8859-9, latin-1\r\n\r\nany solution ??\r\n\r\nthanks \r\n### Source code / logs\r\n`RayTaskError: ray_worker (pid=10815, host=ubuntu)\r\n File \"pandas/_libs/parsers.pyx\", line 1297, in pandas._libs.parsers.TextReader._string_convert\r\n File \"pandas/_libs/parsers.pyx\", line 1520, in pandas._libs.parsers._string_box_utf8\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nray_worker (pid=10815, host=ubuntu)\r\n File \"/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/modin/engines/ray/task_wrapper.py\", line 8, in deploy_ray_func\r\n return func(**args)\r\n File \"/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/modin/backends/pandas/parsers.py\", line 69, in parse\r\n pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)\r\n File \"/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"/home/lasngd/.conda/envs/pytorch/lib/python3.7/site-packages/pandas/io/parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 881, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 896, in 
pandas._libs.parsers.TextReader._read_low_memory\r\n File \"pandas/_libs/parsers.pyx\", line 973, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 1105, in pandas._libs.parsers.TextReader._convert_column_data\r\n File \"pandas/_libs/parsers.pyx\", line 1158, in pandas._libs.parsers.TextReader._convert_tokens\r\n File \"pandas/_libs/parsers.pyx\", line 1281, in pandas._libs.parsers.TextReader._convert_with_dtype\r\n File \"pandas/_libs/parsers.pyx\", line 1297, in pandas._libs.parsers.TextReader._string_convert\r\n File \"pandas/_libs/parsers.pyx\", line 1520, in pandas._libs.parsers._string_box_utf8\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xe9 in position 2: invalid continuation byte`\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "import numpy as np\nimport pandas\nfrom pandas.core.dtypes.cast import find_common_type\nfrom pandas.core.dtypes.concat import union_categoricals\nfrom pandas.io.common import _infer_compression\nfrom modin.engines.base.io import FileReader\nfrom modin.data_management.utils import split_result_of_axis_func_pandas\nfrom modin.error_message import ErrorMessage\nfrom io import BytesIO\n\n\ndef _split_result_for_readers(axis, num_splits, df): # pragma: no cover\n \"\"\"Splits the DataFrame read into smaller DataFrames and handles all edge cases.\n\n Args:\n axis: Which axis to split over.\n num_splits: The number of splits to create.\n df: The DataFrame after it has been read.\n\n Returns:\n A list of pandas DataFrames.\n \"\"\"\n splits = split_result_of_axis_func_pandas(axis, num_splits, df)\n if not isinstance(splits, list):\n splits = [splits]\n return splits\n\n\ndef find_common_type_cat(types):\n if all(isinstance(t, pandas.CategoricalDtype) for t in types):\n if all(t.ordered for t in types):\n return pandas.CategoricalDtype(\n np.sort(np.unique([c for t in types for c in t.categories])[0]),\n ordered=True,\n )\n return union_categoricals(\n [pandas.Categorical([], dtype=t) for t in types],\n sort_categories=all(t.ordered for t in types),\n ).dtype\n else:\n return find_common_type(types)\n\n\nclass PandasParser(object):\n @classmethod\n def get_dtypes(cls, dtypes_ids):\n return (\n pandas.concat(cls.materialize(dtypes_ids), axis=1)\n .apply(lambda row: find_common_type_cat(row.values), axis=1)\n .squeeze(axis=0)\n )\n\n @classmethod\n def single_worker_read(cls, fname, **kwargs):\n ErrorMessage.default_to_pandas(\"Parameters provided\")\n # Use default args for everything\n pandas_frame = cls.parse(fname, **kwargs)\n if isinstance(pandas_frame, pandas.io.parsers.TextFileReader):\n pd_read = pandas_frame.read\n pandas_frame.read = lambda *args, **kwargs: cls.query_compiler_cls.from_pandas(\n pd_read(*args, **kwargs), cls.frame_cls\n )\n return pandas_frame\n return cls.query_compiler_cls.from_pandas(pandas_frame, cls.frame_cls)\n\n infer_compression = _infer_compression\n\n\nclass PandasCSVParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n num_splits = kwargs.pop(\"num_splits\", None)\n start = kwargs.pop(\"start\", None)\n end = kwargs.pop(\"end\", None)\n index_col = kwargs.get(\"index_col\", None)\n if start is not None and end is not None:\n # pop \"compression\" from kwargs because bio is uncompressed\n 
bio = FileReader.file_open(fname, \"rb\", kwargs.pop(\"compression\", \"infer\"))\n if kwargs.pop(\"encoding\", False):\n header = b\"\" + bio.readline()\n else:\n header = b\"\"\n bio.seek(start)\n to_read = header + bio.read(end - start)\n bio.close()\n pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)\n else:\n # This only happens when we are reading with only one worker (Default)\n return pandas.read_csv(fname, **kwargs)\n if index_col is not None:\n index = pandas_df.index\n else:\n # The lengths will become the RangeIndex\n index = len(pandas_df)\n return _split_result_for_readers(1, num_splits, pandas_df) + [\n index,\n pandas_df.dtypes,\n ]\n\n\nclass PandasJSONParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n num_splits = kwargs.pop(\"num_splits\", None)\n start = kwargs.pop(\"start\", None)\n end = kwargs.pop(\"end\", None)\n if start is not None and end is not None:\n # pop \"compression\" from kwargs because bio is uncompressed\n bio = FileReader.file_open(fname, \"rb\", kwargs.pop(\"compression\", \"infer\"))\n bio.seek(start)\n to_read = b\"\" + bio.read(end - start)\n bio.close()\n columns = kwargs.pop(\"columns\")\n pandas_df = pandas.read_json(BytesIO(to_read), **kwargs)\n else:\n # This only happens when we are reading with only one worker (Default)\n return pandas.read_json(fname, **kwargs)\n if not pandas_df.columns.equals(columns):\n raise NotImplementedError(\"Columns must be the same across all rows.\")\n partition_columns = pandas_df.columns\n return _split_result_for_readers(1, num_splits, pandas_df) + [\n len(pandas_df),\n pandas_df.dtypes,\n partition_columns,\n ]\n\n\nclass PandasParquetParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n import pyarrow.parquet as pq\n\n num_splits = kwargs.pop(\"num_splits\", None)\n columns = kwargs.get(\"columns\", None)\n if num_splits is None:\n return pandas.read_parquet(fname, **kwargs)\n kwargs[\"use_pandas_metadata\"] = True\n df = pq.read_table(fname, **kwargs).to_pandas()\n if columns is not None:\n df = df[columns]\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]\n\n\nclass PandasHDFParser(PandasParser): # pragma: no cover\n @staticmethod\n def parse(fname, **kwargs):\n kwargs[\"key\"] = kwargs.pop(\"_key\", None)\n num_splits = kwargs.pop(\"num_splits\", None)\n if num_splits is None:\n return pandas.read_hdf(fname, **kwargs)\n df = pandas.read_hdf(fname, **kwargs)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]\n\n\nclass PandasFeatherParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n from pyarrow import feather\n\n num_splits = kwargs.pop(\"num_splits\", None)\n if num_splits is None:\n return pandas.read_feather(fname, **kwargs)\n df = feather.read_feather(fname, **kwargs)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]\n\n\nclass PandasSQLParser(PandasParser):\n @staticmethod\n def parse(sql, con, index_col, **kwargs):\n num_splits = kwargs.pop(\"num_splits\", None)\n if num_splits is None:\n return pandas.read_sql(sql, con, index_col=index_col, **kwargs)\n df = pandas.read_sql(sql, con, index_col=index_col, **kwargs)\n if index_col is None:\n index = len(df)\n else:\n index = df.index\n return _split_result_for_readers(1, num_splits, df) + [index, df.dtypes]\n", 
"path": "modin/backends/pandas/parsers.py"}], "after_files": [{"content": "import numpy as np\nimport pandas\nfrom pandas.core.dtypes.cast import find_common_type\nfrom pandas.core.dtypes.concat import union_categoricals\nfrom pandas.io.common import _infer_compression\nfrom modin.engines.base.io import FileReader\nfrom modin.data_management.utils import split_result_of_axis_func_pandas\nfrom modin.error_message import ErrorMessage\nfrom io import BytesIO\n\n\ndef _split_result_for_readers(axis, num_splits, df): # pragma: no cover\n \"\"\"Splits the DataFrame read into smaller DataFrames and handles all edge cases.\n\n Args:\n axis: Which axis to split over.\n num_splits: The number of splits to create.\n df: The DataFrame after it has been read.\n\n Returns:\n A list of pandas DataFrames.\n \"\"\"\n splits = split_result_of_axis_func_pandas(axis, num_splits, df)\n if not isinstance(splits, list):\n splits = [splits]\n return splits\n\n\ndef find_common_type_cat(types):\n if all(isinstance(t, pandas.CategoricalDtype) for t in types):\n if all(t.ordered for t in types):\n return pandas.CategoricalDtype(\n np.sort(np.unique([c for t in types for c in t.categories])[0]),\n ordered=True,\n )\n return union_categoricals(\n [pandas.Categorical([], dtype=t) for t in types],\n sort_categories=all(t.ordered for t in types),\n ).dtype\n else:\n return find_common_type(types)\n\n\nclass PandasParser(object):\n @classmethod\n def get_dtypes(cls, dtypes_ids):\n return (\n pandas.concat(cls.materialize(dtypes_ids), axis=1)\n .apply(lambda row: find_common_type_cat(row.values), axis=1)\n .squeeze(axis=0)\n )\n\n @classmethod\n def single_worker_read(cls, fname, **kwargs):\n ErrorMessage.default_to_pandas(\"Parameters provided\")\n # Use default args for everything\n pandas_frame = cls.parse(fname, **kwargs)\n if isinstance(pandas_frame, pandas.io.parsers.TextFileReader):\n pd_read = pandas_frame.read\n pandas_frame.read = lambda *args, **kwargs: cls.query_compiler_cls.from_pandas(\n pd_read(*args, **kwargs), cls.frame_cls\n )\n return pandas_frame\n return cls.query_compiler_cls.from_pandas(pandas_frame, cls.frame_cls)\n\n infer_compression = _infer_compression\n\n\nclass PandasCSVParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n num_splits = kwargs.pop(\"num_splits\", None)\n start = kwargs.pop(\"start\", None)\n end = kwargs.pop(\"end\", None)\n index_col = kwargs.get(\"index_col\", None)\n if start is not None and end is not None:\n # pop \"compression\" from kwargs because bio is uncompressed\n bio = FileReader.file_open(fname, \"rb\", kwargs.pop(\"compression\", \"infer\"))\n if kwargs.get(\"encoding\", None) is not None:\n header = b\"\" + bio.readline()\n else:\n header = b\"\"\n bio.seek(start)\n to_read = header + bio.read(end - start)\n bio.close()\n pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs)\n else:\n # This only happens when we are reading with only one worker (Default)\n return pandas.read_csv(fname, **kwargs)\n if index_col is not None:\n index = pandas_df.index\n else:\n # The lengths will become the RangeIndex\n index = len(pandas_df)\n return _split_result_for_readers(1, num_splits, pandas_df) + [\n index,\n pandas_df.dtypes,\n ]\n\n\nclass PandasJSONParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n num_splits = kwargs.pop(\"num_splits\", None)\n start = kwargs.pop(\"start\", None)\n end = kwargs.pop(\"end\", None)\n if start is not None and end is not None:\n # pop \"compression\" from kwargs because bio is uncompressed\n bio = 
FileReader.file_open(fname, \"rb\", kwargs.pop(\"compression\", \"infer\"))\n bio.seek(start)\n to_read = b\"\" + bio.read(end - start)\n bio.close()\n columns = kwargs.pop(\"columns\")\n pandas_df = pandas.read_json(BytesIO(to_read), **kwargs)\n else:\n # This only happens when we are reading with only one worker (Default)\n return pandas.read_json(fname, **kwargs)\n if not pandas_df.columns.equals(columns):\n raise NotImplementedError(\"Columns must be the same across all rows.\")\n partition_columns = pandas_df.columns\n return _split_result_for_readers(1, num_splits, pandas_df) + [\n len(pandas_df),\n pandas_df.dtypes,\n partition_columns,\n ]\n\n\nclass PandasParquetParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n import pyarrow.parquet as pq\n\n num_splits = kwargs.pop(\"num_splits\", None)\n columns = kwargs.get(\"columns\", None)\n if num_splits is None:\n return pandas.read_parquet(fname, **kwargs)\n kwargs[\"use_pandas_metadata\"] = True\n df = pq.read_table(fname, **kwargs).to_pandas()\n if columns is not None:\n df = df[columns]\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]\n\n\nclass PandasHDFParser(PandasParser): # pragma: no cover\n @staticmethod\n def parse(fname, **kwargs):\n kwargs[\"key\"] = kwargs.pop(\"_key\", None)\n num_splits = kwargs.pop(\"num_splits\", None)\n if num_splits is None:\n return pandas.read_hdf(fname, **kwargs)\n df = pandas.read_hdf(fname, **kwargs)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]\n\n\nclass PandasFeatherParser(PandasParser):\n @staticmethod\n def parse(fname, **kwargs):\n from pyarrow import feather\n\n num_splits = kwargs.pop(\"num_splits\", None)\n if num_splits is None:\n return pandas.read_feather(fname, **kwargs)\n df = feather.read_feather(fname, **kwargs)\n # Append the length of the index here to build it externally\n return _split_result_for_readers(0, num_splits, df) + [len(df.index), df.dtypes]\n\n\nclass PandasSQLParser(PandasParser):\n @staticmethod\n def parse(sql, con, index_col, **kwargs):\n num_splits = kwargs.pop(\"num_splits\", None)\n if num_splits is None:\n return pandas.read_sql(sql, con, index_col=index_col, **kwargs)\n df = pandas.read_sql(sql, con, index_col=index_col, **kwargs)\n if index_col is None:\n index = len(df)\n else:\n index = df.index\n return _split_result_for_readers(1, num_splits, df) + [index, df.dtypes]\n", "path": "modin/backends/pandas/parsers.py"}]}
| 3,389 | 145 |
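The modin fix above hinges on the difference between `dict.pop` and `dict.get`: `pop` removes the `encoding` entry from `kwargs`, so it never reaches the later `pandas.read_csv` call, which then falls back to UTF-8 and fails on Latin-1 bytes. A minimal sketch of the two behaviours; the sample kwargs are hypothetical and only illustrate the mechanism, they are not taken from the patch:

```python
# Illustrative only: how pop() vs get() affects the kwargs later passed to read_csv.
kwargs = {"sep": "\t", "encoding": "ISO-8859-1"}

# Buggy check: pop() deletes "encoding", so a later read_csv(**kwargs) defaults to utf-8.
had_encoding = kwargs.pop("encoding", False)
assert "encoding" not in kwargs

# Fixed check: get() only inspects the value and leaves kwargs untouched.
kwargs = {"sep": "\t", "encoding": "ISO-8859-1"}
had_encoding = kwargs.get("encoding", None) is not None
assert kwargs["encoding"] == "ISO-8859-1"
```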
gh_patches_debug_5534
|
rasdani/github-patches
|
git_diff
|
cowrie__cowrie-1356
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode-objects must be encoded before hashing
Hello,
**Describe the bug**
When a user types `ssh user@server`, the honeypot produces an error.
**To Reproduce**
Steps to reproduce the behavior:
1. Launch the honeypot
2. Connect to the fake system
3. Type `ssh [email protected]`
4. See the error in the logs
**Server (please complete the following information):**
 - OS: Ubuntu 18.04
- Python: 3.6.9
**Additional context**
Traceback:
```
Traceback (most recent call last):
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/service.py", line 45, in packetReceived
return f(packet)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/connection.py", line 258, in ssh_CHANNEL_DATA
log.callWithLogger(channel, channel.dataReceived, data)
--- <exception caught here> ---
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
return callWithContext({"system": lp}, func, *args, **kw)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
return context.call({ILogContext: newCtx}, func, *args, **kw)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
return self.currentContext().callWithContext(ctx, func, *args, **kw)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
return func(*args,**kw)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/session.py", line 112, in dataReceived
self.client.transport.write(data)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/session.py", line 163, in write
self.proto.dataReceived(data)
File "/home/valentin/honeypot/cowrie/src/cowrie/insults/insults.py", line 98, in dataReceived
insults.ServerProtocol.dataReceived(self, data)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
self.terminalProtocol.keystrokeReceived(ch, None)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
m()
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 320, in handle_RETURN
return recvline.RecvLine.handle_RETURN(self)
File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
self.lineReceived(line)
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 182, in lineReceived
self.cmdstack[-1].lineReceived(line)
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/honeypot.py", line 173, in lineReceived
self.runCommand()
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/honeypot.py", line 289, in runCommand
self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 301, in call_command
HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 191, in call_command
obj.start()
File "/home/valentin/honeypot/cowrie/src/cowrie/commands/ssh.py", line 74, in start
s = hashlib.md5(host).hexdigest()
builtins.TypeError: Unicode-objects must be encoded before hashing
```
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/commands/ssh.py`
Content:
```
1 # Copyright (c) 2009 Upi Tamminen <[email protected]>
2 # See the COPYRIGHT file for more information
3
4 from __future__ import absolute_import, division
5
6 import getopt
7 import hashlib
8 import re
9 import socket
10 import time
11
12 from twisted.internet import reactor
13 from twisted.python import log
14
15 from cowrie.core.config import CowrieConfig
16 from cowrie.shell.command import HoneyPotCommand
17
18 commands = {}
19
20
21 OUTPUT = [
22 'usage: ssh [-46AaCfGgKkMNnqsTtVvXxYy] [-B bind_interface]',
23 ' [-b bind_address] [-c cipher_spec] [-D [bind_address:]port]',
24 ' [-E log_file] [-e escape_char] [-F configfile] [-I pkcs11]',
25 ' [-i identity_file] [-J [user@]host[:port]] [-L address]',
26 ' [-l login_name] [-m mac_spec] [-O ctl_cmd] [-o option] [-p port]', # noqa
27 ' [-Q query_option] [-R address] [-S ctl_path] [-W host:port]',
28 ' [-w local_tun[:remote_tun]] destination [command]'
29 ]
30
31
32 class command_ssh(HoneyPotCommand):
33
34 def valid_ip(self, address):
35 try:
36 socket.inet_aton(address)
37 return True
38 except Exception:
39 return False
40
41 def start(self):
42 try:
43 options = '-1246AaCfgKkMNnqsTtVvXxYb:c:D:e:F:i:L:l:m:O:o:p:R:S:w:'
44 optlist, args = getopt.getopt(self.args, options)
45 except getopt.GetoptError:
46 self.write('Unrecognized option\n')
47 self.exit()
48 for opt in optlist:
49 if opt[0] == '-V':
50 self.write(CowrieConfig().get('shell', 'ssh_version',
51 fallback="OpenSSH_7.9p1, OpenSSL 1.1.1a 20 Nov 2018")+"\n")
52 self.exit()
53 return
54 if not len(args):
55 for line in OUTPUT:
56 self.write('{0}\n'.format(line))
57 self.exit()
58 return
59 user, host = 'root', args[0]
60 for opt in optlist:
61 if opt[0] == '-l':
62 user = opt[1]
63 if args[0].count('@'):
64 user, host = args[0].split('@', 1)
65
66 if re.match('^[0-9.]+$', host):
67 if self.valid_ip(host):
68 self.ip = host
69 else:
70 self.write('ssh: Could not resolve hostname %s: \
71 Name or service not known\n' % (host,))
72 self.exit()
73 else:
74 s = hashlib.md5(host).hexdigest()
75 self.ip = '.'.join([str(int(x, 16)) for x in
76 (s[0:2], s[2:4], s[4:6], s[6:8])])
77
78 self.host = host
79 self.user = user
80
81 self.write('The authenticity of host \'%s (%s)\' \
82 can\'t be established.\n' % (self.host, self.ip))
83 self.write('RSA key fingerprint is \
84 9d:30:97:8a:9e:48:0d:de:04:8d:76:3a:7b:4b:30:f8.\n')
85 self.write('Are you sure you want to continue connecting (yes/no)? ')
86 self.callbacks = [self.yesno, self.wait]
87
88 def yesno(self, line):
89 self.write('Warning: Permanently added \'{}\' (RSA) to the \
90 list of known hosts.\n'.format(self.host))
91 self.write('%s@%s\'s password: ' % (self.user, self.host))
92 self.protocol.password_input = True
93
94 def wait(self, line):
95 reactor.callLater(2, self.finish, line)
96
97 def finish(self, line):
98 self.pause = False
99 rest, host = self.host, 'localhost'
100 rest = self.host.strip().split('.')
101 if len(rest) and rest[0].isalpha():
102 host = rest[0]
103 self.protocol.hostname = host
104 self.protocol.cwd = '/root'
105 if not self.fs.exists(self.protocol.cwd):
106 self.protocol.cwd = '/'
107 self.protocol.password_input = False
108 self.write('Linux {} 2.6.26-2-686 #1 SMP Wed Nov 4 20:45:37 \
109 UTC 2009 i686\n'.format(self.protocol.hostname))
110 self.write('Last login: %s from 192.168.9.4\n'
111 % (time.ctime(time.time() - 123123),))
112 self.exit()
113
114 def lineReceived(self, line):
115 log.msg('INPUT (ssh):', line)
116 if len(self.callbacks):
117 self.callbacks.pop(0)(line)
118
119
120 commands['/usr/bin/ssh'] = command_ssh
121 commands['ssh'] = command_ssh
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cowrie/commands/ssh.py b/src/cowrie/commands/ssh.py
--- a/src/cowrie/commands/ssh.py
+++ b/src/cowrie/commands/ssh.py
@@ -71,7 +71,7 @@
Name or service not known\n' % (host,))
self.exit()
else:
- s = hashlib.md5(host).hexdigest()
+ s = hashlib.md5(host.encode()).hexdigest()
self.ip = '.'.join([str(int(x, 16)) for x in
(s[0:2], s[2:4], s[4:6], s[6:8])])
|
{"golden_diff": "diff --git a/src/cowrie/commands/ssh.py b/src/cowrie/commands/ssh.py\n--- a/src/cowrie/commands/ssh.py\n+++ b/src/cowrie/commands/ssh.py\n@@ -71,7 +71,7 @@\n Name or service not known\\n' % (host,))\n self.exit()\n else:\n- s = hashlib.md5(host).hexdigest()\n+ s = hashlib.md5(host.encode()).hexdigest()\n self.ip = '.'.join([str(int(x, 16)) for x in\n (s[0:2], s[2:4], s[4:6], s[6:8])])\n", "issue": "Unicode-objects must be encoded before hashing\nHello,\r\n\r\n**Describe the bug**\r\nWhen user type ssh user@server the honeypot produce an error.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Lanch honeypot\r\n2. Connect to fake system\r\n3. type ssh [email protected]\r\n4. See error in logs\r\n\r\n\r\n**Server (please complete the following information):**\r\n - OS: Ubutun 18.04\r\n - Python: 3.6.9\r\n\r\n**Additional context**\r\nTraceback:\r\n```\r\n\tTraceback (most recent call last):\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py\", line 122, in callWithContext\r\n\t return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py\", line 85, in callWithContext\r\n\t return func(*args,**kw)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/service.py\", line 45, in packetReceived\r\n\t return f(packet)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/connection.py\", line 258, in ssh_CHANNEL_DATA\r\n\t log.callWithLogger(channel, channel.dataReceived, data)\r\n\t--- <exception caught here> ---\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/log.py\", line 103, in callWithLogger\r\n\t return callWithContext({\"system\": lp}, func, *args, **kw)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/log.py\", line 86, in callWithContext\r\n\t return context.call({ILogContext: newCtx}, func, *args, **kw)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py\", line 122, in callWithContext\r\n\t return self.currentContext().callWithContext(ctx, func, *args, **kw)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py\", line 85, in callWithContext\r\n\t return func(*args,**kw)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/session.py\", line 112, in dataReceived\r\n\t self.client.transport.write(data)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/session.py\", line 163, in write\r\n\t self.proto.dataReceived(data)\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/insults/insults.py\", line 98, in dataReceived\r\n\t insults.ServerProtocol.dataReceived(self, data)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/insults/insults.py\", line 537, in dataReceived\r\n\t self.terminalProtocol.keystrokeReceived(ch, None)\r\n\t File \"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/recvline.py\", line 225, in keystrokeReceived\r\n\t m()\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py\", line 320, in handle_RETURN\r\n\t return recvline.RecvLine.handle_RETURN(self)\r\n\t File 
\"/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/recvline.py\", line 292, in handle_RETURN\r\n\t self.lineReceived(line)\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py\", line 182, in lineReceived\r\n\t self.cmdstack[-1].lineReceived(line)\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/shell/honeypot.py\", line 173, in lineReceived\r\n\t self.runCommand()\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/shell/honeypot.py\", line 289, in runCommand\r\n\t self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py\", line 301, in call_command\r\n\t HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py\", line 191, in call_command\r\n\t obj.start()\r\n\t File \"/home/valentin/honeypot/cowrie/src/cowrie/commands/ssh.py\", line 74, in start\r\n\t s = hashlib.md5(host).hexdigest()\r\n\tbuiltins.TypeError: Unicode-objects must be encoded before hashing\r\n\t\r\n\r\n```\r\nThanks\n", "before_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nimport getopt\nimport hashlib\nimport re\nimport socket\nimport time\n\nfrom twisted.internet import reactor\nfrom twisted.python import log\n\nfrom cowrie.core.config import CowrieConfig\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\n\nOUTPUT = [\n 'usage: ssh [-46AaCfGgKkMNnqsTtVvXxYy] [-B bind_interface]',\n ' [-b bind_address] [-c cipher_spec] [-D [bind_address:]port]',\n ' [-E log_file] [-e escape_char] [-F configfile] [-I pkcs11]',\n ' [-i identity_file] [-J [user@]host[:port]] [-L address]',\n ' [-l login_name] [-m mac_spec] [-O ctl_cmd] [-o option] [-p port]', # noqa\n ' [-Q query_option] [-R address] [-S ctl_path] [-W host:port]',\n ' [-w local_tun[:remote_tun]] destination [command]'\n]\n\n\nclass command_ssh(HoneyPotCommand):\n\n def valid_ip(self, address):\n try:\n socket.inet_aton(address)\n return True\n except Exception:\n return False\n\n def start(self):\n try:\n options = '-1246AaCfgKkMNnqsTtVvXxYb:c:D:e:F:i:L:l:m:O:o:p:R:S:w:'\n optlist, args = getopt.getopt(self.args, options)\n except getopt.GetoptError:\n self.write('Unrecognized option\\n')\n self.exit()\n for opt in optlist:\n if opt[0] == '-V':\n self.write(CowrieConfig().get('shell', 'ssh_version',\n fallback=\"OpenSSH_7.9p1, OpenSSL 1.1.1a 20 Nov 2018\")+\"\\n\")\n self.exit()\n return\n if not len(args):\n for line in OUTPUT:\n self.write('{0}\\n'.format(line))\n self.exit()\n return\n user, host = 'root', args[0]\n for opt in optlist:\n if opt[0] == '-l':\n user = opt[1]\n if args[0].count('@'):\n user, host = args[0].split('@', 1)\n\n if re.match('^[0-9.]+$', host):\n if self.valid_ip(host):\n self.ip = host\n else:\n self.write('ssh: Could not resolve hostname %s: \\\n Name or service not known\\n' % (host,))\n self.exit()\n else:\n s = hashlib.md5(host).hexdigest()\n self.ip = '.'.join([str(int(x, 16)) for x in\n (s[0:2], s[2:4], s[4:6], s[6:8])])\n\n self.host = host\n self.user = user\n\n self.write('The authenticity of host \\'%s (%s)\\' \\\n can\\'t be established.\\n' % (self.host, self.ip))\n self.write('RSA key fingerprint is \\\n 9d:30:97:8a:9e:48:0d:de:04:8d:76:3a:7b:4b:30:f8.\\n')\n self.write('Are you sure you want to continue connecting (yes/no)? 
')\n self.callbacks = [self.yesno, self.wait]\n\n def yesno(self, line):\n self.write('Warning: Permanently added \\'{}\\' (RSA) to the \\\n list of known hosts.\\n'.format(self.host))\n self.write('%s@%s\\'s password: ' % (self.user, self.host))\n self.protocol.password_input = True\n\n def wait(self, line):\n reactor.callLater(2, self.finish, line)\n\n def finish(self, line):\n self.pause = False\n rest, host = self.host, 'localhost'\n rest = self.host.strip().split('.')\n if len(rest) and rest[0].isalpha():\n host = rest[0]\n self.protocol.hostname = host\n self.protocol.cwd = '/root'\n if not self.fs.exists(self.protocol.cwd):\n self.protocol.cwd = '/'\n self.protocol.password_input = False\n self.write('Linux {} 2.6.26-2-686 #1 SMP Wed Nov 4 20:45:37 \\\n UTC 2009 i686\\n'.format(self.protocol.hostname))\n self.write('Last login: %s from 192.168.9.4\\n'\n % (time.ctime(time.time() - 123123),))\n self.exit()\n\n def lineReceived(self, line):\n log.msg('INPUT (ssh):', line)\n if len(self.callbacks):\n self.callbacks.pop(0)(line)\n\n\ncommands['/usr/bin/ssh'] = command_ssh\ncommands['ssh'] = command_ssh\n", "path": "src/cowrie/commands/ssh.py"}], "after_files": [{"content": "# Copyright (c) 2009 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\nfrom __future__ import absolute_import, division\n\nimport getopt\nimport hashlib\nimport re\nimport socket\nimport time\n\nfrom twisted.internet import reactor\nfrom twisted.python import log\n\nfrom cowrie.core.config import CowrieConfig\nfrom cowrie.shell.command import HoneyPotCommand\n\ncommands = {}\n\n\nOUTPUT = [\n 'usage: ssh [-46AaCfGgKkMNnqsTtVvXxYy] [-B bind_interface]',\n ' [-b bind_address] [-c cipher_spec] [-D [bind_address:]port]',\n ' [-E log_file] [-e escape_char] [-F configfile] [-I pkcs11]',\n ' [-i identity_file] [-J [user@]host[:port]] [-L address]',\n ' [-l login_name] [-m mac_spec] [-O ctl_cmd] [-o option] [-p port]', # noqa\n ' [-Q query_option] [-R address] [-S ctl_path] [-W host:port]',\n ' [-w local_tun[:remote_tun]] destination [command]'\n]\n\n\nclass command_ssh(HoneyPotCommand):\n\n def valid_ip(self, address):\n try:\n socket.inet_aton(address)\n return True\n except Exception:\n return False\n\n def start(self):\n try:\n options = '-1246AaCfgKkMNnqsTtVvXxYb:c:D:e:F:i:L:l:m:O:o:p:R:S:w:'\n optlist, args = getopt.getopt(self.args, options)\n except getopt.GetoptError:\n self.write('Unrecognized option\\n')\n self.exit()\n for opt in optlist:\n if opt[0] == '-V':\n self.write(CowrieConfig().get('shell', 'ssh_version',\n fallback=\"OpenSSH_7.9p1, OpenSSL 1.1.1a 20 Nov 2018\")+\"\\n\")\n self.exit()\n return\n if not len(args):\n for l in OUTPUT:\n self.write('{0}\\n'.format(l))\n self.exit()\n return\n user, host = 'root', args[0]\n for opt in optlist:\n if opt[0] == '-l':\n user = opt[1]\n if args[0].count('@'):\n user, host = args[0].split('@', 1)\n\n if re.match('^[0-9.]+$', host):\n if self.valid_ip(host):\n self.ip = host\n else:\n self.write('ssh: Could not resolve hostname %s: \\\n Name or service not known\\n' % (host,))\n self.exit()\n else:\n s = hashlib.md5(host.encode()).hexdigest()\n self.ip = '.'.join([str(int(x, 16)) for x in\n (s[0:2], s[2:4], s[4:6], s[6:8])])\n\n self.host = host\n self.user = user\n\n self.write('The authenticity of host \\'%s (%s)\\' \\\n can\\'t be established.\\n' % (self.host, self.ip))\n self.write('RSA key fingerprint is \\\n 9d:30:97:8a:9e:48:0d:de:04:8d:76:3a:7b:4b:30:f8.\\n')\n self.write('Are you sure you want to continue connecting 
(yes/no)? ')\n self.callbacks = [self.yesno, self.wait]\n\n def yesno(self, line):\n self.write('Warning: Permanently added \\'{}\\' (RSA) to the \\\n list of known hosts.\\n'.format(self.host))\n self.write('%s@%s\\'s password: ' % (self.user, self.host))\n self.protocol.password_input = True\n\n def wait(self, line):\n reactor.callLater(2, self.finish, line)\n\n def finish(self, line):\n self.pause = False\n rest, host = self.host, 'localhost'\n rest = self.host.strip().split('.')\n if len(rest) and rest[0].isalpha():\n host = rest[0]\n self.protocol.hostname = host\n self.protocol.cwd = '/root'\n if not self.fs.exists(self.protocol.cwd):\n self.protocol.cwd = '/'\n self.protocol.password_input = False\n self.write('Linux {} 2.6.26-2-686 #1 SMP Wed Nov 4 20:45:37 \\\n UTC 2009 i686\\n'.format(self.protocol.hostname))\n self.write('Last login: %s from 192.168.9.4\\n'\n % (time.ctime(time.time() - 123123),))\n self.exit()\n\n def lineReceived(self, line):\n log.msg('INPUT (ssh):', line)\n if len(self.callbacks):\n self.callbacks.pop(0)(line)\n\n\ncommands['/usr/bin/ssh'] = command_ssh\ncommands['ssh'] = command_ssh\n", "path": "src/cowrie/commands/ssh.py"}]}
| 2,911 | 149 |
gh_patches_debug_32576
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1735
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kinto onboarding experience (part 2)
This is a followup from #1733 with random feedback on onboarding when trying to use the [accounts plugin](http://docs.kinto-storage.org/en/stable/api/1.x/accounts.html).
Started with updating my `config/kinto.ini` with:
```
kinto.includes = kinto.plugins.default_bucket
kinto.plugins.admin
kinto.plugins.accounts
```
Restarting the server goes smoothly. The admin loads fine and renders a new entry for *Kinto Account Auth*. I never created any Account just yet, though out of curiosity I try to log in using silly:silly:

Tadaa:

Wait, what?
Oh. It seems it actually used the Basic Auth strategy instead of the account one for login. This is odd and confusing as fsck.
Actually, I didn't go any further with toying around with the admin, as it looks broken to me. This is a little sad.
Kinto onboarding experience (part 2)
This is a followup from #1733 with random feedback on onboarding when trying to use the [accounts plugin](http://docs.kinto-storage.org/en/stable/api/1.x/accounts.html).
Started with updating my `config/kinto.ini` with:
```
kinto.includes = kinto.plugins.default_bucket
kinto.plugins.admin
kinto.plugins.accounts
```
Restarting the server goes smoothly. The admin loads fine and renders a new entry for *Kinto Account Auth*. I never created any Account just yet, though out of curiosity I try to log in using silly:silly:

Tadaa:

Wait, what?
Oh. It seems it actually used the Basic Auth strategy instead of the account one for login. This is odd and confusing as fsck.
Actually, I didn't go any further with toying around with the admin, as it looks broken to me. This is a little sad.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/plugins/accounts/__init__.py`
Content:
```
1 from kinto.authorization import PERMISSIONS_INHERITANCE_TREE
2 from pyramid.exceptions import ConfigurationError
3
4 ACCOUNT_CACHE_KEY = 'accounts:{}:verified'
5 ACCOUNT_POLICY_NAME = 'account'
6
7
8 def includeme(config):
9 config.add_api_capability(
10 'accounts',
11 description='Manage user accounts.',
12 url='https://kinto.readthedocs.io/en/latest/api/1.x/accounts.html')
13
14 config.scan('kinto.plugins.accounts.views')
15
16 PERMISSIONS_INHERITANCE_TREE['root'].update({
17 'account:create': {}
18 })
19 PERMISSIONS_INHERITANCE_TREE['account'] = {
20 'write': {'account': ['write']},
21 'read': {'account': ['write', 'read']}
22 }
23
24 # Add some safety to avoid weird behaviour with basicauth default policy.
25 settings = config.get_settings()
26 auth_policies = settings['multiauth.policies']
27 if 'basicauth' in auth_policies and 'account' in auth_policies:
28 if auth_policies.index('basicauth') < auth_policies.index('account'):
29 error_msg = ("'basicauth' should not be mentioned before 'account' "
30 "in 'multiauth.policies' setting.")
31 raise ConfigurationError(error_msg)
32
33 # We assume anyone in account_create_principals is to create
34 # accounts for other people.
35 # No one can create accounts for other people unless they are an
36 # "admin", defined as someone matching account_write_principals.
37 # Therefore any account that is in account_create_principals
38 # should be in account_write_principals too.
39 creators = set(settings.get('account_create_principals', '').split())
40 admins = set(settings.get('account_write_principals', '').split())
41 cant_create_anything = creators.difference(admins)
42 # system.Everyone isn't an account.
43 cant_create_anything.discard('system.Everyone')
44 if cant_create_anything:
45 message = ('Configuration has some principals in account_create_principals '
46 'but not in account_write_principals. These principals will only be '
47 'able to create their own accounts. This may not be what you want.\n'
48 'If you want these users to be able to create accounts for other users, '
49 'add them to account_write_principals.\n'
50 'Affected users: {}'.format(list(cant_create_anything)))
51
52 raise ConfigurationError(message)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/plugins/accounts/__init__.py b/kinto/plugins/accounts/__init__.py
--- a/kinto/plugins/accounts/__init__.py
+++ b/kinto/plugins/accounts/__init__.py
@@ -1,9 +1,13 @@
+import re
+
from kinto.authorization import PERMISSIONS_INHERITANCE_TREE
from pyramid.exceptions import ConfigurationError
ACCOUNT_CACHE_KEY = 'accounts:{}:verified'
ACCOUNT_POLICY_NAME = 'account'
+DOCS_URL = "https://kinto.readthedocs.io/en/stable/api/1.x/accounts.html"
+
def includeme(config):
config.add_api_capability(
@@ -21,13 +25,28 @@
'read': {'account': ['write', 'read']}
}
- # Add some safety to avoid weird behaviour with basicauth default policy.
settings = config.get_settings()
+
+ # Check that the account policy is mentioned in config if included.
+ accountClass = 'AccountsAuthenticationPolicy'
+ policy = None
+ for k, v in settings.items():
+ m = re.match('multiauth\.policy\.(.*)\.use', k)
+ if m:
+ if v.endswith(accountClass):
+ policy = m.group(1)
+
+ if not policy:
+ error_msg = ("Account policy missing the 'multiauth.policy.*.use' "
+ "setting. See {} in docs {}.").format(accountClass, DOCS_URL)
+ raise ConfigurationError(error_msg)
+
+ # Add some safety to avoid weird behaviour with basicauth default policy.
auth_policies = settings['multiauth.policies']
- if 'basicauth' in auth_policies and 'account' in auth_policies:
- if auth_policies.index('basicauth') < auth_policies.index('account'):
- error_msg = ("'basicauth' should not be mentioned before 'account' "
- "in 'multiauth.policies' setting.")
+ if 'basicauth' in auth_policies and policy in auth_policies:
+ if auth_policies.index('basicauth') < auth_policies.index(policy):
+ error_msg = ("'basicauth' should not be mentioned before '%s' "
+ "in 'multiauth.policies' setting.") % policy
raise ConfigurationError(error_msg)
# We assume anyone in account_create_principals is to create
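Editor's note: the golden diff above makes the accounts plugin refuse to start unless some `multiauth.policy.<name>.use` setting points at `AccountsAuthenticationPolicy`, and unless that policy is listed ahead of `basicauth`. Below is a minimal, self-contained sketch of that lookup against a hypothetical settings mapping; the dotted class path and setting values are illustrative assumptions, not taken from a real deployment.

```python
import re

# Hypothetical settings, mimicking what a kinto.ini with the accounts policy would yield.
settings = {
    'multiauth.policies': 'account basicauth',
    'multiauth.policy.account.use':
        'kinto.plugins.accounts.authentication.AccountsAuthenticationPolicy',
}

# Scan for a policy whose backing class is the accounts authentication policy.
policy = None
for key, value in settings.items():
    match = re.match(r'multiauth\.policy\.(.*)\.use', key)
    if match and value.endswith('AccountsAuthenticationPolicy'):
        policy = match.group(1)  # 'account' in this sketch

assert policy is not None, "accounts plugin included but no matching policy configured"

# The second guard enforces ordering: the account policy must come before basicauth.
auth_policies = settings['multiauth.policies'].split()
assert auth_policies.index(policy) < auth_policies.index('basicauth')
```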
|
{"golden_diff": "diff --git a/kinto/plugins/accounts/__init__.py b/kinto/plugins/accounts/__init__.py\n--- a/kinto/plugins/accounts/__init__.py\n+++ b/kinto/plugins/accounts/__init__.py\n@@ -1,9 +1,13 @@\n+import re\n+\n from kinto.authorization import PERMISSIONS_INHERITANCE_TREE\n from pyramid.exceptions import ConfigurationError\n \n ACCOUNT_CACHE_KEY = 'accounts:{}:verified'\n ACCOUNT_POLICY_NAME = 'account'\n \n+DOCS_URL = \"https://kinto.readthedocs.io/en/stable/api/1.x/accounts.html\"\n+\n \n def includeme(config):\n config.add_api_capability(\n@@ -21,13 +25,28 @@\n 'read': {'account': ['write', 'read']}\n }\n \n- # Add some safety to avoid weird behaviour with basicauth default policy.\n settings = config.get_settings()\n+\n+ # Check that the account policy is mentioned in config if included.\n+ accountClass = 'AccountsAuthenticationPolicy'\n+ policy = None\n+ for k, v in settings.items():\n+ m = re.match('multiauth\\.policy\\.(.*)\\.use', k)\n+ if m:\n+ if v.endswith(accountClass):\n+ policy = m.group(1)\n+\n+ if not policy:\n+ error_msg = (\"Account policy missing the 'multiauth.policy.*.use' \"\n+ \"setting. See {} in docs {}.\").format(accountClass, DOCS_URL)\n+ raise ConfigurationError(error_msg)\n+\n+ # Add some safety to avoid weird behaviour with basicauth default policy.\n auth_policies = settings['multiauth.policies']\n- if 'basicauth' in auth_policies and 'account' in auth_policies:\n- if auth_policies.index('basicauth') < auth_policies.index('account'):\n- error_msg = (\"'basicauth' should not be mentioned before 'account' \"\n- \"in 'multiauth.policies' setting.\")\n+ if 'basicauth' in auth_policies and policy in auth_policies:\n+ if auth_policies.index('basicauth') < auth_policies.index(policy):\n+ error_msg = (\"'basicauth' should not be mentioned before '%s' \"\n+ \"in 'multiauth.policies' setting.\") % policy\n raise ConfigurationError(error_msg)\n \n # We assume anyone in account_create_principals is to create\n", "issue": "Kinto onboarding experience (part 2)\nThese is a followup from #1733 with random feedback with onboarding when trying to use the [accounts plugin](http://docs.kinto-storage.org/en/stable/api/1.x/accounts.html).\r\n\r\n\r\n\r\nStarted with updating my `config/kinto.ini` with:\r\n\r\n```\r\nkinto.includes = kinto.plugins.default_bucket\r\n kinto.plugins.admin\r\n kinto.plugins.accounts\r\n```\r\n\r\nRestarting the server goes smoothly. The admin loads fine and renders a new entry for *Kinto Account Auth*. I never created any Account just yet, though out of curiosity I try to log in using silly:silly:\r\n\r\n\r\n\r\nTadaa:\r\n\r\n\r\n\r\nWait, what?\r\n\r\nOh. It seems it actually used the Basic Auth strategy instead of the account one for login. This is odd and confusing as fsck.\r\n\r\nActually, I didn't went further with toying around with the admin as it looks broken to me. This is a little sad.\nKinto onboarding experience (part 2)\nThese is a followup from #1733 with random feedback with onboarding when trying to use the [accounts plugin](http://docs.kinto-storage.org/en/stable/api/1.x/accounts.html).\r\n\r\n\r\n\r\nStarted with updating my `config/kinto.ini` with:\r\n\r\n```\r\nkinto.includes = kinto.plugins.default_bucket\r\n kinto.plugins.admin\r\n kinto.plugins.accounts\r\n```\r\n\r\nRestarting the server goes smoothly. The admin loads fine and renders a new entry for *Kinto Account Auth*. 
I never created any Account just yet, though out of curiosity I try to log in using silly:silly:\r\n\r\n\r\n\r\nTadaa:\r\n\r\n\r\n\r\nWait, what?\r\n\r\nOh. It seems it actually used the Basic Auth strategy instead of the account one for login. This is odd and confusing as fsck.\r\n\r\nActually, I didn't went further with toying around with the admin as it looks broken to me. This is a little sad.\n", "before_files": [{"content": "from kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom pyramid.exceptions import ConfigurationError\n\nACCOUNT_CACHE_KEY = 'accounts:{}:verified'\nACCOUNT_POLICY_NAME = 'account'\n\n\ndef includeme(config):\n config.add_api_capability(\n 'accounts',\n description='Manage user accounts.',\n url='https://kinto.readthedocs.io/en/latest/api/1.x/accounts.html')\n\n config.scan('kinto.plugins.accounts.views')\n\n PERMISSIONS_INHERITANCE_TREE['root'].update({\n 'account:create': {}\n })\n PERMISSIONS_INHERITANCE_TREE['account'] = {\n 'write': {'account': ['write']},\n 'read': {'account': ['write', 'read']}\n }\n\n # Add some safety to avoid weird behaviour with basicauth default policy.\n settings = config.get_settings()\n auth_policies = settings['multiauth.policies']\n if 'basicauth' in auth_policies and 'account' in auth_policies:\n if auth_policies.index('basicauth') < auth_policies.index('account'):\n error_msg = (\"'basicauth' should not be mentioned before 'account' \"\n \"in 'multiauth.policies' setting.\")\n raise ConfigurationError(error_msg)\n\n # We assume anyone in account_create_principals is to create\n # accounts for other people.\n # No one can create accounts for other people unless they are an\n # \"admin\", defined as someone matching account_write_principals.\n # Therefore any account that is in account_create_principals\n # should be in account_write_principals too.\n creators = set(settings.get('account_create_principals', '').split())\n admins = set(settings.get('account_write_principals', '').split())\n cant_create_anything = creators.difference(admins)\n # system.Everyone isn't an account.\n cant_create_anything.discard('system.Everyone')\n if cant_create_anything:\n message = ('Configuration has some principals in account_create_principals '\n 'but not in account_write_principals. These principals will only be '\n 'able to create their own accounts. 
This may not be what you want.\\n'\n 'If you want these users to be able to create accounts for other users, '\n 'add them to account_write_principals.\\n'\n 'Affected users: {}'.format(list(cant_create_anything)))\n\n raise ConfigurationError(message)\n", "path": "kinto/plugins/accounts/__init__.py"}], "after_files": [{"content": "import re\n\nfrom kinto.authorization import PERMISSIONS_INHERITANCE_TREE\nfrom pyramid.exceptions import ConfigurationError\n\nACCOUNT_CACHE_KEY = 'accounts:{}:verified'\nACCOUNT_POLICY_NAME = 'account'\n\nDOCS_URL = \"https://kinto.readthedocs.io/en/stable/api/1.x/accounts.html\"\n\n\ndef includeme(config):\n config.add_api_capability(\n 'accounts',\n description='Manage user accounts.',\n url='https://kinto.readthedocs.io/en/latest/api/1.x/accounts.html')\n\n config.scan('kinto.plugins.accounts.views')\n\n PERMISSIONS_INHERITANCE_TREE['root'].update({\n 'account:create': {}\n })\n PERMISSIONS_INHERITANCE_TREE['account'] = {\n 'write': {'account': ['write']},\n 'read': {'account': ['write', 'read']}\n }\n\n settings = config.get_settings()\n\n # Check that the account policy is mentioned in config if included.\n accountClass = 'AccountsAuthenticationPolicy'\n policy = None\n for k, v in settings.items():\n m = re.match('multiauth\\.policy\\.(.*)\\.use', k)\n if m:\n if v.endswith(accountClass):\n policy = m.group(1)\n\n if not policy:\n error_msg = (\"Account policy missing the 'multiauth.policy.*.use' \"\n \"setting. See {} in docs {}.\").format(accountClass, DOCS_URL)\n raise ConfigurationError(error_msg)\n\n # Add some safety to avoid weird behaviour with basicauth default policy.\n auth_policies = settings['multiauth.policies']\n if 'basicauth' in auth_policies and policy in auth_policies:\n if auth_policies.index('basicauth') < auth_policies.index(policy):\n error_msg = (\"'basicauth' should not be mentioned before '%s' \"\n \"in 'multiauth.policies' setting.\") % policy\n raise ConfigurationError(error_msg)\n\n # We assume anyone in account_create_principals is to create\n # accounts for other people.\n # No one can create accounts for other people unless they are an\n # \"admin\", defined as someone matching account_write_principals.\n # Therefore any account that is in account_create_principals\n # should be in account_write_principals too.\n creators = set(settings.get('account_create_principals', '').split())\n admins = set(settings.get('account_write_principals', '').split())\n cant_create_anything = creators.difference(admins)\n # system.Everyone isn't an account.\n cant_create_anything.discard('system.Everyone')\n if cant_create_anything:\n message = ('Configuration has some principals in account_create_principals '\n 'but not in account_write_principals. These principals will only be '\n 'able to create their own accounts. This may not be what you want.\\n'\n 'If you want these users to be able to create accounts for other users, '\n 'add them to account_write_principals.\\n'\n 'Affected users: {}'.format(list(cant_create_anything)))\n\n raise ConfigurationError(message)\n", "path": "kinto/plugins/accounts/__init__.py"}]}
| 1,343 | 529 |
gh_patches_debug_37729
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-4257
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Dark Reaper does not work with Python 3
Motivation
----------
```
Traceback (most recent call last):
File "/usr/lib64/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib64/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.6/site-packages/rucio/daemons/reaper/dark_reaper.py", line 84, in reaper
hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()
TypeError: Unicode-objects must be encoded before hashing
```
```
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/rucio/daemons/reaper/dark_reaper.py", line 114, in reaper
operation='delete', scheme=scheme).values()[0])
TypeError: 'dict_values' object does not support indexing
```
Modification
------------
Port to Python 3.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/daemons/reaper/dark_reaper.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2016-2020 CERN
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 # Authors:
17 # - Vincent Garonne <[email protected]>, 2016-2018
18 # - Martin Barisits <[email protected]>, 2016
19 # - Thomas Beermann <[email protected]>, 2016-2019
20 # - Hannes Hansen <[email protected]>, 2018-2019
21 # - Andrew Lister <[email protected]>, 2019
22 # - Cedric Serfon <[email protected]>, 2020
23 # - Brandon White <[email protected]>, 2019
24 # - Patrick Austin <[email protected]>, 2020
25 # - Benedikt Ziemons <[email protected]>, 2020
26 # - Dimitrios Christidis <[email protected]>, 2020
27
28 '''
29 Dark Reaper is a daemon to manage quarantined file deletion.
30 '''
31
32 import hashlib
33 import logging
34 import os
35 import random
36 import socket
37 import sys
38 import threading
39 import time
40 import traceback
41
42 import rucio.db.sqla.util
43 from rucio.common import exception
44 from rucio.common.config import config_get, config_get_bool
45 from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
46 RSEAccessDenied, ResourceTemporaryUnavailable,
47 RSENotFound, VONotFound)
48 from rucio.core.heartbeat import live, die, sanity_check
49 from rucio.core.message import add_message
50 from rucio.core.quarantined_replica import (list_quarantined_replicas,
51 delete_quarantined_replicas,
52 list_rses)
53 from rucio.core.rse_expression_parser import parse_expression
54 from rucio.core.vo import list_vos
55 from rucio.rse import rsemanager as rsemgr
56
57 logging.getLogger("requests").setLevel(logging.CRITICAL)
58
59 logging.basicConfig(stream=sys.stdout,
60 level=getattr(logging,
61 config_get('common', 'loglevel',
62 raise_exception=False,
63 default='DEBUG').upper()),
64 format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
65
66 GRACEFUL_STOP = threading.Event()
67
68
69 def reaper(rses=[], worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None):
70 """
71 Main loop to select and delete files.
72
73 :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
74 :param worker_number: The worker number.
75 :param total_workers: The total number of workers.
76 :param chunk_size: the size of chunk for deletion.
77 :param once: If True, only runs one iteration of the main loop.
78 :param scheme: Force the reaper to use a particular protocol, e.g., mock.
79 """
80 logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, ', '.join(rses))
81
82 pid = os.getpid()
83 thread = threading.current_thread()
84 hostname = socket.gethostname()
85 executable = ' '.join(sys.argv)
86 hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()
87 sanity_check(executable=None, hostname=hostname)
88
89 while not GRACEFUL_STOP.is_set():
90 try:
91 # heartbeat
92 heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
93 logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
94 nothing_to_do = True
95
96 random.shuffle(rses)
97 for rse_id in rses:
98 replicas = list_quarantined_replicas(rse_id=rse_id,
99 limit=chunk_size, worker_number=worker_number,
100 total_workers=total_workers)
101
102 rse_info = rsemgr.get_rse_info(rse_id=rse_id)
103 rse = rse_info['rse']
104 prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
105 deleted_replicas = []
106 try:
107 prot.connect()
108 for replica in replicas:
109 nothing_to_do = False
110 scope = ''
111 if replica['scope']:
112 scope = replica['scope'].external
113 try:
114 pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,
115 lfns=[{'scope': scope, 'name': replica['name'], 'path': replica['path']}],
116 operation='delete', scheme=scheme).values()[0])
117 logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, scope, replica['name'], pfn, rse)
118 start = time.time()
119 prot.delete(pfn)
120 duration = time.time() - start
121 logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, scope, replica['name'], pfn, rse, duration)
122 payload = {'scope': scope,
123 'name': replica['name'],
124 'rse': rse,
125 'rse_id': rse_id,
126 'file-size': replica.get('bytes') or 0,
127 'bytes': replica.get('bytes') or 0,
128 'url': pfn,
129 'duration': duration,
130 'protocol': prot.attributes['scheme']}
131 if replica['scope'].vo != 'def':
132 payload['vo'] = replica['scope'].vo
133 add_message('deletion-done', payload)
134 deleted_replicas.append(replica)
135 except SourceNotFound:
136 err_msg = 'Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse)
137 logging.warning(err_msg)
138 deleted_replicas.append(replica)
139 except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
140 err_msg = 'Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse, str(error))
141 logging.warning(err_msg)
142 payload = {'scope': scope,
143 'name': replica['name'],
144 'rse': rse,
145 'rse_id': rse_id,
146 'file-size': replica['bytes'] or 0,
147 'bytes': replica['bytes'] or 0,
148 'url': pfn,
149 'reason': str(error),
150 'protocol': prot.attributes['scheme']}
151 if replica['scope'].vo != 'def':
152 payload['vo'] = replica['scope'].vo
153 add_message('deletion-failed', payload)
154
155 except Exception:
156 logging.critical(traceback.format_exc())
157 finally:
158 prot.close()
159
160 delete_quarantined_replicas(rse_id=rse_id, replicas=deleted_replicas)
161
162 if once:
163 break
164
165 if once:
166 break
167
168 if nothing_to_do:
169 logging.info('Dark Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)
170 time.sleep(60)
171
172 except DatabaseException as error:
173 logging.warning('Reaper: %s', str(error))
174 except Exception:
175 logging.critical(traceback.format_exc())
176
177 die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
178 logging.info('Graceful stop requested')
179 logging.info('Graceful stop done')
180 return
181
182
183 def stop(signum=None, frame=None):
184 """
185 Graceful exit.
186 """
187 GRACEFUL_STOP.set()
188
189
190 def run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,
191 exclude_rses=None, include_rses=None, vos=None, delay_seconds=0):
192 """
193 Starts up the reaper threads.
194
195 :param total_workers: The total number of workers.
196 :param chunk_size: the size of chunk for deletion.
197 :param once: If True, only runs one iteration of the main loop.
198 :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs (Single-VO only).
199 :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
200 :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
201 :param include_rses: RSE expression to include RSEs.
202 :param vos: VOs on which to look for RSEs. Only used in multi-VO mode.
203 If None, we either use all VOs if run from "def", or the current VO otherwise.
204 """
205 if rucio.db.sqla.util.is_old_db():
206 raise exception.DatabaseException('Database was not updated, daemon won\'t start')
207
208 logging.info('main: starting processes')
209
210 multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
211 if not multi_vo:
212 if vos:
213 logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')
214 vos = ['def']
215 else:
216 if vos:
217 invalid = set(vos) - set([v['vo'] for v in list_vos()])
218 if invalid:
219 msg = 'VO{} {} cannot be found'.format('s' if len(invalid) > 1 else '', ', '.join([repr(v) for v in invalid]))
220 raise VONotFound(msg)
221 else:
222 vos = [v['vo'] for v in list_vos()]
223 logging.info('Dark Reaper: This instance will work on VO%s: %s' % ('s' if len(vos) > 1 else '', ', '.join([v for v in vos])))
224
225 all_rses = []
226 for vo in vos:
227 all_rses.extend(list_rses(filters={'vo': vo}))
228
229 if rses:
230 invalid = set(rses) - set([rse['rse'] for rse in all_rses])
231 if invalid:
232 msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
233 ', '.join([repr(rse) for rse in invalid]))
234 raise RSENotFound(msg)
235 rses = [rse for rse in all_rses if rse['rse'] in rses]
236 else:
237 rses = all_rses
238
239 if exclude_rses:
240 excluded_rses = parse_expression(exclude_rses)
241 rses = [rse for rse in rses if rse not in excluded_rses]
242
243 if include_rses:
244 included_rses = parse_expression(include_rses)
245 rses = [rse for rse in rses if rse in included_rses]
246
247 if not rses:
248 logging.error('Dark Reaper: No RSEs found. Exiting.')
249 return
250
251 threads = []
252 for worker in range(total_workers):
253 kwargs = {'worker_number': worker,
254 'total_workers': total_workers,
255 'rses': rses,
256 'once': once,
257 'chunk_size': chunk_size,
258 'scheme': scheme}
259 threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
260 [t.start() for t in threads]
261 while threads[0].is_alive():
262 [t.join(timeout=3.14) for t in threads]
263
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/rucio/daemons/reaper/dark_reaper.py b/lib/rucio/daemons/reaper/dark_reaper.py
--- a/lib/rucio/daemons/reaper/dark_reaper.py
+++ b/lib/rucio/daemons/reaper/dark_reaper.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright 2016-2020 CERN
+# Copyright 2016-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@
# - Brandon White <[email protected]>, 2019
# - Patrick Austin <[email protected]>, 2020
# - Benedikt Ziemons <[email protected]>, 2020
-# - Dimitrios Christidis <[email protected]>, 2020
+# - Dimitrios Christidis <[email protected]>, 2020-2021
'''
Dark Reaper is a daemon to manage quarantined file deletion.
@@ -83,7 +83,7 @@
thread = threading.current_thread()
hostname = socket.gethostname()
executable = ' '.join(sys.argv)
- hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()
+ hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()
sanity_check(executable=None, hostname=hostname)
while not GRACEFUL_STOP.is_set():
@@ -111,9 +111,12 @@
if replica['scope']:
scope = replica['scope'].external
try:
- pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,
- lfns=[{'scope': scope, 'name': replica['name'], 'path': replica['path']}],
- operation='delete', scheme=scheme).values()[0])
+ pfn = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,
+ lfns=[{'scope': scope,
+ 'name': replica['name'],
+ 'path': replica['path']}],
+ operation='delete',
+ scheme=scheme).values())[0])
logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, scope, replica['name'], pfn, rse)
start = time.time()
prot.delete(pfn)
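Editor's note: the two Python 3 errors quoted in the issue, and the shape of the fix in the diff above, can be reproduced in isolation. The snippet below is a standalone sketch; the executable name, RSE list, and PFN mapping are made-up placeholders standing in for `sys.argv[0]`, the configured RSEs, and the output of `rsemgr.lfns2pfns`.

```python
import hashlib

# Placeholders for sys.argv[0] and the RSE list handed to the worker (assumed values).
argv0 = 'dark-reaper'
rses = ['MOCK_RSE_1', 'MOCK_RSE_2']

# Python 3: hashlib.sha256() wants bytes, so the joined string must be encoded first.
hash_executable = hashlib.sha256((argv0 + ''.join(rses)).encode()).hexdigest()

# Python 3: dict.values() returns a view that cannot be indexed; wrap it in list() first.
pfns = {'scope:name': 'mock://host/prefix/name'}  # assumed shape of an lfns2pfns() result
pfn = str(list(pfns.values())[0])

print(hash_executable)
print(pfn)
```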
|
{"golden_diff": "diff --git a/lib/rucio/daemons/reaper/dark_reaper.py b/lib/rucio/daemons/reaper/dark_reaper.py\n--- a/lib/rucio/daemons/reaper/dark_reaper.py\n+++ b/lib/rucio/daemons/reaper/dark_reaper.py\n@@ -1,5 +1,5 @@\n # -*- coding: utf-8 -*-\n-# Copyright 2016-2020 CERN\n+# Copyright 2016-2021 CERN\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -23,7 +23,7 @@\n # - Brandon White <[email protected]>, 2019\n # - Patrick Austin <[email protected]>, 2020\n # - Benedikt Ziemons <[email protected]>, 2020\n-# - Dimitrios Christidis <[email protected]>, 2020\n+# - Dimitrios Christidis <[email protected]>, 2020-2021\n \n '''\n Dark Reaper is a daemon to manage quarantined file deletion.\n@@ -83,7 +83,7 @@\n thread = threading.current_thread()\n hostname = socket.gethostname()\n executable = ' '.join(sys.argv)\n- hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()\n+ hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()\n sanity_check(executable=None, hostname=hostname)\n \n while not GRACEFUL_STOP.is_set():\n@@ -111,9 +111,12 @@\n if replica['scope']:\n scope = replica['scope'].external\n try:\n- pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,\n- lfns=[{'scope': scope, 'name': replica['name'], 'path': replica['path']}],\n- operation='delete', scheme=scheme).values()[0])\n+ pfn = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,\n+ lfns=[{'scope': scope,\n+ 'name': replica['name'],\n+ 'path': replica['path']}],\n+ operation='delete',\n+ scheme=scheme).values())[0])\n logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, scope, replica['name'], pfn, rse)\n start = time.time()\n prot.delete(pfn)\n", "issue": "Dark Reaper does not work with Python 3\nMotivation\r\n----------\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib64/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib64/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.6/site-packages/rucio/daemons/reaper/dark_reaper.py\", line 84, in reaper\r\n hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()\r\nTypeError: Unicode-objects must be encoded before hashing\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/site-packages/rucio/daemons/reaper/dark_reaper.py\", line 114, in reaper\r\n operation='delete', scheme=scheme).values()[0])\r\nTypeError: 'dict_values' object does not support indexing\r\n```\r\n\r\nModification\r\n------------\r\n\r\nPort to Python 3.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2020 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2016-2018\n# - Martin Barisits <[email protected]>, 2016\n# - Thomas Beermann <[email protected]>, 
2016-2019\n# - Hannes Hansen <[email protected]>, 2018-2019\n# - Andrew Lister <[email protected]>, 2019\n# - Cedric Serfon <[email protected]>, 2020\n# - Brandon White <[email protected]>, 2019\n# - Patrick Austin <[email protected]>, 2020\n# - Benedikt Ziemons <[email protected]>, 2020\n# - Dimitrios Christidis <[email protected]>, 2020\n\n'''\nDark Reaper is a daemon to manage quarantined file deletion.\n'''\n\nimport hashlib\nimport logging\nimport os\nimport random\nimport socket\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport rucio.db.sqla.util\nfrom rucio.common import exception\nfrom rucio.common.config import config_get, config_get_bool\nfrom rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,\n RSEAccessDenied, ResourceTemporaryUnavailable,\n RSENotFound, VONotFound)\nfrom rucio.core.heartbeat import live, die, sanity_check\nfrom rucio.core.message import add_message\nfrom rucio.core.quarantined_replica import (list_quarantined_replicas,\n delete_quarantined_replicas,\n list_rses)\nfrom rucio.core.rse_expression_parser import parse_expression\nfrom rucio.core.vo import list_vos\nfrom rucio.rse import rsemanager as rsemgr\n\nlogging.getLogger(\"requests\").setLevel(logging.CRITICAL)\n\nlogging.basicConfig(stream=sys.stdout,\n level=getattr(logging,\n config_get('common', 'loglevel',\n raise_exception=False,\n default='DEBUG').upper()),\n format='%(asctime)s\\t%(process)d\\t%(levelname)s\\t%(message)s')\n\nGRACEFUL_STOP = threading.Event()\n\n\ndef reaper(rses=[], worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None):\n \"\"\"\n Main loop to select and delete files.\n\n :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.\n :param worker_number: The worker number.\n :param total_workers: The total number of workers.\n :param chunk_size: the size of chunk for deletion.\n :param once: If True, only runs one iteration of the main loop.\n :param scheme: Force the reaper to use a particular protocol, e.g., mock.\n \"\"\"\n logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, ', '.join(rses))\n\n pid = os.getpid()\n thread = threading.current_thread()\n hostname = socket.gethostname()\n executable = ' '.join(sys.argv)\n hash_executable = hashlib.sha256(sys.argv[0] + ''.join(rses)).hexdigest()\n sanity_check(executable=None, hostname=hostname)\n\n while not GRACEFUL_STOP.is_set():\n try:\n # heartbeat\n heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)\n logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))\n nothing_to_do = True\n\n random.shuffle(rses)\n for rse_id in rses:\n replicas = list_quarantined_replicas(rse_id=rse_id,\n limit=chunk_size, worker_number=worker_number,\n total_workers=total_workers)\n\n rse_info = rsemgr.get_rse_info(rse_id=rse_id)\n rse = rse_info['rse']\n prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)\n deleted_replicas = []\n try:\n prot.connect()\n for replica in replicas:\n nothing_to_do = False\n scope = ''\n if replica['scope']:\n scope = replica['scope'].external\n try:\n pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,\n lfns=[{'scope': scope, 'name': replica['name'], 'path': replica['path']}],\n operation='delete', scheme=scheme).values()[0])\n logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, scope, 
replica['name'], pfn, rse)\n start = time.time()\n prot.delete(pfn)\n duration = time.time() - start\n logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, scope, replica['name'], pfn, rse, duration)\n payload = {'scope': scope,\n 'name': replica['name'],\n 'rse': rse,\n 'rse_id': rse_id,\n 'file-size': replica.get('bytes') or 0,\n 'bytes': replica.get('bytes') or 0,\n 'url': pfn,\n 'duration': duration,\n 'protocol': prot.attributes['scheme']}\n if replica['scope'].vo != 'def':\n payload['vo'] = replica['scope'].vo\n add_message('deletion-done', payload)\n deleted_replicas.append(replica)\n except SourceNotFound:\n err_msg = 'Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse)\n logging.warning(err_msg)\n deleted_replicas.append(replica)\n except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:\n err_msg = 'Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse, str(error))\n logging.warning(err_msg)\n payload = {'scope': scope,\n 'name': replica['name'],\n 'rse': rse,\n 'rse_id': rse_id,\n 'file-size': replica['bytes'] or 0,\n 'bytes': replica['bytes'] or 0,\n 'url': pfn,\n 'reason': str(error),\n 'protocol': prot.attributes['scheme']}\n if replica['scope'].vo != 'def':\n payload['vo'] = replica['scope'].vo\n add_message('deletion-failed', payload)\n\n except Exception:\n logging.critical(traceback.format_exc())\n finally:\n prot.close()\n\n delete_quarantined_replicas(rse_id=rse_id, replicas=deleted_replicas)\n\n if once:\n break\n\n if once:\n break\n\n if nothing_to_do:\n logging.info('Dark Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)\n time.sleep(60)\n\n except DatabaseException as error:\n logging.warning('Reaper: %s', str(error))\n except Exception:\n logging.critical(traceback.format_exc())\n\n die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)\n logging.info('Graceful stop requested')\n logging.info('Graceful stop done')\n return\n\n\ndef stop(signum=None, frame=None):\n \"\"\"\n Graceful exit.\n \"\"\"\n GRACEFUL_STOP.set()\n\n\ndef run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,\n exclude_rses=None, include_rses=None, vos=None, delay_seconds=0):\n \"\"\"\n Starts up the reaper threads.\n\n :param total_workers: The total number of workers.\n :param chunk_size: the size of chunk for deletion.\n :param once: If True, only runs one iteration of the main loop.\n :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs (Single-VO only).\n :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.\n :param exclude_rses: RSE expression to exclude RSEs from the Reaper.\n :param include_rses: RSE expression to include RSEs.\n :param vos: VOs on which to look for RSEs. 
Only used in multi-VO mode.\n If None, we either use all VOs if run from \"def\", or the current VO otherwise.\n \"\"\"\n if rucio.db.sqla.util.is_old_db():\n raise exception.DatabaseException('Database was not updated, daemon won\\'t start')\n\n logging.info('main: starting processes')\n\n multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)\n if not multi_vo:\n if vos:\n logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')\n vos = ['def']\n else:\n if vos:\n invalid = set(vos) - set([v['vo'] for v in list_vos()])\n if invalid:\n msg = 'VO{} {} cannot be found'.format('s' if len(invalid) > 1 else '', ', '.join([repr(v) for v in invalid]))\n raise VONotFound(msg)\n else:\n vos = [v['vo'] for v in list_vos()]\n logging.info('Dark Reaper: This instance will work on VO%s: %s' % ('s' if len(vos) > 1 else '', ', '.join([v for v in vos])))\n\n all_rses = []\n for vo in vos:\n all_rses.extend(list_rses(filters={'vo': vo}))\n\n if rses:\n invalid = set(rses) - set([rse['rse'] for rse in all_rses])\n if invalid:\n msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',\n ', '.join([repr(rse) for rse in invalid]))\n raise RSENotFound(msg)\n rses = [rse for rse in all_rses if rse['rse'] in rses]\n else:\n rses = all_rses\n\n if exclude_rses:\n excluded_rses = parse_expression(exclude_rses)\n rses = [rse for rse in rses if rse not in excluded_rses]\n\n if include_rses:\n included_rses = parse_expression(include_rses)\n rses = [rse for rse in rses if rse in included_rses]\n\n if not rses:\n logging.error('Dark Reaper: No RSEs found. Exiting.')\n return\n\n threads = []\n for worker in range(total_workers):\n kwargs = {'worker_number': worker,\n 'total_workers': total_workers,\n 'rses': rses,\n 'once': once,\n 'chunk_size': chunk_size,\n 'scheme': scheme}\n threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))\n [t.start() for t in threads]\n while threads[0].is_alive():\n [t.join(timeout=3.14) for t in threads]\n", "path": "lib/rucio/daemons/reaper/dark_reaper.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2016-2021 CERN\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2016-2018\n# - Martin Barisits <[email protected]>, 2016\n# - Thomas Beermann <[email protected]>, 2016-2019\n# - Hannes Hansen <[email protected]>, 2018-2019\n# - Andrew Lister <[email protected]>, 2019\n# - Cedric Serfon <[email protected]>, 2020\n# - Brandon White <[email protected]>, 2019\n# - Patrick Austin <[email protected]>, 2020\n# - Benedikt Ziemons <[email protected]>, 2020\n# - Dimitrios Christidis <[email protected]>, 2020-2021\n\n'''\nDark Reaper is a daemon to manage quarantined file deletion.\n'''\n\nimport hashlib\nimport logging\nimport os\nimport random\nimport socket\nimport sys\nimport threading\nimport time\nimport traceback\n\nimport rucio.db.sqla.util\nfrom rucio.common import 
exception\nfrom rucio.common.config import config_get, config_get_bool\nfrom rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,\n RSEAccessDenied, ResourceTemporaryUnavailable,\n RSENotFound, VONotFound)\nfrom rucio.core.heartbeat import live, die, sanity_check\nfrom rucio.core.message import add_message\nfrom rucio.core.quarantined_replica import (list_quarantined_replicas,\n delete_quarantined_replicas,\n list_rses)\nfrom rucio.core.rse_expression_parser import parse_expression\nfrom rucio.core.vo import list_vos\nfrom rucio.rse import rsemanager as rsemgr\n\nlogging.getLogger(\"requests\").setLevel(logging.CRITICAL)\n\nlogging.basicConfig(stream=sys.stdout,\n level=getattr(logging,\n config_get('common', 'loglevel',\n raise_exception=False,\n default='DEBUG').upper()),\n format='%(asctime)s\\t%(process)d\\t%(levelname)s\\t%(message)s')\n\nGRACEFUL_STOP = threading.Event()\n\n\ndef reaper(rses=[], worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None):\n \"\"\"\n Main loop to select and delete files.\n\n :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.\n :param worker_number: The worker number.\n :param total_workers: The total number of workers.\n :param chunk_size: the size of chunk for deletion.\n :param once: If True, only runs one iteration of the main loop.\n :param scheme: Force the reaper to use a particular protocol, e.g., mock.\n \"\"\"\n logging.info('Starting Dark Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, ', '.join(rses))\n\n pid = os.getpid()\n thread = threading.current_thread()\n hostname = socket.gethostname()\n executable = ' '.join(sys.argv)\n hash_executable = hashlib.sha256((sys.argv[0] + ''.join(rses)).encode()).hexdigest()\n sanity_check(executable=None, hostname=hostname)\n\n while not GRACEFUL_STOP.is_set():\n try:\n # heartbeat\n heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)\n logging.info('Dark Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))\n nothing_to_do = True\n\n random.shuffle(rses)\n for rse_id in rses:\n replicas = list_quarantined_replicas(rse_id=rse_id,\n limit=chunk_size, worker_number=worker_number,\n total_workers=total_workers)\n\n rse_info = rsemgr.get_rse_info(rse_id=rse_id)\n rse = rse_info['rse']\n prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)\n deleted_replicas = []\n try:\n prot.connect()\n for replica in replicas:\n nothing_to_do = False\n scope = ''\n if replica['scope']:\n scope = replica['scope'].external\n try:\n pfn = str(list(rsemgr.lfns2pfns(rse_settings=rse_info,\n lfns=[{'scope': scope,\n 'name': replica['name'],\n 'path': replica['path']}],\n operation='delete',\n scheme=scheme).values())[0])\n logging.info('Dark Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, scope, replica['name'], pfn, rse)\n start = time.time()\n prot.delete(pfn)\n duration = time.time() - start\n logging.info('Dark Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds', worker_number, total_workers, scope, replica['name'], pfn, rse, duration)\n payload = {'scope': scope,\n 'name': replica['name'],\n 'rse': rse,\n 'rse_id': rse_id,\n 'file-size': replica.get('bytes') or 0,\n 'bytes': replica.get('bytes') or 0,\n 'url': pfn,\n 'duration': duration,\n 'protocol': prot.attributes['scheme']}\n if replica['scope'].vo != 'def':\n payload['vo'] = replica['scope'].vo\n 
add_message('deletion-done', payload)\n deleted_replicas.append(replica)\n except SourceNotFound:\n err_msg = 'Dark Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse)\n logging.warning(err_msg)\n deleted_replicas.append(replica)\n except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:\n err_msg = 'Dark Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, scope, replica['name'], pfn, rse, str(error))\n logging.warning(err_msg)\n payload = {'scope': scope,\n 'name': replica['name'],\n 'rse': rse,\n 'rse_id': rse_id,\n 'file-size': replica['bytes'] or 0,\n 'bytes': replica['bytes'] or 0,\n 'url': pfn,\n 'reason': str(error),\n 'protocol': prot.attributes['scheme']}\n if replica['scope'].vo != 'def':\n payload['vo'] = replica['scope'].vo\n add_message('deletion-failed', payload)\n\n except Exception:\n logging.critical(traceback.format_exc())\n finally:\n prot.close()\n\n delete_quarantined_replicas(rse_id=rse_id, replicas=deleted_replicas)\n\n if once:\n break\n\n if once:\n break\n\n if nothing_to_do:\n logging.info('Dark Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)\n time.sleep(60)\n\n except DatabaseException as error:\n logging.warning('Reaper: %s', str(error))\n except Exception:\n logging.critical(traceback.format_exc())\n\n die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)\n logging.info('Graceful stop requested')\n logging.info('Graceful stop done')\n return\n\n\ndef stop(signum=None, frame=None):\n \"\"\"\n Graceful exit.\n \"\"\"\n GRACEFUL_STOP.set()\n\n\ndef run(total_workers=1, chunk_size=100, once=False, rses=[], scheme=None,\n exclude_rses=None, include_rses=None, vos=None, delay_seconds=0):\n \"\"\"\n Starts up the reaper threads.\n\n :param total_workers: The total number of workers.\n :param chunk_size: the size of chunk for deletion.\n :param once: If True, only runs one iteration of the main loop.\n :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs (Single-VO only).\n :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.\n :param exclude_rses: RSE expression to exclude RSEs from the Reaper.\n :param include_rses: RSE expression to include RSEs.\n :param vos: VOs on which to look for RSEs. 
Only used in multi-VO mode.\n If None, we either use all VOs if run from \"def\", or the current VO otherwise.\n \"\"\"\n if rucio.db.sqla.util.is_old_db():\n raise exception.DatabaseException('Database was not updated, daemon won\\'t start')\n\n logging.info('main: starting processes')\n\n multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)\n if not multi_vo:\n if vos:\n logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')\n vos = ['def']\n else:\n if vos:\n invalid = set(vos) - set([v['vo'] for v in list_vos()])\n if invalid:\n msg = 'VO{} {} cannot be found'.format('s' if len(invalid) > 1 else '', ', '.join([repr(v) for v in invalid]))\n raise VONotFound(msg)\n else:\n vos = [v['vo'] for v in list_vos()]\n logging.info('Dark Reaper: This instance will work on VO%s: %s' % ('s' if len(vos) > 1 else '', ', '.join([v for v in vos])))\n\n all_rses = []\n for vo in vos:\n all_rses.extend(list_rses(filters={'vo': vo}))\n\n if rses:\n invalid = set(rses) - set([rse['rse'] for rse in all_rses])\n if invalid:\n msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',\n ', '.join([repr(rse) for rse in invalid]))\n raise RSENotFound(msg)\n rses = [rse for rse in all_rses if rse['rse'] in rses]\n else:\n rses = all_rses\n\n if exclude_rses:\n excluded_rses = parse_expression(exclude_rses)\n rses = [rse for rse in rses if rse not in excluded_rses]\n\n if include_rses:\n included_rses = parse_expression(include_rses)\n rses = [rse for rse in rses if rse in included_rses]\n\n if not rses:\n logging.error('Dark Reaper: No RSEs found. Exiting.')\n return\n\n threads = []\n for worker in range(total_workers):\n kwargs = {'worker_number': worker,\n 'total_workers': total_workers,\n 'rses': rses,\n 'once': once,\n 'chunk_size': chunk_size,\n 'scheme': scheme}\n threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))\n [t.start() for t in threads]\n while threads[0].is_alive():\n [t.join(timeout=3.14) for t in threads]\n", "path": "lib/rucio/daemons/reaper/dark_reaper.py"}]}
| 3,963 | 617 |
gh_patches_debug_22977
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-410
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve documentation of cms.apps (CmsConfig)
Explain what cms.apps is doing, what CmsConfig is for and add docstrings of the following format:
```
"""
[Summary]
:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]
:type [ParamName]: [ParamType](, optional)
...
:raises [ErrorType]: [ErrorDescription]
...
:return: [ReturnDescription]
:rtype: [ReturnType]
"""
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/apps.py`
Content:
```
1 """
2 Django related class representing a config of an app
3 """
4 import logging
5 import sys
6 from django.conf import settings
7 from django.apps import AppConfig
8
9 logger = logging.getLogger(__name__)
10
11 class CmsConfig(AppConfig):
12 """
13 Class inheriting the django AppConfig
14 """
15
16 name = 'cms'
17
18 def ready(self):
19 if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:
20 logger.error("You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!")
21 sys.exit(1)
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cms/apps.py b/src/cms/apps.py
--- a/src/cms/apps.py
+++ b/src/cms/apps.py
@@ -1,6 +1,3 @@
-"""
-Django related class representing a config of an app
-"""
import logging
import sys
from django.conf import settings
@@ -10,12 +7,23 @@
class CmsConfig(AppConfig):
"""
- Class inheriting the django AppConfig
+ This class represents the Django-configuration of the backend.
+
+ See :class:`django.apps.AppConfig` for more information.
+
+ :param name: The name of the app
+ :type name: str
"""
name = 'cms'
def ready(self):
+ """
+ This function gets executed exactly once each time the cms starts. We use it to check wether the secret key was
+ not changed in production mode and show an error message if this is the case.
+
+ See :meth:`django.apps.AppConfig.ready` for more information.
+ """
if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:
logger.error("You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!")
sys.exit(1)
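Editor's note: the issue asks for docstrings in the Sphinx field-list template it quotes, and the diff above applies that style to `CmsConfig`. As a quick illustration of the same convention on a standalone helper (a hypothetical function, not one from the repository):

```python
def is_safe_secret_key(secret_key, debug):
    """
    Check whether the configured secret key may be used outside of debug mode.

    :param secret_key: the Django ``SECRET_KEY`` value to check
    :type secret_key: str
    :param debug: whether the application runs with ``DEBUG`` enabled
    :type debug: bool
    :return: True if running with this key is acceptable
    :rtype: bool
    """
    default_key = 'change-me'  # hypothetical stand-in for the shipped default key
    return debug or secret_key != default_key
```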
|
{"golden_diff": "diff --git a/src/cms/apps.py b/src/cms/apps.py\n--- a/src/cms/apps.py\n+++ b/src/cms/apps.py\n@@ -1,6 +1,3 @@\n-\"\"\"\n-Django related class representing a config of an app\n-\"\"\"\n import logging\n import sys\n from django.conf import settings\n@@ -10,12 +7,23 @@\n \n class CmsConfig(AppConfig):\n \"\"\"\n- Class inheriting the django AppConfig\n+ This class represents the Django-configuration of the backend.\n+\n+ See :class:`django.apps.AppConfig` for more information.\n+\n+ :param name: The name of the app\n+ :type name: str\n \"\"\"\n \n name = 'cms'\n \n def ready(self):\n+ \"\"\"\n+ This function gets executed exactly once each time the cms starts. We use it to check wether the secret key was\n+ not changed in production mode and show an error message if this is the case.\n+\n+ See :meth:`django.apps.AppConfig.ready` for more information.\n+ \"\"\"\n if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:\n logger.error(\"You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!\")\n sys.exit(1)\n", "issue": "Improve documentation of cms.apps (CmsConfig)\nExplain what cms.apps is doing, what CmsConfig is for and add docstrings of the following format:\r\n```\r\n\"\"\"\r\n[Summary]\r\n\r\n:param [ParamName]: [ParamDescription], defaults to [DefaultParamVal]\r\n:type [ParamName]: [ParamType](, optional)\r\n...\r\n:raises [ErrorType]: [ErrorDescription]\r\n...\r\n:return: [ReturnDescription]\r\n:rtype: [ReturnType]\r\n\"\"\"\r\n```\n", "before_files": [{"content": "\"\"\"\nDjango related class representing a config of an app\n\"\"\"\nimport logging\nimport sys\nfrom django.conf import settings\nfrom django.apps import AppConfig\n\nlogger = logging.getLogger(__name__)\n\nclass CmsConfig(AppConfig):\n \"\"\"\n Class inheriting the django AppConfig\n \"\"\"\n\n name = 'cms'\n\n def ready(self):\n if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:\n logger.error(\"You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!\")\n sys.exit(1)\n", "path": "src/cms/apps.py"}], "after_files": [{"content": "import logging\nimport sys\nfrom django.conf import settings\nfrom django.apps import AppConfig\n\nlogger = logging.getLogger(__name__)\n\nclass CmsConfig(AppConfig):\n \"\"\"\n This class represents the Django-configuration of the backend.\n\n See :class:`django.apps.AppConfig` for more information.\n\n :param name: The name of the app\n :type name: str\n \"\"\"\n\n name = 'cms'\n\n def ready(self):\n \"\"\"\n This function gets executed exactly once each time the cms starts. We use it to check wether the secret key was\n not changed in production mode and show an error message if this is the case.\n\n See :meth:`django.apps.AppConfig.ready` for more information.\n \"\"\"\n if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:\n logger.error(\"You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!\")\n sys.exit(1)\n", "path": "src/cms/apps.py"}]}
| 542 | 312 |
gh_patches_debug_19895
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-57
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Schema getter should return public, but not mathesar_types
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
If a user wants to create a table the `public` schema, they can't currently, because the logic in the `db.schemas.get_all_schemas` function ignores it. This means when they try, an error is thrown. This is especially a problem when they've imported a DB, since most tables are in the `public` schema in most installations of PostgreSQL in the wild.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
The public schema should be available for holding mathesar tables.
**To Reproduce**
Please try to provide a [Minimal, Complete, and Verifiable](http://stackoverflow.com/help/mcve) example.
Start the webapp using the README. Try to upload a CSV to the `public` schema. See the error.
**Have a nice day!**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/schemas.py`
Content:
```
1 from sqlalchemy.schema import CreateSchema
2 from sqlalchemy import inspect
3
4
5 def get_all_schemas(engine):
6 inspector = inspect(engine)
7 return [
8 schema
9 for schema in inspector.get_schema_names()
10 if schema not in ["public", "information_schema"]
11 ]
12
13
14 def schema_exists(schema, engine):
15 return schema in get_all_schemas(engine)
16
17
18 def create_schema(schema, engine):
19 """
20 This method creates a Postgres schema.
21 """
22 if not schema_exists(schema, engine):
23 with engine.begin() as connection:
24 connection.execute(CreateSchema(schema))
25
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/schemas.py b/db/schemas.py
--- a/db/schemas.py
+++ b/db/schemas.py
@@ -1,24 +1,28 @@
from sqlalchemy.schema import CreateSchema
from sqlalchemy import inspect
+from db import types
-def get_all_schemas(engine):
- inspector = inspect(engine)
+TYPES_SCHEMA = types.base.SCHEMA
+
+
+def get_mathesar_schemas(engine):
return [
schema
- for schema in inspector.get_schema_names()
- if schema not in ["public", "information_schema"]
+ for schema in get_all_schemas(engine)
+ if schema not in [TYPES_SCHEMA, "information_schema"]
]
-def schema_exists(schema, engine):
- return schema in get_all_schemas(engine)
+def get_all_schemas(engine):
+ inspector = inspect(engine)
+ return inspector.get_schema_names()
def create_schema(schema, engine):
"""
This method creates a Postgres schema.
"""
- if not schema_exists(schema, engine):
+ if schema not in get_all_schemas(engine):
with engine.begin() as connection:
connection.execute(CreateSchema(schema))
|
{"golden_diff": "diff --git a/db/schemas.py b/db/schemas.py\n--- a/db/schemas.py\n+++ b/db/schemas.py\n@@ -1,24 +1,28 @@\n from sqlalchemy.schema import CreateSchema\n from sqlalchemy import inspect\n \n+from db import types\n \n-def get_all_schemas(engine):\n- inspector = inspect(engine)\n+TYPES_SCHEMA = types.base.SCHEMA\n+\n+\n+def get_mathesar_schemas(engine):\n return [\n schema\n- for schema in inspector.get_schema_names()\n- if schema not in [\"public\", \"information_schema\"]\n+ for schema in get_all_schemas(engine)\n+ if schema not in [TYPES_SCHEMA, \"information_schema\"]\n ]\n \n \n-def schema_exists(schema, engine):\n- return schema in get_all_schemas(engine)\n+def get_all_schemas(engine):\n+ inspector = inspect(engine)\n+ return inspector.get_schema_names()\n \n \n def create_schema(schema, engine):\n \"\"\"\n This method creates a Postgres schema.\n \"\"\"\n- if not schema_exists(schema, engine):\n+ if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n", "issue": "Schema getter should return public, but not mathesar_types\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nIf a user wants to create a table the `public` schema, they can't currently, because the logic in the `db.schemas.get_all_schemas` function ignores it. This means when they try, an error is thrown. This is especially a problem when they've imported a DB, since most tables are in the `public` schema in most installations of PostgreSQL in the wild.\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe public schema should be available for holding mathesar tables.\r\n\r\n**To Reproduce**\r\nPlease try to provide a [Minimal, Complete, and Verifiable](http://stackoverflow.com/help/mcve) example.\r\n\r\nStart the webapp using the README. Try to upload a CSV to the `public` schema. See the error.\r\n\r\n**Have a nice day!**\r\n\n", "before_files": [{"content": "from sqlalchemy.schema import CreateSchema\nfrom sqlalchemy import inspect\n\n\ndef get_all_schemas(engine):\n inspector = inspect(engine)\n return [\n schema\n for schema in inspector.get_schema_names()\n if schema not in [\"public\", \"information_schema\"]\n ]\n\n\ndef schema_exists(schema, engine):\n return schema in get_all_schemas(engine)\n\n\ndef create_schema(schema, engine):\n \"\"\"\n This method creates a Postgres schema.\n \"\"\"\n if not schema_exists(schema, engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n", "path": "db/schemas.py"}], "after_files": [{"content": "from sqlalchemy.schema import CreateSchema\nfrom sqlalchemy import inspect\n\nfrom db import types\n\nTYPES_SCHEMA = types.base.SCHEMA\n\n\ndef get_mathesar_schemas(engine):\n return [\n schema\n for schema in get_all_schemas(engine)\n if schema not in [TYPES_SCHEMA, \"information_schema\"]\n ]\n\n\ndef get_all_schemas(engine):\n inspector = inspect(engine)\n return inspector.get_schema_names()\n\n\ndef create_schema(schema, engine):\n \"\"\"\n This method creates a Postgres schema.\n \"\"\"\n if schema not in get_all_schemas(engine):\n with engine.begin() as connection:\n connection.execute(CreateSchema(schema))\n", "path": "db/schemas.py"}]}
| 626 | 248 |
gh_patches_debug_30876
|
rasdani/github-patches
|
git_diff
|
google__jax-12203
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pure_callback passes jax.DeviceArray to the callback on CPU when not jitted
### Description
As title.
This only happens when the callback is executed outside of jit boundaries.
This is different from the documented behaviour.
```python
➜ python
Python 3.10.6 (main, Aug 23 2022, 11:35:18) [Clang 13.1.6 (clang-1316.0.21.2.5)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import jax
>>> def test(x):
... print(type(x))
... return x
...
>>> def f(x):
... return jax.pure_callback(test, x, x)
...
>>> x= jax.numpy.ones(3)
>>> f(x)
<class 'jaxlib.xla_extension.DeviceArray'>
DeviceArray([1., 1., 1.], dtype=float32)
>>> jax.jit(f)(x)
<class 'numpy.ndarray'>
DeviceArray([1., 1., 1.], dtype=float32)
```
### What jax/jaxlib version are you using?
jax 0.3.17
### Which accelerator(s) are you using?
CPU
### Additional System Info
MacOs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jax/_src/callback.py`
Content:
```
1 # Copyright 2022 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Module for JAX callbacks."""
15 from __future__ import annotations
16
17 from typing import Any, Callable, Sequence
18
19 from jax import core
20 from jax import tree_util
21 from jax._src import dtypes
22 from jax._src import lib as jaxlib
23 from jax._src import util
24 from jax.interpreters import ad
25 from jax.interpreters import batching
26 from jax.interpreters import mlir
27 import numpy as np
28
29 # `pure_callback_p` is the main primitive for staging out Python pure callbacks.
30 pure_callback_p = core.Primitive("pure_callback")
31 pure_callback_p.multiple_results = True
32
33 map, unsafe_map = util.safe_map, map
34
35
36 @pure_callback_p.def_impl
37 def pure_callback_impl(*args, result_avals, callback: Callable[..., Any],
38 vectorized: bool):
39 del vectorized, result_avals
40 return callback(*args)
41
42
43 @pure_callback_p.def_abstract_eval
44 def pure_callback_abstract_eval(*avals, callback: Callable[..., Any],
45 result_avals, vectorized: bool):
46 del avals, callback, vectorized
47 return result_avals
48
49
50 def pure_callback_jvp_rule(*args, **kwargs):
51 del args, kwargs
52 raise ValueError(
53 "Pure callbacks do not support JVP. "
54 "Please use `jax.custom_jvp` to use callbacks while taking gradients.")
55
56
57 ad.primitive_jvps[pure_callback_p] = pure_callback_jvp_rule
58
59
60 def pure_callback_transpose_rule(*args, **kwargs):
61 del args, kwargs
62 raise ValueError(
63 "Pure callbacks do not support transpose. "
64 "Please use `jax.custom_vjp` to use callbacks while taking gradients.")
65
66 ad.primitive_transposes[pure_callback_p] = pure_callback_transpose_rule
67
68
69 def pure_callback_batching_rule(args, dims, *, callback, vectorized: bool,
70 result_avals: Sequence[core.ShapedArray]):
71 axis_size = next(a.shape[0] for a, d in zip(args, dims)
72 if d is not batching.not_mapped)
73 new_args = [arg if dim is batching.not_mapped else
74 batching.moveaxis(arg, dim, 0) for arg, dim in zip(args, dims)]
75 if vectorized:
76 result_avals = tuple(
77 core.unmapped_aval(axis_size, core.no_axis_name, 0, aval) # type: ignore
78 for aval in result_avals)
79 outvals = pure_callback_p.bind(
80 *new_args, callback=callback, vectorized=vectorized,
81 result_avals=result_avals)
82 else:
83 is_batched = [d is not batching.not_mapped for d in dims]
84 unbatched_args, batched_args = util.partition_list(is_batched, new_args)
85 def _batch_fun(*batched_args):
86 merged_args = util.merge_lists(is_batched, unbatched_args, batched_args)
87 return pure_callback_p.bind(
88 *merged_args, callback=callback, result_avals=result_avals,
89 vectorized=vectorized)
90 from jax._src.lax.control_flow import map as lax_map
91 outvals = lax_map(_batch_fun, *batched_args)
92 return tuple(outvals), (0,) * len(outvals)
93
94
95 batching.primitive_batchers[pure_callback_p] = pure_callback_batching_rule
96
97
98 def pure_callback_lowering(ctx, *args, callback, **params):
99
100 if ctx.module_context.platform == "TPU" and jaxlib.version < (0, 3, 15):
101 raise NotImplementedError("Pure callbacks on TPU not supported. "
102 "Please upgrade to a jaxlib >= 0.3.15.")
103
104 def _callback(*flat_args):
105 return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params))
106
107 result, _, keepalive = mlir.emit_python_callback(
108 ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,
109 sharding=None)
110 ctx.module_context.add_keepalive(keepalive)
111 return result
112
113 mlir.register_lowering(pure_callback_p, pure_callback_lowering)
114
115 def _check_shape_dtype(shape_dtype):
116 dt = np.dtype(shape_dtype.dtype)
117 if dtypes.canonicalize_dtype(dt) != dt:
118 raise ValueError(
119 "Cannot return 64-bit values when `jax_enable_x64` is disabled")
120
121 def pure_callback(callback: Callable[..., Any], result_shape_dtypes: Any,
122 *args: Any, vectorized: bool = False, **kwargs: Any):
123 def _flat_callback(*flat_args):
124 args, kwargs = tree_util.tree_unflatten(in_tree, flat_args)
125 return tree_util.tree_leaves(callback(*args, **kwargs))
126
127 flat_args, in_tree = tree_util.tree_flatten((args, kwargs))
128 tree_util.tree_map(_check_shape_dtype, result_shape_dtypes)
129 result_avals = tree_util.tree_map(
130 lambda x: core.ShapedArray(x.shape, x.dtype), result_shape_dtypes)
131 flat_result_avals, out_tree = tree_util.tree_flatten(result_avals)
132 out_flat = pure_callback_p.bind(
133 *flat_args, callback=_flat_callback,
134 result_avals=tuple(flat_result_avals), vectorized=vectorized)
135 return tree_util.tree_unflatten(out_tree, out_flat)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/jax/_src/callback.py b/jax/_src/callback.py
--- a/jax/_src/callback.py
+++ b/jax/_src/callback.py
@@ -14,6 +14,8 @@
"""Module for JAX callbacks."""
from __future__ import annotations
+import functools
+
from typing import Any, Callable, Sequence
from jax import core
@@ -21,6 +23,7 @@
from jax._src import dtypes
from jax._src import lib as jaxlib
from jax._src import util
+from jax._src import dispatch
from jax.interpreters import ad
from jax.interpreters import batching
from jax.interpreters import mlir
@@ -33,11 +36,12 @@
map, unsafe_map = util.safe_map, map
-@pure_callback_p.def_impl
def pure_callback_impl(*args, result_avals, callback: Callable[..., Any],
vectorized: bool):
del vectorized, result_avals
return callback(*args)
+pure_callback_p.def_impl(functools.partial(dispatch.apply_primitive,
+ pure_callback_p))
@pure_callback_p.def_abstract_eval
@@ -102,7 +106,7 @@
"Please upgrade to a jaxlib >= 0.3.15.")
def _callback(*flat_args):
- return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params))
+ return tuple(pure_callback_impl(*flat_args, callback=callback, **params))
result, _, keepalive = mlir.emit_python_callback(
ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,
|
{"golden_diff": "diff --git a/jax/_src/callback.py b/jax/_src/callback.py\n--- a/jax/_src/callback.py\n+++ b/jax/_src/callback.py\n@@ -14,6 +14,8 @@\n \"\"\"Module for JAX callbacks.\"\"\"\n from __future__ import annotations\n \n+import functools\n+\n from typing import Any, Callable, Sequence\n \n from jax import core\n@@ -21,6 +23,7 @@\n from jax._src import dtypes\n from jax._src import lib as jaxlib\n from jax._src import util\n+from jax._src import dispatch\n from jax.interpreters import ad\n from jax.interpreters import batching\n from jax.interpreters import mlir\n@@ -33,11 +36,12 @@\n map, unsafe_map = util.safe_map, map\n \n \n-@pure_callback_p.def_impl\n def pure_callback_impl(*args, result_avals, callback: Callable[..., Any],\n vectorized: bool):\n del vectorized, result_avals\n return callback(*args)\n+pure_callback_p.def_impl(functools.partial(dispatch.apply_primitive,\n+ pure_callback_p))\n \n \n @pure_callback_p.def_abstract_eval\n@@ -102,7 +106,7 @@\n \"Please upgrade to a jaxlib >= 0.3.15.\")\n \n def _callback(*flat_args):\n- return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params))\n+ return tuple(pure_callback_impl(*flat_args, callback=callback, **params))\n \n result, _, keepalive = mlir.emit_python_callback(\n ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,\n", "issue": "pure_callback passes jax.DeviceArray to the callback on CPU when not jitted\n### Description\r\n\r\nAs title. \r\nThis only happens when the callback is executed outside of jit boundaries.\r\n\r\nThis is different from the documented behaviour.\r\n\r\n```python\r\n\u279c python\r\nPython 3.10.6 (main, Aug 23 2022, 11:35:18) [Clang 13.1.6 (clang-1316.0.21.2.5)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import jax\r\n>>> def test(x):\r\n... print(type(x))\r\n... return x\r\n...\r\n>>> def f(x):\r\n... 
return jax.pure_callback(test, x, x)\r\n...\r\n>>> x= jax.numpy.ones(3)\r\n>>> f(x)\r\n<class 'jaxlib.xla_extension.DeviceArray'>\r\nDeviceArray([1., 1., 1.], dtype=float32)\r\n>>> jax.jit(f)(x)\r\n<class 'numpy.ndarray'>\r\nDeviceArray([1., 1., 1.], dtype=float32)\r\n```\r\n\r\n### What jax/jaxlib version are you using?\r\n\r\njax 0.3.17\r\n\r\n### Which accelerator(s) are you using?\r\n\r\nCPU\r\n\r\n### Additional System Info\r\n\r\nMacOs\n", "before_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Module for JAX callbacks.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any, Callable, Sequence\n\nfrom jax import core\nfrom jax import tree_util\nfrom jax._src import dtypes\nfrom jax._src import lib as jaxlib\nfrom jax._src import util\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import mlir\nimport numpy as np\n\n# `pure_callback_p` is the main primitive for staging out Python pure callbacks.\npure_callback_p = core.Primitive(\"pure_callback\")\npure_callback_p.multiple_results = True\n\nmap, unsafe_map = util.safe_map, map\n\n\n@pure_callback_p.def_impl\ndef pure_callback_impl(*args, result_avals, callback: Callable[..., Any],\n vectorized: bool):\n del vectorized, result_avals\n return callback(*args)\n\n\n@pure_callback_p.def_abstract_eval\ndef pure_callback_abstract_eval(*avals, callback: Callable[..., Any],\n result_avals, vectorized: bool):\n del avals, callback, vectorized\n return result_avals\n\n\ndef pure_callback_jvp_rule(*args, **kwargs):\n del args, kwargs\n raise ValueError(\n \"Pure callbacks do not support JVP. \"\n \"Please use `jax.custom_jvp` to use callbacks while taking gradients.\")\n\n\nad.primitive_jvps[pure_callback_p] = pure_callback_jvp_rule\n\n\ndef pure_callback_transpose_rule(*args, **kwargs):\n del args, kwargs\n raise ValueError(\n \"Pure callbacks do not support transpose. 
\"\n \"Please use `jax.custom_vjp` to use callbacks while taking gradients.\")\n\nad.primitive_transposes[pure_callback_p] = pure_callback_transpose_rule\n\n\ndef pure_callback_batching_rule(args, dims, *, callback, vectorized: bool,\n result_avals: Sequence[core.ShapedArray]):\n axis_size = next(a.shape[0] for a, d in zip(args, dims)\n if d is not batching.not_mapped)\n new_args = [arg if dim is batching.not_mapped else\n batching.moveaxis(arg, dim, 0) for arg, dim in zip(args, dims)]\n if vectorized:\n result_avals = tuple(\n core.unmapped_aval(axis_size, core.no_axis_name, 0, aval) # type: ignore\n for aval in result_avals)\n outvals = pure_callback_p.bind(\n *new_args, callback=callback, vectorized=vectorized,\n result_avals=result_avals)\n else:\n is_batched = [d is not batching.not_mapped for d in dims]\n unbatched_args, batched_args = util.partition_list(is_batched, new_args)\n def _batch_fun(*batched_args):\n merged_args = util.merge_lists(is_batched, unbatched_args, batched_args)\n return pure_callback_p.bind(\n *merged_args, callback=callback, result_avals=result_avals,\n vectorized=vectorized)\n from jax._src.lax.control_flow import map as lax_map\n outvals = lax_map(_batch_fun, *batched_args)\n return tuple(outvals), (0,) * len(outvals)\n\n\nbatching.primitive_batchers[pure_callback_p] = pure_callback_batching_rule\n\n\ndef pure_callback_lowering(ctx, *args, callback, **params):\n\n if ctx.module_context.platform == \"TPU\" and jaxlib.version < (0, 3, 15):\n raise NotImplementedError(\"Pure callbacks on TPU not supported. \"\n \"Please upgrade to a jaxlib >= 0.3.15.\")\n\n def _callback(*flat_args):\n return tuple(pure_callback_p.impl(*flat_args, callback=callback, **params))\n\n result, _, keepalive = mlir.emit_python_callback(\n ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,\n sharding=None)\n ctx.module_context.add_keepalive(keepalive)\n return result\n\nmlir.register_lowering(pure_callback_p, pure_callback_lowering)\n\ndef _check_shape_dtype(shape_dtype):\n dt = np.dtype(shape_dtype.dtype)\n if dtypes.canonicalize_dtype(dt) != dt:\n raise ValueError(\n \"Cannot return 64-bit values when `jax_enable_x64` is disabled\")\n\ndef pure_callback(callback: Callable[..., Any], result_shape_dtypes: Any,\n *args: Any, vectorized: bool = False, **kwargs: Any):\n def _flat_callback(*flat_args):\n args, kwargs = tree_util.tree_unflatten(in_tree, flat_args)\n return tree_util.tree_leaves(callback(*args, **kwargs))\n\n flat_args, in_tree = tree_util.tree_flatten((args, kwargs))\n tree_util.tree_map(_check_shape_dtype, result_shape_dtypes)\n result_avals = tree_util.tree_map(\n lambda x: core.ShapedArray(x.shape, x.dtype), result_shape_dtypes)\n flat_result_avals, out_tree = tree_util.tree_flatten(result_avals)\n out_flat = pure_callback_p.bind(\n *flat_args, callback=_flat_callback,\n result_avals=tuple(flat_result_avals), vectorized=vectorized)\n return tree_util.tree_unflatten(out_tree, out_flat)\n", "path": "jax/_src/callback.py"}], "after_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the License.\n\"\"\"Module for JAX callbacks.\"\"\"\nfrom __future__ import annotations\n\nimport functools\n\nfrom typing import Any, Callable, Sequence\n\nfrom jax import core\nfrom jax import tree_util\nfrom jax._src import dtypes\nfrom jax._src import lib as jaxlib\nfrom jax._src import util\nfrom jax._src import dispatch\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import mlir\nimport numpy as np\n\n# `pure_callback_p` is the main primitive for staging out Python pure callbacks.\npure_callback_p = core.Primitive(\"pure_callback\")\npure_callback_p.multiple_results = True\n\nmap, unsafe_map = util.safe_map, map\n\n\ndef pure_callback_impl(*args, result_avals, callback: Callable[..., Any],\n vectorized: bool):\n del vectorized, result_avals\n return callback(*args)\npure_callback_p.def_impl(functools.partial(dispatch.apply_primitive,\n pure_callback_p))\n\n\n@pure_callback_p.def_abstract_eval\ndef pure_callback_abstract_eval(*avals, callback: Callable[..., Any],\n result_avals, vectorized: bool):\n del avals, callback, vectorized\n return result_avals\n\n\ndef pure_callback_jvp_rule(*args, **kwargs):\n del args, kwargs\n raise ValueError(\n \"Pure callbacks do not support JVP. \"\n \"Please use `jax.custom_jvp` to use callbacks while taking gradients.\")\n\n\nad.primitive_jvps[pure_callback_p] = pure_callback_jvp_rule\n\n\ndef pure_callback_transpose_rule(*args, **kwargs):\n del args, kwargs\n raise ValueError(\n \"Pure callbacks do not support transpose. \"\n \"Please use `jax.custom_vjp` to use callbacks while taking gradients.\")\n\nad.primitive_transposes[pure_callback_p] = pure_callback_transpose_rule\n\n\ndef pure_callback_batching_rule(args, dims, *, callback, vectorized: bool,\n result_avals: Sequence[core.ShapedArray]):\n axis_size = next(a.shape[0] for a, d in zip(args, dims)\n if d is not batching.not_mapped)\n new_args = [arg if dim is batching.not_mapped else\n batching.moveaxis(arg, dim, 0) for arg, dim in zip(args, dims)]\n if vectorized:\n result_avals = tuple(\n core.unmapped_aval(axis_size, core.no_axis_name, 0, aval) # type: ignore\n for aval in result_avals)\n outvals = pure_callback_p.bind(\n *new_args, callback=callback, vectorized=vectorized,\n result_avals=result_avals)\n else:\n is_batched = [d is not batching.not_mapped for d in dims]\n unbatched_args, batched_args = util.partition_list(is_batched, new_args)\n def _batch_fun(*batched_args):\n merged_args = util.merge_lists(is_batched, unbatched_args, batched_args)\n return pure_callback_p.bind(\n *merged_args, callback=callback, result_avals=result_avals,\n vectorized=vectorized)\n from jax._src.lax.control_flow import map as lax_map\n outvals = lax_map(_batch_fun, *batched_args)\n return tuple(outvals), (0,) * len(outvals)\n\n\nbatching.primitive_batchers[pure_callback_p] = pure_callback_batching_rule\n\n\ndef pure_callback_lowering(ctx, *args, callback, **params):\n\n if ctx.module_context.platform == \"TPU\" and jaxlib.version < (0, 3, 15):\n raise NotImplementedError(\"Pure callbacks on TPU not supported. 
\"\n \"Please upgrade to a jaxlib >= 0.3.15.\")\n\n def _callback(*flat_args):\n return tuple(pure_callback_impl(*flat_args, callback=callback, **params))\n\n result, _, keepalive = mlir.emit_python_callback(\n ctx, _callback, None, list(args), ctx.avals_in, ctx.avals_out, False,\n sharding=None)\n ctx.module_context.add_keepalive(keepalive)\n return result\n\nmlir.register_lowering(pure_callback_p, pure_callback_lowering)\n\ndef _check_shape_dtype(shape_dtype):\n dt = np.dtype(shape_dtype.dtype)\n if dtypes.canonicalize_dtype(dt) != dt:\n raise ValueError(\n \"Cannot return 64-bit values when `jax_enable_x64` is disabled\")\n\ndef pure_callback(callback: Callable[..., Any], result_shape_dtypes: Any,\n *args: Any, vectorized: bool = False, **kwargs: Any):\n def _flat_callback(*flat_args):\n args, kwargs = tree_util.tree_unflatten(in_tree, flat_args)\n return tree_util.tree_leaves(callback(*args, **kwargs))\n\n flat_args, in_tree = tree_util.tree_flatten((args, kwargs))\n tree_util.tree_map(_check_shape_dtype, result_shape_dtypes)\n result_avals = tree_util.tree_map(\n lambda x: core.ShapedArray(x.shape, x.dtype), result_shape_dtypes)\n flat_result_avals, out_tree = tree_util.tree_flatten(result_avals)\n out_flat = pure_callback_p.bind(\n *flat_args, callback=_flat_callback,\n result_avals=tuple(flat_result_avals), vectorized=vectorized)\n return tree_util.tree_unflatten(out_tree, out_flat)\n", "path": "jax/_src/callback.py"}]}
| 2,140 | 378 |
gh_patches_debug_48322
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-1564
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pesquisa matérias legislativas
Ao preencher alguns dados no form de pesquisa de matéria legislativa e clicar em pesquisar, ocorre o seguinte:
No chrome o botão desabilita e a seta do mouse vira um sinal de proibido
no firefox ocorre o mesmo, porem mostra resultados
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sapl/crispy_layout_mixin.py`
Content:
```
1 from math import ceil
2
3 import rtyaml
4 from crispy_forms.bootstrap import FormActions
5 from crispy_forms.helper import FormHelper
6 from crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit
7 from django import template
8 from django.core.urlresolvers import reverse
9 from django.utils import formats
10 from django.utils.translation import ugettext as _
11
12
13 def heads_and_tails(list_of_lists):
14 for alist in list_of_lists:
15 yield alist[0], alist[1:]
16
17
18 def to_column(name_span):
19 fieldname, span = name_span
20 return Div(fieldname, css_class='col-md-%d' % span)
21
22
23 def to_row(names_spans):
24 return Div(*map(to_column, names_spans), css_class='row-fluid')
25
26
27 def to_fieldsets(fields):
28 for field in fields:
29 if isinstance(field, list):
30 legend, row_specs = field[0], field[1:]
31 rows = [to_row(name_span_list) for name_span_list in row_specs]
32 yield Fieldset(legend, *rows)
33 else:
34 yield field
35
36
37 def form_actions(more=[], save_label=_('Salvar')):
38 return FormActions(
39 Submit('salvar', save_label, css_class='pull-right',
40 # para impedir resubmissão do form
41 onclick='this.disabled=true;'),
42 *more)
43
44
45 class SaplFormLayout(Layout):
46
47 def __init__(self, *fields, cancel_label=_('Cancelar'),
48 save_label=_('Salvar'), actions=None):
49
50 buttons = actions
51 if not buttons:
52 buttons = form_actions(save_label=save_label, more=[
53 HTML('<a href="{{ view.cancel_url }}"'
54 ' class="btn btn-inverse">%s</a>' % cancel_label)
55 if cancel_label else None])
56
57 _fields = list(to_fieldsets(fields))
58 if buttons:
59 _fields += [to_row([(buttons, 12)])]
60 super(SaplFormLayout, self).__init__(*_fields)
61
62
63 def get_field_display(obj, fieldname):
64 field = ''
65 try:
66 field = obj._meta.get_field(fieldname)
67 except Exception as e:
68 """ nos casos que o fieldname não é um field_model,
69 ele pode ser um aggregate, annotate, um property, um manager,
70 ou mesmo uma método no model.
71 """
72 value = getattr(obj, fieldname)
73 try:
74 verbose_name = value.model._meta.verbose_name
75 except AttributeError:
76 verbose_name = ''
77
78 else:
79 verbose_name = str(field.verbose_name)\
80 if hasattr(field, 'verbose_name') else ''
81
82 if hasattr(field, 'choices') and field.choices:
83 value = getattr(obj, 'get_%s_display' % fieldname)()
84 else:
85 value = getattr(obj, fieldname)
86
87 str_type_from_value = str(type(value))
88 str_type_from_field = str(type(field))
89
90 if value is None:
91 display = ''
92 elif 'date' in str_type_from_value:
93 display = formats.date_format(value, "SHORT_DATE_FORMAT")
94 elif 'bool' in str_type_from_value:
95 display = _('Sim') if value else _('Não')
96 elif 'ImageFieldFile' in str(type(value)):
97 if value:
98 display = '<img src="{}" />'.format(value.url)
99 else:
100 display = ''
101 elif 'FieldFile' in str_type_from_value:
102 if value:
103 display = '<a href="{}">{}</a>'.format(
104 value.url,
105 value.name.split('/')[-1:][0])
106 else:
107 display = ''
108 elif 'ManyRelatedManager' in str_type_from_value\
109 or 'RelatedManager' in str_type_from_value\
110 or 'GenericRelatedObjectManager' in str_type_from_value:
111 display = '<ul>'
112 for v in value.all():
113 display += '<li>%s</li>' % str(v)
114 display += '</ul>'
115 if not verbose_name:
116 if hasattr(field, 'related_model'):
117 verbose_name = str(
118 field.related_model._meta.verbose_name_plural)
119 elif hasattr(field, 'model'):
120 verbose_name = str(field.model._meta.verbose_name_plural)
121 elif 'GenericForeignKey' in str_type_from_field:
122 display = '<a href="{}">{}</a>'.format(
123 reverse(
124 '%s:%s_detail' % (
125 value._meta.app_config.name, obj.content_type.model),
126 args=(value.id,)),
127 value)
128 else:
129 display = str(value)
130 return verbose_name, display
131
132
133 class CrispyLayoutFormMixin:
134
135 @property
136 def layout_key(self):
137 if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):
138 return super(CrispyLayoutFormMixin, self).layout_key
139 else:
140 return self.model.__name__
141
142 @property
143 def layout_key_set(self):
144 if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):
145 return super(CrispyLayoutFormMixin, self).layout_key_set
146 else:
147 obj = self.crud if hasattr(self, 'crud') else self
148 return getattr(obj.model,
149 obj.model_set).field.model.__name__
150
151 def get_layout(self):
152 yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label
153 return read_layout_from_yaml(yaml_layout, self.layout_key)
154
155 def get_layout_set(self):
156 obj = self.crud if hasattr(self, 'crud') else self
157 yaml_layout = '%s/layouts.yaml' % getattr(
158 obj.model, obj.model_set).field.model._meta.app_config.label
159 return read_layout_from_yaml(yaml_layout, self.layout_key_set)
160
161 @property
162 def fields(self):
163 if hasattr(self, 'form_class') and self.form_class:
164 return None
165 else:
166 '''Returns all fields in the layout'''
167 return [fieldname for legend_rows in self.get_layout()
168 for row in legend_rows[1:]
169 for fieldname, span in row]
170
171 def get_form(self, form_class=None):
172 try:
173 form = super(CrispyLayoutFormMixin, self).get_form(form_class)
174 except AttributeError:
175 # simply return None if there is no get_form on super
176 pass
177 else:
178 if self.layout_key:
179 form.helper = FormHelper()
180 form.helper.layout = SaplFormLayout(*self.get_layout())
181 return form
182
183 @property
184 def list_field_names(self):
185 '''The list of field names to display on table
186
187 This base implementation returns the field names
188 in the first fieldset of the layout.
189 '''
190 obj = self.crud if hasattr(self, 'crud') else self
191 if hasattr(obj, 'list_field_names') and obj.list_field_names:
192 return obj.list_field_names
193 rows = self.get_layout()[0][1:]
194 return [fieldname for row in rows for fieldname, __ in row]
195
196 @property
197 def list_field_names_set(self):
198 '''The list of field names to display on table
199
200 This base implementation returns the field names
201 in the first fieldset of the layout.
202 '''
203 rows = self.get_layout_set()[0][1:]
204 return [fieldname for row in rows for fieldname, __ in row]
205
206 def get_column(self, fieldname, span):
207 obj = self.get_object()
208 verbose_name, text = get_field_display(obj, fieldname)
209 return {
210 'id': fieldname,
211 'span': span,
212 'verbose_name': verbose_name,
213 'text': text,
214 }
215
216 @property
217 def layout_display(self):
218
219 return [
220 {'legend': legend,
221 'rows': [[self.get_column(fieldname, span)
222 for fieldname, span in row]
223 for row in rows]
224 } for legend, rows in heads_and_tails(self.get_layout())]
225
226
227 def read_yaml_from_file(yaml_layout):
228 # TODO cache this at application level
229 t = template.loader.get_template(yaml_layout)
230 # aqui é importante converter para str pois, dependendo do ambiente,
231 # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream
232 rendered = str(t.render())
233 return rtyaml.load(rendered)
234
235
236 def read_layout_from_yaml(yaml_layout, key):
237 # TODO cache this at application level
238 yaml = read_yaml_from_file(yaml_layout)
239 base = yaml[key]
240
241 def line_to_namespans(line):
242 split = [cell.split(':') for cell in line.split()]
243 namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]
244 remaining = 12 - sum(s for n, s in namespans)
245 nondefined = [ns for ns in namespans if not ns[1]]
246 while nondefined:
247 span = ceil(remaining / len(nondefined))
248 namespan = nondefined.pop(0)
249 namespan[1] = span
250 remaining = remaining - span
251 return list(map(tuple, namespans))
252
253 return [[legend] + [line_to_namespans(l) for l in lines]
254 for legend, lines in base.items()]
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sapl/crispy_layout_mixin.py b/sapl/crispy_layout_mixin.py
--- a/sapl/crispy_layout_mixin.py
+++ b/sapl/crispy_layout_mixin.py
@@ -38,7 +38,7 @@
return FormActions(
Submit('salvar', save_label, css_class='pull-right',
# para impedir resubmissão do form
- onclick='this.disabled=true;'),
+ onclick='this.form.submit();this.disabled=true;'),
*more)
|
{"golden_diff": "diff --git a/sapl/crispy_layout_mixin.py b/sapl/crispy_layout_mixin.py\n--- a/sapl/crispy_layout_mixin.py\n+++ b/sapl/crispy_layout_mixin.py\n@@ -38,7 +38,7 @@\n return FormActions(\n Submit('salvar', save_label, css_class='pull-right',\n # para impedir resubmiss\u00e3o do form\n- onclick='this.disabled=true;'),\n+ onclick='this.form.submit();this.disabled=true;'),\n *more)\n", "issue": "Pesquisa mat\u00e9rias legislativas\nAo preencher alguns dados no form de pesquisa de mat\u00e9ria legislativa e clicar em pesquisar, ocorre o seguinte:\r\nNo chrome o bot\u00e3o desabilita e a seta do mouse vira um sinal de proibido\r\nno firefox ocorre o mesmo, porem mostra resultados\n", "before_files": [{"content": "from math import ceil\n\nimport rtyaml\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.utils import formats\nfrom django.utils.translation import ugettext as _\n\n\ndef heads_and_tails(list_of_lists):\n for alist in list_of_lists:\n yield alist[0], alist[1:]\n\n\ndef to_column(name_span):\n fieldname, span = name_span\n return Div(fieldname, css_class='col-md-%d' % span)\n\n\ndef to_row(names_spans):\n return Div(*map(to_column, names_spans), css_class='row-fluid')\n\n\ndef to_fieldsets(fields):\n for field in fields:\n if isinstance(field, list):\n legend, row_specs = field[0], field[1:]\n rows = [to_row(name_span_list) for name_span_list in row_specs]\n yield Fieldset(legend, *rows)\n else:\n yield field\n\n\ndef form_actions(more=[], save_label=_('Salvar')):\n return FormActions(\n Submit('salvar', save_label, css_class='pull-right',\n # para impedir resubmiss\u00e3o do form\n onclick='this.disabled=true;'),\n *more)\n\n\nclass SaplFormLayout(Layout):\n\n def __init__(self, *fields, cancel_label=_('Cancelar'),\n save_label=_('Salvar'), actions=None):\n\n buttons = actions\n if not buttons:\n buttons = form_actions(save_label=save_label, more=[\n HTML('<a href=\"{{ view.cancel_url }}\"'\n ' class=\"btn btn-inverse\">%s</a>' % cancel_label)\n if cancel_label else None])\n\n _fields = list(to_fieldsets(fields))\n if buttons:\n _fields += [to_row([(buttons, 12)])]\n super(SaplFormLayout, self).__init__(*_fields)\n\n\ndef get_field_display(obj, fieldname):\n field = ''\n try:\n field = obj._meta.get_field(fieldname)\n except Exception as e:\n \"\"\" nos casos que o fieldname n\u00e3o \u00e9 um field_model,\n ele pode ser um aggregate, annotate, um property, um manager,\n ou mesmo uma m\u00e9todo no model.\n \"\"\"\n value = getattr(obj, fieldname)\n try:\n verbose_name = value.model._meta.verbose_name\n except AttributeError:\n verbose_name = ''\n\n else:\n verbose_name = str(field.verbose_name)\\\n if hasattr(field, 'verbose_name') else ''\n\n if hasattr(field, 'choices') and field.choices:\n value = getattr(obj, 'get_%s_display' % fieldname)()\n else:\n value = getattr(obj, fieldname)\n\n str_type_from_value = str(type(value))\n str_type_from_field = str(type(field))\n\n if value is None:\n display = ''\n elif 'date' in str_type_from_value:\n display = formats.date_format(value, \"SHORT_DATE_FORMAT\")\n elif 'bool' in str_type_from_value:\n display = _('Sim') if value else _('N\u00e3o')\n elif 'ImageFieldFile' in str(type(value)):\n if value:\n display = '<img src=\"{}\" />'.format(value.url)\n else:\n display = ''\n elif 'FieldFile' in str_type_from_value:\n if value:\n 
display = '<a href=\"{}\">{}</a>'.format(\n value.url,\n value.name.split('/')[-1:][0])\n else:\n display = ''\n elif 'ManyRelatedManager' in str_type_from_value\\\n or 'RelatedManager' in str_type_from_value\\\n or 'GenericRelatedObjectManager' in str_type_from_value:\n display = '<ul>'\n for v in value.all():\n display += '<li>%s</li>' % str(v)\n display += '</ul>'\n if not verbose_name:\n if hasattr(field, 'related_model'):\n verbose_name = str(\n field.related_model._meta.verbose_name_plural)\n elif hasattr(field, 'model'):\n verbose_name = str(field.model._meta.verbose_name_plural)\n elif 'GenericForeignKey' in str_type_from_field:\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, obj.content_type.model),\n args=(value.id,)),\n value)\n else:\n display = str(value)\n return verbose_name, display\n\n\nclass CrispyLayoutFormMixin:\n\n @property\n def layout_key(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):\n return super(CrispyLayoutFormMixin, self).layout_key\n else:\n return self.model.__name__\n\n @property\n def layout_key_set(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):\n return super(CrispyLayoutFormMixin, self).layout_key_set\n else:\n obj = self.crud if hasattr(self, 'crud') else self\n return getattr(obj.model,\n obj.model_set).field.model.__name__\n\n def get_layout(self):\n yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key)\n\n def get_layout_set(self):\n obj = self.crud if hasattr(self, 'crud') else self\n yaml_layout = '%s/layouts.yaml' % getattr(\n obj.model, obj.model_set).field.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key_set)\n\n @property\n def fields(self):\n if hasattr(self, 'form_class') and self.form_class:\n return None\n else:\n '''Returns all fields in the layout'''\n return [fieldname for legend_rows in self.get_layout()\n for row in legend_rows[1:]\n for fieldname, span in row]\n\n def get_form(self, form_class=None):\n try:\n form = super(CrispyLayoutFormMixin, self).get_form(form_class)\n except AttributeError:\n # simply return None if there is no get_form on super\n pass\n else:\n if self.layout_key:\n form.helper = FormHelper()\n form.helper.layout = SaplFormLayout(*self.get_layout())\n return form\n\n @property\n def list_field_names(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n obj = self.crud if hasattr(self, 'crud') else self\n if hasattr(obj, 'list_field_names') and obj.list_field_names:\n return obj.list_field_names\n rows = self.get_layout()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n @property\n def list_field_names_set(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n rows = self.get_layout_set()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n def get_column(self, fieldname, span):\n obj = self.get_object()\n verbose_name, text = get_field_display(obj, fieldname)\n return {\n 'id': fieldname,\n 'span': span,\n 'verbose_name': verbose_name,\n 'text': text,\n }\n\n @property\n def layout_display(self):\n\n return [\n {'legend': legend,\n 'rows': [[self.get_column(fieldname, span)\n for fieldname, span in row]\n for row in rows]\n } for legend, rows in 
heads_and_tails(self.get_layout())]\n\n\ndef read_yaml_from_file(yaml_layout):\n # TODO cache this at application level\n t = template.loader.get_template(yaml_layout)\n # aqui \u00e9 importante converter para str pois, dependendo do ambiente,\n # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream\n rendered = str(t.render())\n return rtyaml.load(rendered)\n\n\ndef read_layout_from_yaml(yaml_layout, key):\n # TODO cache this at application level\n yaml = read_yaml_from_file(yaml_layout)\n base = yaml[key]\n\n def line_to_namespans(line):\n split = [cell.split(':') for cell in line.split()]\n namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]\n remaining = 12 - sum(s for n, s in namespans)\n nondefined = [ns for ns in namespans if not ns[1]]\n while nondefined:\n span = ceil(remaining / len(nondefined))\n namespan = nondefined.pop(0)\n namespan[1] = span\n remaining = remaining - span\n return list(map(tuple, namespans))\n\n return [[legend] + [line_to_namespans(l) for l in lines]\n for legend, lines in base.items()]\n", "path": "sapl/crispy_layout_mixin.py"}], "after_files": [{"content": "from math import ceil\n\nimport rtyaml\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import HTML, Div, Fieldset, Layout, Submit\nfrom django import template\nfrom django.core.urlresolvers import reverse\nfrom django.utils import formats\nfrom django.utils.translation import ugettext as _\n\n\ndef heads_and_tails(list_of_lists):\n for alist in list_of_lists:\n yield alist[0], alist[1:]\n\n\ndef to_column(name_span):\n fieldname, span = name_span\n return Div(fieldname, css_class='col-md-%d' % span)\n\n\ndef to_row(names_spans):\n return Div(*map(to_column, names_spans), css_class='row-fluid')\n\n\ndef to_fieldsets(fields):\n for field in fields:\n if isinstance(field, list):\n legend, row_specs = field[0], field[1:]\n rows = [to_row(name_span_list) for name_span_list in row_specs]\n yield Fieldset(legend, *rows)\n else:\n yield field\n\n\ndef form_actions(more=[], save_label=_('Salvar')):\n return FormActions(\n Submit('salvar', save_label, css_class='pull-right',\n # para impedir resubmiss\u00e3o do form\n onclick='this.form.submit();this.disabled=true;'),\n *more)\n\n\nclass SaplFormLayout(Layout):\n\n def __init__(self, *fields, cancel_label=_('Cancelar'),\n save_label=_('Salvar'), actions=None):\n\n buttons = actions\n if not buttons:\n buttons = form_actions(save_label=save_label, more=[\n HTML('<a href=\"{{ view.cancel_url }}\"'\n ' class=\"btn btn-inverse\">%s</a>' % cancel_label)\n if cancel_label else None])\n\n _fields = list(to_fieldsets(fields))\n if buttons:\n _fields += [to_row([(buttons, 12)])]\n super(SaplFormLayout, self).__init__(*_fields)\n\n\ndef get_field_display(obj, fieldname):\n field = ''\n try:\n field = obj._meta.get_field(fieldname)\n except Exception as e:\n \"\"\" nos casos que o fieldname n\u00e3o \u00e9 um field_model,\n ele pode ser um aggregate, annotate, um property, um manager,\n ou mesmo uma m\u00e9todo no model.\n \"\"\"\n value = getattr(obj, fieldname)\n try:\n verbose_name = value.model._meta.verbose_name\n except AttributeError:\n verbose_name = ''\n\n else:\n verbose_name = str(field.verbose_name)\\\n if hasattr(field, 'verbose_name') else ''\n\n if hasattr(field, 'choices') and field.choices:\n value = getattr(obj, 'get_%s_display' % fieldname)()\n else:\n value = getattr(obj, fieldname)\n\n str_type_from_value = str(type(value))\n str_type_from_field = 
str(type(field))\n\n if value is None:\n display = ''\n elif 'date' in str_type_from_value:\n display = formats.date_format(value, \"SHORT_DATE_FORMAT\")\n elif 'bool' in str_type_from_value:\n display = _('Sim') if value else _('N\u00e3o')\n elif 'ImageFieldFile' in str(type(value)):\n if value:\n display = '<img src=\"{}\" />'.format(value.url)\n else:\n display = ''\n elif 'FieldFile' in str_type_from_value:\n if value:\n display = '<a href=\"{}\">{}</a>'.format(\n value.url,\n value.name.split('/')[-1:][0])\n else:\n display = ''\n elif 'ManyRelatedManager' in str_type_from_value\\\n or 'RelatedManager' in str_type_from_value\\\n or 'GenericRelatedObjectManager' in str_type_from_value:\n display = '<ul>'\n for v in value.all():\n display += '<li>%s</li>' % str(v)\n display += '</ul>'\n if not verbose_name:\n if hasattr(field, 'related_model'):\n verbose_name = str(\n field.related_model._meta.verbose_name_plural)\n elif hasattr(field, 'model'):\n verbose_name = str(field.model._meta.verbose_name_plural)\n elif 'GenericForeignKey' in str_type_from_field:\n display = '<a href=\"{}\">{}</a>'.format(\n reverse(\n '%s:%s_detail' % (\n value._meta.app_config.name, obj.content_type.model),\n args=(value.id,)),\n value)\n else:\n display = str(value)\n return verbose_name, display\n\n\nclass CrispyLayoutFormMixin:\n\n @property\n def layout_key(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key'):\n return super(CrispyLayoutFormMixin, self).layout_key\n else:\n return self.model.__name__\n\n @property\n def layout_key_set(self):\n if hasattr(super(CrispyLayoutFormMixin, self), 'layout_key_set'):\n return super(CrispyLayoutFormMixin, self).layout_key_set\n else:\n obj = self.crud if hasattr(self, 'crud') else self\n return getattr(obj.model,\n obj.model_set).field.model.__name__\n\n def get_layout(self):\n yaml_layout = '%s/layouts.yaml' % self.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key)\n\n def get_layout_set(self):\n obj = self.crud if hasattr(self, 'crud') else self\n yaml_layout = '%s/layouts.yaml' % getattr(\n obj.model, obj.model_set).field.model._meta.app_config.label\n return read_layout_from_yaml(yaml_layout, self.layout_key_set)\n\n @property\n def fields(self):\n if hasattr(self, 'form_class') and self.form_class:\n return None\n else:\n '''Returns all fields in the layout'''\n return [fieldname for legend_rows in self.get_layout()\n for row in legend_rows[1:]\n for fieldname, span in row]\n\n def get_form(self, form_class=None):\n try:\n form = super(CrispyLayoutFormMixin, self).get_form(form_class)\n except AttributeError:\n # simply return None if there is no get_form on super\n pass\n else:\n if self.layout_key:\n form.helper = FormHelper()\n form.helper.layout = SaplFormLayout(*self.get_layout())\n return form\n\n @property\n def list_field_names(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n obj = self.crud if hasattr(self, 'crud') else self\n if hasattr(obj, 'list_field_names') and obj.list_field_names:\n return obj.list_field_names\n rows = self.get_layout()[0][1:]\n return [fieldname for row in rows for fieldname, __ in row]\n\n @property\n def list_field_names_set(self):\n '''The list of field names to display on table\n\n This base implementation returns the field names\n in the first fieldset of the layout.\n '''\n rows = self.get_layout_set()[0][1:]\n return [fieldname for row in rows for fieldname, __ 
in row]\n\n def get_column(self, fieldname, span):\n obj = self.get_object()\n verbose_name, text = get_field_display(obj, fieldname)\n return {\n 'id': fieldname,\n 'span': span,\n 'verbose_name': verbose_name,\n 'text': text,\n }\n\n @property\n def layout_display(self):\n\n return [\n {'legend': legend,\n 'rows': [[self.get_column(fieldname, span)\n for fieldname, span in row]\n for row in rows]\n } for legend, rows in heads_and_tails(self.get_layout())]\n\n\ndef read_yaml_from_file(yaml_layout):\n # TODO cache this at application level\n t = template.loader.get_template(yaml_layout)\n # aqui \u00e9 importante converter para str pois, dependendo do ambiente,\n # o rtyaml pode usar yaml.CSafeLoader, que exige str ou stream\n rendered = str(t.render())\n return rtyaml.load(rendered)\n\n\ndef read_layout_from_yaml(yaml_layout, key):\n # TODO cache this at application level\n yaml = read_yaml_from_file(yaml_layout)\n base = yaml[key]\n\n def line_to_namespans(line):\n split = [cell.split(':') for cell in line.split()]\n namespans = [[s[0], int(s[1]) if len(s) > 1 else 0] for s in split]\n remaining = 12 - sum(s for n, s in namespans)\n nondefined = [ns for ns in namespans if not ns[1]]\n while nondefined:\n span = ceil(remaining / len(nondefined))\n namespan = nondefined.pop(0)\n namespan[1] = span\n remaining = remaining - span\n return list(map(tuple, namespans))\n\n return [[legend] + [line_to_namespans(l) for l in lines]\n for legend, lines in base.items()]\n", "path": "sapl/crispy_layout_mixin.py"}]}
| 2,975 | 117 |
gh_patches_debug_37488
|
rasdani/github-patches
|
git_diff
|
OCA__server-tools-478
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
base_name_search_improved - Portal Access Managment
@dreispt
This ocurrs in when click on customer --> More --> Portal Access Managment.
> Traceback (most recent call last):
> File "/opt/odoov1/OCB/openerp/http.py", line 539, in _handle_exception
> return super(JsonRequest, self)._handle_exception(exception)
> File "/opt/odoov1/OCB/openerp/http.py", line 576, in dispatch
> result = self._call_function(*_self.params)
> File "/opt/odoov1/OCB/openerp/http.py", line 312, in _call_function
> return checked_call(self.db, *args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/service/model.py", line 118, in wrapper
> return f(dbname, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/http.py", line 309, in checked_call
> return self.endpoint(_a, *_kw)
> File "/opt/odoov1/OCB/openerp/http.py", line 805, in __call__
> return self.method(_args, *_kw)
> File "/opt/odoov1/OCB/openerp/http.py", line 405, in response_wrap
> response = f(_args, *_kw)
> File "/opt/odoov1/OCB/addons/web/controllers/main.py", line 944, in call_kw
> return self._call_kw(model, method, args, kwargs)
> File "/opt/odoov1/OCB/addons/web/controllers/main.py", line 936, in _call_kw
> return getattr(request.registry.get(model), method)(request.cr, request.uid, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/api.py", line 268, in wrapper
> return old_api(self, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/api.py", line 372, in old_api
> result = method(recs, _args, *_kwargs)
> File "/opt/odoov1/extra-addons/base_name_search_improved/models/ir_model.py", line 61, in name_search
> res = _extend_name_results(self, domain, res, limit)
> File "/opt/odoov1/extra-addons/base_name_search_improved/models/ir_model.py", line 27, in _extend_name_results
> recs = self.search(domain, limit=limit - result_count)
> File "/opt/odoov1/OCB/openerp/api.py", line 266, in wrapper
> return new_api(self, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/api.py", line 508, in new_api
> result = method(self._model, cr, uid, _args, *_old_kwargs)
> File "/opt/odoov1/OCB/openerp/addons/base/res/res_users.py", line 121, in search
> return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)
> File "/opt/odoov1/OCB/openerp/api.py", line 268, in wrapper
> return old_api(self, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/models.py", line 1646, in search
> return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
> File "/opt/odoov1/OCB/openerp/api.py", line 268, in wrapper
> return old_api(self, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/models.py", line 4673, in _search
> query = self._where_calc(cr, user, args, context=context)
> File "/opt/odoov1/OCB/openerp/api.py", line 268, in wrapper
> return old_api(self, _args, *_kwargs)
> File "/opt/odoov1/OCB/openerp/models.py", line 4484, in _where_calc
> e = expression.expression(cr, user, domain, self, context)
> File "/opt/odoov1/OCB/openerp/osv/expression.py", line 662, in __init__
> self.parse(cr, uid, context=context)
> File "/opt/odoov1/OCB/openerp/osv/expression.py", line 921, in parse
> fct_domain = column.search(cr, uid, model, left, [leaf.leaf], context=context)
> File "/opt/odoov1/OCB/openerp/osv/fields.py", line 1423, in search
> return self._fnct_search(obj, cr, uid, obj, name, args, context=context)
> File "/opt/odoov1/OCB/openerp/addons/base/res/res_users.py", line 80, in _search_group
> group_name = values.pop().strip()
> IndexError: pop from empty list
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `base_name_search_improved/models/ir_model.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # © 2016 Daniel Reis
3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
4
5 from openerp import models, fields, api
6 from openerp import SUPERUSER_ID
7 from openerp import tools
8
9
10 # Extended name search is only used on some operators
11 ALLOWED_OPS = set(['ilike', 'like'])
12
13
14 @tools.ormcache(skiparg=0)
15 def _get_rec_names(self):
16 model = self.env['ir.model'].search(
17 [('model', '=', str(self._model))])
18 rec_name = [self._rec_name] or []
19 other_names = model.name_search_ids.mapped('name')
20 return rec_name + other_names
21
22
23 def _extend_name_results(self, domain, results, limit):
24 result_count = len(results)
25 if result_count < limit:
26 domain += [('id', 'not in', [x[0] for x in results])]
27 recs = self.search(domain, limit=limit - result_count)
28 results.extend(recs.name_get())
29 return results
30
31
32 class ModelExtended(models.Model):
33 _inherit = 'ir.model'
34
35 name_search_ids = fields.Many2many(
36 'ir.model.fields',
37 string='Name Search Fields')
38
39 def _register_hook(self, cr, ids=None):
40
41 def make_name_search():
42
43 @api.model
44 def name_search(self, name='', args=None,
45 operator='ilike', limit=100):
46 # Perform standard name search
47 res = name_search.origin(
48 self, name=name, args=args, operator=operator, limit=limit)
49 enabled = self.env.context.get('name_search_extended', True)
50 # Perform extended name search
51 if enabled and operator in ALLOWED_OPS:
52 # Support a list of fields to search on
53 all_names = _get_rec_names(self)
54 # Try regular search on each additional search field
55 for rec_name in all_names[1:]:
56 domain = [(rec_name, operator, name)]
57 res = _extend_name_results(self, domain, res, limit)
58 # Try ordered word search on each of the search fields
59 for rec_name in all_names:
60 domain = [(rec_name, operator, name.replace(' ', '%'))]
61 res = _extend_name_results(self, domain, res, limit)
62 # Try unordered word search on each of the search fields
63 for rec_name in all_names:
64 domain = [(rec_name, operator, x)
65 for x in name.split() if x]
66 res = _extend_name_results(self, domain, res, limit)
67 return res
68 return name_search
69
70 if ids is None:
71 ids = self.search(cr, SUPERUSER_ID, [])
72 for model in self.browse(cr, SUPERUSER_ID, ids):
73 Model = self.pool.get(model.model)
74 if Model:
75 Model._patch_method('name_search', make_name_search())
76 return super(ModelExtended, self)._register_hook(cr)
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/base_name_search_improved/models/ir_model.py b/base_name_search_improved/models/ir_model.py
--- a/base_name_search_improved/models/ir_model.py
+++ b/base_name_search_improved/models/ir_model.py
@@ -13,6 +13,7 @@
@tools.ormcache(skiparg=0)
def _get_rec_names(self):
+ "List of fields to search into"
model = self.env['ir.model'].search(
[('model', '=', str(self._model))])
rec_name = [self._rec_name] or []
@@ -48,22 +49,28 @@
self, name=name, args=args, operator=operator, limit=limit)
enabled = self.env.context.get('name_search_extended', True)
# Perform extended name search
- if enabled and operator in ALLOWED_OPS:
+ # Note: Empty name causes error on
+ # Customer->More->Portal Access Management
+ if name and enabled and operator in ALLOWED_OPS:
# Support a list of fields to search on
all_names = _get_rec_names(self)
+ base_domain = args or []
# Try regular search on each additional search field
for rec_name in all_names[1:]:
domain = [(rec_name, operator, name)]
- res = _extend_name_results(self, domain, res, limit)
+ res = _extend_name_results(
+ self, base_domain + domain, res, limit)
# Try ordered word search on each of the search fields
for rec_name in all_names:
domain = [(rec_name, operator, name.replace(' ', '%'))]
- res = _extend_name_results(self, domain, res, limit)
+ res = _extend_name_results(
+ self, base_domain + domain, res, limit)
# Try unordered word search on each of the search fields
for rec_name in all_names:
domain = [(rec_name, operator, x)
for x in name.split() if x]
- res = _extend_name_results(self, domain, res, limit)
+ res = _extend_name_results(
+ self, base_domain + domain, res, limit)
return res
return name_search
|
{"golden_diff": "diff --git a/base_name_search_improved/models/ir_model.py b/base_name_search_improved/models/ir_model.py\n--- a/base_name_search_improved/models/ir_model.py\n+++ b/base_name_search_improved/models/ir_model.py\n@@ -13,6 +13,7 @@\n \n @tools.ormcache(skiparg=0)\n def _get_rec_names(self):\n+ \"List of fields to search into\"\n model = self.env['ir.model'].search(\n [('model', '=', str(self._model))])\n rec_name = [self._rec_name] or []\n@@ -48,22 +49,28 @@\n self, name=name, args=args, operator=operator, limit=limit)\n enabled = self.env.context.get('name_search_extended', True)\n # Perform extended name search\n- if enabled and operator in ALLOWED_OPS:\n+ # Note: Empty name causes error on\n+ # Customer->More->Portal Access Management\n+ if name and enabled and operator in ALLOWED_OPS:\n # Support a list of fields to search on\n all_names = _get_rec_names(self)\n+ base_domain = args or []\n # Try regular search on each additional search field\n for rec_name in all_names[1:]:\n domain = [(rec_name, operator, name)]\n- res = _extend_name_results(self, domain, res, limit)\n+ res = _extend_name_results(\n+ self, base_domain + domain, res, limit)\n # Try ordered word search on each of the search fields\n for rec_name in all_names:\n domain = [(rec_name, operator, name.replace(' ', '%'))]\n- res = _extend_name_results(self, domain, res, limit)\n+ res = _extend_name_results(\n+ self, base_domain + domain, res, limit)\n # Try unordered word search on each of the search fields\n for rec_name in all_names:\n domain = [(rec_name, operator, x)\n for x in name.split() if x]\n- res = _extend_name_results(self, domain, res, limit)\n+ res = _extend_name_results(\n+ self, base_domain + domain, res, limit)\n return res\n return name_search\n", "issue": "base_name_search_improved - Portal Access Managment\n@dreispt \nThis ocurrs in when click on customer --> More --> Portal Access Managment.\n\n> Traceback (most recent call last):\n> File \"/opt/odoov1/OCB/openerp/http.py\", line 539, in _handle_exception\n> return super(JsonRequest, self)._handle_exception(exception)\n> File \"/opt/odoov1/OCB/openerp/http.py\", line 576, in dispatch\n> result = self._call_function(*_self.params)\n> File \"/opt/odoov1/OCB/openerp/http.py\", line 312, in _call_function\n> return checked_call(self.db, *args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/service/model.py\", line 118, in wrapper\n> return f(dbname, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/http.py\", line 309, in checked_call\n> return self.endpoint(_a, *_kw)\n> File \"/opt/odoov1/OCB/openerp/http.py\", line 805, in __call__\n> return self.method(_args, *_kw)\n> File \"/opt/odoov1/OCB/openerp/http.py\", line 405, in response_wrap\n> response = f(_args, *_kw)\n> File \"/opt/odoov1/OCB/addons/web/controllers/main.py\", line 944, in call_kw\n> return self._call_kw(model, method, args, kwargs)\n> File \"/opt/odoov1/OCB/addons/web/controllers/main.py\", line 936, in _call_kw\n> return getattr(request.registry.get(model), method)(request.cr, request.uid, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 268, in wrapper\n> return old_api(self, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 372, in old_api\n> result = method(recs, _args, *_kwargs)\n> File \"/opt/odoov1/extra-addons/base_name_search_improved/models/ir_model.py\", line 61, in name_search\n> res = _extend_name_results(self, domain, res, limit)\n> File \"/opt/odoov1/extra-addons/base_name_search_improved/models/ir_model.py\", line 27, in 
_extend_name_results\n> recs = self.search(domain, limit=limit - result_count)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 266, in wrapper\n> return new_api(self, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 508, in new_api\n> result = method(self._model, cr, uid, _args, *_old_kwargs)\n> File \"/opt/odoov1/OCB/openerp/addons/base/res/res_users.py\", line 121, in search\n> return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 268, in wrapper\n> return old_api(self, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/models.py\", line 1646, in search\n> return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 268, in wrapper\n> return old_api(self, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/models.py\", line 4673, in _search\n> query = self._where_calc(cr, user, args, context=context)\n> File \"/opt/odoov1/OCB/openerp/api.py\", line 268, in wrapper\n> return old_api(self, _args, *_kwargs)\n> File \"/opt/odoov1/OCB/openerp/models.py\", line 4484, in _where_calc\n> e = expression.expression(cr, user, domain, self, context)\n> File \"/opt/odoov1/OCB/openerp/osv/expression.py\", line 662, in __init__\n> self.parse(cr, uid, context=context)\n> File \"/opt/odoov1/OCB/openerp/osv/expression.py\", line 921, in parse\n> fct_domain = column.search(cr, uid, model, left, [leaf.leaf], context=context)\n> File \"/opt/odoov1/OCB/openerp/osv/fields.py\", line 1423, in search\n> return self._fnct_search(obj, cr, uid, obj, name, args, context=context)\n> File \"/opt/odoov1/OCB/openerp/addons/base/res/res_users.py\", line 80, in _search_group\n> group_name = values.pop().strip()\n> IndexError: pop from empty list\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2016 Daniel Reis\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import models, fields, api\nfrom openerp import SUPERUSER_ID\nfrom openerp import tools\n\n\n# Extended name search is only used on some operators\nALLOWED_OPS = set(['ilike', 'like'])\n\n\[email protected](skiparg=0)\ndef _get_rec_names(self):\n model = self.env['ir.model'].search(\n [('model', '=', str(self._model))])\n rec_name = [self._rec_name] or []\n other_names = model.name_search_ids.mapped('name')\n return rec_name + other_names\n\n\ndef _extend_name_results(self, domain, results, limit):\n result_count = len(results)\n if result_count < limit:\n domain += [('id', 'not in', [x[0] for x in results])]\n recs = self.search(domain, limit=limit - result_count)\n results.extend(recs.name_get())\n return results\n\n\nclass ModelExtended(models.Model):\n _inherit = 'ir.model'\n\n name_search_ids = fields.Many2many(\n 'ir.model.fields',\n string='Name Search Fields')\n\n def _register_hook(self, cr, ids=None):\n\n def make_name_search():\n\n @api.model\n def name_search(self, name='', args=None,\n operator='ilike', limit=100):\n # Perform standard name search\n res = name_search.origin(\n self, name=name, args=args, operator=operator, limit=limit)\n enabled = self.env.context.get('name_search_extended', True)\n # Perform extended name search\n if enabled and operator in ALLOWED_OPS:\n # Support a list of fields to search on\n all_names = _get_rec_names(self)\n # Try regular search on each additional search field\n for rec_name in all_names[1:]:\n domain = [(rec_name, operator, name)]\n res = _extend_name_results(self, 
domain, res, limit)\n # Try ordered word search on each of the search fields\n for rec_name in all_names:\n domain = [(rec_name, operator, name.replace(' ', '%'))]\n res = _extend_name_results(self, domain, res, limit)\n # Try unordered word search on each of the search fields\n for rec_name in all_names:\n domain = [(rec_name, operator, x)\n for x in name.split() if x]\n res = _extend_name_results(self, domain, res, limit)\n return res\n return name_search\n\n if ids is None:\n ids = self.search(cr, SUPERUSER_ID, [])\n for model in self.browse(cr, SUPERUSER_ID, ids):\n Model = self.pool.get(model.model)\n if Model:\n Model._patch_method('name_search', make_name_search())\n return super(ModelExtended, self)._register_hook(cr)\n", "path": "base_name_search_improved/models/ir_model.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2016 Daniel Reis\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import models, fields, api\nfrom openerp import SUPERUSER_ID\nfrom openerp import tools\n\n\n# Extended name search is only used on some operators\nALLOWED_OPS = set(['ilike', 'like'])\n\n\[email protected](skiparg=0)\ndef _get_rec_names(self):\n \"List of fields to search into\"\n model = self.env['ir.model'].search(\n [('model', '=', str(self._model))])\n rec_name = [self._rec_name] or []\n other_names = model.name_search_ids.mapped('name')\n return rec_name + other_names\n\n\ndef _extend_name_results(self, domain, results, limit):\n result_count = len(results)\n if result_count < limit:\n domain += [('id', 'not in', [x[0] for x in results])]\n recs = self.search(domain, limit=limit - result_count)\n results.extend(recs.name_get())\n return results\n\n\nclass ModelExtended(models.Model):\n _inherit = 'ir.model'\n\n name_search_ids = fields.Many2many(\n 'ir.model.fields',\n string='Name Search Fields')\n\n def _register_hook(self, cr, ids=None):\n\n def make_name_search():\n\n @api.model\n def name_search(self, name='', args=None,\n operator='ilike', limit=100):\n # Perform standard name search\n res = name_search.origin(\n self, name=name, args=args, operator=operator, limit=limit)\n enabled = self.env.context.get('name_search_extended', True)\n # Perform extended name search\n # Note: Empty name causes error on\n # Customer->More->Portal Access Management\n if name and enabled and operator in ALLOWED_OPS:\n # Support a list of fields to search on\n all_names = _get_rec_names(self)\n base_domain = args or []\n # Try regular search on each additional search field\n for rec_name in all_names[1:]:\n domain = [(rec_name, operator, name)]\n res = _extend_name_results(\n self, base_domain + domain, res, limit)\n # Try ordered word search on each of the search fields\n for rec_name in all_names:\n domain = [(rec_name, operator, name.replace(' ', '%'))]\n res = _extend_name_results(\n self, base_domain + domain, res, limit)\n # Try unordered word search on each of the search fields\n for rec_name in all_names:\n domain = [(rec_name, operator, x)\n for x in name.split() if x]\n res = _extend_name_results(\n self, base_domain + domain, res, limit)\n return res\n return name_search\n\n if ids is None:\n ids = self.search(cr, SUPERUSER_ID, [])\n for model in self.browse(cr, SUPERUSER_ID, ids):\n Model = self.pool.get(model.model)\n if Model:\n Model._patch_method('name_search', make_name_search())\n return super(ModelExtended, self)._register_hook(cr)\n", "path": "base_name_search_improved/models/ir_model.py"}]}
| 2,277 | 489 |
gh_patches_debug_25834
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-2899
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Delete bodhi.server.views.admin
Bodhi has a strange view that tells admin users what their username and principals are, but does not allow non-admin users to use it:
https://github.com/fedora-infra/bodhi/blob/3.0.0/bodhi/server/views/admin.py
When I visit https://bodhi.fedoraproject.org/admin/ I see:
```
{"principals": ["system.Everyone", "system.Authenticated", "bowlofeggs", "group:packager", "group:infra-sig", "group:bodhiadmin"], "user": "bowlofeggs"}
```
I don't know what the purpose of this view was, but I'm pretty sure we can delete it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/views/admin.py`
Content:
```
1 # Copyright © 2014-2017 Red Hat, Inc. and others
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """Define the admin view."""
19
20 from cornice import Service
21
22 from bodhi.server import log
23 from bodhi.server import security
24
25
26 admin_service = Service(name='admin', path='/admin/',
27 description='Administrator view',
28 factory=security.AdminACLFactory)
29
30
31 @admin_service.get(permission='admin')
32 def admin(request):
33 """
34 Return a dictionary with keys "user" and "principals".
35
36 "user" indexes the current user's name, and "principals" indexes the user's effective
37 principals.
38
39 Args:
40 request (pyramid.request): The current request.
41 Returns:
42 dict: A dictionary as described above.
43 """
44 user = request.user
45 log.info('%s logged into admin panel' % user.name)
46 principals = request.effective_principals
47 return {'user': user.name, 'principals': principals}
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bodhi/server/views/admin.py b/bodhi/server/views/admin.py
deleted file mode 100644
--- a/bodhi/server/views/admin.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright © 2014-2017 Red Hat, Inc. and others
-#
-# This file is part of Bodhi.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-"""Define the admin view."""
-
-from cornice import Service
-
-from bodhi.server import log
-from bodhi.server import security
-
-
-admin_service = Service(name='admin', path='/admin/',
- description='Administrator view',
- factory=security.AdminACLFactory)
-
-
-@admin_service.get(permission='admin')
-def admin(request):
- """
- Return a dictionary with keys "user" and "principals".
-
- "user" indexes the current user's name, and "principals" indexes the user's effective
- principals.
-
- Args:
- request (pyramid.request): The current request.
- Returns:
- dict: A dictionary as described above.
- """
- user = request.user
- log.info('%s logged into admin panel' % user.name)
- principals = request.effective_principals
- return {'user': user.name, 'principals': principals}
|
{"golden_diff": "diff --git a/bodhi/server/views/admin.py b/bodhi/server/views/admin.py\ndeleted file mode 100644\n--- a/bodhi/server/views/admin.py\n+++ /dev/null\n@@ -1,47 +0,0 @@\n-# Copyright \u00a9 2014-2017 Red Hat, Inc. and others\n-#\n-# This file is part of Bodhi.\n-#\n-# This program is free software; you can redistribute it and/or\n-# modify it under the terms of the GNU General Public License\n-# as published by the Free Software Foundation; either version 2\n-# of the License, or (at your option) any later version.\n-#\n-# This program is distributed in the hope that it will be useful,\n-# but WITHOUT ANY WARRANTY; without even the implied warranty of\n-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n-# GNU General Public License for more details.\n-#\n-# You should have received a copy of the GNU General Public License\n-# along with this program; if not, write to the Free Software\n-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n-\"\"\"Define the admin view.\"\"\"\n-\n-from cornice import Service\n-\n-from bodhi.server import log\n-from bodhi.server import security\n-\n-\n-admin_service = Service(name='admin', path='/admin/',\n- description='Administrator view',\n- factory=security.AdminACLFactory)\n-\n-\n-@admin_service.get(permission='admin')\n-def admin(request):\n- \"\"\"\n- Return a dictionary with keys \"user\" and \"principals\".\n-\n- \"user\" indexes the current user's name, and \"principals\" indexes the user's effective\n- principals.\n-\n- Args:\n- request (pyramid.request): The current request.\n- Returns:\n- dict: A dictionary as described above.\n- \"\"\"\n- user = request.user\n- log.info('%s logged into admin panel' % user.name)\n- principals = request.effective_principals\n- return {'user': user.name, 'principals': principals}\n", "issue": "Delete bodhi.server.views.admin\nBodhi has a strange view that tells admin users what their username and principals are, but does not allow non-admin users to use it:\r\n\r\nhttps://github.com/fedora-infra/bodhi/blob/3.0.0/bodhi/server/views/admin.py\r\n\r\nWhen I visit https://bodhi.fedoraproject.org/admin/ I see:\r\n\r\n```\r\n{\"principals\": [\"system.Everyone\", \"system.Authenticated\", \"bowlofeggs\", \"group:packager\", \"group:infra-sig\", \"group:bodhiadmin\"], \"user\": \"bowlofeggs\"}\r\n```\r\n\r\nI don't know what the purpose of this view was, but I'm pretty sure we can delete it.\n", "before_files": [{"content": "# Copyright \u00a9 2014-2017 Red Hat, Inc. and others\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Define the admin view.\"\"\"\n\nfrom cornice import Service\n\nfrom bodhi.server import log\nfrom bodhi.server import security\n\n\nadmin_service = Service(name='admin', path='/admin/',\n description='Administrator view',\n factory=security.AdminACLFactory)\n\n\n@admin_service.get(permission='admin')\ndef admin(request):\n \"\"\"\n Return a dictionary with keys \"user\" and \"principals\".\n\n \"user\" indexes the current user's name, and \"principals\" indexes the user's effective\n principals.\n\n Args:\n request (pyramid.request): The current request.\n Returns:\n dict: A dictionary as described above.\n \"\"\"\n user = request.user\n log.info('%s logged into admin panel' % user.name)\n principals = request.effective_principals\n return {'user': user.name, 'principals': principals}\n", "path": "bodhi/server/views/admin.py"}], "after_files": [{"content": null, "path": "bodhi/server/views/admin.py"}]}
| 897 | 479 |
gh_patches_debug_12896
|
rasdani/github-patches
|
git_diff
|
spotify__luigi-2572
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HdfsTarget is not working in luigi > 2.6.1
This code runs correctly under luigi <= 2.6.1:
```python
from luigi.contrib import hdfs
import luigi.format
# just to be sure that really the right version is used...
import pkg_resources
print "luigi ==", pkg_resources.get_distribution("luigi").version
print "snakebite ==", pkg_resources.get_distribution("snakebite").version
destination_file = '/tmp/test/file.gz'
target = hdfs.HdfsTarget(path=destination_file, format=luigi.format.Gzip)
if target.exists():
target.remove(skip_trash=False)
fsobj = target.open('w')
fsobj.write('lol3\n')
fsobj.close()
```
with luigi 2.6.2 or 2.7.0 it breaks:
```python
luigi == 2.7.0
snakebite == 2.11.0
Traceback (most recent call last):
File "/opt/tests/hdfs_target.py", line 18, in <module>
fsobj.close()
File "/opt/python-2.7.10/lib/python2.7/site-packages/luigi/format.py", line 224, in close
self._output_pipe.close()
File "/opt/python-2.7.10/lib/python2.7/site-packages/luigi/contrib/hdfs/format.py", line 51, in close
remove(self.path)
File "/opt/python-2.7.10/lib/python2.7/site-packages/luigi/contrib/hdfs/clients.py", line 62, in result
return getattr(get_autoconfig_client(), method_name)(*args, **kwargs)
File "/opt/python-2.7.10/lib/python2.7/site-packages/luigi/contrib/hdfs/snakebite_client.py", line 140, in remove
return list(self.get_bite().delete(self.list_path(path), recurse=recursive))
File "/opt/python-2.7.10/lib/python2.7/site-packages/snakebite/client.py", line 1540, in wrapped
yield results.next()
File "/opt/python-2.7.10/lib/python2.7/site-packages/snakebite/client.py", line 508, in delete
for item in self._find_items(paths, processor, include_toplevel=True):
File "/opt/python-2.7.10/lib/python2.7/site-packages/snakebite/client.py", line 1216, in _find_items
raise FileNotFoundException("`%s': No such file or directory" % path)
snakebite.errors.FileNotFoundException: `/tmp/test/file.gz': No such file or directory
```
client.cfg:
```
[hdfs]
client = snakebite
snakebite_autoconfig = True
tmp_dir: /x/tmp
```
The file is written correctly, with the expected form and content, under "tmp_dir" (/x/tmp/username/test/file.gz-luigitemp-951771388).
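
For context, a sketch rather than the shipped luigi code: the failing step is the cleanup in `HdfsAtomicWritePipe.close()`, which unconditionally calls `remove(self.path)` and only swallows `HDFSCliError`, while the snakebite client raises `FileNotFoundException` when the final path does not exist yet — exactly the traceback above. A guarded cleanup helper along these lines avoids that (`_safe_remove` is an invented name; `exists`, `remove`, and `HDFSCliError` are the helpers `format.py` already imports):

```python
# Sketch only - not the actual fix. Guard the pre-rename cleanup so a missing
# final path is not fatal under the snakebite client.
from luigi.contrib.hdfs.clients import exists, remove
from luigi.contrib.hdfs.error import HDFSCliError

def _safe_remove(path):
    """Remove `path` if it exists; tolerate client-specific 'not found' errors."""
    try:
        if exists(path):
            remove(path)
    except Exception as ex:
        if isinstance(ex, HDFSCliError) or "FileNotFoundException" in str(ex):
            return          # nothing to clean up, which is fine here
        raise

# HdfsAtomicWritePipe.close() would call _safe_remove(self.path) before the
# rename(self.tmppath, self.path) step, mirroring HdfsAtomicWriteDirPipe.close().
```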
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/contrib/hdfs/format.py`
Content:
```
1 import logging
2 import os
3
4 import luigi.format
5 from luigi.contrib.hdfs.config import load_hadoop_cmd
6 from luigi.contrib.hdfs import config as hdfs_config
7 from luigi.contrib.hdfs.clients import remove, rename, mkdir, listdir, exists
8 from luigi.contrib.hdfs.error import HDFSCliError
9
10 logger = logging.getLogger('luigi-interface')
11
12
13 class HdfsAtomicWriteError(IOError):
14 pass
15
16
17 class HdfsReadPipe(luigi.format.InputPipeProcessWrapper):
18
19 def __init__(self, path):
20 super(HdfsReadPipe, self).__init__(load_hadoop_cmd() + ['fs', '-cat', path])
21
22
23 class HdfsAtomicWritePipe(luigi.format.OutputPipeProcessWrapper):
24 """
25 File like object for writing to HDFS
26
27 The referenced file is first written to a temporary location and then
28 renamed to final location on close(). If close() isn't called
29 the temporary file will be cleaned up when this object is
30 garbage collected
31
32 TODO: if this is buggy, change it so it first writes to a
33 local temporary file and then uploads it on completion
34 """
35
36 def __init__(self, path):
37 self.path = path
38 self.tmppath = hdfs_config.tmppath(self.path)
39 parent_dir = os.path.dirname(self.tmppath)
40 mkdir(parent_dir, parents=True, raise_if_exists=False)
41 super(HdfsAtomicWritePipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.tmppath])
42
43 def abort(self):
44 logger.info("Aborting %s('%s'). Removing temporary file '%s'",
45 self.__class__.__name__, self.path, self.tmppath)
46 super(HdfsAtomicWritePipe, self).abort()
47 remove(self.tmppath, skip_trash=True)
48
49 def close(self):
50 super(HdfsAtomicWritePipe, self).close()
51 try:
52 remove(self.path)
53 except HDFSCliError:
54 pass
55 if not all(result['result'] for result in rename(self.tmppath, self.path) or []):
56 raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))
57
58
59 class HdfsAtomicWriteDirPipe(luigi.format.OutputPipeProcessWrapper):
60 """
61 Writes a data<data_extension> file to a directory at <path>.
62 """
63
64 def __init__(self, path, data_extension=""):
65 self.path = path
66 self.tmppath = hdfs_config.tmppath(self.path)
67 self.datapath = self.tmppath + ("/data%s" % data_extension)
68 super(HdfsAtomicWriteDirPipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.datapath])
69
70 def abort(self):
71 logger.info("Aborting %s('%s'). Removing temporary dir '%s'",
72 self.__class__.__name__, self.path, self.tmppath)
73 super(HdfsAtomicWriteDirPipe, self).abort()
74 remove(self.tmppath, skip_trash=True)
75
76 def close(self):
77 super(HdfsAtomicWriteDirPipe, self).close()
78 try:
79 if exists(self.path):
80 remove(self.path)
81 except Exception as ex:
82 if isinstance(ex, HDFSCliError) or ex.args[0].contains("FileNotFoundException"):
83 pass
84 else:
85 raise ex
86
87 # it's unlikely to fail in this way but better safe than sorry
88 if not all(result['result'] for result in rename(self.tmppath, self.path) or []):
89 raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))
90
91 if os.path.basename(self.tmppath) in map(os.path.basename, listdir(self.path)):
92 remove(self.path)
93 raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))
94
95
96 class PlainFormat(luigi.format.Format):
97
98 input = 'bytes'
99 output = 'hdfs'
100
101 def hdfs_writer(self, path):
102 return self.pipe_writer(path)
103
104 def hdfs_reader(self, path):
105 return self.pipe_reader(path)
106
107 def pipe_reader(self, path):
108 return HdfsReadPipe(path)
109
110 def pipe_writer(self, output_pipe):
111 return HdfsAtomicWritePipe(output_pipe)
112
113
114 class PlainDirFormat(luigi.format.Format):
115
116 input = 'bytes'
117 output = 'hdfs'
118
119 def hdfs_writer(self, path):
120 return self.pipe_writer(path)
121
122 def hdfs_reader(self, path):
123 return self.pipe_reader(path)
124
125 def pipe_reader(self, path):
126 # exclude underscore-prefixedfiles/folders (created by MapReduce)
127 return HdfsReadPipe("%s/[^_]*" % path)
128
129 def pipe_writer(self, path):
130 return HdfsAtomicWriteDirPipe(path)
131
132
133 Plain = PlainFormat()
134 PlainDir = PlainDirFormat()
135
136
137 class CompatibleHdfsFormat(luigi.format.Format):
138
139 output = 'hdfs'
140
141 def __init__(self, writer, reader, input=None):
142 if input is not None:
143 self.input = input
144
145 self.reader = reader
146 self.writer = writer
147
148 def pipe_writer(self, output):
149 return self.writer(output)
150
151 def pipe_reader(self, input):
152 return self.reader(input)
153
154 def hdfs_writer(self, output):
155 return self.writer(output)
156
157 def hdfs_reader(self, input):
158 return self.reader(input)
159
160 # __getstate__/__setstate__ needed for pickling, because self.reader and
161 # self.writer may be unpickleable instance methods of another format class.
162 # This was mainly to support pickling of standard HdfsTarget instances.
163
164 def __getstate__(self):
165 d = self.__dict__.copy()
166 for attr in ('reader', 'writer'):
167 method = getattr(self, attr)
168 try:
169 # if instance method, pickle instance and method name
170 d[attr] = method.__self__, method.__func__.__name__
171 except AttributeError:
172 pass # not an instance method
173 return d
174
175 def __setstate__(self, d):
176 self.__dict__ = d
177 for attr in ('reader', 'writer'):
178 try:
179 method_self, method_name = d[attr]
180 except ValueError:
181 continue
182 method = getattr(method_self, method_name)
183 setattr(self, attr, method)
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/luigi/contrib/hdfs/format.py b/luigi/contrib/hdfs/format.py
--- a/luigi/contrib/hdfs/format.py
+++ b/luigi/contrib/hdfs/format.py
@@ -49,9 +49,13 @@
def close(self):
super(HdfsAtomicWritePipe, self).close()
try:
- remove(self.path)
- except HDFSCliError:
- pass
+ if exists(self.path):
+ remove(self.path)
+ except Exception as ex:
+ if isinstance(ex, HDFSCliError) or ex.args[0].contains("FileNotFoundException"):
+ pass
+ else:
+ raise ex
if not all(result['result'] for result in rename(self.tmppath, self.path) or []):
raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))
|
{"golden_diff": "diff --git a/luigi/contrib/hdfs/format.py b/luigi/contrib/hdfs/format.py\n--- a/luigi/contrib/hdfs/format.py\n+++ b/luigi/contrib/hdfs/format.py\n@@ -49,9 +49,13 @@\n def close(self):\n super(HdfsAtomicWritePipe, self).close()\n try:\n- remove(self.path)\n- except HDFSCliError:\n- pass\n+ if exists(self.path):\n+ remove(self.path)\n+ except Exception as ex:\n+ if isinstance(ex, HDFSCliError) or ex.args[0].contains(\"FileNotFoundException\"):\n+ pass\n+ else:\n+ raise ex\n if not all(result['result'] for result in rename(self.tmppath, self.path) or []):\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n", "issue": "HdfsTarget is not workging in luigi > 2.6.1\nThis code runs correct under luigi <= 2.6.1:\r\n\r\n```python\r\nfrom luigi.contrib import hdfs\r\nimport luigi.format\r\n\r\n# just to be sure that realy the right version is used...\r\nimport pkg_resources\r\nprint \"luigi ==\", pkg_resources.get_distribution(\"luigi\").version\r\nprint \"snakebite ==\", pkg_resources.get_distribution(\"snakebite\").version\r\n\r\ndestination_file = '/tmp/test/file.gz'\r\n\r\ntarget = hdfs.HdfsTarget(path=destination_file, format=luigi.format.Gzip)\r\n\r\nif target.exists():\r\n target.remove(skip_trash=False)\r\n\r\nfsobj = target.open('w')\r\nfsobj.write('lol3\\n')\r\nfsobj.close()\r\n```\r\n\r\nwith luigi 2.6.2 or 2.7.0 it breaks:\r\n```python\r\nluigi == 2.7.0\r\nsnakebite == 2.11.0\r\nTraceback (most recent call last):\r\n File \"/opt/tests/hdfs_target.py\", line 18, in <module>\r\n fsobj.close()\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/luigi/format.py\", line 224, in close\r\n self._output_pipe.close()\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/luigi/contrib/hdfs/format.py\", line 51, in close\r\n remove(self.path)\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/luigi/contrib/hdfs/clients.py\", line 62, in result\r\n return getattr(get_autoconfig_client(), method_name)(*args, **kwargs)\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/luigi/contrib/hdfs/snakebite_client.py\", line 140, in remove\r\n return list(self.get_bite().delete(self.list_path(path), recurse=recursive))\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/snakebite/client.py\", line 1540, in wrapped\r\n yield results.next()\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/snakebite/client.py\", line 508, in delete\r\n for item in self._find_items(paths, processor, include_toplevel=True):\r\n File \"/opt/python-2.7.10/lib/python2.7/site-packages/snakebite/client.py\", line 1216, in _find_items\r\n raise FileNotFoundException(\"`%s': No such file or directory\" % path)\r\nsnakebite.errors.FileNotFoundException: `/tmp/test/file.gz': No such file or directory\r\n```\r\n\r\nclient.cfg:\r\n```\r\n[hdfs]\r\nclient = snakebite\r\nsnakebite_autoconfig = True\r\ntmp_dir: /x/tmp\r\n```\r\n\r\nThe file is correct in form and content written in \"tmp_dir\" (/x/tmp/username/test/file.gz-luigitemp-951771388).\n", "before_files": [{"content": "import logging\nimport os\n\nimport luigi.format\nfrom luigi.contrib.hdfs.config import load_hadoop_cmd\nfrom luigi.contrib.hdfs import config as hdfs_config\nfrom luigi.contrib.hdfs.clients import remove, rename, mkdir, listdir, exists\nfrom luigi.contrib.hdfs.error import HDFSCliError\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass HdfsAtomicWriteError(IOError):\n pass\n\n\nclass HdfsReadPipe(luigi.format.InputPipeProcessWrapper):\n\n def __init__(self, path):\n 
super(HdfsReadPipe, self).__init__(load_hadoop_cmd() + ['fs', '-cat', path])\n\n\nclass HdfsAtomicWritePipe(luigi.format.OutputPipeProcessWrapper):\n \"\"\"\n File like object for writing to HDFS\n\n The referenced file is first written to a temporary location and then\n renamed to final location on close(). If close() isn't called\n the temporary file will be cleaned up when this object is\n garbage collected\n\n TODO: if this is buggy, change it so it first writes to a\n local temporary file and then uploads it on completion\n \"\"\"\n\n def __init__(self, path):\n self.path = path\n self.tmppath = hdfs_config.tmppath(self.path)\n parent_dir = os.path.dirname(self.tmppath)\n mkdir(parent_dir, parents=True, raise_if_exists=False)\n super(HdfsAtomicWritePipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.tmppath])\n\n def abort(self):\n logger.info(\"Aborting %s('%s'). Removing temporary file '%s'\",\n self.__class__.__name__, self.path, self.tmppath)\n super(HdfsAtomicWritePipe, self).abort()\n remove(self.tmppath, skip_trash=True)\n\n def close(self):\n super(HdfsAtomicWritePipe, self).close()\n try:\n remove(self.path)\n except HDFSCliError:\n pass\n if not all(result['result'] for result in rename(self.tmppath, self.path) or []):\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n\n\nclass HdfsAtomicWriteDirPipe(luigi.format.OutputPipeProcessWrapper):\n \"\"\"\n Writes a data<data_extension> file to a directory at <path>.\n \"\"\"\n\n def __init__(self, path, data_extension=\"\"):\n self.path = path\n self.tmppath = hdfs_config.tmppath(self.path)\n self.datapath = self.tmppath + (\"/data%s\" % data_extension)\n super(HdfsAtomicWriteDirPipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.datapath])\n\n def abort(self):\n logger.info(\"Aborting %s('%s'). 
Removing temporary dir '%s'\",\n self.__class__.__name__, self.path, self.tmppath)\n super(HdfsAtomicWriteDirPipe, self).abort()\n remove(self.tmppath, skip_trash=True)\n\n def close(self):\n super(HdfsAtomicWriteDirPipe, self).close()\n try:\n if exists(self.path):\n remove(self.path)\n except Exception as ex:\n if isinstance(ex, HDFSCliError) or ex.args[0].contains(\"FileNotFoundException\"):\n pass\n else:\n raise ex\n\n # it's unlikely to fail in this way but better safe than sorry\n if not all(result['result'] for result in rename(self.tmppath, self.path) or []):\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n\n if os.path.basename(self.tmppath) in map(os.path.basename, listdir(self.path)):\n remove(self.path)\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n\n\nclass PlainFormat(luigi.format.Format):\n\n input = 'bytes'\n output = 'hdfs'\n\n def hdfs_writer(self, path):\n return self.pipe_writer(path)\n\n def hdfs_reader(self, path):\n return self.pipe_reader(path)\n\n def pipe_reader(self, path):\n return HdfsReadPipe(path)\n\n def pipe_writer(self, output_pipe):\n return HdfsAtomicWritePipe(output_pipe)\n\n\nclass PlainDirFormat(luigi.format.Format):\n\n input = 'bytes'\n output = 'hdfs'\n\n def hdfs_writer(self, path):\n return self.pipe_writer(path)\n\n def hdfs_reader(self, path):\n return self.pipe_reader(path)\n\n def pipe_reader(self, path):\n # exclude underscore-prefixedfiles/folders (created by MapReduce)\n return HdfsReadPipe(\"%s/[^_]*\" % path)\n\n def pipe_writer(self, path):\n return HdfsAtomicWriteDirPipe(path)\n\n\nPlain = PlainFormat()\nPlainDir = PlainDirFormat()\n\n\nclass CompatibleHdfsFormat(luigi.format.Format):\n\n output = 'hdfs'\n\n def __init__(self, writer, reader, input=None):\n if input is not None:\n self.input = input\n\n self.reader = reader\n self.writer = writer\n\n def pipe_writer(self, output):\n return self.writer(output)\n\n def pipe_reader(self, input):\n return self.reader(input)\n\n def hdfs_writer(self, output):\n return self.writer(output)\n\n def hdfs_reader(self, input):\n return self.reader(input)\n\n # __getstate__/__setstate__ needed for pickling, because self.reader and\n # self.writer may be unpickleable instance methods of another format class.\n # This was mainly to support pickling of standard HdfsTarget instances.\n\n def __getstate__(self):\n d = self.__dict__.copy()\n for attr in ('reader', 'writer'):\n method = getattr(self, attr)\n try:\n # if instance method, pickle instance and method name\n d[attr] = method.__self__, method.__func__.__name__\n except AttributeError:\n pass # not an instance method\n return d\n\n def __setstate__(self, d):\n self.__dict__ = d\n for attr in ('reader', 'writer'):\n try:\n method_self, method_name = d[attr]\n except ValueError:\n continue\n method = getattr(method_self, method_name)\n setattr(self, attr, method)\n", "path": "luigi/contrib/hdfs/format.py"}], "after_files": [{"content": "import logging\nimport os\n\nimport luigi.format\nfrom luigi.contrib.hdfs.config import load_hadoop_cmd\nfrom luigi.contrib.hdfs import config as hdfs_config\nfrom luigi.contrib.hdfs.clients import remove, rename, mkdir, listdir, exists\nfrom luigi.contrib.hdfs.error import HDFSCliError\n\nlogger = logging.getLogger('luigi-interface')\n\n\nclass HdfsAtomicWriteError(IOError):\n pass\n\n\nclass HdfsReadPipe(luigi.format.InputPipeProcessWrapper):\n\n def __init__(self, path):\n super(HdfsReadPipe, self).__init__(load_hadoop_cmd() + ['fs', '-cat', 
path])\n\n\nclass HdfsAtomicWritePipe(luigi.format.OutputPipeProcessWrapper):\n \"\"\"\n File like object for writing to HDFS\n\n The referenced file is first written to a temporary location and then\n renamed to final location on close(). If close() isn't called\n the temporary file will be cleaned up when this object is\n garbage collected\n\n TODO: if this is buggy, change it so it first writes to a\n local temporary file and then uploads it on completion\n \"\"\"\n\n def __init__(self, path):\n self.path = path\n self.tmppath = hdfs_config.tmppath(self.path)\n parent_dir = os.path.dirname(self.tmppath)\n mkdir(parent_dir, parents=True, raise_if_exists=False)\n super(HdfsAtomicWritePipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.tmppath])\n\n def abort(self):\n logger.info(\"Aborting %s('%s'). Removing temporary file '%s'\",\n self.__class__.__name__, self.path, self.tmppath)\n super(HdfsAtomicWritePipe, self).abort()\n remove(self.tmppath, skip_trash=True)\n\n def close(self):\n super(HdfsAtomicWritePipe, self).close()\n try:\n if exists(self.path):\n remove(self.path)\n except Exception as ex:\n if isinstance(ex, HDFSCliError) or ex.args[0].contains(\"FileNotFoundException\"):\n pass\n else:\n raise ex\n if not all(result['result'] for result in rename(self.tmppath, self.path) or []):\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n\n\nclass HdfsAtomicWriteDirPipe(luigi.format.OutputPipeProcessWrapper):\n \"\"\"\n Writes a data<data_extension> file to a directory at <path>.\n \"\"\"\n\n def __init__(self, path, data_extension=\"\"):\n self.path = path\n self.tmppath = hdfs_config.tmppath(self.path)\n self.datapath = self.tmppath + (\"/data%s\" % data_extension)\n super(HdfsAtomicWriteDirPipe, self).__init__(load_hadoop_cmd() + ['fs', '-put', '-', self.datapath])\n\n def abort(self):\n logger.info(\"Aborting %s('%s'). 
Removing temporary dir '%s'\",\n self.__class__.__name__, self.path, self.tmppath)\n super(HdfsAtomicWriteDirPipe, self).abort()\n remove(self.tmppath, skip_trash=True)\n\n def close(self):\n super(HdfsAtomicWriteDirPipe, self).close()\n try:\n if exists(self.path):\n remove(self.path)\n except Exception as ex:\n if isinstance(ex, HDFSCliError) or ex.args[0].contains(\"FileNotFoundException\"):\n pass\n else:\n raise ex\n\n # it's unlikely to fail in this way but better safe than sorry\n if not all(result['result'] for result in rename(self.tmppath, self.path) or []):\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n\n if os.path.basename(self.tmppath) in map(os.path.basename, listdir(self.path)):\n remove(self.path)\n raise HdfsAtomicWriteError('Atomic write to {} failed'.format(self.path))\n\n\nclass PlainFormat(luigi.format.Format):\n\n input = 'bytes'\n output = 'hdfs'\n\n def hdfs_writer(self, path):\n return self.pipe_writer(path)\n\n def hdfs_reader(self, path):\n return self.pipe_reader(path)\n\n def pipe_reader(self, path):\n return HdfsReadPipe(path)\n\n def pipe_writer(self, output_pipe):\n return HdfsAtomicWritePipe(output_pipe)\n\n\nclass PlainDirFormat(luigi.format.Format):\n\n input = 'bytes'\n output = 'hdfs'\n\n def hdfs_writer(self, path):\n return self.pipe_writer(path)\n\n def hdfs_reader(self, path):\n return self.pipe_reader(path)\n\n def pipe_reader(self, path):\n # exclude underscore-prefixedfiles/folders (created by MapReduce)\n return HdfsReadPipe(\"%s/[^_]*\" % path)\n\n def pipe_writer(self, path):\n return HdfsAtomicWriteDirPipe(path)\n\n\nPlain = PlainFormat()\nPlainDir = PlainDirFormat()\n\n\nclass CompatibleHdfsFormat(luigi.format.Format):\n\n output = 'hdfs'\n\n def __init__(self, writer, reader, input=None):\n if input is not None:\n self.input = input\n\n self.reader = reader\n self.writer = writer\n\n def pipe_writer(self, output):\n return self.writer(output)\n\n def pipe_reader(self, input):\n return self.reader(input)\n\n def hdfs_writer(self, output):\n return self.writer(output)\n\n def hdfs_reader(self, input):\n return self.reader(input)\n\n # __getstate__/__setstate__ needed for pickling, because self.reader and\n # self.writer may be unpickleable instance methods of another format class.\n # This was mainly to support pickling of standard HdfsTarget instances.\n\n def __getstate__(self):\n d = self.__dict__.copy()\n for attr in ('reader', 'writer'):\n method = getattr(self, attr)\n try:\n # if instance method, pickle instance and method name\n d[attr] = method.__self__, method.__func__.__name__\n except AttributeError:\n pass # not an instance method\n return d\n\n def __setstate__(self, d):\n self.__dict__ = d\n for attr in ('reader', 'writer'):\n try:\n method_self, method_name = d[attr]\n except ValueError:\n continue\n method = getattr(method_self, method_name)\n setattr(self, attr, method)\n", "path": "luigi/contrib/hdfs/format.py"}]}
| 2,760 | 194 |
gh_patches_debug_67390
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-4675
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Proxy Provider not working
Hello
Please help me. I updated the authentik server to 23.1.2; it worked perfectly until now, but now the Proxy Provider is not working because of the following error.
This is in the server log:
{"error":"Post \"https://auth.xxx.com/application/o/token/\": dial tcp 192.168.10.240:443: connect: connection refused","event":"failed to redeem code","level":"warning","logger":"authentik.outpost.proxyv2.application","name":"Kuma","timestamp":"2023-01-24T13:01:34Z"}
The IP in the log is the IP of the nginx reverse proxy manager. The proxy works properly and I don't see any errors. Does anyone have any ideas?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/core/tasks.py`
Content:
```
1 """authentik core tasks"""
2 from datetime import datetime, timedelta
3
4 from django.contrib.sessions.backends.cache import KEY_PREFIX
5 from django.core.cache import cache
6 from django.utils.timezone import now
7 from structlog.stdlib import get_logger
8
9 from authentik.core.models import (
10 USER_ATTRIBUTE_EXPIRES,
11 USER_ATTRIBUTE_GENERATED,
12 AuthenticatedSession,
13 ExpiringModel,
14 User,
15 )
16 from authentik.events.monitored_tasks import (
17 MonitoredTask,
18 TaskResult,
19 TaskResultStatus,
20 prefill_task,
21 )
22 from authentik.root.celery import CELERY_APP
23
24 LOGGER = get_logger()
25
26
27 @CELERY_APP.task(bind=True, base=MonitoredTask)
28 @prefill_task
29 def clean_expired_models(self: MonitoredTask):
30 """Remove expired objects"""
31 messages = []
32 for cls in ExpiringModel.__subclasses__():
33 cls: ExpiringModel
34 objects = (
35 cls.objects.all().exclude(expiring=False).exclude(expiring=True, expires__gt=now())
36 )
37 amount = objects.count()
38 for obj in objects:
39 obj.expire_action()
40 LOGGER.debug("Expired models", model=cls, amount=amount)
41 messages.append(f"Expired {amount} {cls._meta.verbose_name_plural}")
42 # Special case
43 amount = 0
44 for session in AuthenticatedSession.objects.all():
45 cache_key = f"{KEY_PREFIX}{session.session_key}"
46 try:
47 value = cache.get(cache_key)
48 # pylint: disable=broad-except
49 except Exception as exc:
50 LOGGER.debug("Failed to get session from cache", exc=exc)
51 if not value:
52 session.delete()
53 amount += 1
54 LOGGER.debug("Expired sessions", model=AuthenticatedSession, amount=amount)
55 messages.append(f"Expired {amount} {AuthenticatedSession._meta.verbose_name_plural}")
56 self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))
57
58
59 @CELERY_APP.task(bind=True, base=MonitoredTask)
60 @prefill_task
61 def clean_temporary_users(self: MonitoredTask):
62 """Remove temporary users created by SAML Sources"""
63 _now = datetime.now()
64 messages = []
65 deleted_users = 0
66 for user in User.objects.filter(**{f"attributes__{USER_ATTRIBUTE_GENERATED}": True}):
67 if not user.attributes.get(USER_ATTRIBUTE_EXPIRES):
68 continue
69 delta: timedelta = _now - datetime.fromtimestamp(
70 user.attributes.get(USER_ATTRIBUTE_EXPIRES)
71 )
72 if delta.total_seconds() > 0:
73 LOGGER.debug("User is expired and will be deleted.", user=user, delta=delta)
74 user.delete()
75 deleted_users += 1
76 messages.append(f"Successfully deleted {deleted_users} users.")
77 self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/authentik/core/tasks.py b/authentik/core/tasks.py
--- a/authentik/core/tasks.py
+++ b/authentik/core/tasks.py
@@ -43,6 +43,7 @@
amount = 0
for session in AuthenticatedSession.objects.all():
cache_key = f"{KEY_PREFIX}{session.session_key}"
+ value = None
try:
value = cache.get(cache_key)
# pylint: disable=broad-except
|
{"golden_diff": "diff --git a/authentik/core/tasks.py b/authentik/core/tasks.py\n--- a/authentik/core/tasks.py\n+++ b/authentik/core/tasks.py\n@@ -43,6 +43,7 @@\n amount = 0\n for session in AuthenticatedSession.objects.all():\n cache_key = f\"{KEY_PREFIX}{session.session_key}\"\n+ value = None\n try:\n value = cache.get(cache_key)\n # pylint: disable=broad-except\n", "issue": "Proxy Provider not working \nHello\r\n\r\nPlease help me, I updated the authentik server to 23.1.2, it worked perfectly until now, now the Proxy Provider is not working because of the following error\r\n\r\nthis is in the server log\r\n\r\n{\"error\":\"Post \\\"https://auth.xxx.com/application/o/token/\\\": dial tcp 192.168.10.240:443: connect: connection refused\",\"event\":\"failed to redeem code\",\"level\":\"warning\",\"logger\":\"authentik.outpost.proxyv2.application\",\"name\":\"Kuma\",\"timestamp\":\"2023-01-24T13:01:34Z\"}\r\n\r\nThe IP in the log is the IP of the nginx reverse proxy manager. The proxy works properly, I don't see any errors. Anyone have any ideas?\r\n\n", "before_files": [{"content": "\"\"\"authentik core tasks\"\"\"\nfrom datetime import datetime, timedelta\n\nfrom django.contrib.sessions.backends.cache import KEY_PREFIX\nfrom django.core.cache import cache\nfrom django.utils.timezone import now\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import (\n USER_ATTRIBUTE_EXPIRES,\n USER_ATTRIBUTE_GENERATED,\n AuthenticatedSession,\n ExpiringModel,\n User,\n)\nfrom authentik.events.monitored_tasks import (\n MonitoredTask,\n TaskResult,\n TaskResultStatus,\n prefill_task,\n)\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger()\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\n@prefill_task\ndef clean_expired_models(self: MonitoredTask):\n \"\"\"Remove expired objects\"\"\"\n messages = []\n for cls in ExpiringModel.__subclasses__():\n cls: ExpiringModel\n objects = (\n cls.objects.all().exclude(expiring=False).exclude(expiring=True, expires__gt=now())\n )\n amount = objects.count()\n for obj in objects:\n obj.expire_action()\n LOGGER.debug(\"Expired models\", model=cls, amount=amount)\n messages.append(f\"Expired {amount} {cls._meta.verbose_name_plural}\")\n # Special case\n amount = 0\n for session in AuthenticatedSession.objects.all():\n cache_key = f\"{KEY_PREFIX}{session.session_key}\"\n try:\n value = cache.get(cache_key)\n # pylint: disable=broad-except\n except Exception as exc:\n LOGGER.debug(\"Failed to get session from cache\", exc=exc)\n if not value:\n session.delete()\n amount += 1\n LOGGER.debug(\"Expired sessions\", model=AuthenticatedSession, amount=amount)\n messages.append(f\"Expired {amount} {AuthenticatedSession._meta.verbose_name_plural}\")\n self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\n@prefill_task\ndef clean_temporary_users(self: MonitoredTask):\n \"\"\"Remove temporary users created by SAML Sources\"\"\"\n _now = datetime.now()\n messages = []\n deleted_users = 0\n for user in User.objects.filter(**{f\"attributes__{USER_ATTRIBUTE_GENERATED}\": True}):\n if not user.attributes.get(USER_ATTRIBUTE_EXPIRES):\n continue\n delta: timedelta = _now - datetime.fromtimestamp(\n user.attributes.get(USER_ATTRIBUTE_EXPIRES)\n )\n if delta.total_seconds() > 0:\n LOGGER.debug(\"User is expired and will be deleted.\", user=user, delta=delta)\n user.delete()\n deleted_users += 1\n messages.append(f\"Successfully deleted {deleted_users} users.\")\n 
self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))\n", "path": "authentik/core/tasks.py"}], "after_files": [{"content": "\"\"\"authentik core tasks\"\"\"\nfrom datetime import datetime, timedelta\n\nfrom django.contrib.sessions.backends.cache import KEY_PREFIX\nfrom django.core.cache import cache\nfrom django.utils.timezone import now\nfrom structlog.stdlib import get_logger\n\nfrom authentik.core.models import (\n USER_ATTRIBUTE_EXPIRES,\n USER_ATTRIBUTE_GENERATED,\n AuthenticatedSession,\n ExpiringModel,\n User,\n)\nfrom authentik.events.monitored_tasks import (\n MonitoredTask,\n TaskResult,\n TaskResultStatus,\n prefill_task,\n)\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger()\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\n@prefill_task\ndef clean_expired_models(self: MonitoredTask):\n \"\"\"Remove expired objects\"\"\"\n messages = []\n for cls in ExpiringModel.__subclasses__():\n cls: ExpiringModel\n objects = (\n cls.objects.all().exclude(expiring=False).exclude(expiring=True, expires__gt=now())\n )\n amount = objects.count()\n for obj in objects:\n obj.expire_action()\n LOGGER.debug(\"Expired models\", model=cls, amount=amount)\n messages.append(f\"Expired {amount} {cls._meta.verbose_name_plural}\")\n # Special case\n amount = 0\n for session in AuthenticatedSession.objects.all():\n cache_key = f\"{KEY_PREFIX}{session.session_key}\"\n value = None\n try:\n value = cache.get(cache_key)\n # pylint: disable=broad-except\n except Exception as exc:\n LOGGER.debug(\"Failed to get session from cache\", exc=exc)\n if not value:\n session.delete()\n amount += 1\n LOGGER.debug(\"Expired sessions\", model=AuthenticatedSession, amount=amount)\n messages.append(f\"Expired {amount} {AuthenticatedSession._meta.verbose_name_plural}\")\n self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))\n\n\n@CELERY_APP.task(bind=True, base=MonitoredTask)\n@prefill_task\ndef clean_temporary_users(self: MonitoredTask):\n \"\"\"Remove temporary users created by SAML Sources\"\"\"\n _now = datetime.now()\n messages = []\n deleted_users = 0\n for user in User.objects.filter(**{f\"attributes__{USER_ATTRIBUTE_GENERATED}\": True}):\n if not user.attributes.get(USER_ATTRIBUTE_EXPIRES):\n continue\n delta: timedelta = _now - datetime.fromtimestamp(\n user.attributes.get(USER_ATTRIBUTE_EXPIRES)\n )\n if delta.total_seconds() > 0:\n LOGGER.debug(\"User is expired and will be deleted.\", user=user, delta=delta)\n user.delete()\n deleted_users += 1\n messages.append(f\"Successfully deleted {deleted_users} users.\")\n self.set_status(TaskResult(TaskResultStatus.SUCCESSFUL, messages))\n", "path": "authentik/core/tasks.py"}]}
| 1,184 | 104 |
gh_patches_debug_3477
|
rasdani/github-patches
|
git_diff
|
opentensor__bittensor-969
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bit 590 backward fix
- Keep count of remote loss on server, saving model based on most 20 recent average loss.
- When doing tokenization remap, make sure the input size and output size is the same.
- Ensure encode_forward_causallmnext deterministic by set seed.
- When both local_train and remote_train are on: do local_train only when the server is free.
- Validator default to do backward train
Minor fixes
1) Removes the parser generation on the config with messes with --help when using a parser
2) Turns off console-rich local logging (which sucks)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bittensor/__init__.py`
Content:
```
1 # The MIT License (MIT)
2 # Copyright © 2021 Yuma Rao
3
4 # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
5 # documentation files (the “Software”), to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
7 # and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
8
9 # The above copyright notice and this permission notice shall be included in all copies or substantial portions of
10 # the Software.
11
12 # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
13 # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
14 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
15 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
16 # DEALINGS IN THE SOFTWARE.
17
18 from rich.console import Console
19 from rich.traceback import install
20 from prometheus_client import Info
21
22 import nest_asyncio
23 nest_asyncio.apply()
24
25 # Bittensor code and protocol version.
26 __version__ = '3.4.1'
27 version_split = __version__.split(".")
28 __version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
29
30
31 # Turn off rich console locals trace.
32 from rich.traceback import install
33 install(show_locals=False)
34
35 # Rich console.
36 __console__ = Console()
37 __use_console__ = True
38
39 # Remove overdue locals in debug training.
40 install(show_locals=False)
41
42 def turn_console_off():
43 from io import StringIO
44 __use_console__ = False
45 __console__ = Console(file=StringIO(), stderr=False)
46
47
48
49 # Vocabulary dimension.
50 #__vocab_size__ = len( tokenizer ) + len( tokenizer.additional_special_tokens) + 100 # Plus 100 for eventual token size increase.
51 __vocab_size__ = 50258
52
53 # Tensor dimension.
54 # NOTE (const): if/when this increases peers must be responsible for trimming or expanding output to this size.
55 __network_dim__ = 1024 # All network responses have shape = [ __batch_size__, __sequence_dim__, __network_dim__ ]
56
57 # Substrate chain block time (seconds).
58 __blocktime__ = 12
59
60 # Pip address for versioning
61 __pipaddress__ = 'https://pypi.org/pypi/bittensor/json'
62
63 # Substrate ss58_format
64 __ss58_format__ = 42
65
66 # Wallet ss58 address length
67 __ss58_address_length__ = 48
68
69 __networks__ = [ 'local', 'bellagene', 'nobunaga', 'nakamoto']
70
71 __datasets__ = ['ArXiv', 'BookCorpus2', 'Books3', 'DMMathematics', 'EnronEmails', 'EuroParl', 'Gutenberg_PG', 'HackerNews', 'NIHExPorter', 'OpenSubtitles', 'PhilPapers', 'UbuntuIRC', 'YoutubeSubtitles']
72
73 __nakamoto_entrypoint__ = "AtreusLB-2c6154f73e6429a9.elb.us-east-2.amazonaws.com:9944"
74
75
76 __nobunaga_entrypoint__ = "staging.nobunaga.opentensor.ai:9944"
77
78 # Needs to use wss://
79 __bellagene_entrypoint__ = "wss://parachain.opentensor.ai:443"
80
81
82 __local_entrypoint__ = "127.0.0.1:9944"
83
84
85 # Avoid collisions with other processes
86 from .utils.test_utils import get_random_unused_port
87 mock_subtensor_port = get_random_unused_port()
88 __mock_entrypoint__ = f"localhost:{mock_subtensor_port}"
89
90
91 # --- Prometheus ---
92 __prometheus_version__ = "0.1.0"
93 prometheus_version__split = __prometheus_version__.split(".")
94 __prometheus_version__as_int__ = (100 * int(prometheus_version__split[0])) + (10 * int(prometheus_version__split[1])) + (1 * int(prometheus_version__split[2]))
95 try:
96 bt_promo_info = Info("bittensor_info", "Information about the installed bittensor package.")
97 bt_promo_info.info (
98 {
99 '__version__': str(__version__),
100 '__version_as_int__': str(__version_as_int__),
101 '__vocab_size__': str(__vocab_size__),
102 '__network_dim__': str(__network_dim__),
103 '__blocktime__': str(__blocktime__),
104 '__prometheus_version__': str(__prometheus_version__),
105 '__prometheus_version__as_int__': str(__prometheus_version__as_int__),
106 }
107 )
108 except ValueError:
109 # This can silently fail if we import bittensor twice in the same process.
110 # We simply pass over this error.
111 pass
112
113 # ---- Config ----
114 from bittensor._config import config as config
115
116 # ---- LOGGING ----
117 from bittensor._logging import logging as logging
118
119 # ---- Protos ----
120 import bittensor._proto.bittensor_pb2 as proto
121 import bittensor._proto.bittensor_pb2_grpc as grpc
122
123 # ---- Neurons ----
124 import bittensor._neuron as neurons
125
126 # ---- Utils ----
127 from bittensor.utils import unbiased_topk as unbiased_topk
128
129 # ---- Factories -----
130 from bittensor.utils.balance import Balance as Balance
131 from bittensor._cli import cli as cli
132 from bittensor._axon import axon as axon
133 from bittensor._wallet import wallet as wallet
134 from bittensor._keyfile import keyfile as keyfile
135 from bittensor._receptor import receptor as receptor
136 from bittensor._endpoint import endpoint as endpoint
137 from bittensor._dendrite import dendrite as dendrite
138 from bittensor._metagraph import metagraph as metagraph
139 from bittensor._prometheus import prometheus as prometheus
140 from bittensor._subtensor import subtensor as subtensor
141 from bittensor._tokenizer import tokenizer as tokenizer
142 from bittensor._serializer import serializer as serializer
143 from bittensor._synapse import synapse as synapse
144 from bittensor._dataset import dataset as dataset
145 from bittensor._receptor import receptor_pool as receptor_pool
146 from bittensor._wandb import wandb as wandb
147 from bittensor._threadpool import prioritythreadpool as prioritythreadpool
148
149 # ---- Classes -----
150 from bittensor._cli.cli_impl import CLI as CLI
151 from bittensor._axon.axon_impl import Axon as Axon
152 from bittensor._config.config_impl import Config as Config
153 from bittensor._wallet.wallet_impl import Wallet as Wallet
154 from bittensor._keyfile.keyfile_impl import Keyfile as Keyfile
155 from bittensor._receptor.receptor_impl import Receptor as Receptor
156 from bittensor._endpoint.endpoint_impl import Endpoint as Endpoint
157 from bittensor._dendrite.dendrite_impl import Dendrite as Dendrite
158 from bittensor._metagraph.metagraph_impl import Metagraph as Metagraph
159 from bittensor._subtensor.subtensor_impl import Subtensor as Subtensor
160 from bittensor._serializer.serializer_impl import Serializer as Serializer
161 from bittensor._dataset.dataset_impl import Dataset as Dataset
162 from bittensor._receptor.receptor_pool_impl import ReceptorPool as ReceptorPool
163 from bittensor._threadpool.priority_thread_pool_impl import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor
164 from bittensor._ipfs.ipfs_impl import Ipfs as Ipfs
165 from bittensor._synapse.synapse_impl import Synapse as Synapse
166 from bittensor._synapse.text_causallm_impl import TextCausalLM as TextCausalLM
167 from bittensor._synapse.text_causallmnext_impl import TextCausalLMNext as TextCausalLMNext
168 from bittensor._synapse.text_lasthiddenstate_impl import TextLastHiddenState as TextLastHiddenState
169 from bittensor._synapse.text_seq2seq_impl import TextSeq2Seq as TextSeq2Seq
170
171 # DEFAULTS
172 defaults = Config()
173 subtensor.add_defaults( defaults )
174 dendrite.add_defaults( defaults )
175 axon.add_defaults( defaults )
176 prometheus.add_defaults( defaults )
177 wallet.add_defaults( defaults )
178 dataset.add_defaults( defaults )
179 wandb.add_defaults( defaults )
180 logging.add_defaults( defaults )
181
182 from substrateinterface import Keypair as Keypair
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bittensor/__init__.py b/bittensor/__init__.py
--- a/bittensor/__init__.py
+++ b/bittensor/__init__.py
@@ -23,7 +23,7 @@
nest_asyncio.apply()
# Bittensor code and protocol version.
-__version__ = '3.4.1'
+__version__ = '3.4.2'
version_split = __version__.split(".")
__version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
|
{"golden_diff": "diff --git a/bittensor/__init__.py b/bittensor/__init__.py\n--- a/bittensor/__init__.py\n+++ b/bittensor/__init__.py\n@@ -23,7 +23,7 @@\n nest_asyncio.apply()\n \n # Bittensor code and protocol version.\n-__version__ = '3.4.1'\n+__version__ = '3.4.2'\n version_split = __version__.split(\".\")\n __version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))\n", "issue": "Bit 590 backward fix\n- Keep count of remote loss on server, saving model based on most 20 recent average loss.\r\n- When doing tokenization remap, make sure the input size and output size is the same.\r\n- Ensure encode_forward_causallmnext deterministic by set seed.\r\n- When both local_train and remote_train are on: do local_train only when the server is free.\r\n- Validator default to do backward train\nMinor fixes\n1) Removes the parser generation on the config with messes with --help when using a parser\r\n2) Turns off console-rich local logging (which sucks)\n", "before_files": [{"content": "# The MIT License (MIT)\n# Copyright \u00a9 2021 Yuma Rao\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated \n# documentation files (the \u201cSoftware\u201d), to deal in the Software without restriction, including without limitation \n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of \n# the Software.\n\n# THE SOFTWARE IS PROVIDED \u201cAS IS\u201d, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL \n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION \n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n# DEALINGS IN THE SOFTWARE.\n\nfrom rich.console import Console\nfrom rich.traceback import install\nfrom prometheus_client import Info\n\nimport nest_asyncio\nnest_asyncio.apply()\n\n# Bittensor code and protocol version.\n__version__ = '3.4.1'\nversion_split = __version__.split(\".\")\n__version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))\n\n\n# Turn off rich console locals trace.\nfrom rich.traceback import install\ninstall(show_locals=False)\n\n# Rich console.\n__console__ = Console()\n__use_console__ = True\n\n# Remove overdue locals in debug training.\ninstall(show_locals=False)\n\ndef turn_console_off():\n from io import StringIO\n __use_console__ = False\n __console__ = Console(file=StringIO(), stderr=False)\n\n\n\n# Vocabulary dimension.\n#__vocab_size__ = len( tokenizer ) + len( tokenizer.additional_special_tokens) + 100 # Plus 100 for eventual token size increase.\n__vocab_size__ = 50258\n\n# Tensor dimension.\n# NOTE (const): if/when this increases peers must be responsible for trimming or expanding output to this size.\n__network_dim__ = 1024 # All network responses have shape = [ __batch_size__, __sequence_dim__, __network_dim__ ]\n\n# Substrate chain block time (seconds).\n__blocktime__ = 12\n\n# Pip address for versioning\n__pipaddress__ = 'https://pypi.org/pypi/bittensor/json'\n\n# Substrate ss58_format\n__ss58_format__ = 42\n\n# Wallet ss58 address length\n__ss58_address_length__ = 48\n\n__networks__ = [ 'local', 'bellagene', 'nobunaga', 'nakamoto']\n\n__datasets__ = ['ArXiv', 'BookCorpus2', 'Books3', 'DMMathematics', 'EnronEmails', 'EuroParl', 'Gutenberg_PG', 'HackerNews', 'NIHExPorter', 'OpenSubtitles', 'PhilPapers', 'UbuntuIRC', 'YoutubeSubtitles']\n\n__nakamoto_entrypoint__ = \"AtreusLB-2c6154f73e6429a9.elb.us-east-2.amazonaws.com:9944\"\n\n\n__nobunaga_entrypoint__ = \"staging.nobunaga.opentensor.ai:9944\"\n\n# Needs to use wss://\n__bellagene_entrypoint__ = \"wss://parachain.opentensor.ai:443\"\n\n\n__local_entrypoint__ = \"127.0.0.1:9944\"\n\n\n# Avoid collisions with other processes\nfrom .utils.test_utils import get_random_unused_port\nmock_subtensor_port = get_random_unused_port()\n__mock_entrypoint__ = f\"localhost:{mock_subtensor_port}\"\n\n\n# --- Prometheus ---\n__prometheus_version__ = \"0.1.0\"\nprometheus_version__split = __prometheus_version__.split(\".\")\n__prometheus_version__as_int__ = (100 * int(prometheus_version__split[0])) + (10 * int(prometheus_version__split[1])) + (1 * int(prometheus_version__split[2]))\ntry:\n bt_promo_info = Info(\"bittensor_info\", \"Information about the installed bittensor package.\")\n bt_promo_info.info ( \n {\n '__version__': str(__version__),\n '__version_as_int__': str(__version_as_int__),\n '__vocab_size__': str(__vocab_size__),\n '__network_dim__': str(__network_dim__),\n '__blocktime__': str(__blocktime__),\n '__prometheus_version__': str(__prometheus_version__),\n '__prometheus_version__as_int__': str(__prometheus_version__as_int__),\n } \n )\nexcept ValueError: \n # This can silently fail if we import bittensor twice in the same process.\n # We simply pass over this error. 
\n pass\n\n# ---- Config ----\nfrom bittensor._config import config as config\n\n# ---- LOGGING ----\nfrom bittensor._logging import logging as logging\n\n# ---- Protos ----\nimport bittensor._proto.bittensor_pb2 as proto\nimport bittensor._proto.bittensor_pb2_grpc as grpc\n\n# ---- Neurons ----\nimport bittensor._neuron as neurons\n\n# ---- Utils ----\nfrom bittensor.utils import unbiased_topk as unbiased_topk\n\n# ---- Factories -----\nfrom bittensor.utils.balance import Balance as Balance\nfrom bittensor._cli import cli as cli\nfrom bittensor._axon import axon as axon\nfrom bittensor._wallet import wallet as wallet\nfrom bittensor._keyfile import keyfile as keyfile\nfrom bittensor._receptor import receptor as receptor\nfrom bittensor._endpoint import endpoint as endpoint\nfrom bittensor._dendrite import dendrite as dendrite\nfrom bittensor._metagraph import metagraph as metagraph\nfrom bittensor._prometheus import prometheus as prometheus\nfrom bittensor._subtensor import subtensor as subtensor\nfrom bittensor._tokenizer import tokenizer as tokenizer\nfrom bittensor._serializer import serializer as serializer\nfrom bittensor._synapse import synapse as synapse \nfrom bittensor._dataset import dataset as dataset\nfrom bittensor._receptor import receptor_pool as receptor_pool\nfrom bittensor._wandb import wandb as wandb\nfrom bittensor._threadpool import prioritythreadpool as prioritythreadpool\n\n# ---- Classes -----\nfrom bittensor._cli.cli_impl import CLI as CLI\nfrom bittensor._axon.axon_impl import Axon as Axon\nfrom bittensor._config.config_impl import Config as Config\nfrom bittensor._wallet.wallet_impl import Wallet as Wallet\nfrom bittensor._keyfile.keyfile_impl import Keyfile as Keyfile\nfrom bittensor._receptor.receptor_impl import Receptor as Receptor\nfrom bittensor._endpoint.endpoint_impl import Endpoint as Endpoint\nfrom bittensor._dendrite.dendrite_impl import Dendrite as Dendrite\nfrom bittensor._metagraph.metagraph_impl import Metagraph as Metagraph\nfrom bittensor._subtensor.subtensor_impl import Subtensor as Subtensor\nfrom bittensor._serializer.serializer_impl import Serializer as Serializer\nfrom bittensor._dataset.dataset_impl import Dataset as Dataset\nfrom bittensor._receptor.receptor_pool_impl import ReceptorPool as ReceptorPool\nfrom bittensor._threadpool.priority_thread_pool_impl import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor\nfrom bittensor._ipfs.ipfs_impl import Ipfs as Ipfs\nfrom bittensor._synapse.synapse_impl import Synapse as Synapse\nfrom bittensor._synapse.text_causallm_impl import TextCausalLM as TextCausalLM\nfrom bittensor._synapse.text_causallmnext_impl import TextCausalLMNext as TextCausalLMNext\nfrom bittensor._synapse.text_lasthiddenstate_impl import TextLastHiddenState as TextLastHiddenState\nfrom bittensor._synapse.text_seq2seq_impl import TextSeq2Seq as TextSeq2Seq\n\n# DEFAULTS\ndefaults = Config()\nsubtensor.add_defaults( defaults )\ndendrite.add_defaults( defaults )\naxon.add_defaults( defaults )\nprometheus.add_defaults( defaults )\nwallet.add_defaults( defaults )\ndataset.add_defaults( defaults )\nwandb.add_defaults( defaults )\nlogging.add_defaults( defaults )\n\nfrom substrateinterface import Keypair as Keypair\n", "path": "bittensor/__init__.py"}], "after_files": [{"content": "# The MIT License (MIT)\n# Copyright \u00a9 2021 Yuma Rao\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated \n# documentation files (the \u201cSoftware\u201d), to deal in the Software 
without restriction, including without limitation \n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of \n# the Software.\n\n# THE SOFTWARE IS PROVIDED \u201cAS IS\u201d, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION \n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n# DEALINGS IN THE SOFTWARE.\n\nfrom rich.console import Console\nfrom rich.traceback import install\nfrom prometheus_client import Info\n\nimport nest_asyncio\nnest_asyncio.apply()\n\n# Bittensor code and protocol version.\n__version__ = '3.4.2'\nversion_split = __version__.split(\".\")\n__version_as_int__ = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))\n\n\n# Turn off rich console locals trace.\nfrom rich.traceback import install\ninstall(show_locals=False)\n\n# Rich console.\n__console__ = Console()\n__use_console__ = True\n\n# Remove overdue locals in debug training.\ninstall(show_locals=False)\n\ndef turn_console_off():\n from io import StringIO\n __use_console__ = False\n __console__ = Console(file=StringIO(), stderr=False)\n\n\n\n# Vocabulary dimension.\n#__vocab_size__ = len( tokenizer ) + len( tokenizer.additional_special_tokens) + 100 # Plus 100 for eventual token size increase.\n__vocab_size__ = 50258\n\n# Tensor dimension.\n# NOTE (const): if/when this increases peers must be responsible for trimming or expanding output to this size.\n__network_dim__ = 1024 # All network responses have shape = [ __batch_size__, __sequence_dim__, __network_dim__ ]\n\n# Substrate chain block time (seconds).\n__blocktime__ = 12\n\n# Pip address for versioning\n__pipaddress__ = 'https://pypi.org/pypi/bittensor/json'\n\n# Substrate ss58_format\n__ss58_format__ = 42\n\n# Wallet ss58 address length\n__ss58_address_length__ = 48\n\n__networks__ = [ 'local', 'bellagene', 'nobunaga', 'nakamoto']\n\n__datasets__ = ['ArXiv', 'BookCorpus2', 'Books3', 'DMMathematics', 'EnronEmails', 'EuroParl', 'Gutenberg_PG', 'HackerNews', 'NIHExPorter', 'OpenSubtitles', 'PhilPapers', 'UbuntuIRC', 'YoutubeSubtitles']\n\n__nakamoto_entrypoint__ = \"AtreusLB-2c6154f73e6429a9.elb.us-east-2.amazonaws.com:9944\"\n\n\n__nobunaga_entrypoint__ = \"staging.nobunaga.opentensor.ai:9944\"\n\n# Needs to use wss://\n__bellagene_entrypoint__ = \"wss://parachain.opentensor.ai:443\"\n\n\n__local_entrypoint__ = \"127.0.0.1:9944\"\n\n\n# Avoid collisions with other processes\nfrom .utils.test_utils import get_random_unused_port\nmock_subtensor_port = get_random_unused_port()\n__mock_entrypoint__ = f\"localhost:{mock_subtensor_port}\"\n\n\n# --- Prometheus ---\n__prometheus_version__ = \"0.1.0\"\nprometheus_version__split = __prometheus_version__.split(\".\")\n__prometheus_version__as_int__ = (100 * int(prometheus_version__split[0])) + (10 * int(prometheus_version__split[1])) + (1 * int(prometheus_version__split[2]))\ntry:\n bt_promo_info = Info(\"bittensor_info\", \"Information about the installed bittensor package.\")\n bt_promo_info.info ( \n 
{\n '__version__': str(__version__),\n '__version_as_int__': str(__version_as_int__),\n '__vocab_size__': str(__vocab_size__),\n '__network_dim__': str(__network_dim__),\n '__blocktime__': str(__blocktime__),\n '__prometheus_version__': str(__prometheus_version__),\n '__prometheus_version__as_int__': str(__prometheus_version__as_int__),\n } \n )\nexcept ValueError: \n # This can silently fail if we import bittensor twice in the same process.\n # We simply pass over this error. \n pass\n\n# ---- Config ----\nfrom bittensor._config import config as config\n\n# ---- LOGGING ----\nfrom bittensor._logging import logging as logging\n\n# ---- Protos ----\nimport bittensor._proto.bittensor_pb2 as proto\nimport bittensor._proto.bittensor_pb2_grpc as grpc\n\n# ---- Neurons ----\nimport bittensor._neuron as neurons\n\n# ---- Utils ----\nfrom bittensor.utils import unbiased_topk as unbiased_topk\n\n# ---- Factories -----\nfrom bittensor.utils.balance import Balance as Balance\nfrom bittensor._cli import cli as cli\nfrom bittensor._axon import axon as axon\nfrom bittensor._wallet import wallet as wallet\nfrom bittensor._keyfile import keyfile as keyfile\nfrom bittensor._receptor import receptor as receptor\nfrom bittensor._endpoint import endpoint as endpoint\nfrom bittensor._dendrite import dendrite as dendrite\nfrom bittensor._metagraph import metagraph as metagraph\nfrom bittensor._prometheus import prometheus as prometheus\nfrom bittensor._subtensor import subtensor as subtensor\nfrom bittensor._tokenizer import tokenizer as tokenizer\nfrom bittensor._serializer import serializer as serializer\nfrom bittensor._synapse import synapse as synapse \nfrom bittensor._dataset import dataset as dataset\nfrom bittensor._receptor import receptor_pool as receptor_pool\nfrom bittensor._wandb import wandb as wandb\nfrom bittensor._threadpool import prioritythreadpool as prioritythreadpool\n\n# ---- Classes -----\nfrom bittensor._cli.cli_impl import CLI as CLI\nfrom bittensor._axon.axon_impl import Axon as Axon\nfrom bittensor._config.config_impl import Config as Config\nfrom bittensor._wallet.wallet_impl import Wallet as Wallet\nfrom bittensor._keyfile.keyfile_impl import Keyfile as Keyfile\nfrom bittensor._receptor.receptor_impl import Receptor as Receptor\nfrom bittensor._endpoint.endpoint_impl import Endpoint as Endpoint\nfrom bittensor._dendrite.dendrite_impl import Dendrite as Dendrite\nfrom bittensor._metagraph.metagraph_impl import Metagraph as Metagraph\nfrom bittensor._subtensor.subtensor_impl import Subtensor as Subtensor\nfrom bittensor._serializer.serializer_impl import Serializer as Serializer\nfrom bittensor._dataset.dataset_impl import Dataset as Dataset\nfrom bittensor._receptor.receptor_pool_impl import ReceptorPool as ReceptorPool\nfrom bittensor._threadpool.priority_thread_pool_impl import PriorityThreadPoolExecutor as PriorityThreadPoolExecutor\nfrom bittensor._ipfs.ipfs_impl import Ipfs as Ipfs\nfrom bittensor._synapse.synapse_impl import Synapse as Synapse\nfrom bittensor._synapse.text_causallm_impl import TextCausalLM as TextCausalLM\nfrom bittensor._synapse.text_causallmnext_impl import TextCausalLMNext as TextCausalLMNext\nfrom bittensor._synapse.text_lasthiddenstate_impl import TextLastHiddenState as TextLastHiddenState\nfrom bittensor._synapse.text_seq2seq_impl import TextSeq2Seq as TextSeq2Seq\n\n# DEFAULTS\ndefaults = Config()\nsubtensor.add_defaults( defaults )\ndendrite.add_defaults( defaults )\naxon.add_defaults( defaults )\nprometheus.add_defaults( defaults 
)\nwallet.add_defaults( defaults )\ndataset.add_defaults( defaults )\nwandb.add_defaults( defaults )\nlogging.add_defaults( defaults )\n\nfrom substrateinterface import Keypair as Keypair\n", "path": "bittensor/__init__.py"}]}
| 2,751 | 138 |
gh_patches_debug_27082
|
rasdani/github-patches
|
git_diff
|
apache__airflow-25524
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API server /plugin crashes
### Apache Airflow version
2.3.3 (latest released)
### What happened
The `/plugins` endpoint returned a 500 http status code.
```
curl -X GET http://localhost:8080/api/v1/plugins\?limit\=1 \
-H 'Cache-Control: no-cache' \
--user "admin:admin"
{
"detail": "\"{'name': 'Test View', 'category': 'Test Plugin', 'view': 'test.appbuilder_views.TestAppBuilderBaseView'}\" is not of type 'object'\n\nFailed validating 'type' in schema['allOf'][0]['properties']['plugins']['items']['properties']['appbuilder_views']['items']:\n {'nullable': True, 'type': 'object'}\n\nOn instance['plugins'][0]['appbuilder_views'][0]:\n (\"{'name': 'Test View', 'category': 'Test Plugin', 'view': \"\n \"'test.appbuilder_views.TestAppBuilderBaseView'}\")",
"status": 500,
"title": "Response body does not conform to specification",
"type": "http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com/docs/apache-airflow/latest/stable-rest-api-ref.html#section/Errors/Unknown"
}
```
The error message in the webserver is as followed
```
[2022-08-03 17:07:57,705] {validation.py:244} ERROR - http://localhost:8080/api/v1/plugins?limit=1 validation error: "{'name': 'Test View', 'category': 'Test Plugin', 'view': 'test.appbuilder_views.TestAppBuilderBaseView'}" is not of type 'object'
Failed validating 'type' in schema['allOf'][0]['properties']['plugins']['items']['properties']['appbuilder_views']['items']:
{'nullable': True, 'type': 'object'}
On instance['plugins'][0]['appbuilder_views'][0]:
("{'name': 'Test View', 'category': 'Test Plugin', 'view': "
"'test.appbuilder_views.TestAppBuilderBaseView'}")
172.18.0.1 - admin [03/Aug/2022:17:10:17 +0000] "GET /api/v1/plugins?limit=1 HTTP/1.1" 500 733 "-" "curl/7.79.1"
```
### What you think should happen instead
The response should contain all the plugins integrated with Airflow.
### How to reproduce
Create a simple plugin in the plugin directory.
`appbuilder_views.py`
```
from flask_appbuilder import expose, BaseView as AppBuilderBaseView
# Creating a flask appbuilder BaseView
class TestAppBuilderBaseView(AppBuilderBaseView):
@expose("/")
def test(self):
return self.render_template("test_plugin/test.html", content="Hello galaxy!")
```
`plugin.py`
```
from airflow.plugins_manager import AirflowPlugin
from test.appbuilder_views import TestAppBuilderBaseView
class TestPlugin(AirflowPlugin):
name = "test"
appbuilder_views = [
{
"name": "Test View",
"category": "Test Plugin",
"view": TestAppBuilderBaseView()
}
]
```
Call the `/plugin` endpoint.
```
curl -X GET http://localhost:8080/api/v1/plugins\?limit\=1 \
-H 'Cache-Control: no-cache' \
--user "admin:admin"
```
### Operating System
N/A
### Versions of Apache Airflow Providers
_No response_
### Deployment
Astronomer
### Deployment details
_No response_
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/api_connexion/endpoints/plugin_endpoint.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 from airflow.api_connexion import security
18 from airflow.api_connexion.parameters import check_limit, format_parameters
19 from airflow.api_connexion.schemas.plugin_schema import PluginCollection, plugin_collection_schema
20 from airflow.api_connexion.types import APIResponse
21 from airflow.plugins_manager import get_plugin_info
22 from airflow.security import permissions
23
24
25 @security.requires_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN)])
26 @format_parameters({"limit": check_limit})
27 def get_plugins(*, limit: int, offset: int = 0) -> APIResponse:
28 """Get plugins endpoint"""
29 plugins_info = get_plugin_info()
30 total_entries = len(plugins_info)
31 plugins_info = plugins_info[offset:]
32 plugins_info = plugins_info[:limit]
33 return plugin_collection_schema.dump(PluginCollection(plugins=plugins_info, total_entries=total_entries))
34
```
Path: `airflow/api_connexion/schemas/plugin_schema.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 from typing import List, NamedTuple
19
20 from marshmallow import Schema, fields
21
22
23 class PluginSchema(Schema):
24 """Plugin schema"""
25
26 number = fields.Int()
27 name = fields.String()
28 hooks = fields.List(fields.String())
29 executors = fields.List(fields.String())
30 macros = fields.List(fields.String())
31 flask_blueprints = fields.List(fields.String())
32 appbuilder_views = fields.List(fields.String())
33 appbuilder_menu_items = fields.List(fields.Dict())
34 global_operator_extra_links = fields.List(fields.String())
35 operator_extra_links = fields.List(fields.String())
36 source = fields.String()
37
38
39 class PluginCollection(NamedTuple):
40 """Plugin List"""
41
42 plugins: List
43 total_entries: int
44
45
46 class PluginCollectionSchema(Schema):
47 """Plugin Collection List"""
48
49 plugins = fields.List(fields.Nested(PluginSchema))
50 total_entries = fields.Int()
51
52
53 plugin_schema = PluginSchema()
54 plugin_collection_schema = PluginCollectionSchema()
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/airflow/api_connexion/endpoints/plugin_endpoint.py b/airflow/api_connexion/endpoints/plugin_endpoint.py
--- a/airflow/api_connexion/endpoints/plugin_endpoint.py
+++ b/airflow/api_connexion/endpoints/plugin_endpoint.py
@@ -27,7 +27,5 @@
def get_plugins(*, limit: int, offset: int = 0) -> APIResponse:
"""Get plugins endpoint"""
plugins_info = get_plugin_info()
- total_entries = len(plugins_info)
- plugins_info = plugins_info[offset:]
- plugins_info = plugins_info[:limit]
- return plugin_collection_schema.dump(PluginCollection(plugins=plugins_info, total_entries=total_entries))
+ collection = PluginCollection(plugins=plugins_info[offset:][:limit], total_entries=len(plugins_info))
+ return plugin_collection_schema.dump(collection)
diff --git a/airflow/api_connexion/schemas/plugin_schema.py b/airflow/api_connexion/schemas/plugin_schema.py
--- a/airflow/api_connexion/schemas/plugin_schema.py
+++ b/airflow/api_connexion/schemas/plugin_schema.py
@@ -23,16 +23,15 @@
class PluginSchema(Schema):
"""Plugin schema"""
- number = fields.Int()
name = fields.String()
hooks = fields.List(fields.String())
executors = fields.List(fields.String())
- macros = fields.List(fields.String())
- flask_blueprints = fields.List(fields.String())
- appbuilder_views = fields.List(fields.String())
+ macros = fields.List(fields.Dict())
+ flask_blueprints = fields.List(fields.Dict())
+ appbuilder_views = fields.List(fields.Dict())
appbuilder_menu_items = fields.List(fields.Dict())
- global_operator_extra_links = fields.List(fields.String())
- operator_extra_links = fields.List(fields.String())
+ global_operator_extra_links = fields.List(fields.Dict())
+ operator_extra_links = fields.List(fields.Dict())
source = fields.String()
|
{"golden_diff": "diff --git a/airflow/api_connexion/endpoints/plugin_endpoint.py b/airflow/api_connexion/endpoints/plugin_endpoint.py\n--- a/airflow/api_connexion/endpoints/plugin_endpoint.py\n+++ b/airflow/api_connexion/endpoints/plugin_endpoint.py\n@@ -27,7 +27,5 @@\n def get_plugins(*, limit: int, offset: int = 0) -> APIResponse:\n \"\"\"Get plugins endpoint\"\"\"\n plugins_info = get_plugin_info()\n- total_entries = len(plugins_info)\n- plugins_info = plugins_info[offset:]\n- plugins_info = plugins_info[:limit]\n- return plugin_collection_schema.dump(PluginCollection(plugins=plugins_info, total_entries=total_entries))\n+ collection = PluginCollection(plugins=plugins_info[offset:][:limit], total_entries=len(plugins_info))\n+ return plugin_collection_schema.dump(collection)\ndiff --git a/airflow/api_connexion/schemas/plugin_schema.py b/airflow/api_connexion/schemas/plugin_schema.py\n--- a/airflow/api_connexion/schemas/plugin_schema.py\n+++ b/airflow/api_connexion/schemas/plugin_schema.py\n@@ -23,16 +23,15 @@\n class PluginSchema(Schema):\n \"\"\"Plugin schema\"\"\"\n \n- number = fields.Int()\n name = fields.String()\n hooks = fields.List(fields.String())\n executors = fields.List(fields.String())\n- macros = fields.List(fields.String())\n- flask_blueprints = fields.List(fields.String())\n- appbuilder_views = fields.List(fields.String())\n+ macros = fields.List(fields.Dict())\n+ flask_blueprints = fields.List(fields.Dict())\n+ appbuilder_views = fields.List(fields.Dict())\n appbuilder_menu_items = fields.List(fields.Dict())\n- global_operator_extra_links = fields.List(fields.String())\n- operator_extra_links = fields.List(fields.String())\n+ global_operator_extra_links = fields.List(fields.Dict())\n+ operator_extra_links = fields.List(fields.Dict())\n source = fields.String()\n", "issue": "API server /plugin crashes\n### Apache Airflow version\r\n\r\n2.3.3 (latest released)\r\n\r\n### What happened\r\n\r\nThe `/plugins` endpoint returned a 500 http status code.\r\n\r\n```\r\ncurl -X GET http://localhost:8080/api/v1/plugins\\?limit\\=1 \\\r\n -H 'Cache-Control: no-cache' \\\r\n --user \"admin:admin\"\r\n{\r\n \"detail\": \"\\\"{'name': 'Test View', 'category': 'Test Plugin', 'view': 'test.appbuilder_views.TestAppBuilderBaseView'}\\\" is not of type 'object'\\n\\nFailed validating 'type' in schema['allOf'][0]['properties']['plugins']['items']['properties']['appbuilder_views']['items']:\\n {'nullable': True, 'type': 'object'}\\n\\nOn instance['plugins'][0]['appbuilder_views'][0]:\\n (\\\"{'name': 'Test View', 'category': 'Test Plugin', 'view': \\\"\\n \\\"'test.appbuilder_views.TestAppBuilderBaseView'}\\\")\",\r\n \"status\": 500,\r\n \"title\": \"Response body does not conform to specification\",\r\n \"type\": \"http://apache-airflow-docs.s3-website.eu-central-1.amazonaws.com/docs/apache-airflow/latest/stable-rest-api-ref.html#section/Errors/Unknown\"\r\n}\r\n```\r\n\r\nThe error message in the webserver is as followed\r\n\r\n```\r\n[2022-08-03 17:07:57,705] {validation.py:244} ERROR - http://localhost:8080/api/v1/plugins?limit=1 validation error: \"{'name': 'Test View', 'category': 'Test Plugin', 'view': 'test.appbuilder_views.TestAppBuilderBaseView'}\" is not of type 'object'\r\n\r\nFailed validating 'type' in schema['allOf'][0]['properties']['plugins']['items']['properties']['appbuilder_views']['items']:\r\n {'nullable': True, 'type': 'object'}\r\n\r\nOn instance['plugins'][0]['appbuilder_views'][0]:\r\n (\"{'name': 'Test View', 'category': 'Test Plugin', 'view': \"\r\n 
\"'test.appbuilder_views.TestAppBuilderBaseView'}\")\r\n172.18.0.1 - admin [03/Aug/2022:17:10:17 +0000] \"GET /api/v1/plugins?limit=1 HTTP/1.1\" 500 733 \"-\" \"curl/7.79.1\"\r\n```\r\n\r\n### What you think should happen instead\r\n\r\nThe response should contain all the plugins integrated with Airflow.\r\n\r\n### How to reproduce\r\n\r\nCreate a simple plugin in the plugin directory.\r\n\r\n`appbuilder_views.py`\r\n\r\n```\r\nfrom flask_appbuilder import expose, BaseView as AppBuilderBaseView\r\n\r\n\r\n# Creating a flask appbuilder BaseView\r\nclass TestAppBuilderBaseView(AppBuilderBaseView):\r\n @expose(\"/\")\r\n def test(self):\r\n return self.render_template(\"test_plugin/test.html\", content=\"Hello galaxy!\")\r\n\r\n```\r\n\r\n`plugin.py`\r\n\r\n```\r\nfrom airflow.plugins_manager import AirflowPlugin\r\nfrom test.appbuilder_views import TestAppBuilderBaseView\r\n\r\n\r\nclass TestPlugin(AirflowPlugin):\r\n name = \"test\"\r\n\r\n appbuilder_views = [\r\n {\r\n \"name\": \"Test View\",\r\n \"category\": \"Test Plugin\",\r\n \"view\": TestAppBuilderBaseView()\r\n }\r\n ]\r\n\r\n```\r\n\r\nCall the `/plugin` endpoint.\r\n\r\n```\r\ncurl -X GET http://localhost:8080/api/v1/plugins\\?limit\\=1 \\\r\n -H 'Cache-Control: no-cache' \\\r\n --user \"admin:admin\"\r\n```\r\n\r\n### Operating System\r\n\r\nN/A\r\n\r\n### Versions of Apache Airflow Providers\r\n\r\n_No response_\r\n\r\n### Deployment\r\n\r\nAstronomer\r\n\r\n### Deployment details\r\n\r\n_No response_\r\n\r\n### Anything else\r\n\r\n_No response_\r\n\r\n### Are you willing to submit PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.parameters import check_limit, format_parameters\nfrom airflow.api_connexion.schemas.plugin_schema import PluginCollection, plugin_collection_schema\nfrom airflow.api_connexion.types import APIResponse\nfrom airflow.plugins_manager import get_plugin_info\nfrom airflow.security import permissions\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN)])\n@format_parameters({\"limit\": check_limit})\ndef get_plugins(*, limit: int, offset: int = 0) -> APIResponse:\n \"\"\"Get plugins endpoint\"\"\"\n plugins_info = get_plugin_info()\n total_entries = len(plugins_info)\n plugins_info = plugins_info[offset:]\n plugins_info = plugins_info[:limit]\n return plugin_collection_schema.dump(PluginCollection(plugins=plugins_info, total_entries=total_entries))\n", "path": "airflow/api_connexion/endpoints/plugin_endpoint.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import List, NamedTuple\n\nfrom marshmallow import Schema, fields\n\n\nclass PluginSchema(Schema):\n \"\"\"Plugin schema\"\"\"\n\n number = fields.Int()\n name = fields.String()\n hooks = fields.List(fields.String())\n executors = fields.List(fields.String())\n macros = fields.List(fields.String())\n flask_blueprints = fields.List(fields.String())\n appbuilder_views = fields.List(fields.String())\n appbuilder_menu_items = fields.List(fields.Dict())\n global_operator_extra_links = fields.List(fields.String())\n operator_extra_links = fields.List(fields.String())\n source = fields.String()\n\n\nclass PluginCollection(NamedTuple):\n \"\"\"Plugin List\"\"\"\n\n plugins: List\n total_entries: int\n\n\nclass PluginCollectionSchema(Schema):\n \"\"\"Plugin Collection List\"\"\"\n\n plugins = fields.List(fields.Nested(PluginSchema))\n total_entries = fields.Int()\n\n\nplugin_schema = PluginSchema()\nplugin_collection_schema = PluginCollectionSchema()\n", "path": "airflow/api_connexion/schemas/plugin_schema.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.parameters import check_limit, format_parameters\nfrom airflow.api_connexion.schemas.plugin_schema import PluginCollection, plugin_collection_schema\nfrom airflow.api_connexion.types import APIResponse\nfrom airflow.plugins_manager import get_plugin_info\nfrom airflow.security import permissions\n\n\[email protected]_access([(permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN)])\n@format_parameters({\"limit\": check_limit})\ndef get_plugins(*, limit: int, offset: int = 0) -> APIResponse:\n \"\"\"Get plugins endpoint\"\"\"\n plugins_info = get_plugin_info()\n collection = PluginCollection(plugins=plugins_info[offset:][:limit], total_entries=len(plugins_info))\n return plugin_collection_schema.dump(collection)\n", "path": "airflow/api_connexion/endpoints/plugin_endpoint.py"}, {"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom typing import List, NamedTuple\n\nfrom marshmallow import Schema, fields\n\n\nclass PluginSchema(Schema):\n \"\"\"Plugin schema\"\"\"\n\n name = fields.String()\n hooks = fields.List(fields.String())\n executors = fields.List(fields.String())\n macros = fields.List(fields.Dict())\n flask_blueprints = fields.List(fields.Dict())\n appbuilder_views = fields.List(fields.Dict())\n appbuilder_menu_items = fields.List(fields.Dict())\n global_operator_extra_links = fields.List(fields.Dict())\n operator_extra_links = fields.List(fields.Dict())\n source = fields.String()\n\n\nclass PluginCollection(NamedTuple):\n \"\"\"Plugin List\"\"\"\n\n plugins: List\n total_entries: int\n\n\nclass PluginCollectionSchema(Schema):\n \"\"\"Plugin Collection List\"\"\"\n\n plugins = fields.List(fields.Nested(PluginSchema))\n total_entries = fields.Int()\n\n\nplugin_schema = PluginSchema()\nplugin_collection_schema = PluginCollectionSchema()\n", "path": "airflow/api_connexion/schemas/plugin_schema.py"}]}
| 2,072 | 422 |
gh_patches_debug_37428
|
rasdani/github-patches
|
git_diff
|
spack__spack-15179
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove 'spack bootstrap' from the commands
As a Spack maintainer I want to remove the `spack bootstrap` command (outdated since #14062) so that I could reduce the amount of boilerplate code in the project.
### Rationale
The `spack bootstrap` command was used to "Bootstrap packages needed for spack to run smoothly" and in reality it has always just installed `environment-modules~X`. Since #14062 shell integration doesn't require `environment-modules` anymore making the command outdated. I would therefore remove that command from the code base.
### Description
Just remove the command and any test / package associated only with it.
### Additional information
Opening the issue to check what is the consensus towards this.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/spack/spack/cmd/bootstrap.py`
Content:
```
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 import llnl.util.cpu
7 import llnl.util.tty as tty
8
9 import spack.repo
10 import spack.spec
11 import spack.cmd.common.arguments as arguments
12
13 description = "Bootstrap packages needed for spack to run smoothly"
14 section = "admin"
15 level = "long"
16
17
18 def setup_parser(subparser):
19 arguments.add_common_arguments(subparser, ['jobs'])
20 subparser.add_argument(
21 '--keep-prefix', action='store_true', dest='keep_prefix',
22 help="don't remove the install prefix if installation fails")
23 subparser.add_argument(
24 '--keep-stage', action='store_true', dest='keep_stage',
25 help="don't remove the build stage if installation succeeds")
26 arguments.add_common_arguments(subparser, ['no_checksum'])
27 subparser.add_argument(
28 '-v', '--verbose', action='store_true', dest='verbose',
29 help="display verbose build output while installing")
30
31 cache_group = subparser.add_mutually_exclusive_group()
32 cache_group.add_argument(
33 '--use-cache', action='store_true', dest='use_cache', default=True,
34 help="check for pre-built Spack packages in mirrors (default)")
35 cache_group.add_argument(
36 '--no-cache', action='store_false', dest='use_cache', default=True,
37 help="do not check for pre-built Spack packages in mirrors")
38 cache_group.add_argument(
39 '--cache-only', action='store_true', dest='cache_only', default=False,
40 help="only install package from binary mirrors")
41
42 cd_group = subparser.add_mutually_exclusive_group()
43 arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
44
45
46 def bootstrap(parser, args, **kwargs):
47 kwargs.update({
48 'keep_prefix': args.keep_prefix,
49 'keep_stage': args.keep_stage,
50 'install_deps': 'dependencies',
51 'verbose': args.verbose,
52 'dirty': args.dirty,
53 'use_cache': args.use_cache,
54 'cache_only': args.cache_only
55 })
56
57 # Define requirement dictionary defining general specs which need
58 # to be satisfied, and the specs to install when the general spec
59 # isn't satisfied.
60 requirement_dict = {
61 # Install environment-modules with generic optimizations
62 'environment-modules': 'environment-modules~X target={0}'.format(
63 llnl.util.cpu.host().family
64 )
65 }
66
67 for requirement in requirement_dict:
68 installed_specs = spack.store.db.query(requirement)
69 if(len(installed_specs) > 0):
70 tty.msg("Requirement %s is satisfied with installed "
71 "package %s" % (requirement, installed_specs[0]))
72 else:
73 # Install requirement
74 spec_to_install = spack.spec.Spec(requirement_dict[requirement])
75 spec_to_install.concretize()
76 tty.msg("Installing %s to satisfy requirement for %s" %
77 (spec_to_install, requirement))
78 kwargs['explicit'] = True
79 package = spack.repo.get(spec_to_install)
80 package.do_install(**kwargs)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py
deleted file mode 100644
--- a/lib/spack/spack/cmd/bootstrap.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
-# Spack Project Developers. See the top-level COPYRIGHT file for details.
-#
-# SPDX-License-Identifier: (Apache-2.0 OR MIT)
-
-import llnl.util.cpu
-import llnl.util.tty as tty
-
-import spack.repo
-import spack.spec
-import spack.cmd.common.arguments as arguments
-
-description = "Bootstrap packages needed for spack to run smoothly"
-section = "admin"
-level = "long"
-
-
-def setup_parser(subparser):
- arguments.add_common_arguments(subparser, ['jobs'])
- subparser.add_argument(
- '--keep-prefix', action='store_true', dest='keep_prefix',
- help="don't remove the install prefix if installation fails")
- subparser.add_argument(
- '--keep-stage', action='store_true', dest='keep_stage',
- help="don't remove the build stage if installation succeeds")
- arguments.add_common_arguments(subparser, ['no_checksum'])
- subparser.add_argument(
- '-v', '--verbose', action='store_true', dest='verbose',
- help="display verbose build output while installing")
-
- cache_group = subparser.add_mutually_exclusive_group()
- cache_group.add_argument(
- '--use-cache', action='store_true', dest='use_cache', default=True,
- help="check for pre-built Spack packages in mirrors (default)")
- cache_group.add_argument(
- '--no-cache', action='store_false', dest='use_cache', default=True,
- help="do not check for pre-built Spack packages in mirrors")
- cache_group.add_argument(
- '--cache-only', action='store_true', dest='cache_only', default=False,
- help="only install package from binary mirrors")
-
- cd_group = subparser.add_mutually_exclusive_group()
- arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
-
-
-def bootstrap(parser, args, **kwargs):
- kwargs.update({
- 'keep_prefix': args.keep_prefix,
- 'keep_stage': args.keep_stage,
- 'install_deps': 'dependencies',
- 'verbose': args.verbose,
- 'dirty': args.dirty,
- 'use_cache': args.use_cache,
- 'cache_only': args.cache_only
- })
-
- # Define requirement dictionary defining general specs which need
- # to be satisfied, and the specs to install when the general spec
- # isn't satisfied.
- requirement_dict = {
- # Install environment-modules with generic optimizations
- 'environment-modules': 'environment-modules~X target={0}'.format(
- llnl.util.cpu.host().family
- )
- }
-
- for requirement in requirement_dict:
- installed_specs = spack.store.db.query(requirement)
- if(len(installed_specs) > 0):
- tty.msg("Requirement %s is satisfied with installed "
- "package %s" % (requirement, installed_specs[0]))
- else:
- # Install requirement
- spec_to_install = spack.spec.Spec(requirement_dict[requirement])
- spec_to_install.concretize()
- tty.msg("Installing %s to satisfy requirement for %s" %
- (spec_to_install, requirement))
- kwargs['explicit'] = True
- package = spack.repo.get(spec_to_install)
- package.do_install(**kwargs)
|
{"golden_diff": "diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py\ndeleted file mode 100644\n--- a/lib/spack/spack/cmd/bootstrap.py\n+++ /dev/null\n@@ -1,80 +0,0 @@\n-# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n-# Spack Project Developers. See the top-level COPYRIGHT file for details.\n-#\n-# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n-\n-import llnl.util.cpu\n-import llnl.util.tty as tty\n-\n-import spack.repo\n-import spack.spec\n-import spack.cmd.common.arguments as arguments\n-\n-description = \"Bootstrap packages needed for spack to run smoothly\"\n-section = \"admin\"\n-level = \"long\"\n-\n-\n-def setup_parser(subparser):\n- arguments.add_common_arguments(subparser, ['jobs'])\n- subparser.add_argument(\n- '--keep-prefix', action='store_true', dest='keep_prefix',\n- help=\"don't remove the install prefix if installation fails\")\n- subparser.add_argument(\n- '--keep-stage', action='store_true', dest='keep_stage',\n- help=\"don't remove the build stage if installation succeeds\")\n- arguments.add_common_arguments(subparser, ['no_checksum'])\n- subparser.add_argument(\n- '-v', '--verbose', action='store_true', dest='verbose',\n- help=\"display verbose build output while installing\")\n-\n- cache_group = subparser.add_mutually_exclusive_group()\n- cache_group.add_argument(\n- '--use-cache', action='store_true', dest='use_cache', default=True,\n- help=\"check for pre-built Spack packages in mirrors (default)\")\n- cache_group.add_argument(\n- '--no-cache', action='store_false', dest='use_cache', default=True,\n- help=\"do not check for pre-built Spack packages in mirrors\")\n- cache_group.add_argument(\n- '--cache-only', action='store_true', dest='cache_only', default=False,\n- help=\"only install package from binary mirrors\")\n-\n- cd_group = subparser.add_mutually_exclusive_group()\n- arguments.add_common_arguments(cd_group, ['clean', 'dirty'])\n-\n-\n-def bootstrap(parser, args, **kwargs):\n- kwargs.update({\n- 'keep_prefix': args.keep_prefix,\n- 'keep_stage': args.keep_stage,\n- 'install_deps': 'dependencies',\n- 'verbose': args.verbose,\n- 'dirty': args.dirty,\n- 'use_cache': args.use_cache,\n- 'cache_only': args.cache_only\n- })\n-\n- # Define requirement dictionary defining general specs which need\n- # to be satisfied, and the specs to install when the general spec\n- # isn't satisfied.\n- requirement_dict = {\n- # Install environment-modules with generic optimizations\n- 'environment-modules': 'environment-modules~X target={0}'.format(\n- llnl.util.cpu.host().family\n- )\n- }\n-\n- for requirement in requirement_dict:\n- installed_specs = spack.store.db.query(requirement)\n- if(len(installed_specs) > 0):\n- tty.msg(\"Requirement %s is satisfied with installed \"\n- \"package %s\" % (requirement, installed_specs[0]))\n- else:\n- # Install requirement\n- spec_to_install = spack.spec.Spec(requirement_dict[requirement])\n- spec_to_install.concretize()\n- tty.msg(\"Installing %s to satisfy requirement for %s\" %\n- (spec_to_install, requirement))\n- kwargs['explicit'] = True\n- package = spack.repo.get(spec_to_install)\n- package.do_install(**kwargs)\n", "issue": "Remove 'spack bootstrap' from the commands\nAs a Spack maintainer I want to remove the `spack bootstrap` command (outdated since #14062) so that I could reduce the amount of boilerplate code in the project.\r\n\r\n### Rationale\r\n\r\nThe `spack bootstrap` command was used to \"Bootstrap packages needed for spack to run smoothly\" and in reality it has 
always just installed `environment-modules~X`. Since #14062 shell integration doesn't require `environment-modules` anymore making the command outdated. I would therefore remove that command from the code base.\r\n\r\n### Description\r\n\r\nJust remove the command and any test / package associated only with it.\r\n\r\n\r\n### Additional information\r\n\r\nOpening the issue to check what is the consensus towards this.\r\n\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport llnl.util.cpu\nimport llnl.util.tty as tty\n\nimport spack.repo\nimport spack.spec\nimport spack.cmd.common.arguments as arguments\n\ndescription = \"Bootstrap packages needed for spack to run smoothly\"\nsection = \"admin\"\nlevel = \"long\"\n\n\ndef setup_parser(subparser):\n arguments.add_common_arguments(subparser, ['jobs'])\n subparser.add_argument(\n '--keep-prefix', action='store_true', dest='keep_prefix',\n help=\"don't remove the install prefix if installation fails\")\n subparser.add_argument(\n '--keep-stage', action='store_true', dest='keep_stage',\n help=\"don't remove the build stage if installation succeeds\")\n arguments.add_common_arguments(subparser, ['no_checksum'])\n subparser.add_argument(\n '-v', '--verbose', action='store_true', dest='verbose',\n help=\"display verbose build output while installing\")\n\n cache_group = subparser.add_mutually_exclusive_group()\n cache_group.add_argument(\n '--use-cache', action='store_true', dest='use_cache', default=True,\n help=\"check for pre-built Spack packages in mirrors (default)\")\n cache_group.add_argument(\n '--no-cache', action='store_false', dest='use_cache', default=True,\n help=\"do not check for pre-built Spack packages in mirrors\")\n cache_group.add_argument(\n '--cache-only', action='store_true', dest='cache_only', default=False,\n help=\"only install package from binary mirrors\")\n\n cd_group = subparser.add_mutually_exclusive_group()\n arguments.add_common_arguments(cd_group, ['clean', 'dirty'])\n\n\ndef bootstrap(parser, args, **kwargs):\n kwargs.update({\n 'keep_prefix': args.keep_prefix,\n 'keep_stage': args.keep_stage,\n 'install_deps': 'dependencies',\n 'verbose': args.verbose,\n 'dirty': args.dirty,\n 'use_cache': args.use_cache,\n 'cache_only': args.cache_only\n })\n\n # Define requirement dictionary defining general specs which need\n # to be satisfied, and the specs to install when the general spec\n # isn't satisfied.\n requirement_dict = {\n # Install environment-modules with generic optimizations\n 'environment-modules': 'environment-modules~X target={0}'.format(\n llnl.util.cpu.host().family\n )\n }\n\n for requirement in requirement_dict:\n installed_specs = spack.store.db.query(requirement)\n if(len(installed_specs) > 0):\n tty.msg(\"Requirement %s is satisfied with installed \"\n \"package %s\" % (requirement, installed_specs[0]))\n else:\n # Install requirement\n spec_to_install = spack.spec.Spec(requirement_dict[requirement])\n spec_to_install.concretize()\n tty.msg(\"Installing %s to satisfy requirement for %s\" %\n (spec_to_install, requirement))\n kwargs['explicit'] = True\n package = spack.repo.get(spec_to_install)\n package.do_install(**kwargs)\n", "path": "lib/spack/spack/cmd/bootstrap.py"}], "after_files": [{"content": null, "path": "lib/spack/spack/cmd/bootstrap.py"}]}
| 1,270 | 811 |