problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-10.2k chars) | golden_diff (string, 151-4.94k chars) | verification_info (string, 582-21k chars) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
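Each record pairs a real GitHub issue with the relevant source files and the reference ("golden") patch; because the multi-line `prompt`, `golden_diff`, and `verification_info` cells are rendered inline, the rows below spill across many lines. The snippet that follows is a minimal sketch of loading and inspecting one record with the Hugging Face `datasets` library. It assumes the data is hosted on the Hub under the id shown in the `source` column (`rasdani/github-patches`) and that the split is named `train`; adjust both if the data lives elsewhere.

```python
# Minimal sketch (assumptions: Hub id taken from the `source` column,
# split name "train"); not an official loading recipe for this dump.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])      # e.g. "gh_patches_debug_5672"
print(row["in_source_id"])    # upstream repo/issue id, e.g. "sosreport__sos-471"
print(row["prompt"][:300])    # task prompt: issue text plus the relevant file contents
print(row["golden_diff"])     # reference patch in `git diff` format

# `verification_info` is a JSON string holding the golden diff plus
# before/after snapshots of the patched files.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))
```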
gh_patches_debug_5672 | rasdani/github-patches | git_diff | sosreport__sos-471 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[block] Don't use parted human readable output - rhbz #1183770
Changed the parted command to return data in sectors units
instead of human readable form.
Signed-off-by: Shane Bradley [email protected]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sos/plugins/block.py`
Content:
```
1 # This program is free software; you can redistribute it and/or modify
2 # it under the terms of the GNU General Public License as published by
3 # the Free Software Foundation; either version 2 of the License, or
4 # (at your option) any later version.
5
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14
15 import os
16 from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
17
18
19 class Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
20 """Block device information
21 """
22
23 plugin_name = 'block'
24 profiles = ('storage', 'hardware')
25
26 def setup(self):
27 self.add_cmd_output([
28 "lsblk",
29 "blkid -c /dev/null",
30 "ls -lanR /dev",
31 "ls -lanR /sys/block"
32 ])
33
34 # legacy location for non-/run distributions
35 self.add_copy_spec([
36 "/etc/blkid.tab",
37 "/run/blkid/blkid.tab",
38 "/proc/partitions",
39 "/proc/diskstats"
40 ])
41
42 if os.path.isdir("/sys/block"):
43 for disk in os.listdir("/sys/block"):
44 if disk in [".", ".."] or disk.startswith("ram"):
45 continue
46 disk_path = os.path.join('/dev/', disk)
47 self.add_cmd_output([
48 "udevadm info -ap /sys/block/%s" % (disk),
49 "parted -s %s print" % (disk_path),
50 "fdisk -l %s" % disk_path
51 ])
52
53 # vim: et ts=4 sw=4
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sos/plugins/block.py b/sos/plugins/block.py
--- a/sos/plugins/block.py
+++ b/sos/plugins/block.py
@@ -46,7 +46,7 @@
disk_path = os.path.join('/dev/', disk)
self.add_cmd_output([
"udevadm info -ap /sys/block/%s" % (disk),
- "parted -s %s print" % (disk_path),
+ "parted -s %s unit s print" % (disk_path),
"fdisk -l %s" % disk_path
])
| {"golden_diff": "diff --git a/sos/plugins/block.py b/sos/plugins/block.py\n--- a/sos/plugins/block.py\n+++ b/sos/plugins/block.py\n@@ -46,7 +46,7 @@\n disk_path = os.path.join('/dev/', disk)\n self.add_cmd_output([\n \"udevadm info -ap /sys/block/%s\" % (disk),\n- \"parted -s %s print\" % (disk_path),\n+ \"parted -s %s unit s print\" % (disk_path),\n \"fdisk -l %s\" % disk_path\n ])\n", "issue": "[block] Don't use parted human readable output - rhbz #1183770\nChanged the parted command to return data in sectors units\ninstead of human readable form.\n\nSigned-off-by: Shane Bradley [email protected]\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nimport os\nfrom sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin\n\n\nclass Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):\n \"\"\"Block device information\n \"\"\"\n\n plugin_name = 'block'\n profiles = ('storage', 'hardware')\n\n def setup(self):\n self.add_cmd_output([\n \"lsblk\",\n \"blkid -c /dev/null\",\n \"ls -lanR /dev\",\n \"ls -lanR /sys/block\"\n ])\n\n # legacy location for non-/run distributions\n self.add_copy_spec([\n \"/etc/blkid.tab\",\n \"/run/blkid/blkid.tab\",\n \"/proc/partitions\",\n \"/proc/diskstats\"\n ])\n\n if os.path.isdir(\"/sys/block\"):\n for disk in os.listdir(\"/sys/block\"):\n if disk in [\".\", \"..\"] or disk.startswith(\"ram\"):\n continue\n disk_path = os.path.join('/dev/', disk)\n self.add_cmd_output([\n \"udevadm info -ap /sys/block/%s\" % (disk),\n \"parted -s %s print\" % (disk_path),\n \"fdisk -l %s\" % disk_path\n ])\n\n# vim: et ts=4 sw=4\n", "path": "sos/plugins/block.py"}], "after_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.\n\nimport os\nfrom sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin\n\n\nclass Block(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):\n \"\"\"Block device information\n \"\"\"\n\n plugin_name = 'block'\n profiles = ('storage', 'hardware')\n\n def setup(self):\n self.add_cmd_output([\n \"lsblk\",\n \"blkid -c /dev/null\",\n \"ls -lanR /dev\",\n \"ls -lanR /sys/block\"\n ])\n\n # legacy location for non-/run distributions\n self.add_copy_spec([\n \"/etc/blkid.tab\",\n \"/run/blkid/blkid.tab\",\n \"/proc/partitions\",\n \"/proc/diskstats\"\n ])\n\n if os.path.isdir(\"/sys/block\"):\n for disk in os.listdir(\"/sys/block\"):\n if disk in [\".\", \"..\"] or disk.startswith(\"ram\"):\n continue\n disk_path = os.path.join('/dev/', disk)\n self.add_cmd_output([\n \"udevadm info -ap /sys/block/%s\" % (disk),\n \"parted -s %s unit s print\" % (disk_path),\n \"fdisk -l %s\" % disk_path\n ])\n\n# vim: et ts=4 sw=4\n", "path": "sos/plugins/block.py"}]} | 848 | 128 |
gh_patches_debug_50224 | rasdani/github-patches | git_diff | pex-tool__pex-1692 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.74
On the docket:
+ [x] Add support for locking VCS requirements. (#1687)
+ [x] Fix `--lock` for multiplatform via sdists. (#1689)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.73"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.73"
+__version__ = "2.1.74"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.73\"\n+__version__ = \"2.1.74\"\n", "issue": "Release 2.1.74\nOn the docket:\r\n+ [x] Add support for locking VCS requirements. (#1687)\r\n+ [x] Fix `--lock` for multiplatform via sdists. (#1689)\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.73\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.74\"\n", "path": "pex/version.py"}]} | 363 | 96 |
gh_patches_debug_19495 | rasdani/github-patches | git_diff | Pyomo__pyomo-1273 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyNumero support on Windows
We need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.
PyNumero support on Windows
We need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/contrib/pynumero/extensions/utils.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10 from ctypes.util import find_library
11 import sys
12 import os
13
14
15 def find_pynumero_library(library_name):
16
17 asl_path = find_library(library_name)
18 if asl_path is not None:
19 return asl_path
20 else:
21 # try looking into extensions directory now
22 file_path = os.path.abspath(__file__)
23 dir_path = os.path.dirname(file_path)
24
25 if os.name in ['nt', 'dos']:
26 libname = 'lib/Windows/lib{}.dll'.format(library_name)
27 elif sys.platform in ['darwin']:
28 libname = 'lib/Darwin/lib{}.dylib'.format(library_name)
29 else:
30 libname = 'lib/Linux/lib{}.so'.format(library_name)
31
32 asl_lib_path = os.path.join(dir_path, libname)
33
34 if os.path.exists(asl_lib_path):
35 return asl_lib_path
36 return None
37
38
39 def found_pynumero_libraries():
40
41 p1 = find_pynumero_library('pynumero_ASL')
42 p2 = find_pynumero_library('pynumero_SPARSE')
43
44 if p1 is not None and p2 is not None:
45 return True
46 return False
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/contrib/pynumero/extensions/utils.py b/pyomo/contrib/pynumero/extensions/utils.py
--- a/pyomo/contrib/pynumero/extensions/utils.py
+++ b/pyomo/contrib/pynumero/extensions/utils.py
@@ -14,9 +14,14 @@
def find_pynumero_library(library_name):
- asl_path = find_library(library_name)
- if asl_path is not None:
- return asl_path
+ lib_path = find_library(library_name)
+ if lib_path is not None:
+ return lib_path
+
+ # On windows the library is prefixed with 'lib'
+ lib_path = find_library('lib'+library_name)
+ if lib_path is not None:
+ return lib_path
else:
# try looking into extensions directory now
file_path = os.path.abspath(__file__)
@@ -29,10 +34,10 @@
else:
libname = 'lib/Linux/lib{}.so'.format(library_name)
- asl_lib_path = os.path.join(dir_path, libname)
+ lib_path = os.path.join(dir_path, libname)
- if os.path.exists(asl_lib_path):
- return asl_lib_path
+ if os.path.exists(lib_path):
+ return lib_path
return None
| {"golden_diff": "diff --git a/pyomo/contrib/pynumero/extensions/utils.py b/pyomo/contrib/pynumero/extensions/utils.py\n--- a/pyomo/contrib/pynumero/extensions/utils.py\n+++ b/pyomo/contrib/pynumero/extensions/utils.py\n@@ -14,9 +14,14 @@\n \n def find_pynumero_library(library_name):\n \n- asl_path = find_library(library_name)\n- if asl_path is not None:\n- return asl_path\n+ lib_path = find_library(library_name)\n+ if lib_path is not None:\n+ return lib_path\n+\n+ # On windows the library is prefixed with 'lib'\n+ lib_path = find_library('lib'+library_name)\n+ if lib_path is not None:\n+ return lib_path\n else:\n # try looking into extensions directory now\n file_path = os.path.abspath(__file__)\n@@ -29,10 +34,10 @@\n else:\n libname = 'lib/Linux/lib{}.so'.format(library_name)\n \n- asl_lib_path = os.path.join(dir_path, libname)\n+ lib_path = os.path.join(dir_path, libname)\n \n- if os.path.exists(asl_lib_path):\n- return asl_lib_path\n+ if os.path.exists(lib_path):\n+ return lib_path\n return None\n", "issue": "PyNumero support on Windows\nWe need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.\nPyNumero support on Windows\nWe need to make sure PyNumero installs/runs on Windows and make sure any extra installation steps are well documented. Some discussion on this was started in #1253. When this is resolved we also need to make sure to enable the tests currently being skipped on Windows.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\nfrom ctypes.util import find_library\nimport sys\nimport os\n\n\ndef find_pynumero_library(library_name):\n\n asl_path = find_library(library_name)\n if asl_path is not None:\n return asl_path\n else:\n # try looking into extensions directory now\n file_path = os.path.abspath(__file__)\n dir_path = os.path.dirname(file_path)\n\n if os.name in ['nt', 'dos']:\n libname = 'lib/Windows/lib{}.dll'.format(library_name)\n elif sys.platform in ['darwin']:\n libname = 'lib/Darwin/lib{}.dylib'.format(library_name)\n else:\n libname = 'lib/Linux/lib{}.so'.format(library_name)\n\n asl_lib_path = os.path.join(dir_path, libname)\n\n if os.path.exists(asl_lib_path):\n return asl_lib_path\n return None\n\n\ndef found_pynumero_libraries():\n\n p1 = find_pynumero_library('pynumero_ASL')\n p2 = find_pynumero_library('pynumero_SPARSE')\n\n if p1 is not None and p2 is not None:\n return True\n return False\n", "path": "pyomo/contrib/pynumero/extensions/utils.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\nfrom ctypes.util import find_library\nimport sys\nimport os\n\n\ndef find_pynumero_library(library_name):\n\n lib_path = find_library(library_name)\n if lib_path is not None:\n return lib_path\n\n # On windows the library is prefixed with 'lib'\n lib_path = find_library('lib'+library_name)\n if lib_path is not None:\n return lib_path\n else:\n # try looking into extensions directory now\n file_path = os.path.abspath(__file__)\n dir_path = os.path.dirname(file_path)\n\n if os.name in ['nt', 'dos']:\n libname = 'lib/Windows/lib{}.dll'.format(library_name)\n elif sys.platform in ['darwin']:\n libname = 'lib/Darwin/lib{}.dylib'.format(library_name)\n else:\n libname = 'lib/Linux/lib{}.so'.format(library_name)\n\n lib_path = os.path.join(dir_path, libname)\n\n if os.path.exists(lib_path):\n return lib_path\n return None\n\n\ndef found_pynumero_libraries():\n\n p1 = find_pynumero_library('pynumero_ASL')\n p2 = find_pynumero_library('pynumero_SPARSE')\n\n if p1 is not None and p2 is not None:\n return True\n return False\n", "path": "pyomo/contrib/pynumero/extensions/utils.py"}]} | 842 | 300 |
gh_patches_debug_3268 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1158 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AU battery returning type error
```
fetch_production("AUS-SA") ->
Traceback (most recent call last):
File "AU.py", line 558, in <module>
print(fetch_production('AUS-SA'))
File "AU.py", line 422, in fetch_production
data['storage']['battery'] = AU_battery.fetch_SA_battery()
File "/home/chris/electricitymap/parsers/lib/AU_battery.py", line 30, in fetch_SA_battery
latest = json.loads(data[-1])
File "/usr/lib/python3.5/json/__init__.py", line 312, in loads
s.__class__.__name__))
TypeError: the JSON object must be str, not 'bytes'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/lib/AU_battery.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """Parser for South Australia's 129MWh battery built by Tesla."""
4 import arrow
5 import json
6 import requests
7
8 # nemlog_url gets generation status in 5 min intervals.
9
10
11 def fetch_SA_battery(session=None):
12 """
13 Makes a request to the nemlog api for South Australia battery data.
14 Returns a float or None.
15 """
16
17 today = arrow.now('Australia/Adelaide')
18 current = today.format('YYYYMMDD')
19 old = today.shift(days=-2).format('YYYYMMDD')
20 nemlog_url = 'http://nemlog.com.au/api/unit/HPRL1/{}/{}/json'.format(old, current)
21
22 s = session or requests.Session()
23 req = s.get(nemlog_url)
24
25 data = []
26 for line in req.iter_lines():
27 data.append(line)
28
29 try:
30 latest = json.loads(data[-1])
31 except IndexError:
32 # No data available.
33 return None
34
35 state = float(latest["SCADAVALUE"])
36
37 # Source classifies charge/discharge opposite to EM.
38 battery_status = -1 * state
39
40 return battery_status
41
42
43 if __name__ == '__main__':
44 print('fetch_SA_battery() ->')
45 print(fetch_SA_battery())
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/lib/AU_battery.py b/parsers/lib/AU_battery.py
--- a/parsers/lib/AU_battery.py
+++ b/parsers/lib/AU_battery.py
@@ -21,11 +21,9 @@
s = session or requests.Session()
req = s.get(nemlog_url)
-
data = []
- for line in req.iter_lines():
+ for line in req.iter_lines(decode_unicode=True):
data.append(line)
-
try:
latest = json.loads(data[-1])
except IndexError:
| {"golden_diff": "diff --git a/parsers/lib/AU_battery.py b/parsers/lib/AU_battery.py\n--- a/parsers/lib/AU_battery.py\n+++ b/parsers/lib/AU_battery.py\n@@ -21,11 +21,9 @@\n \n s = session or requests.Session()\n req = s.get(nemlog_url)\n-\n data = []\n- for line in req.iter_lines():\n+ for line in req.iter_lines(decode_unicode=True):\n data.append(line)\n-\n try:\n latest = json.loads(data[-1])\n except IndexError:\n", "issue": "AU battery returning type error\n```\r\nfetch_production(\"AUS-SA\") ->\r\nTraceback (most recent call last):\r\n File \"AU.py\", line 558, in <module>\r\n print(fetch_production('AUS-SA'))\r\n File \"AU.py\", line 422, in fetch_production\r\n data['storage']['battery'] = AU_battery.fetch_SA_battery()\r\n File \"/home/chris/electricitymap/parsers/lib/AU_battery.py\", line 30, in fetch_SA_battery\r\n latest = json.loads(data[-1])\r\n File \"/usr/lib/python3.5/json/__init__.py\", line 312, in loads\r\n s.__class__.__name__))\r\nTypeError: the JSON object must be str, not 'bytes'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for South Australia's 129MWh battery built by Tesla.\"\"\"\nimport arrow\nimport json\nimport requests\n\n# nemlog_url gets generation status in 5 min intervals.\n\n\ndef fetch_SA_battery(session=None):\n \"\"\"\n Makes a request to the nemlog api for South Australia battery data.\n Returns a float or None.\n \"\"\"\n\n today = arrow.now('Australia/Adelaide')\n current = today.format('YYYYMMDD')\n old = today.shift(days=-2).format('YYYYMMDD')\n nemlog_url = 'http://nemlog.com.au/api/unit/HPRL1/{}/{}/json'.format(old, current)\n\n s = session or requests.Session()\n req = s.get(nemlog_url)\n\n data = []\n for line in req.iter_lines():\n data.append(line)\n\n try:\n latest = json.loads(data[-1])\n except IndexError:\n # No data available.\n return None\n\n state = float(latest[\"SCADAVALUE\"])\n\n # Source classifies charge/discharge opposite to EM.\n battery_status = -1 * state\n\n return battery_status\n\n\nif __name__ == '__main__':\n print('fetch_SA_battery() ->')\n print(fetch_SA_battery())\n", "path": "parsers/lib/AU_battery.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for South Australia's 129MWh battery built by Tesla.\"\"\"\nimport arrow\nimport json\nimport requests\n\n# nemlog_url gets generation status in 5 min intervals.\n\n\ndef fetch_SA_battery(session=None):\n \"\"\"\n Makes a request to the nemlog api for South Australia battery data.\n Returns a float or None.\n \"\"\"\n\n today = arrow.now('Australia/Adelaide')\n current = today.format('YYYYMMDD')\n old = today.shift(days=-2).format('YYYYMMDD')\n nemlog_url = 'http://nemlog.com.au/api/unit/HPRL1/{}/{}/json'.format(old, current)\n\n s = session or requests.Session()\n req = s.get(nemlog_url)\n data = []\n for line in req.iter_lines(decode_unicode=True):\n data.append(line)\n try:\n latest = json.loads(data[-1])\n except IndexError:\n # No data available.\n return None\n\n state = float(latest[\"SCADAVALUE\"])\n\n # Source classifies charge/discharge opposite to EM.\n battery_status = -1 * state\n\n return battery_status\n\n\nif __name__ == '__main__':\n print('fetch_SA_battery() ->')\n print(fetch_SA_battery())\n", "path": "parsers/lib/AU_battery.py"}]} | 786 | 121 |
gh_patches_debug_16023 | rasdani/github-patches | git_diff | databricks__koalas-161 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show pandas style Table of Contents on the left side in docs
Right now our docs show a weird Table of Contents for only a section, rather than the entire doc. Can we fix it so it shows the Table of Contents of the entire docs, e.g. start from the top level?
<img width="647" alt="Screen Shot 2019-04-23 at 4 40 38 PM" src="https://user-images.githubusercontent.com/323388/56622865-9351b600-65e6-11e9-98b3-7930660b1c93.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15 from databricks import koalas
16 sys.path.insert(0, os.path.abspath('.'))
17
18
19 # -- Project information -----------------------------------------------------
20
21 project = 'Koalas'
22 copyright = '2019, Databricks'
23 author = 'The Koalas Team'
24
25 # The full version, including alpha/beta/rc tags
26 release = os.environ.get('RELEASE_VERSION', koalas.__version__)
27
28
29 # -- General configuration ---------------------------------------------------
30
31 # If your documentation needs a minimal Sphinx version, state it here.
32 needs_sphinx = '1.2'
33
34 # Add any Sphinx extension module names here, as strings. They can be
35 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
36 # ones.
37 extensions = [
38 'sphinx.ext.autodoc',
39 'sphinx.ext.viewcode',
40 'numpydoc', # handle NumPy documentation formatted docstrings. Needs to install
41 'nbsphinx', # Jupyter Notebook. Needs to install
42 ]
43
44 # Add any paths that contain templates here, relative to this directory.
45 templates_path = ['_templates']
46
47 # List of patterns, relative to source directory, that match files and
48 # directories to ignore when looking for source files.
49 # This pattern also affects html_static_path and html_extra_path.
50 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
51
52 # The name of the Pygments (syntax highlighting) style to use.
53 pygments_style = 'sphinx'
54
55 # The master toctree document.
56 master_doc = 'index'
57
58 numpydoc_show_class_members = False
59
60 # -- Options for auto output -------------------------------------------------
61
62 autoclass_content = 'both'
63 autosummary_generate = True
64
65
66 # -- Options for HTML output -------------------------------------------------
67
68 # The theme to use for HTML and HTML Help pages. See the documentation for
69 # a list of builtin themes.
70 #
71 html_theme = 'nature'
72
73 # Add any paths that contain custom static files (such as style sheets) here,
74 # relative to this directory. They are copied after the builtin static files,
75 # so a file named "default.css" will overwrite the builtin "default.css".
76 html_static_path = ['_static']
77
78 # If false, no index is generated.
79 html_use_index = False
80
81 # If false, no module index is generated.
82 html_domain_indices = False
83
84
85 # -- Options for manual page output ---------------------------------------
86
87 # One entry per manual page. List of tuples
88 # (source start file, name, description, authors, manual section).
89 man_pages = [
90 ('index', 'databricks.koalas', u'databricks.koalas Documentation',
91 [u'Author'], 1)
92 ]
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -68,13 +68,16 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = 'nature'
+html_theme = 'nature_with_gtoc'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
+# Add any paths that contain custom themes here, relative to this directory.
+html_theme_path = ['themes']
+
# If false, no index is generated.
html_use_index = False
| {"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -68,13 +68,16 @@\n # The theme to use for HTML and HTML Help pages. See the documentation for\n # a list of builtin themes.\n #\n-html_theme = 'nature'\n+html_theme = 'nature_with_gtoc'\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = ['_static']\n \n+# Add any paths that contain custom themes here, relative to this directory.\n+html_theme_path = ['themes']\n+\n # If false, no index is generated.\n html_use_index = False\n", "issue": "Show pandas style Table of Contents on the left side in docs\nRight now our docs show a weird Table of Contents for only a section, rather than the entire doc. Can we fix it so it shows the Table of Contents of the entire docs, e.g. start from the top level?\r\n\r\n<img width=\"647\" alt=\"Screen Shot 2019-04-23 at 4 40 38 PM\" src=\"https://user-images.githubusercontent.com/323388/56622865-9351b600-65e6-11e9-98b3-7930660b1c93.png\">\r\n\r\n\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom databricks import koalas\nsys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Koalas'\ncopyright = '2019, Databricks'\nauthor = 'The Koalas Team'\n\n# The full version, including alpha/beta/rc tags\nrelease = os.environ.get('RELEASE_VERSION', koalas.__version__)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.2'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'numpydoc', # handle NumPy documentation formatted docstrings. Needs to install\n 'nbsphinx', # Jupyter Notebook. Needs to install\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# The master toctree document.\nmaster_doc = 'index'\n\nnumpydoc_show_class_members = False\n\n# -- Options for auto output -------------------------------------------------\n\nautoclass_content = 'both'\nautosummary_generate = True\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'nature'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'databricks.koalas', u'databricks.koalas Documentation',\n [u'Author'], 1)\n]\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nfrom databricks import koalas\nsys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Koalas'\ncopyright = '2019, Databricks'\nauthor = 'The Koalas Team'\n\n# The full version, including alpha/beta/rc tags\nrelease = os.environ.get('RELEASE_VERSION', koalas.__version__)\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\nneeds_sphinx = '1.2'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'numpydoc', # handle NumPy documentation formatted docstrings. Needs to install\n 'nbsphinx', # Jupyter Notebook. Needs to install\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# The master toctree document.\nmaster_doc = 'index'\n\nnumpydoc_show_class_members = False\n\n# -- Options for auto output -------------------------------------------------\n\nautoclass_content = 'both'\nautosummary_generate = True\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'nature_with_gtoc'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['themes']\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If false, no module index is generated.\nhtml_domain_indices = False\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'databricks.koalas', u'databricks.koalas Documentation',\n [u'Author'], 1)\n]\n", "path": "docs/source/conf.py"}]} | 1,273 | 182 |
gh_patches_debug_23082 | rasdani/github-patches | git_diff | microsoft__playwright-python-401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Interactive mode (REPL) Error !!!
**pip install playwright==0.162.2**
from playwright import sync_playwright
**playwright = sync_playwright().start()**
Traceback (most recent call last):
File "<pyshell#1>", line 1, in
playwright = sync_playwright().start()
File "C:\Python37\lib\site-packages\playwright_init_.py", line 34, in sync_playwright
return SyncPlaywrightContextManager()
File "C:\Python37\lib\site-packages\playwright\main.py", line 81, in init
self._connection = run_driver()
File "C:\Python37\lib\site-packages\playwright\main.py", line 76, in run_driver
return loop.run_until_complete(run_driver_async())
File "C:\Python37\lib\asyncio\base_events.py", line 587, in run_until_complete
return future.result()
File "C:\Python37\lib\site-packages\playwright\main.py", line 61, in run_driver_async
stderr=_get_stderr_fileno(),
File "C:\Python37\lib\site-packages\playwright\main.py", line 54, in _get_stderr_fileno
return sys.stderr.fileno()
**AttributeError: 'NoneType' object has no attribute 'fileno'**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playwright/_impl/_transport.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import json
17 import os
18 import sys
19 from pathlib import Path
20 from typing import Dict
21
22
23 class Transport:
24 def __init__(self, driver_executable: Path) -> None:
25 super().__init__()
26 self.on_message = lambda _: None
27 self._stopped = False
28 self._driver_executable = driver_executable
29 self._loop: asyncio.AbstractEventLoop
30
31 def stop(self) -> None:
32 self._stopped = True
33 self._output.close()
34
35 async def run(self) -> None:
36 self._loop = asyncio.get_running_loop()
37 driver_executable = self._driver_executable
38
39 proc = await asyncio.create_subprocess_exec(
40 str(driver_executable),
41 "run-driver",
42 stdin=asyncio.subprocess.PIPE,
43 stdout=asyncio.subprocess.PIPE,
44 stderr=sys.stderr,
45 limit=32768,
46 )
47 assert proc.stdout
48 assert proc.stdin
49 self._output = proc.stdin
50
51 while not self._stopped:
52 try:
53 buffer = await proc.stdout.readexactly(4)
54 length = int.from_bytes(buffer, byteorder="little", signed=False)
55 buffer = bytes(0)
56 while length:
57 to_read = min(length, 32768)
58 data = await proc.stdout.readexactly(to_read)
59 length -= to_read
60 if len(buffer):
61 buffer = buffer + data
62 else:
63 buffer = data
64 obj = json.loads(buffer)
65
66 if "DEBUGP" in os.environ: # pragma: no cover
67 print("\x1b[33mRECV>\x1b[0m", json.dumps(obj, indent=2))
68 self.on_message(obj)
69 except asyncio.IncompleteReadError:
70 break
71 await asyncio.sleep(0)
72
73 def send(self, message: Dict) -> None:
74 msg = json.dumps(message)
75 if "DEBUGP" in os.environ: # pragma: no cover
76 print("\x1b[32mSEND>\x1b[0m", json.dumps(message, indent=2))
77 data = msg.encode()
78 self._output.write(
79 len(data).to_bytes(4, byteorder="little", signed=False) + data
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py
--- a/playwright/_impl/_transport.py
+++ b/playwright/_impl/_transport.py
@@ -13,11 +13,25 @@
# limitations under the License.
import asyncio
+import io
import json
import os
import sys
from pathlib import Path
-from typing import Dict
+from typing import Dict, Optional
+
+
+# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77
+def _get_stderr_fileno() -> Optional[int]:
+ try:
+ return sys.stderr.fileno()
+ except (AttributeError, io.UnsupportedOperation):
+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.
+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors
+ # This is potentially dangerous, but the best we can do.
+ if not hasattr(sys, "__stderr__"):
+ return None
+ return sys.__stderr__.fileno()
class Transport:
@@ -41,7 +55,7 @@
"run-driver",
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=sys.stderr,
+ stderr=_get_stderr_fileno(),
limit=32768,
)
assert proc.stdout
| {"golden_diff": "diff --git a/playwright/_impl/_transport.py b/playwright/_impl/_transport.py\n--- a/playwright/_impl/_transport.py\n+++ b/playwright/_impl/_transport.py\n@@ -13,11 +13,25 @@\n # limitations under the License.\n \n import asyncio\n+import io\n import json\n import os\n import sys\n from pathlib import Path\n-from typing import Dict\n+from typing import Dict, Optional\n+\n+\n+# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\n+def _get_stderr_fileno() -> Optional[int]:\n+ try:\n+ return sys.stderr.fileno()\n+ except (AttributeError, io.UnsupportedOperation):\n+ # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n+ # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n+ # This is potentially dangerous, but the best we can do.\n+ if not hasattr(sys, \"__stderr__\"):\n+ return None\n+ return sys.__stderr__.fileno()\n \n \n class Transport:\n@@ -41,7 +55,7 @@\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n- stderr=sys.stderr,\n+ stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n", "issue": "Interactive mode (REPL) Error !!!\n**pip install playwright==0.162.2**\r\n\r\nfrom playwright import sync_playwright\r\n**playwright = sync_playwright().start()**\r\n\r\nTraceback (most recent call last):\r\nFile \"<pyshell#1>\", line 1, in\r\nplaywright = sync_playwright().start()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright_init_.py\", line 34, in sync_playwright\r\nreturn SyncPlaywrightContextManager()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 81, in init\r\nself._connection = run_driver()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 76, in run_driver\r\nreturn loop.run_until_complete(run_driver_async())\r\nFile \"C:\\Python37\\lib\\asyncio\\base_events.py\", line 587, in run_until_complete\r\nreturn future.result()\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 61, in run_driver_async\r\nstderr=_get_stderr_fileno(),\r\nFile \"C:\\Python37\\lib\\site-packages\\playwright\\main.py\", line 54, in _get_stderr_fileno\r\nreturn sys.stderr.fileno()\r\n**AttributeError: 'NoneType' object has no attribute 'fileno'**\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n driver_executable = self._driver_executable\n\n proc = await 
asyncio.create_subprocess_exec(\n str(driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport io\nimport json\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\n\n# Sourced from: https://github.com/pytest-dev/pytest/blob/da01ee0a4bb0af780167ecd228ab3ad249511302/src/_pytest/faulthandler.py#L69-L77\ndef _get_stderr_fileno() -> Optional[int]:\n try:\n return sys.stderr.fileno()\n except (AttributeError, io.UnsupportedOperation):\n # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file.\n # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors\n # This is potentially dangerous, but the best we can do.\n if not hasattr(sys, \"__stderr__\"):\n return None\n return sys.__stderr__.fileno()\n\n\nclass Transport:\n def __init__(self, driver_executable: Path) -> None:\n super().__init__()\n self.on_message = lambda _: None\n self._stopped = False\n self._driver_executable = driver_executable\n self._loop: asyncio.AbstractEventLoop\n\n def stop(self) -> None:\n self._stopped = True\n self._output.close()\n\n async def run(self) -> None:\n self._loop = asyncio.get_running_loop()\n driver_executable = self._driver_executable\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n \"run-driver\",\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=_get_stderr_fileno(),\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n self._output = proc.stdin\n\n while not self._stopped:\n try:\n buffer = await proc.stdout.readexactly(4)\n length = int.from_bytes(buffer, byteorder=\"little\", signed=False)\n buffer = bytes(0)\n while length:\n to_read = min(length, 32768)\n data = await proc.stdout.readexactly(to_read)\n length -= to_read\n if len(buffer):\n 
buffer = buffer + data\n else:\n buffer = data\n obj = json.loads(buffer)\n\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[33mRECV>\\x1b[0m\", json.dumps(obj, indent=2))\n self.on_message(obj)\n except asyncio.IncompleteReadError:\n break\n await asyncio.sleep(0)\n\n def send(self, message: Dict) -> None:\n msg = json.dumps(message)\n if \"DEBUGP\" in os.environ: # pragma: no cover\n print(\"\\x1b[32mSEND>\\x1b[0m\", json.dumps(message, indent=2))\n data = msg.encode()\n self._output.write(\n len(data).to_bytes(4, byteorder=\"little\", signed=False) + data\n )\n", "path": "playwright/_impl/_transport.py"}]} | 1,331 | 350 |
gh_patches_debug_26087 | rasdani/github-patches | git_diff | nvaccess__nvda-9208 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NVDA hangs up in terminal, when a large piece of text is loaded
I know that normal user doesn't have this problem.
But developers, mainly developers working in terminal, could have.
When large piece of text is loaded to the terminal at the same time, for example, 10000 characters of more, NVDA is hanging up. Also, after a moment, the system hangs up.
The only way is to wait.
NVDA is not reading the text, it's reading pieces of text, then a moment of silence, different piece, silence...
For example, I can call this in ruby by writing
for i in 1..100000
print("A fragment number #{i.to_s} ")
end
Also, we can find this error, when we'll using in terminal app, which writes big pieces of text.
In console commands, like tree, we won't observe this eror, because it isn't loading of text at the same time, there's a while between printing new files.
What is interesting...
The problem is hanging up the all system, you can not open task manager or other apps.
Thank you for help
Greetings,
Dawid Pieper
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/winInputHook.py`
Content:
```
1 #winInputHook.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 import threading
8 import comtypes.client
9 import time
10 from ctypes import *
11 from ctypes.wintypes import *
12 from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
13
14 class KBDLLHOOKSTRUCT(Structure):
15 _fields_=[
16 ('vkCode',DWORD),
17 ('scanCode',DWORD),
18 ('flags',DWORD),
19 ('time',DWORD),
20 ('dwExtraInfo',DWORD),
21 ]
22
23 class MSLLHOOKSTRUCT(Structure):
24 _fields_=[
25 ('pt',POINT),
26 ('mouseData',DWORD),
27 ('flags',DWORD),
28 ('time',DWORD),
29 ('dwExtraInfo',DWORD),
30 ]
31
32 keyDownCallback=None
33 keyUpCallback=None
34 mouseCallback=None
35
36 @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
37 def keyboardHook(code,wParam,lParam):
38 if code!=HC_ACTION:
39 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
40 kbd=KBDLLHOOKSTRUCT.from_address(lParam)
41 if keyUpCallback and kbd.flags&LLKHF_UP:
42 if not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
43 return 1
44 elif keyDownCallback:
45 if not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):
46 return 1
47 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
48
49 @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
50 def mouseHook(code,wParam,lParam):
51 if code!=HC_ACTION:
52 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
53 msll=MSLLHOOKSTRUCT.from_address(lParam)
54 if mouseCallback:
55 if not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):
56 return 1
57 return windll.user32.CallNextHookEx(0,code,wParam,lParam)
58
59 hookThread=None
60 hookThreadRefCount=0
61
62 def hookThreadFunc():
63 keyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)
64 if keyHookID==0:
65 raise OSError("Could not register keyboard hook")
66 mouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)
67 if mouseHookID==0:
68 raise OSError("Could not register mouse hook")
69 msg=MSG()
70 while windll.user32.GetMessageW(byref(msg),None,0,0):
71 pass
72 if windll.user32.UnhookWindowsHookEx(keyHookID)==0:
73 raise OSError("could not unregister key hook %s"%keyHookID)
74 if windll.user32.UnhookWindowsHookEx(mouseHookID)==0:
75 raise OSError("could not unregister mouse hook %s"%mouseHookID)
76
77 def initialize():
78 global hookThread, hookThreadRefCount
79 hookThreadRefCount+=1
80 if hookThreadRefCount==1:
81 hookThread=threading.Thread(target=hookThreadFunc)
82 hookThread.start()
83
84 def setCallbacks(keyUp=None,keyDown=None,mouse=None):
85 global keyUpCallback, keyDownCallback, mouseCallback
86 if keyUp:
87 keyUpCallback=keyUp
88 if keyDown:
89 keyDownCallback=keyDown
90 if mouse:
91 mouseCallback=mouse
92
93 def terminate():
94 global hookThread, hookThreadRefCount
95 if not hookThread:
96 raise RuntimeError("winInputHook not running")
97 hookThreadRefCount-=1
98 if hookThreadRefCount==0:
99 windll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)
100 hookThread.join()
101 hookThread=None
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/source/winInputHook.py b/source/winInputHook.py
--- a/source/winInputHook.py
+++ b/source/winInputHook.py
@@ -10,6 +10,7 @@
from ctypes import *
from ctypes.wintypes import *
from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED
+import watchdog
class KBDLLHOOKSTRUCT(Structure):
_fields_=[
@@ -35,7 +36,7 @@
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def keyboardHook(code,wParam,lParam):
- if code!=HC_ACTION:
+ if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
kbd=KBDLLHOOKSTRUCT.from_address(lParam)
if keyUpCallback and kbd.flags&LLKHF_UP:
@@ -48,7 +49,7 @@
@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)
def mouseHook(code,wParam,lParam):
- if code!=HC_ACTION:
+ if watchdog.isAttemptingRecovery or code!=HC_ACTION:
return windll.user32.CallNextHookEx(0,code,wParam,lParam)
msll=MSLLHOOKSTRUCT.from_address(lParam)
if mouseCallback:
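
For context, the guard the patch adds can be sketched in isolation: a shared flag is checked at the top of each low-level hook so that, while the watchdog is attempting recovery of a frozen main thread, input events are passed straight to the next hook instead of entering NVDA's (possibly blocked) callbacks. The snippet below is a simplified illustration, not NVDA source; `is_attempting_recovery`, `guarded_hook`, `callback` and `call_next_hook` are stand-ins for `watchdog.isAttemptingRecovery`, the real hook functions, the registered callbacks and `CallNextHookEx`.

```python
# Simplified sketch of the guarded-hook pattern from the patch (not NVDA source).
HC_ACTION = 0
is_attempting_recovery = False   # stands in for watchdog.isAttemptingRecovery


def guarded_hook(code, w_param, l_param, callback, call_next_hook):
    # While the watchdog is recovering a frozen main thread, pass events straight
    # through so the hook thread never blocks waiting on NVDA's callbacks.
    if is_attempting_recovery or code != HC_ACTION:
        return call_next_hook(0, code, w_param, l_param)
    if callback and not callback(w_param, l_param):
        return 1                 # non-zero return swallows the event
    return call_next_hook(0, code, w_param, l_param)
```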
| {"golden_diff": "diff --git a/source/winInputHook.py b/source/winInputHook.py\n--- a/source/winInputHook.py\n+++ b/source/winInputHook.py\n@@ -10,6 +10,7 @@\n from ctypes import *\r\n from ctypes.wintypes import *\r\n from win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\n+import watchdog\r\n \r\n class KBDLLHOOKSTRUCT(Structure):\r\n \t_fields_=[\r\n@@ -35,7 +36,7 @@\n \r\n @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\n def keyboardHook(code,wParam,lParam):\r\n-\tif code!=HC_ACTION:\r\n+\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n \t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n \tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n \tif keyUpCallback and kbd.flags&LLKHF_UP:\r\n@@ -48,7 +49,7 @@\n \r\n @WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\n def mouseHook(code,wParam,lParam):\r\n-\tif code!=HC_ACTION:\r\n+\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n \t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n \tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n \tif mouseCallback:\n", "issue": "NVDA hangs up in terminal, when a large piece of text is loaded\nI know that normal user doesn't have this problem.\nBut developers, mainly developers working in terminal, could have.\n\nWhen large piece of text is loaded to the terminal at the same time, for example, 10000 characters of more, NVDA is hanging up. Also, after a moment, the system hangs up.\nThe only way is to wait.\nNVDA is not reading the text, it's reading pieces of text, then a moment of silence, different piece, silence...\n\nFor example, I can call this in ruby by writing\n\nfor i in 1..100000\nprint(\"A fragment number #{i.to_s} \")\nend\n\nAlso, we can find this error, when we'll using in terminal app, which writes big pieces of text.\nIn console commands, like tree, we won't observe this eror, because it isn't loading of text at the same time, there's a while between printing new files.\n\nWhat is interesting...\nThe problem is hanging up the all system, you can not open task manager or other apps.\n\nThank you for help\nGreetings,\nDawid Pieper\n\n", "before_files": [{"content": "#winInputHook.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport threading\r\nimport comtypes.client\r\nimport time\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nfrom win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\n\r\nclass KBDLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('vkCode',DWORD),\r\n\t\t('scanCode',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nclass MSLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('pt',POINT),\r\n\t\t('mouseData',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nkeyDownCallback=None\r\nkeyUpCallback=None\r\nmouseCallback=None\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef keyboardHook(code,wParam,lParam):\r\n\tif code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n\tif keyUpCallback and kbd.flags&LLKHF_UP:\r\n\t\tif not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 
1\r\n\telif keyDownCallback:\r\n\t\tif not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef mouseHook(code,wParam,lParam):\r\n\tif code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n\tif mouseCallback:\r\n\t\tif not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\nhookThread=None\r\nhookThreadRefCount=0\r\n\r\ndef hookThreadFunc():\r\n\tkeyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif keyHookID==0:\r\n\t\traise OSError(\"Could not register keyboard hook\")\r\n\tmouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif mouseHookID==0:\r\n\t\traise OSError(\"Could not register mouse hook\")\r\n\tmsg=MSG()\r\n\twhile windll.user32.GetMessageW(byref(msg),None,0,0):\r\n\t\tpass\r\n\tif windll.user32.UnhookWindowsHookEx(keyHookID)==0:\r\n\t\traise OSError(\"could not unregister key hook %s\"%keyHookID)\r\n\tif windll.user32.UnhookWindowsHookEx(mouseHookID)==0:\r\n\t\traise OSError(\"could not unregister mouse hook %s\"%mouseHookID)\r\n\r\ndef initialize():\r\n\tglobal hookThread, hookThreadRefCount\r\n\thookThreadRefCount+=1\r\n\tif hookThreadRefCount==1:\r\n\t\thookThread=threading.Thread(target=hookThreadFunc)\r\n\t\thookThread.start()\r\n\r\ndef setCallbacks(keyUp=None,keyDown=None,mouse=None):\r\n\tglobal keyUpCallback, keyDownCallback, mouseCallback\r\n\tif keyUp:\r\n\t\tkeyUpCallback=keyUp\r\n\tif keyDown:\r\n\t\tkeyDownCallback=keyDown\r\n\tif mouse:\r\n\t\tmouseCallback=mouse\r\n\r\ndef terminate():\r\n\tglobal hookThread, hookThreadRefCount\r\n\tif not hookThread:\r\n\t\traise RuntimeError(\"winInputHook not running\")\r\n\thookThreadRefCount-=1\r\n\tif hookThreadRefCount==0:\r\n\t\twindll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)\r\n\t\thookThread.join()\r\n\t\thookThread=None\r\n", "path": "source/winInputHook.py"}], "after_files": [{"content": "#winInputHook.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport threading\r\nimport comtypes.client\r\nimport time\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nfrom win32con import WM_QUIT, HC_ACTION, WH_KEYBOARD_LL, LLKHF_UP, LLKHF_EXTENDED, LLKHF_INJECTED, WH_MOUSE_LL, LLMHF_INJECTED\r\nimport watchdog\r\n\r\nclass KBDLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('vkCode',DWORD),\r\n\t\t('scanCode',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nclass MSLLHOOKSTRUCT(Structure):\r\n\t_fields_=[\r\n\t\t('pt',POINT),\r\n\t\t('mouseData',DWORD),\r\n\t\t('flags',DWORD),\r\n\t\t('time',DWORD),\r\n\t\t('dwExtraInfo',DWORD),\r\n\t]\r\n\r\nkeyDownCallback=None\r\nkeyUpCallback=None\r\nmouseCallback=None\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef keyboardHook(code,wParam,lParam):\r\n\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tkbd=KBDLLHOOKSTRUCT.from_address(lParam)\r\n\tif keyUpCallback and 
kbd.flags&LLKHF_UP:\r\n\t\tif not keyUpCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\telif keyDownCallback:\r\n\t\tif not keyDownCallback(kbd.vkCode,kbd.scanCode,bool(kbd.flags&LLKHF_EXTENDED),bool(kbd.flags&LLKHF_INJECTED)):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\n@WINFUNCTYPE(c_long,c_int,WPARAM,LPARAM)\r\ndef mouseHook(code,wParam,lParam):\r\n\tif watchdog.isAttemptingRecovery or code!=HC_ACTION:\r\n\t\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\tmsll=MSLLHOOKSTRUCT.from_address(lParam)\r\n\tif mouseCallback:\r\n\t\tif not mouseCallback(wParam,msll.pt.x,msll.pt.y,msll.flags&LLMHF_INJECTED):\r\n\t\t\treturn 1\r\n\treturn windll.user32.CallNextHookEx(0,code,wParam,lParam)\r\n\r\nhookThread=None\r\nhookThreadRefCount=0\r\n\r\ndef hookThreadFunc():\r\n\tkeyHookID=windll.user32.SetWindowsHookExW(WH_KEYBOARD_LL,keyboardHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif keyHookID==0:\r\n\t\traise OSError(\"Could not register keyboard hook\")\r\n\tmouseHookID=windll.user32.SetWindowsHookExW(WH_MOUSE_LL,mouseHook,windll.kernel32.GetModuleHandleW(None),0)\r\n\tif mouseHookID==0:\r\n\t\traise OSError(\"Could not register mouse hook\")\r\n\tmsg=MSG()\r\n\twhile windll.user32.GetMessageW(byref(msg),None,0,0):\r\n\t\tpass\r\n\tif windll.user32.UnhookWindowsHookEx(keyHookID)==0:\r\n\t\traise OSError(\"could not unregister key hook %s\"%keyHookID)\r\n\tif windll.user32.UnhookWindowsHookEx(mouseHookID)==0:\r\n\t\traise OSError(\"could not unregister mouse hook %s\"%mouseHookID)\r\n\r\ndef initialize():\r\n\tglobal hookThread, hookThreadRefCount\r\n\thookThreadRefCount+=1\r\n\tif hookThreadRefCount==1:\r\n\t\thookThread=threading.Thread(target=hookThreadFunc)\r\n\t\thookThread.start()\r\n\r\ndef setCallbacks(keyUp=None,keyDown=None,mouse=None):\r\n\tglobal keyUpCallback, keyDownCallback, mouseCallback\r\n\tif keyUp:\r\n\t\tkeyUpCallback=keyUp\r\n\tif keyDown:\r\n\t\tkeyDownCallback=keyDown\r\n\tif mouse:\r\n\t\tmouseCallback=mouse\r\n\r\ndef terminate():\r\n\tglobal hookThread, hookThreadRefCount\r\n\tif not hookThread:\r\n\t\traise RuntimeError(\"winInputHook not running\")\r\n\thookThreadRefCount-=1\r\n\tif hookThreadRefCount==0:\r\n\t\twindll.user32.PostThreadMessageW(hookThread.ident,WM_QUIT,0,0)\r\n\t\thookThread.join()\r\n\t\thookThread=None\r\n", "path": "source/winInputHook.py"}]} | 1,689 | 315 |
gh_patches_debug_3947 | rasdani/github-patches | git_diff | openai__gym-558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Box2d won't find some RAND_LIMIT_swigconstant
Hello!
It's probably some silly mistake on my side, but I wasn't able to fix it by random lever pulling, as usual.
Installing Box2D as in the instructions (using `pip install -e .[all]`) will throw an error when trying to use some of the Box2D examples.
Code that reproduces the issue:
```
import gym
atari = gym.make('LunarLander-v0')
atari.reset()
```
```
[2016-05-16 02:14:25,430] Making new env: LunarLander-v0
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-f89e78f4410b> in <module>()
1 import gym
----> 2 atari = gym.make('LunarLander-v0')
3 atari.reset()
4 #plt.imshow(atari.render('rgb_array'))
/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self, id)
77 logger.info('Making new env: %s', id)
78 spec = self.spec(id)
---> 79 return spec.make()
80
81 def all(self):
/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self)
52 raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
53
---> 54 cls = load(self._entry_point)
55 env = cls(**self._kwargs)
56
/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in load(name)
11 def load(name):
12 entry_point = pkg_resources.EntryPoint.parse('x={}'.format(name))
---> 13 result = entry_point.load(False)
14 return result
15
/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in load(self, require, *args, **kwargs)
2378 if require:
2379 self.require(*args, **kwargs)
-> 2380 return self.resolve()
2381
2382 def resolve(self):
/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in resolve(self)
2384 Resolve the entry point from its module and attrs.
2385 """
-> 2386 module = __import__(self.module_name, fromlist=['__name__'], level=0)
2387 try:
2388 return functools.reduce(getattr, self.attrs, module)
/home/jheuristic/yozhik/gym/gym/envs/box2d/__init__.py in <module>()
----> 1 from gym.envs.box2d.lunar_lander import LunarLander
2 from gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore
/home/jheuristic/yozhik/gym/gym/envs/box2d/lunar_lander.py in <module>()
3 from six.moves import xrange
4
----> 5 import Box2D
6 from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)
7
/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/__init__.py in <module>()
18 # 3. This notice may not be removed or altered from any source distribution.
19 #
---> 20 from .Box2D import *
21 __author__ = '$Date$'
22 __version__ = '2.3.1'
/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/Box2D.py in <module>()
433 return _Box2D.b2CheckPolygon(shape, additional_checks)
434
--> 435 _Box2D.RAND_LIMIT_swigconstant(_Box2D)
436 RAND_LIMIT = _Box2D.RAND_LIMIT
437
AttributeError: 'module' object has no attribute 'RAND_LIMIT_swigconstant'
```
What didn't help:
```
pip uninstall gym
apt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev python-opengl
git clone https://github.com/openai/gym
cd gym
pip install -e .[all] --upgrade
```
The OS is Ubuntu 14.04 Server x64
It may be a clue that I am running the thing from inside a Python 2 virtualenv (with numpy, etc. installed)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import sys, os.path
3
4 # Don't import gym module here, since deps may not be installed
5 sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'gym'))
6 from version import VERSION
7
8 # Environment-specific dependencies.
9 extras = {
10 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],
11 'board_game' : ['pachi-py>=0.0.19'],
12 'box2d': ['box2d-py'],
13 'classic_control': ['PyOpenGL'],
14 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],
15 'parameter_tuning': ['keras', 'theano'],
16 }
17
18 # Meta dependency groups.
19 all_deps = []
20 for group_name in extras:
21 all_deps += extras[group_name]
22 extras['all'] = all_deps
23
24 setup(name='gym',
25 version=VERSION,
26 description='The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents.',
27 url='https://github.com/openai/gym',
28 author='OpenAI',
29 author_email='[email protected]',
30 license='',
31 packages=[package for package in find_packages()
32 if package.startswith('gym')],
33 zip_safe=False,
34 install_requires=[
35 'numpy>=1.10.4', 'requests>=2.0', 'six', 'pyglet>=1.2.0',
36 ],
37 extras_require=extras,
38 package_data={'gym': ['envs/mujoco/assets/*.xml', 'envs/classic_control/assets/*.png']},
39 tests_require=['pytest', 'mock'],
40 )
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
extras = {
'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],
'board_game' : ['pachi-py>=0.0.19'],
- 'box2d': ['box2d-py'],
+ 'box2d': ['Box2D-kengz'],
'classic_control': ['PyOpenGL'],
'mujoco': ['mujoco_py>=0.4.3', 'imageio'],
'parameter_tuning': ['keras', 'theano'],
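
A quick smoke test for the dependency swap, assuming the patched checkout has been reinstalled with the `box2d` extra (for example `pip install -e .[box2d]`), is to import the bindings and construct the environment from the issue's reproduction snippet. If the native SWIG module now matches the Python wrapper, `RAND_LIMIT` resolves and `gym.make` succeeds:

```python
# Hedged verification sketch; the environment id mirrors the reproduction above.
import Box2D                      # should no longer raise AttributeError on import
import gym

print(Box2D.RAND_LIMIT)           # constant registered by the matching SWIG module
env = gym.make("LunarLander-v0")
env.reset()
```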
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n extras = {\n 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],\n 'board_game' : ['pachi-py>=0.0.19'],\n- 'box2d': ['box2d-py'],\n+ 'box2d': ['Box2D-kengz'],\n 'classic_control': ['PyOpenGL'],\n 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],\n 'parameter_tuning': ['keras', 'theano'],\n", "issue": "Box2d won't find some RAND_LIMIT_swigconstant\nHello!\n\nIt's probably some silly mistake on my side, but i wasn't able to fix by random lever pulling, as usual.\n\nInstalling Box2d as in instuctions (using `pip install -e .[all]`) will throw error when trying to use some of Box2D examples.\n\nCode that reproduces the issue:\n\n```\nimport gym\natari = gym.make('LunarLander-v0')\natari.reset()\n```\n\n```\n[2016-05-16 02:14:25,430] Making new env: LunarLander-v0\n\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-1-f89e78f4410b> in <module>()\n 1 import gym\n----> 2 atari = gym.make('LunarLander-v0')\n 3 atari.reset()\n 4 #plt.imshow(atari.render('rgb_array'))\n\n/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self, id)\n 77 logger.info('Making new env: %s', id)\n 78 spec = self.spec(id)\n---> 79 return spec.make()\n 80 \n 81 def all(self):\n\n/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in make(self)\n 52 raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))\n 53 \n---> 54 cls = load(self._entry_point)\n 55 env = cls(**self._kwargs)\n 56 \n\n/home/jheuristic/yozhik/gym/gym/envs/registration.pyc in load(name)\n 11 def load(name):\n 12 entry_point = pkg_resources.EntryPoint.parse('x={}'.format(name))\n---> 13 result = entry_point.load(False)\n 14 return result\n 15 \n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in load(self, require, *args, **kwargs)\n 2378 if require:\n 2379 self.require(*args, **kwargs)\n-> 2380 return self.resolve()\n 2381 \n 2382 def resolve(self):\n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/pkg_resources/__init__.pyc in resolve(self)\n 2384 Resolve the entry point from its module and attrs.\n 2385 \"\"\"\n-> 2386 module = __import__(self.module_name, fromlist=['__name__'], level=0)\n 2387 try:\n 2388 return functools.reduce(getattr, self.attrs, module)\n\n/home/jheuristic/yozhik/gym/gym/envs/box2d/__init__.py in <module>()\n----> 1 from gym.envs.box2d.lunar_lander import LunarLander\n 2 from gym.envs.box2d.bipedal_walker import BipedalWalker, BipedalWalkerHardcore\n\n/home/jheuristic/yozhik/gym/gym/envs/box2d/lunar_lander.py in <module>()\n 3 from six.moves import xrange\n 4 \n----> 5 import Box2D\n 6 from Box2D.b2 import (edgeShape, circleShape, fixtureDef, polygonShape, revoluteJointDef, contactListener)\n 7 \n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/__init__.py in <module>()\n 18 # 3. 
This notice may not be removed or altered from any source distribution.\n 19 #\n---> 20 from .Box2D import *\n 21 __author__ = '$Date$'\n 22 __version__ = '2.3.1'\n\n/home/jheuristic/thenv/local/lib/python2.7/site-packages/Box2D/Box2D.py in <module>()\n 433 return _Box2D.b2CheckPolygon(shape, additional_checks)\n 434 \n--> 435 _Box2D.RAND_LIMIT_swigconstant(_Box2D)\n 436 RAND_LIMIT = _Box2D.RAND_LIMIT\n 437 \n\nAttributeError: 'module' object has no attribute 'RAND_LIMIT_swigconstant'\n\n```\n\nWhat didn't help:\n\n```\npip uninstall gym\napt-get install -y python-numpy python-dev cmake zlib1g-dev libjpeg-dev xvfb libav-tools xorg-dev python-opengl\ngit clone https://github.com/openai/gym\ncd gym\npip install -e .[all] --upgrade\n```\n\nThe OS is Ubuntu 14.04 Server x64\nIt may be a clue that i am running the thing from inside python2 virtualenv (with all numpys, etc. installed)\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport sys, os.path\n\n# Don't import gym module here, since deps may not be installed\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), 'gym'))\nfrom version import VERSION\n\n# Environment-specific dependencies.\nextras = {\n 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],\n 'board_game' : ['pachi-py>=0.0.19'],\n 'box2d': ['box2d-py'],\n 'classic_control': ['PyOpenGL'],\n 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],\n 'parameter_tuning': ['keras', 'theano'],\n}\n\n# Meta dependency groups.\nall_deps = []\nfor group_name in extras:\n all_deps += extras[group_name]\nextras['all'] = all_deps\n\nsetup(name='gym',\n version=VERSION,\n description='The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents.',\n url='https://github.com/openai/gym',\n author='OpenAI',\n author_email='[email protected]',\n license='',\n packages=[package for package in find_packages()\n if package.startswith('gym')],\n zip_safe=False,\n install_requires=[\n 'numpy>=1.10.4', 'requests>=2.0', 'six', 'pyglet>=1.2.0',\n ],\n extras_require=extras,\n package_data={'gym': ['envs/mujoco/assets/*.xml', 'envs/classic_control/assets/*.png']},\n tests_require=['pytest', 'mock'],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport sys, os.path\n\n# Don't import gym module here, since deps may not be installed\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), 'gym'))\nfrom version import VERSION\n\n# Environment-specific dependencies.\nextras = {\n 'atari': ['atari_py>=0.0.21', 'Pillow', 'PyOpenGL'],\n 'board_game' : ['pachi-py>=0.0.19'],\n 'box2d': ['Box2D-kengz'],\n 'classic_control': ['PyOpenGL'],\n 'mujoco': ['mujoco_py>=0.4.3', 'imageio'],\n 'parameter_tuning': ['keras', 'theano'],\n}\n\n# Meta dependency groups.\nall_deps = []\nfor group_name in extras:\n all_deps += extras[group_name]\nextras['all'] = all_deps\n\nsetup(name='gym',\n version=VERSION,\n description='The OpenAI Gym: A toolkit for developing and comparing your reinforcement learning agents.',\n url='https://github.com/openai/gym',\n author='OpenAI',\n author_email='[email protected]',\n license='',\n packages=[package for package in find_packages()\n if package.startswith('gym')],\n zip_safe=False,\n install_requires=[\n 'numpy>=1.10.4', 'requests>=2.0', 'six', 'pyglet>=1.2.0',\n ],\n extras_require=extras,\n package_data={'gym': ['envs/mujoco/assets/*.xml', 'envs/classic_control/assets/*.png']},\n tests_require=['pytest', 'mock'],\n)\n", "path": "setup.py"}]} | 1,825 | 151 |
gh_patches_debug_33666 | rasdani/github-patches | git_diff | google__fuzzbench-776 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move/publish reports of experimental experiments under fuzzbench.com/reports/experimental
Experimental experiments are those that include fuzzers not in this list:
https://github.com/google/fuzzbench/blob/master/service/core-fuzzers.yaml
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `experiment/reporter.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """A module containing the interface used by an experiment for generating
16 reports."""
17 import os
18 import posixpath
19
20 from common import experiment_utils
21 from common import experiment_path as exp_path
22 from common import filesystem
23 from common import filestore_utils
24 from common import logs
25 from common import yaml_utils
26 from analysis import generate_report
27 from analysis import data_utils
28
29 CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',
30 'core-fuzzers.yaml')
31
32 logger = logs.Logger('reporter') # pylint: disable=invalid-name
33
34
35 def get_reports_dir():
36 """Return reports directory."""
37 return exp_path.path('reports')
38
39
40 def output_report(experiment_config: dict,
41 in_progress=False,
42 coverage_report=False):
43 """Generate the HTML report and write it to |web_bucket|."""
44 experiment_name = experiment_utils.get_experiment_name()
45 web_filestore_path = posixpath.join(experiment_config['report_filestore'],
46 experiment_name)
47
48 reports_dir = get_reports_dir()
49
50 core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
51 fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))
52
53 # Don't merge with nonprivate experiments until the very end as doing it
54 # while the experiment is in progress will produce unusable realtime
55 # results.
56 merge_with_nonprivate = (not in_progress and experiment_config.get(
57 'merge_with_nonprivate', False))
58
59 try:
60 logger.debug('Generating report.')
61 filesystem.recreate_directory(reports_dir)
62 generate_report.generate_report(
63 [experiment_name],
64 str(reports_dir),
65 report_name=experiment_name,
66 fuzzers=fuzzers,
67 in_progress=in_progress,
68 merge_with_clobber_nonprivate=merge_with_nonprivate,
69 coverage_report=coverage_report)
70 filestore_utils.rsync(
71 str(reports_dir),
72 web_filestore_path,
73 delete=False, # Don't remove existing coverage jsons.
74 gsutil_options=[
75 '-h', 'Cache-Control:public,max-age=0,no-transform'
76 ])
77 logger.debug('Done generating report.')
78 except data_utils.EmptyDataError:
79 logs.warning('No snapshot data.')
80 except Exception: # pylint: disable=broad-except
81 logger.error('Error generating HTML report.')
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/experiment/reporter.py b/experiment/reporter.py
--- a/experiment/reporter.py
+++ b/experiment/reporter.py
@@ -26,8 +26,9 @@
from analysis import generate_report
from analysis import data_utils
-CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',
- 'core-fuzzers.yaml')
+CORE_FUZZERS_YAML = os.path.abspath(
+ os.path.join(os.path.dirname(__file__), '..', 'service',
+ 'core-fuzzers.yaml'))
logger = logs.Logger('reporter') # pylint: disable=invalid-name
@@ -37,18 +38,29 @@
return exp_path.path('reports')
+def get_core_fuzzers():
+ """Return list of core fuzzers to be used for merging experiment data."""
+ return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
+
+
def output_report(experiment_config: dict,
in_progress=False,
coverage_report=False):
"""Generate the HTML report and write it to |web_bucket|."""
experiment_name = experiment_utils.get_experiment_name()
- web_filestore_path = posixpath.join(experiment_config['report_filestore'],
- experiment_name)
-
reports_dir = get_reports_dir()
- core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']
- fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))
+ core_fuzzers = set(get_core_fuzzers())
+ experiment_fuzzers = set(experiment_config['fuzzers'])
+ fuzzers = experiment_fuzzers.union(core_fuzzers)
+
+ # Calculate path to store report files in filestore.
+ web_filestore_path = experiment_config['report_filestore']
+ if not fuzzers.issubset(core_fuzzers):
+ # This means that we are running an experimental report with fuzzers
+ # not in the core list. So, store these in |experimental| sub-directory.
+ web_filestore_path = os.path.join(web_filestore_path, 'experimental')
+ web_filestore_path = posixpath.join(web_filestore_path, experiment_name)
# Don't merge with nonprivate experiments until the very end as doing it
# while the experiment is in progress will produce unusable realtime
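
The core of the change is a set-membership test that decides whether report files go under the top-level filestore path or an `experimental/` sub-directory. A minimal standalone sketch of that decision, using the same `issubset` logic with the core list passed in directly instead of read from the YAML file, looks like this (the bucket path and fuzzer names in the example are illustrative only):

```python
import os
import posixpath


def report_filestore_path(report_filestore: str, experiment_name: str,
                          experiment_fuzzers: set, core_fuzzers: set) -> str:
    """Mirror of the patched logic: experimental runs land under 'experimental/'."""
    fuzzers = experiment_fuzzers | core_fuzzers
    path = report_filestore
    if not fuzzers.issubset(core_fuzzers):
        # At least one fuzzer is outside the core list, so store the report
        # in the |experimental| sub-directory.
        path = os.path.join(path, 'experimental')
    return posixpath.join(path, experiment_name)


# Example with made-up names:
print(report_filestore_path('gs://reports', '2020-test',
                            {'afl', 'my_new_fuzzer'}, {'afl', 'libfuzzer'}))
# -> gs://reports/experimental/2020-test
```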
| {"golden_diff": "diff --git a/experiment/reporter.py b/experiment/reporter.py\n--- a/experiment/reporter.py\n+++ b/experiment/reporter.py\n@@ -26,8 +26,9 @@\n from analysis import generate_report\n from analysis import data_utils\n \n-CORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',\n- 'core-fuzzers.yaml')\n+CORE_FUZZERS_YAML = os.path.abspath(\n+ os.path.join(os.path.dirname(__file__), '..', 'service',\n+ 'core-fuzzers.yaml'))\n \n logger = logs.Logger('reporter') # pylint: disable=invalid-name\n \n@@ -37,18 +38,29 @@\n return exp_path.path('reports')\n \n \n+def get_core_fuzzers():\n+ \"\"\"Return list of core fuzzers to be used for merging experiment data.\"\"\"\n+ return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n+\n+\n def output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n- web_filestore_path = posixpath.join(experiment_config['report_filestore'],\n- experiment_name)\n-\n reports_dir = get_reports_dir()\n \n- core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n- fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))\n+ core_fuzzers = set(get_core_fuzzers())\n+ experiment_fuzzers = set(experiment_config['fuzzers'])\n+ fuzzers = experiment_fuzzers.union(core_fuzzers)\n+\n+ # Calculate path to store report files in filestore.\n+ web_filestore_path = experiment_config['report_filestore']\n+ if not fuzzers.issubset(core_fuzzers):\n+ # This means that we are running an experimental report with fuzzers\n+ # not in the core list. So, store these in |experimental| sub-directory.\n+ web_filestore_path = os.path.join(web_filestore_path, 'experimental')\n+ web_filestore_path = posixpath.join(web_filestore_path, experiment_name)\n \n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n", "issue": "Move/publish reports of experimental experiments under fuzzbench.com/reports/experimental\nExperimental experiments are experiments with fuzzers not in this list:\r\nhttps://github.com/google/fuzzbench/blob/master/service/core-fuzzers.yaml\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A module containing the interface used by an experiment for generating\nreports.\"\"\"\nimport os\nimport posixpath\n\nfrom common import experiment_utils\nfrom common import experiment_path as exp_path\nfrom common import filesystem\nfrom common import filestore_utils\nfrom common import logs\nfrom common import yaml_utils\nfrom analysis import generate_report\nfrom analysis import data_utils\n\nCORE_FUZZERS_YAML = os.path.join(os.path.dirname(__file__), '..', 'service',\n 'core-fuzzers.yaml')\n\nlogger = logs.Logger('reporter') # pylint: disable=invalid-name\n\n\ndef get_reports_dir():\n \"\"\"Return reports 
directory.\"\"\"\n return exp_path.path('reports')\n\n\ndef output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n web_filestore_path = posixpath.join(experiment_config['report_filestore'],\n experiment_name)\n\n reports_dir = get_reports_dir()\n\n core_fuzzers = yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n fuzzers = sorted(set(experiment_config['fuzzers']).union(set(core_fuzzers)))\n\n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n # results.\n merge_with_nonprivate = (not in_progress and experiment_config.get(\n 'merge_with_nonprivate', False))\n\n try:\n logger.debug('Generating report.')\n filesystem.recreate_directory(reports_dir)\n generate_report.generate_report(\n [experiment_name],\n str(reports_dir),\n report_name=experiment_name,\n fuzzers=fuzzers,\n in_progress=in_progress,\n merge_with_clobber_nonprivate=merge_with_nonprivate,\n coverage_report=coverage_report)\n filestore_utils.rsync(\n str(reports_dir),\n web_filestore_path,\n delete=False, # Don't remove existing coverage jsons.\n gsutil_options=[\n '-h', 'Cache-Control:public,max-age=0,no-transform'\n ])\n logger.debug('Done generating report.')\n except data_utils.EmptyDataError:\n logs.warning('No snapshot data.')\n except Exception: # pylint: disable=broad-except\n logger.error('Error generating HTML report.')\n", "path": "experiment/reporter.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A module containing the interface used by an experiment for generating\nreports.\"\"\"\nimport os\nimport posixpath\n\nfrom common import experiment_utils\nfrom common import experiment_path as exp_path\nfrom common import filesystem\nfrom common import filestore_utils\nfrom common import logs\nfrom common import yaml_utils\nfrom analysis import generate_report\nfrom analysis import data_utils\n\nCORE_FUZZERS_YAML = os.path.abspath(\n os.path.join(os.path.dirname(__file__), '..', 'service',\n 'core-fuzzers.yaml'))\n\nlogger = logs.Logger('reporter') # pylint: disable=invalid-name\n\n\ndef get_reports_dir():\n \"\"\"Return reports directory.\"\"\"\n return exp_path.path('reports')\n\n\ndef get_core_fuzzers():\n \"\"\"Return list of core fuzzers to be used for merging experiment data.\"\"\"\n return yaml_utils.read(CORE_FUZZERS_YAML)['fuzzers']\n\n\ndef output_report(experiment_config: dict,\n in_progress=False,\n coverage_report=False):\n \"\"\"Generate the HTML report and write it to |web_bucket|.\"\"\"\n experiment_name = experiment_utils.get_experiment_name()\n reports_dir = get_reports_dir()\n\n core_fuzzers = set(get_core_fuzzers())\n experiment_fuzzers = set(experiment_config['fuzzers'])\n fuzzers = experiment_fuzzers.union(core_fuzzers)\n\n # Calculate path to store report 
files in filestore.\n web_filestore_path = experiment_config['report_filestore']\n if not fuzzers.issubset(core_fuzzers):\n # This means that we are running an experimental report with fuzzers\n # not in the core list. So, store these in |experimental| sub-directory.\n web_filestore_path = os.path.join(web_filestore_path, 'experimental')\n web_filestore_path = posixpath.join(web_filestore_path, experiment_name)\n\n # Don't merge with nonprivate experiments until the very end as doing it\n # while the experiment is in progress will produce unusable realtime\n # results.\n merge_with_nonprivate = (not in_progress and experiment_config.get(\n 'merge_with_nonprivate', False))\n\n try:\n logger.debug('Generating report.')\n filesystem.recreate_directory(reports_dir)\n generate_report.generate_report(\n [experiment_name],\n str(reports_dir),\n report_name=experiment_name,\n fuzzers=fuzzers,\n in_progress=in_progress,\n merge_with_clobber_nonprivate=merge_with_nonprivate,\n coverage_report=coverage_report)\n filestore_utils.rsync(\n str(reports_dir),\n web_filestore_path,\n delete=False, # Don't remove existing coverage jsons.\n gsutil_options=[\n '-h', 'Cache-Control:public,max-age=0,no-transform'\n ])\n logger.debug('Done generating report.')\n except data_utils.EmptyDataError:\n logs.warning('No snapshot data.')\n except Exception: # pylint: disable=broad-except\n logger.error('Error generating HTML report.')\n", "path": "experiment/reporter.py"}]} | 1,098 | 525 |
gh_patches_debug_31036 | rasdani/github-patches | git_diff | goauthentik__authentik-6325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
docker compose run --rm server ldap_sync - doesn't work anymore to debug
**Describe the bug**
This command doesn't work anymore as described here (https://goauthentik.io/docs/troubleshooting/ldap_source):
```
docker compose run --rm server ldap_sync nxnet
```
it will just create a background task instead of running it in the foreground!
**To Reproduce**
```
docker compose run --rm server ldap_sync SLUGofLDAPsource
```
**Expected behavior**
it should run an LDAP synchronization in the foreground so that any errors or warnings that might happen are visible directly
**Logs**
{"event": "Task published", "level": "info", "logger": "authentik.root.celery", "pid": 7, "task_id": "17af668f-1d9f-4732-a0eb-044c4a16beed", "task_name": "authentik.sources.ldap.tasks.ldap_sync", "timestamp": "2023-07-20T15:10:12.575247"}
**Version and Deployment (please complete the following information):**
- authentik version: 2023.6.1
- Deployment: docker compose
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/ldap/management/commands/ldap_sync.py`
Content:
```
1 """LDAP Sync"""
2 from django.core.management.base import BaseCommand
3 from structlog.stdlib import get_logger
4
5 from authentik.sources.ldap.models import LDAPSource
6 from authentik.sources.ldap.tasks import ldap_sync_single
7
8 LOGGER = get_logger()
9
10
11 class Command(BaseCommand):
12 """Run sync for an LDAP Source"""
13
14 def add_arguments(self, parser):
15 parser.add_argument("source_slugs", nargs="+", type=str)
16
17 def handle(self, **options):
18 for source_slug in options["source_slugs"]:
19 source = LDAPSource.objects.filter(slug=source_slug).first()
20 if not source:
21 LOGGER.warning("Source does not exist", slug=source_slug)
22 continue
23 ldap_sync_single(source)
24
```
Path: `authentik/sources/ldap/sync/users.py`
Content:
```
1 """Sync LDAP Users into authentik"""
2 from typing import Generator
3
4 from django.core.exceptions import FieldError
5 from django.db.utils import IntegrityError
6 from ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE
7
8 from authentik.core.models import User
9 from authentik.events.models import Event, EventAction
10 from authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer
11 from authentik.sources.ldap.sync.vendor.freeipa import FreeIPA
12 from authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory
13
14
15 class UserLDAPSynchronizer(BaseLDAPSynchronizer):
16 """Sync LDAP Users into authentik"""
17
18 @staticmethod
19 def name() -> str:
20 return "users"
21
22 def get_objects(self, **kwargs) -> Generator:
23 return self.search_paginator(
24 search_base=self.base_dn_users,
25 search_filter=self._source.user_object_filter,
26 search_scope=SUBTREE,
27 attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],
28 **kwargs,
29 )
30
31 def sync(self, page_data: list) -> int:
32 """Iterate over all LDAP Users and create authentik_core.User instances"""
33 if not self._source.sync_users:
34 self.message("User syncing is disabled for this Source")
35 return -1
36 user_count = 0
37 for user in page_data:
38 if "attributes" not in user:
39 continue
40 attributes = user.get("attributes", {})
41 user_dn = self._flatten(user.get("entryDN", user.get("dn")))
42 if self._source.object_uniqueness_field not in attributes:
43 self.message(
44 f"Cannot find uniqueness field in attributes: '{user_dn}'",
45 attributes=attributes.keys(),
46 dn=user_dn,
47 )
48 continue
49 uniq = self._flatten(attributes[self._source.object_uniqueness_field])
50 try:
51 defaults = self.build_user_properties(user_dn, **attributes)
52 self._logger.debug("Creating user with attributes", **defaults)
53 if "username" not in defaults:
54 raise IntegrityError("Username was not set by propertymappings")
55 ak_user, created = self.update_or_create_attributes(
56 User, {f"attributes__{LDAP_UNIQUENESS}": uniq}, defaults
57 )
58 except (IntegrityError, FieldError, TypeError, AttributeError) as exc:
59 Event.new(
60 EventAction.CONFIGURATION_ERROR,
61 message=(
62 f"Failed to create user: {str(exc)} "
63 "To merge new user with existing user, set the user's "
64 f"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'"
65 ),
66 source=self._source,
67 dn=user_dn,
68 ).save()
69 else:
70 self._logger.debug("Synced User", user=ak_user.username, created=created)
71 user_count += 1
72 MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)
73 FreeIPA(self._source).sync(attributes, ak_user, created)
74 return user_count
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/ldap/management/commands/ldap_sync.py b/authentik/sources/ldap/management/commands/ldap_sync.py
--- a/authentik/sources/ldap/management/commands/ldap_sync.py
+++ b/authentik/sources/ldap/management/commands/ldap_sync.py
@@ -3,7 +3,10 @@
from structlog.stdlib import get_logger
from authentik.sources.ldap.models import LDAPSource
-from authentik.sources.ldap.tasks import ldap_sync_single
+from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer
+from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer
+from authentik.sources.ldap.sync.users import UserLDAPSynchronizer
+from authentik.sources.ldap.tasks import ldap_sync_paginator
LOGGER = get_logger()
@@ -20,4 +23,10 @@
if not source:
LOGGER.warning("Source does not exist", slug=source_slug)
continue
- ldap_sync_single(source)
+ tasks = (
+ ldap_sync_paginator(source, UserLDAPSynchronizer)
+ + ldap_sync_paginator(source, GroupLDAPSynchronizer)
+ + ldap_sync_paginator(source, MembershipLDAPSynchronizer)
+ )
+ for task in tasks:
+ task()
diff --git a/authentik/sources/ldap/sync/users.py b/authentik/sources/ldap/sync/users.py
--- a/authentik/sources/ldap/sync/users.py
+++ b/authentik/sources/ldap/sync/users.py
@@ -49,7 +49,7 @@
uniq = self._flatten(attributes[self._source.object_uniqueness_field])
try:
defaults = self.build_user_properties(user_dn, **attributes)
- self._logger.debug("Creating user with attributes", **defaults)
+ self._logger.debug("Writing user with attributes", **defaults)
if "username" not in defaults:
raise IntegrityError("Username was not set by propertymappings")
ak_user, created = self.update_or_create_attributes(
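
The effect of the management-command change is that the sync now runs inline in the calling process instead of publishing a Celery task, so log output and exceptions surface directly in the `docker compose run` session. A condensed sketch of that foreground loop, using the same imports and calls as the patch but without the Django command plumbing (the `print` stands in for the structlog warning):

```python
# Condensed version of the patched handle() body; source_slug is supplied by the user.
from authentik.sources.ldap.models import LDAPSource
from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer
from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer
from authentik.sources.ldap.sync.users import UserLDAPSynchronizer
from authentik.sources.ldap.tasks import ldap_sync_paginator


def sync_in_foreground(source_slug: str) -> None:
    source = LDAPSource.objects.filter(slug=source_slug).first()
    if not source:
        print(f"Source does not exist: {source_slug}")
        return
    tasks = (
        ldap_sync_paginator(source, UserLDAPSynchronizer)
        + ldap_sync_paginator(source, GroupLDAPSynchronizer)
        + ldap_sync_paginator(source, MembershipLDAPSynchronizer)
    )
    for task in tasks:
        task()  # executed inline, so errors and warnings print to the console
```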
| {"golden_diff": "diff --git a/authentik/sources/ldap/management/commands/ldap_sync.py b/authentik/sources/ldap/management/commands/ldap_sync.py\n--- a/authentik/sources/ldap/management/commands/ldap_sync.py\n+++ b/authentik/sources/ldap/management/commands/ldap_sync.py\n@@ -3,7 +3,10 @@\n from structlog.stdlib import get_logger\n \n from authentik.sources.ldap.models import LDAPSource\n-from authentik.sources.ldap.tasks import ldap_sync_single\n+from authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer\n+from authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer\n+from authentik.sources.ldap.sync.users import UserLDAPSynchronizer\n+from authentik.sources.ldap.tasks import ldap_sync_paginator\n \n LOGGER = get_logger()\n \n@@ -20,4 +23,10 @@\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n- ldap_sync_single(source)\n+ tasks = (\n+ ldap_sync_paginator(source, UserLDAPSynchronizer)\n+ + ldap_sync_paginator(source, GroupLDAPSynchronizer)\n+ + ldap_sync_paginator(source, MembershipLDAPSynchronizer)\n+ )\n+ for task in tasks:\n+ task()\ndiff --git a/authentik/sources/ldap/sync/users.py b/authentik/sources/ldap/sync/users.py\n--- a/authentik/sources/ldap/sync/users.py\n+++ b/authentik/sources/ldap/sync/users.py\n@@ -49,7 +49,7 @@\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n- self._logger.debug(\"Creating user with attributes\", **defaults)\n+ self._logger.debug(\"Writing user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n", "issue": "docker compose run --rm server ldap_sync - doesn't work anymore to debug\n**Describe the bug**\r\nthis command doesnt work anymore as described here (https://goauthentik.io/docs/troubleshooting/ldap_source)\r\n```\r\ndocker compose run --rm server ldap_sync nxnet\r\n```\r\nit will just create a backgorund taks instead of running it in foreground!\r\n\r\n**To Reproduce**\r\n```\r\ndocker compose run --rm server ldap_sync SLUGofLDAPsource\r\n```\r\n\r\n**Expected behavior**\r\nit will run an LDAP- synchronization in the foreground and see any errors or warnings that might happen directly\r\n\r\n**Logs**\r\n{\"event\": \"Task published\", \"level\": \"info\", \"logger\": \"authentik.root.celery\", \"pid\": 7, \"task_id\": \"17af668f-1d9f-4732-a0eb-044c4a16beed\", \"task_name\": \"authentik.sources.ldap.tasks.ldap_sync\", \"timestamp\": \"2023-07-20T15:10:12.575247\"}\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.6.1\r\n- Deployment: docker compose\r\n\r\n\n", "before_files": [{"content": "\"\"\"LDAP Sync\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.tasks import ldap_sync_single\n\nLOGGER = get_logger()\n\n\nclass Command(BaseCommand):\n \"\"\"Run sync for an LDAP Source\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"source_slugs\", nargs=\"+\", type=str)\n\n def handle(self, **options):\n for source_slug in options[\"source_slugs\"]:\n source = LDAPSource.objects.filter(slug=source_slug).first()\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n ldap_sync_single(source)\n", "path": 
"authentik/sources/ldap/management/commands/ldap_sync.py"}, {"content": "\"\"\"Sync LDAP Users into authentik\"\"\"\nfrom typing import Generator\n\nfrom django.core.exceptions import FieldError\nfrom django.db.utils import IntegrityError\nfrom ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE\n\nfrom authentik.core.models import User\nfrom authentik.events.models import Event, EventAction\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\nfrom authentik.sources.ldap.sync.vendor.freeipa import FreeIPA\nfrom authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory\n\n\nclass UserLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users into authentik\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"users\"\n\n def get_objects(self, **kwargs) -> Generator:\n return self.search_paginator(\n search_base=self.base_dn_users,\n search_filter=self._source.user_object_filter,\n search_scope=SUBTREE,\n attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],\n **kwargs,\n )\n\n def sync(self, page_data: list) -> int:\n \"\"\"Iterate over all LDAP Users and create authentik_core.User instances\"\"\"\n if not self._source.sync_users:\n self.message(\"User syncing is disabled for this Source\")\n return -1\n user_count = 0\n for user in page_data:\n if \"attributes\" not in user:\n continue\n attributes = user.get(\"attributes\", {})\n user_dn = self._flatten(user.get(\"entryDN\", user.get(\"dn\")))\n if self._source.object_uniqueness_field not in attributes:\n self.message(\n f\"Cannot find uniqueness field in attributes: '{user_dn}'\",\n attributes=attributes.keys(),\n dn=user_dn,\n )\n continue\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n self._logger.debug(\"Creating user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n User, {f\"attributes__{LDAP_UNIQUENESS}\": uniq}, defaults\n )\n except (IntegrityError, FieldError, TypeError, AttributeError) as exc:\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=(\n f\"Failed to create user: {str(exc)} \"\n \"To merge new user with existing user, set the user's \"\n f\"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'\"\n ),\n source=self._source,\n dn=user_dn,\n ).save()\n else:\n self._logger.debug(\"Synced User\", user=ak_user.username, created=created)\n user_count += 1\n MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)\n FreeIPA(self._source).sync(attributes, ak_user, created)\n return user_count\n", "path": "authentik/sources/ldap/sync/users.py"}], "after_files": [{"content": "\"\"\"LDAP Sync\"\"\"\nfrom django.core.management.base import BaseCommand\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.ldap.models import LDAPSource\nfrom authentik.sources.ldap.sync.groups import GroupLDAPSynchronizer\nfrom authentik.sources.ldap.sync.membership import MembershipLDAPSynchronizer\nfrom authentik.sources.ldap.sync.users import UserLDAPSynchronizer\nfrom authentik.sources.ldap.tasks import ldap_sync_paginator\n\nLOGGER = get_logger()\n\n\nclass Command(BaseCommand):\n \"\"\"Run sync for an LDAP Source\"\"\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"source_slugs\", nargs=\"+\", type=str)\n\n def handle(self, **options):\n for source_slug in options[\"source_slugs\"]:\n source = 
LDAPSource.objects.filter(slug=source_slug).first()\n if not source:\n LOGGER.warning(\"Source does not exist\", slug=source_slug)\n continue\n tasks = (\n ldap_sync_paginator(source, UserLDAPSynchronizer)\n + ldap_sync_paginator(source, GroupLDAPSynchronizer)\n + ldap_sync_paginator(source, MembershipLDAPSynchronizer)\n )\n for task in tasks:\n task()\n", "path": "authentik/sources/ldap/management/commands/ldap_sync.py"}, {"content": "\"\"\"Sync LDAP Users into authentik\"\"\"\nfrom typing import Generator\n\nfrom django.core.exceptions import FieldError\nfrom django.db.utils import IntegrityError\nfrom ldap3 import ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES, SUBTREE\n\nfrom authentik.core.models import User\nfrom authentik.events.models import Event, EventAction\nfrom authentik.sources.ldap.sync.base import LDAP_UNIQUENESS, BaseLDAPSynchronizer\nfrom authentik.sources.ldap.sync.vendor.freeipa import FreeIPA\nfrom authentik.sources.ldap.sync.vendor.ms_ad import MicrosoftActiveDirectory\n\n\nclass UserLDAPSynchronizer(BaseLDAPSynchronizer):\n \"\"\"Sync LDAP Users into authentik\"\"\"\n\n @staticmethod\n def name() -> str:\n return \"users\"\n\n def get_objects(self, **kwargs) -> Generator:\n return self.search_paginator(\n search_base=self.base_dn_users,\n search_filter=self._source.user_object_filter,\n search_scope=SUBTREE,\n attributes=[ALL_ATTRIBUTES, ALL_OPERATIONAL_ATTRIBUTES],\n **kwargs,\n )\n\n def sync(self, page_data: list) -> int:\n \"\"\"Iterate over all LDAP Users and create authentik_core.User instances\"\"\"\n if not self._source.sync_users:\n self.message(\"User syncing is disabled for this Source\")\n return -1\n user_count = 0\n for user in page_data:\n if \"attributes\" not in user:\n continue\n attributes = user.get(\"attributes\", {})\n user_dn = self._flatten(user.get(\"entryDN\", user.get(\"dn\")))\n if self._source.object_uniqueness_field not in attributes:\n self.message(\n f\"Cannot find uniqueness field in attributes: '{user_dn}'\",\n attributes=attributes.keys(),\n dn=user_dn,\n )\n continue\n uniq = self._flatten(attributes[self._source.object_uniqueness_field])\n try:\n defaults = self.build_user_properties(user_dn, **attributes)\n self._logger.debug(\"Writing user with attributes\", **defaults)\n if \"username\" not in defaults:\n raise IntegrityError(\"Username was not set by propertymappings\")\n ak_user, created = self.update_or_create_attributes(\n User, {f\"attributes__{LDAP_UNIQUENESS}\": uniq}, defaults\n )\n except (IntegrityError, FieldError, TypeError, AttributeError) as exc:\n Event.new(\n EventAction.CONFIGURATION_ERROR,\n message=(\n f\"Failed to create user: {str(exc)} \"\n \"To merge new user with existing user, set the user's \"\n f\"Attribute '{LDAP_UNIQUENESS}' to '{uniq}'\"\n ),\n source=self._source,\n dn=user_dn,\n ).save()\n else:\n self._logger.debug(\"Synced User\", user=ak_user.username, created=created)\n user_count += 1\n MicrosoftActiveDirectory(self._source).sync(attributes, ak_user, created)\n FreeIPA(self._source).sync(attributes, ak_user, created)\n return user_count\n", "path": "authentik/sources/ldap/sync/users.py"}]} | 1,556 | 452 |
gh_patches_debug_51991 | rasdani/github-patches | git_diff | pydantic__pydantic-391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include a PEP 561 marker file
# Feature Request
Hi,
The new version 0.19 has improved typing support, which is great, but it looks like it doesn't work out of the box. I had similar problems to those described in #245, but after adding the installation to MYPYPATH it works fine.
I think a PEP 561 marker file `py.typed` should be added so that tools like mypy can utilize the inline type information without any configuration. Reading the mypy docs, it looks like there is a downside: `zip_safe` must be disabled for this.
https://mypy.readthedocs.io/en/latest/installed_packages.html
https://www.python.org/dev/peps/pep-0561/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from importlib.machinery import SourceFileLoader
3 from pathlib import Path
4 from setuptools import setup
5
6
7 class ReplaceLinks:
8 def __init__(self):
9 self.links = set()
10
11 def replace_issues(self, m):
12 id = m.group(1)
13 self.links.add(f'.. _#{id}: https://github.com/samuelcolvin/pydantic/issues/{id}')
14 return f'`#{id}`_'
15
16 def replace_users(self, m):
17 name = m.group(2)
18 self.links.add(f'.. _@{name}: https://github.com/{name}')
19 return f'{m.group(1)}`@{name}`_'
20
21 def extra(self):
22 return '\n\n' + '\n'.join(self.links) + '\n'
23
24
25 description = 'Data validation and settings management using python 3.6 type hinting'
26 THIS_DIR = Path(__file__).resolve().parent
27 try:
28 history = THIS_DIR.joinpath('HISTORY.rst').read_text()
29
30 replacer = ReplaceLinks()
31 history = re.sub(r'#(\d+)', replacer.replace_issues, history)
32 history = re.sub(r'( +)@(\w+)', replacer.replace_users, history, flags=re.I)
33 history = re.sub(r'@@', '@', history)
34 history += replacer.extra()
35
36 long_description = '\n\n'.join([THIS_DIR.joinpath('README.rst').read_text(), history])
37 except FileNotFoundError:
38 long_description = description + '.\n\nSee https://pydantic-docs.helpmanual.io/ for documentation.'
39
40 # avoid loading the package before requirements are installed:
41 version = SourceFileLoader('version', 'pydantic/version.py').load_module()
42
43 setup(
44 name='pydantic',
45 version=str(version.VERSION),
46 description=description,
47 long_description=long_description,
48 classifiers=[
49 'Development Status :: 5 - Production/Stable',
50 'Programming Language :: Python',
51 'Programming Language :: Python :: 3',
52 'Programming Language :: Python :: 3 :: Only',
53 'Programming Language :: Python :: 3.6',
54 'Programming Language :: Python :: 3.7',
55 'Intended Audience :: Developers',
56 'Intended Audience :: Information Technology',
57 'Intended Audience :: System Administrators',
58 'License :: OSI Approved :: MIT License',
59 'Operating System :: Unix',
60 'Operating System :: POSIX :: Linux',
61 'Environment :: Console',
62 'Environment :: MacOS X',
63 'Topic :: Software Development :: Libraries :: Python Modules',
64 'Topic :: Internet',
65 ],
66 author='Samuel Colvin',
67 author_email='[email protected]',
68 url='https://github.com/samuelcolvin/pydantic',
69 license='MIT',
70 packages=['pydantic'],
71 python_requires='>=3.6',
72 zip_safe=True,
73 install_requires=[
74 'dataclasses>=0.6;python_version<"3.7"'
75 ],
76 extras_require={
77 'ujson': ['ujson>=1.35'],
78 'email': ['email-validator>=1.0.3'],
79 }
80 )
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -68,8 +68,9 @@
url='https://github.com/samuelcolvin/pydantic',
license='MIT',
packages=['pydantic'],
+ package_data={'pydantic': ['py.typed']},
python_requires='>=3.6',
- zip_safe=True,
+ zip_safe=False, # https://mypy.readthedocs.io/en/latest/installed_packages.html
install_requires=[
'dataclasses>=0.6;python_version<"3.7"'
],
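
For context on the patch above: declaring `py.typed` in `package_data` and disabling `zip_safe` only covers the packaging side; PEP 561 also expects the (empty) marker file itself to exist inside the package directory. The snippet below is an illustrative sketch only, not part of the recorded patch; it assumes it is run from the repository root and uses `pathlib` purely for demonstration.

```python
# Illustrative only (not part of the golden diff): create the empty PEP 561
# marker file inside the package so that package_data can pick it up.
from pathlib import Path

marker = Path("pydantic") / "py.typed"  # assumes the repository root as cwd
marker.touch(exist_ok=True)             # the marker is just an empty file
print(marker.exists())                  # True once pydantic/py.typed exists
```

With the marker shipped alongside the package, mypy can use pydantic's inline annotations without the MYPYPATH workaround mentioned in the issue.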
gh_patches_debug_4719 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
valid_batch_size
In the train_single.py file, lines 120 to 124
```
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields)
```
should be changed
```
def train_iter_fct(): return build_dataset_iter(
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
lazily_load_dataset("valid", opt), fields, opt, is_train=False)
```
If it doesn't, it will not use `valid_batch_size`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/train_single.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Training on a single process
4 """
5 from __future__ import division
6
7 import argparse
8 import os
9 import random
10 import torch
11
12 import onmt.opts as opts
13
14 from onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \
15 _load_fields, _collect_report_features
16 from onmt.model_builder import build_model
17 from onmt.utils.optimizers import build_optim
18 from onmt.trainer import build_trainer
19 from onmt.models import build_model_saver
20 from onmt.utils.logging import init_logger, logger
21
22
23 def _check_save_model_path(opt):
24 save_model_path = os.path.abspath(opt.save_model)
25 model_dirname = os.path.dirname(save_model_path)
26 if not os.path.exists(model_dirname):
27 os.makedirs(model_dirname)
28
29
30 def _tally_parameters(model):
31 n_params = sum([p.nelement() for p in model.parameters()])
32 enc = 0
33 dec = 0
34 for name, param in model.named_parameters():
35 if 'encoder' in name:
36 enc += param.nelement()
37 elif 'decoder' or 'generator' in name:
38 dec += param.nelement()
39 return n_params, enc, dec
40
41
42 def training_opt_postprocessing(opt):
43 if opt.word_vec_size != -1:
44 opt.src_word_vec_size = opt.word_vec_size
45 opt.tgt_word_vec_size = opt.word_vec_size
46
47 if opt.layers != -1:
48 opt.enc_layers = opt.layers
49 opt.dec_layers = opt.layers
50
51 opt.brnn = (opt.encoder_type == "brnn")
52
53 if opt.rnn_type == "SRU" and not opt.gpuid:
54 raise AssertionError("Using SRU requires -gpuid set.")
55
56 if torch.cuda.is_available() and not opt.gpuid:
57 logger.info("WARNING: You have a CUDA device, should run with -gpuid")
58
59 if opt.gpuid:
60 torch.cuda.set_device(opt.device_id)
61 if opt.seed > 0:
62 # this one is needed for torchtext random call (shuffled iterator)
63 # in multi gpu it ensures datasets are read in the same order
64 random.seed(opt.seed)
65 # These ensure same initialization in multi gpu mode
66 torch.manual_seed(opt.seed)
67 torch.cuda.manual_seed(opt.seed)
68
69 return opt
70
71
72 def main(opt):
73 opt = training_opt_postprocessing(opt)
74 init_logger(opt.log_file)
75 # Load checkpoint if we resume from a previous training.
76 if opt.train_from:
77 logger.info('Loading checkpoint from %s' % opt.train_from)
78 checkpoint = torch.load(opt.train_from,
79 map_location=lambda storage, loc: storage)
80 model_opt = checkpoint['opt']
81 else:
82 checkpoint = None
83 model_opt = opt
84
85 # Peek the first dataset to determine the data_type.
86 # (All datasets have the same data_type).
87 first_dataset = next(lazily_load_dataset("train", opt))
88 data_type = first_dataset.data_type
89
90 # Load fields generated from preprocess phase.
91 fields = _load_fields(first_dataset, data_type, opt, checkpoint)
92
93 # Report src/tgt features.
94
95 src_features, tgt_features = _collect_report_features(fields)
96 for j, feat in enumerate(src_features):
97 logger.info(' * src feature %d size = %d'
98 % (j, len(fields[feat].vocab)))
99 for j, feat in enumerate(tgt_features):
100 logger.info(' * tgt feature %d size = %d'
101 % (j, len(fields[feat].vocab)))
102
103 # Build model.
104 model = build_model(model_opt, opt, fields, checkpoint)
105 n_params, enc, dec = _tally_parameters(model)
106 logger.info('encoder: %d' % enc)
107 logger.info('decoder: %d' % dec)
108 logger.info('* number of parameters: %d' % n_params)
109 _check_save_model_path(opt)
110
111 # Build optimizer.
112 optim = build_optim(model, opt, checkpoint)
113
114 # Build model saver
115 model_saver = build_model_saver(model_opt, opt, model, fields, optim)
116
117 trainer = build_trainer(
118 opt, model, fields, optim, data_type, model_saver=model_saver)
119
120 def train_iter_fct(): return build_dataset_iter(
121 lazily_load_dataset("train", opt), fields, opt)
122
123 def valid_iter_fct(): return build_dataset_iter(
124 lazily_load_dataset("valid", opt), fields, opt)
125
126 # Do training.
127 trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
128 opt.valid_steps)
129
130 if opt.tensorboard:
131 trainer.report_manager.tensorboard_writer.close()
132
133
134 if __name__ == "__main__":
135 parser = argparse.ArgumentParser(
136 description='train.py',
137 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
138
139 opts.add_md_help_argument(parser)
140 opts.model_opts(parser)
141 opts.train_opts(parser)
142
143 opt = parser.parse_args()
144 main(opt)
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/train_single.py b/onmt/train_single.py
--- a/onmt/train_single.py
+++ b/onmt/train_single.py
@@ -121,7 +121,7 @@
lazily_load_dataset("train", opt), fields, opt)
def valid_iter_fct(): return build_dataset_iter(
- lazily_load_dataset("valid", opt), fields, opt)
+ lazily_load_dataset("valid", opt), fields, opt, is_train=False)
# Do training.
trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,
| {"golden_diff": "diff --git a/onmt/train_single.py b/onmt/train_single.py\n--- a/onmt/train_single.py\n+++ b/onmt/train_single.py\n@@ -121,7 +121,7 @@\n lazily_load_dataset(\"train\", opt), fields, opt)\n \n def valid_iter_fct(): return build_dataset_iter(\n- lazily_load_dataset(\"valid\", opt), fields, opt)\n+ lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\n \n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n", "issue": "valid_batch_size\nIn the train_single.py file , lines 120 to 124\r\n```\r\ndef train_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"train\", opt), fields, opt)\r\n\r\n def valid_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"valid\", opt), fields)\r\n```\r\nshould be changed\r\n```\r\ndef train_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"train\", opt), fields, opt)\r\n\r\n def valid_iter_fct(): return build_dataset_iter(\r\n lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\r\n```\r\nIf it doesn't, it will not use `valid_batch_size`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\n Training on a single process\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport os\nimport random\nimport torch\n\nimport onmt.opts as opts\n\nfrom onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \\\n _load_fields, _collect_report_features\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import build_optim\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n elif 'decoder' or 'generator' in name:\n dec += param.nelement()\n return n_params, enc, dec\n\n\ndef training_opt_postprocessing(opt):\n if opt.word_vec_size != -1:\n opt.src_word_vec_size = opt.word_vec_size\n opt.tgt_word_vec_size = opt.word_vec_size\n\n if opt.layers != -1:\n opt.enc_layers = opt.layers\n opt.dec_layers = opt.layers\n\n opt.brnn = (opt.encoder_type == \"brnn\")\n\n if opt.rnn_type == \"SRU\" and not opt.gpuid:\n raise AssertionError(\"Using SRU requires -gpuid set.\")\n\n if torch.cuda.is_available() and not opt.gpuid:\n logger.info(\"WARNING: You have a CUDA device, should run with -gpuid\")\n\n if opt.gpuid:\n torch.cuda.set_device(opt.device_id)\n if opt.seed > 0:\n # this one is needed for torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(opt.seed)\n # These ensure same initialization in multi gpu mode\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n\n return opt\n\n\ndef main(opt):\n opt = training_opt_postprocessing(opt)\n init_logger(opt.log_file)\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n model_opt = checkpoint['opt']\n else:\n checkpoint = None\n model_opt = opt\n\n # Peek the first dataset to determine the data_type.\n # (All datasets 
have the same data_type).\n first_dataset = next(lazily_load_dataset(\"train\", opt))\n data_type = first_dataset.data_type\n\n # Load fields generated from preprocess phase.\n fields = _load_fields(first_dataset, data_type, opt, checkpoint)\n\n # Report src/tgt features.\n\n src_features, tgt_features = _collect_report_features(fields)\n for j, feat in enumerate(src_features):\n logger.info(' * src feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n for j, feat in enumerate(tgt_features):\n logger.info(' * tgt feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = build_optim(model, opt, checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, model, fields, optim, data_type, model_saver=model_saver)\n\n def train_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"train\", opt), fields, opt)\n\n def valid_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"valid\", opt), fields, opt)\n\n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n", "path": "onmt/train_single.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\n Training on a single process\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport os\nimport random\nimport torch\n\nimport onmt.opts as opts\n\nfrom onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \\\n _load_fields, _collect_report_features\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import build_optim\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n elif 'decoder' or 'generator' in name:\n dec += param.nelement()\n return n_params, enc, dec\n\n\ndef training_opt_postprocessing(opt):\n if opt.word_vec_size != -1:\n opt.src_word_vec_size = opt.word_vec_size\n opt.tgt_word_vec_size = opt.word_vec_size\n\n if opt.layers != -1:\n opt.enc_layers = opt.layers\n opt.dec_layers = opt.layers\n\n opt.brnn = (opt.encoder_type == \"brnn\")\n\n if opt.rnn_type == \"SRU\" and not opt.gpuid:\n raise AssertionError(\"Using SRU requires -gpuid set.\")\n\n if torch.cuda.is_available() and not opt.gpuid:\n logger.info(\"WARNING: You have a CUDA device, should run with -gpuid\")\n\n if opt.gpuid:\n torch.cuda.set_device(opt.device_id)\n if opt.seed > 0:\n # this one is needed for 
torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(opt.seed)\n # These ensure same initialization in multi gpu mode\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n\n return opt\n\n\ndef main(opt):\n opt = training_opt_postprocessing(opt)\n init_logger(opt.log_file)\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n model_opt = checkpoint['opt']\n else:\n checkpoint = None\n model_opt = opt\n\n # Peek the first dataset to determine the data_type.\n # (All datasets have the same data_type).\n first_dataset = next(lazily_load_dataset(\"train\", opt))\n data_type = first_dataset.data_type\n\n # Load fields generated from preprocess phase.\n fields = _load_fields(first_dataset, data_type, opt, checkpoint)\n\n # Report src/tgt features.\n\n src_features, tgt_features = _collect_report_features(fields)\n for j, feat in enumerate(src_features):\n logger.info(' * src feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n for j, feat in enumerate(tgt_features):\n logger.info(' * tgt feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = build_optim(model, opt, checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, model, fields, optim, data_type, model_saver=model_saver)\n\n def train_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"train\", opt), fields, opt)\n\n def valid_iter_fct(): return build_dataset_iter(\n lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\n\n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n", "path": "onmt/train_single.py"}]} | 1,830 | 130 |
gh_patches_debug_38208 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Restructure Numbers provider
# Feature request
## Thesis
While I was implementing the ``matrix()`` function in the ``Numbers`` provider, I was thinking about some changes that we could make in this provider:
- Add a function ``complex(start, end, length)`` that returns a random array of complex numbers
- Make the API uniform, so that every function in the ``Numbers`` provider has the arguments ``start, end, length`` (where possible). Maybe in the ``complex()`` function we can add ``start_real, end_real, start_imaginary, end_imaginary`` ?
- Remove the function ``rating()`` and add an argument ``decimal_digits`` in the function ``floats()`` to specify the number of decimal digits to keep.
## Reasoning
I think these changes would make the provider more uniform and easy to use.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/providers/numbers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Provides data related to numbers."""
4
5 from typing import List, Union
6
7 from mimesis.providers.base import BaseProvider
8
9 __all__ = ['Numbers']
10
11
12 class Numbers(BaseProvider):
13 """Class for generating numbers."""
14
15 class Meta:
16 """Class for metadata."""
17
18 name = 'numbers'
19
20 def floats(self, n: int = 2) -> List[float]:
21 """Generate a list of random float numbers.
22
23 :param n: Raise 10 to the 'n' power.
24 :return: The list of floating-point numbers.
25 """
26 nums = [self.random.random()
27 for _ in range(10 ** int(n))]
28 return nums
29
30 def integers(self, start: int = 0, end: int = 10,
31 length: int = 10) -> List[int]:
32 """Generate a list of random integers.
33
34 Integers can be negative or positive numbers.
35 .. note: You can use both positive and negative numbers.
36
37 :param start: Start.
38 :param end: End.
39 :param length: Length of list.
40 :return: List of integers.
41
42 :Example:
43 [-20, -19, -18, -17]
44 """
45 return self.random.randints(
46 length, start, end)
47
48 @staticmethod
49 def primes(start: int = 1, end: int = 999) -> List[int]:
50 """Generate a list of prime numbers.
51
52 :param start: First value of range.
53 :param end: Last value of range.
54 :return: A list of prime numbers from start to end.
55 """
56 # TODO: It should generate random primes with passed length.
57 sieve_size = (end // 2 - 1) if end % 2 == 0 else (end // 2)
58 sieve = [True] * sieve_size
59
60 primes = [] # list of primes
61 # add 2 to the list if it's in the given range
62 if end >= 2:
63 primes.append(2)
64 for i in range(sieve_size):
65 if sieve[i]:
66 value_at_i = i * 2 + 3
67 primes.append(value_at_i)
68 for j in range(i, sieve_size, value_at_i):
69 sieve[j] = False
70
71 chop_index = 0
72 for i in range(len(primes)):
73 if primes[i] >= start:
74 chop_index = i
75 break
76 return primes[chop_index:]
77
78 def digit(self, to_bin: bool = False) -> Union[str, int]:
79 """Get a random digit.
80
81 :param to_bin: If True then convert to binary.
82 :return: Digit.
83
84 :Example:
85 4.
86 """
87 digit = self.random.randint(0, 9)
88
89 if to_bin:
90 return bin(digit)
91
92 return digit
93
94 def between(self, minimum: int = 1, maximum: int = 1000) -> int:
95 """Generate a random number between minimum and maximum.
96
97 :param minimum: Minimum of range.
98 :param maximum: Maximum of range.
99 :return: Number.
100 """
101 return self.random.randint(minimum, maximum)
102
103 def rating(self, maximum: float = 5.0) -> float:
104 """Generate a random rating for something.
105
106 :param maximum: Maximum value (default is 5.0).
107 :return: Rating.
108
109 :Example:
110 4.7
111 """
112 return self.random.uniform(0, maximum, 1)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mimesis/providers/numbers.py b/mimesis/providers/numbers.py
--- a/mimesis/providers/numbers.py
+++ b/mimesis/providers/numbers.py
@@ -17,18 +17,20 @@
name = 'numbers'
- def floats(self, n: int = 2) -> List[float]:
+ def floats(self, start: float = 0, end: float = 1, n: int = 10,
+ rounding: int = 15) -> List[float]:
"""Generate a list of random float numbers.
- :param n: Raise 10 to the 'n' power.
+ :param start: Start range.
+ :param end: End range.
+ :param n: Length of the list.
+ :param rounding: Max number of decimal digits.
:return: The list of floating-point numbers.
"""
- nums = [self.random.random()
- for _ in range(10 ** int(n))]
- return nums
+ return [self.random.uniform(start, end, rounding) for _ in range(n)]
def integers(self, start: int = 0, end: int = 10,
- length: int = 10) -> List[int]:
+ n: int = 10) -> List[int]:
"""Generate a list of random integers.
Integers can be negative or positive numbers.
@@ -36,14 +38,33 @@
:param start: Start.
:param end: End.
- :param length: Length of list.
+ :param n: Length of list.
:return: List of integers.
:Example:
[-20, -19, -18, -17]
"""
- return self.random.randints(
- length, start, end)
+ return self.random.randints(n, start, end)
+
+ def complexes(self, start_real: float = 0, end_real: float = 1,
+ start_imag: float = 0, end_imag: float = 1,
+ rounding_real: int = 15, rounding_imag: int = 15,
+ n: int = 10) -> List[complex]:
+ """Generate a list of random complex numbers.
+
+ :param start_real: Start real range.
+ :param end_real: End real range.
+ :param start_imag: Start imaginary range.
+ :param end_imag: End imaginary range.
+ :param rounding_real: Rounding real part.
+ :param rounding_imag: Roungind imaginary part.
+ :param n: Length of the list.
+ :return: A list of random complex numbers.
+ """
+ return [
+ complex(self.random.uniform(start_real, end_real, rounding_real),
+ self.random.uniform(start_imag, end_imag, rounding_imag))
+ for _ in range(n)]
@staticmethod
def primes(start: int = 1, end: int = 999) -> List[int]:
@@ -99,14 +120,3 @@
:return: Number.
"""
return self.random.randint(minimum, maximum)
-
- def rating(self, maximum: float = 5.0) -> float:
- """Generate a random rating for something.
-
- :param maximum: Maximum value (default is 5.0).
- :return: Rating.
-
- :Example:
- 4.7
- """
- return self.random.uniform(0, maximum, 1)
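
As a usage illustration only (assuming the patched provider above is what is installed), the reworked `floats()` and the new `complexes()` can be exercised as follows; the import path mirrors the file shown in the record, and the no-argument constructor is an assumption of this sketch.

```python
# Hedged usage sketch for the patched Numbers provider shown above.
from mimesis.providers.numbers import Numbers

numbers = Numbers()

# floats() now takes a range, a length and a rounding precision
# instead of the old "raise 10 to the n power" behaviour.
print(numbers.floats(start=0, end=1, n=5, rounding=3))

# complexes() is the new helper that answers the issue's complex() request.
print(numbers.complexes(start_real=-1, end_real=1,
                        start_imag=-1, end_imag=1,
                        rounding_real=2, rounding_imag=2, n=3))
```

Note that `rating()` is removed by the patch, matching the issue's request to fold decimal-digit control into `floats()` via the `rounding` argument.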
gh_patches_debug_40197 | rasdani/github-patches | git_diff | fossasia__open-event-server-8379 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Organizer video tab shows errors if there are video rooms not using BBB
Organizer video tab shows errors if there are video rooms not using BBB.


Compare https://eventyay.com/events/3ea940a8/video/all (accessible to organizers only)
Related to https://github.com/fossasia/open-event-frontend/pull/7927
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/video_recordings.py`
Content:
```
1 from datetime import datetime
2
3 from flask_rest_jsonapi import ResourceDetail, ResourceList
4 from flask_rest_jsonapi.resource import ResourceRelationship
5
6 from app.api.helpers.db import get_or_create, safe_query_kwargs
7 from app.api.helpers.errors import ForbiddenError, UnprocessableEntityError
8 from app.api.helpers.permission_manager import has_access
9 from app.api.helpers.permissions import jwt_required
10 from app.api.schema.video_recordings import VideoRecordingSchema
11 from app.api.video_channels.bbb import BigBlueButton
12 from app.models import db
13 from app.models.video_recording import VideoRecording
14 from app.models.video_stream import VideoStream
15
16
17 class VideoRecordingList(ResourceList):
18 def before_get(self, args, kwargs):
19 if kwargs.get('video_stream_id'):
20 stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')
21
22 if not has_access('is_organizer', event_id=stream.event_id):
23 raise ForbiddenError(
24 {'pointer': 'event_id'},
25 'You need to be the event organizer to access video recordings.',
26 )
27
28 params = dict(
29 meetingID=stream.extra['response']['meetingID'],
30 )
31 channel = stream.channel
32 bbb = BigBlueButton(channel.api_url, channel.api_key)
33 result = bbb.request('getRecordings', params)
34
35 if result.data['response']['recordings']:
36 recordings = []
37 if type(result.data['response']['recordings']['recording']) is list:
38 recordings = result.data['response']['recordings']['recording']
39 else:
40 recordings.append(result.data['response']['recordings']['recording'])
41 for recording in recordings:
42 get_or_create(
43 VideoRecording,
44 bbb_record_id=recording['recordID'],
45 participants=recording['participants'],
46 url=recording['playback']['format']['url'],
47 start_time=datetime.fromtimestamp(
48 int(int(recording['startTime']) / 1000)
49 ),
50 end_time=datetime.fromtimestamp(
51 int(int(recording['endTime']) / 1000)
52 ),
53 video_stream=stream,
54 )
55
56 def query(self, view_kwargs):
57 query_ = VideoRecording.query
58 if view_kwargs.get('video_stream_id'):
59 stream = safe_query_kwargs(VideoStream, view_kwargs, 'video_stream_id')
60 query_ = VideoRecording.query.filter(
61 VideoRecording.video_stream_id == stream.id
62 )
63 else:
64 if not has_access('is_admin'):
65 raise ForbiddenError(
66 {'pointer': 'user'},
67 'You need to be the admin to access video recordings.',
68 )
69
70 return query_
71
72 methods = ['GET']
73 view_kwargs = True
74 decorators = (jwt_required,)
75 schema = VideoRecordingSchema
76 data_layer = {
77 'session': db.session,
78 'model': VideoRecording,
79 'methods': {
80 'query': query,
81 'before_get': before_get,
82 },
83 }
84
85
86 class VideoRecordingDetail(ResourceDetail):
87 def before_get_object(self, view_kwargs):
88 if view_kwargs.get('video_stream_id'):
89 video_stream = safe_query_kwargs(
90 VideoStream,
91 view_kwargs,
92 'video_stream_id',
93 )
94 view_kwargs['id'] = video_stream.id
95
96 def after_get_object(self, video_recording, view_kwargs):
97 if not has_access('is_organizer', event_id=video_recording.video_stream.event_id):
98 raise ForbiddenError(
99 {'pointer': 'event_id'},
100 'You need to be the event organizer to access video recordings.',
101 )
102
103 def before_delete_object(self, video_recording, kwargs):
104 """
105 before delete object method for recording detail
106 :param obj:
107 :param kwargs:
108 :return:
109 """
110 if not has_access('is_admin'):
111 raise ForbiddenError(
112 {'source': 'User'}, 'You are not authorized to access this.'
113 )
114 stream = video_recording.video_stream
115 params = dict(
116 recordID=video_recording.bbb_record_id,
117 )
118 channel = stream.channel
119 bbb = BigBlueButton(channel.api_url, channel.api_key)
120 result = bbb.request('deleteRecordings', params)
121
122 if not result.success:
123 raise UnprocessableEntityError(
124 {'source': 'recording_id'}, 'error while deleting recording'
125 )
126
127 methods = ['GET', 'DELETE']
128 schema = VideoRecordingSchema
129 decorators = (jwt_required,)
130 data_layer = {
131 'session': db.session,
132 'model': VideoRecording,
133 'methods': {
134 'before_get_object': before_get_object,
135 'after_get_object': after_get_object,
136 'before_delete_object': before_delete_object,
137 },
138 }
139
140
141 class VideoRecordingRelationship(ResourceRelationship):
142 schema = VideoRecordingSchema
143 methods = ['GET']
144 data_layer = {'session': db.session, 'model': VideoRecording}
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/video_recordings.py b/app/api/video_recordings.py
--- a/app/api/video_recordings.py
+++ b/app/api/video_recordings.py
@@ -19,39 +19,48 @@
if kwargs.get('video_stream_id'):
stream = safe_query_kwargs(VideoStream, kwargs, 'video_stream_id', 'id')
- if not has_access('is_organizer', event_id=stream.event_id):
- raise ForbiddenError(
- {'pointer': 'event_id'},
- 'You need to be the event organizer to access video recordings.',
- )
+ if stream.channel and stream.channel.provider == 'bbb':
+ if not has_access('is_organizer', event_id=stream.event_id):
+ raise ForbiddenError(
+ {'pointer': 'event_id'},
+ 'You need to be the event organizer to access video recordings.',
+ )
- params = dict(
- meetingID=stream.extra['response']['meetingID'],
- )
- channel = stream.channel
- bbb = BigBlueButton(channel.api_url, channel.api_key)
- result = bbb.request('getRecordings', params)
-
- if result.data['response']['recordings']:
- recordings = []
- if type(result.data['response']['recordings']['recording']) is list:
- recordings = result.data['response']['recordings']['recording']
- else:
- recordings.append(result.data['response']['recordings']['recording'])
- for recording in recordings:
- get_or_create(
- VideoRecording,
- bbb_record_id=recording['recordID'],
- participants=recording['participants'],
- url=recording['playback']['format']['url'],
- start_time=datetime.fromtimestamp(
- int(int(recording['startTime']) / 1000)
- ),
- end_time=datetime.fromtimestamp(
- int(int(recording['endTime']) / 1000)
- ),
- video_stream=stream,
+ if stream.extra is not None:
+ params = dict(
+ meetingID=stream.extra['response']['meetingID'],
)
+ channel = stream.channel
+ bbb = BigBlueButton(channel.api_url, channel.api_key)
+ result = bbb.request('getRecordings', params)
+
+ if result.data['response']['recordings']:
+ recordings = []
+ if (
+ type(result.data['response']['recordings']['recording'])
+ is list
+ ):
+ recordings = result.data['response']['recordings'][
+ 'recording'
+ ]
+ else:
+ recordings.append(
+ result.data['response']['recordings']['recording']
+ )
+ for recording in recordings:
+ get_or_create(
+ VideoRecording,
+ bbb_record_id=recording['recordID'],
+ participants=recording['participants'],
+ url=recording['playback']['format']['url'],
+ start_time=datetime.fromtimestamp(
+ int(int(recording['startTime']) / 1000)
+ ),
+ end_time=datetime.fromtimestamp(
+ int(int(recording['endTime']) / 1000)
+ ),
+ video_stream=stream,
+ )
def query(self, view_kwargs):
query_ = VideoRecording.query
gh_patches_debug_3599 | rasdani/github-patches | git_diff | certbot__certbot-606 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
nginx plugin destroys config
I have a config file called webp.conf in /etc/nginx/conf.d/ which works great.
After running `letsencrypt -d example.org run`, webp.conf is broken because it's missing a closing `}`.
https://pastebin.mozilla.org/8837365
Line 18 gets removed.
--- END ISSUE ---
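For background on the parser involved: pyparsing provides two alternation operators with different matching behaviour. `|` builds a MatchFirst, where the first alternative that succeeds wins, while `^` builds an Or, where the longest successful match wins. The toy grammar below only illustrates that difference; it is not the nginx grammar from this plugin and is not claimed to be the exact root cause of the truncated webp.conf:
```
# Toy example of pyparsing's "|" (MatchFirst) versus "^" (Or); unrelated to webp.conf.
from pyparsing import Group, Literal, Word, alphas

key = Word(alphas)
bare_word = Group(key)                               # matches just "events"
block = Group(key + Literal("{") + Literal("}"))     # matches "events { }"

first_match = bare_word | block    # MatchFirst: first alternative that succeeds wins
longest_match = bare_word ^ block  # Or: longest successful alternative wins

print(first_match.parseString("events { }"))    # [['events']]  -- "{ }" is left unconsumed
print(longest_match.parseString("events { }"))  # [['events', '{', '}']]
```
When whole statements are strung together with `|`, a prefix-matching alternative can silently swallow part of the input, which would be consistent with a closing `}` going missing; confirming that against the real config still requires the pastebin above.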
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `letsencrypt-nginx/letsencrypt_nginx/nginxparser.py`
Content:
```
1 """Very low-level nginx config parser based on pyparsing."""
2 import string
3
4 from pyparsing import (
5 Literal, White, Word, alphanums, CharsNotIn, Forward, Group,
6 Optional, OneOrMore, Regex, ZeroOrMore)
7 from pyparsing import stringEnd
8 from pyparsing import restOfLine
9
10 class RawNginxParser(object):
11 # pylint: disable=expression-not-assigned
12 """A class that parses nginx configuration with pyparsing."""
13
14 # constants
15 left_bracket = Literal("{").suppress()
16 right_bracket = Literal("}").suppress()
17 semicolon = Literal(";").suppress()
18 space = White().suppress()
19 key = Word(alphanums + "_/")
20 # Matches anything that is not a special character AND any chars in single
21 # or double quotes
22 value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+")
23 location = CharsNotIn("{};," + string.whitespace)
24 # modifier for location uri [ = | ~ | ~* | ^~ ]
25 modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
26
27 # rules
28 comment = Literal('#') + restOfLine()
29 assignment = (key + Optional(space + value, default=None) + semicolon)
30 location_statement = Optional(space + modifier) + Optional(space + location)
31 if_statement = Literal("if") + space + Regex(r"\(.+\)") + space
32 block = Forward()
33
34 block << Group(
35 (Group(key + location_statement) ^ Group(if_statement))
36 + left_bracket
37 + Group(ZeroOrMore(Group(comment | assignment) | block))
38 + right_bracket)
39
40 script = OneOrMore(Group(comment | assignment) | block) + stringEnd
41
42 def __init__(self, source):
43 self.source = source
44
45 def parse(self):
46 """Returns the parsed tree."""
47 return self.script.parseString(self.source)
48
49 def as_list(self):
50 """Returns the parsed tree as a list."""
51 return self.parse().asList()
52
53
54 class RawNginxDumper(object):
55 # pylint: disable=too-few-public-methods
56 """A class that dumps nginx configuration from the provided tree."""
57 def __init__(self, blocks, indentation=4):
58 self.blocks = blocks
59 self.indentation = indentation
60
61 def __iter__(self, blocks=None, current_indent=0, spacer=' '):
62 """Iterates the dumped nginx content."""
63 blocks = blocks or self.blocks
64 for key, values in blocks:
65 indentation = spacer * current_indent
66 if isinstance(key, list):
67 if current_indent:
68 yield ''
69 yield indentation + spacer.join(key) + ' {'
70
71 for parameter in values:
72 dumped = self.__iter__([parameter], current_indent + self.indentation)
73 for line in dumped:
74 yield line
75
76 yield indentation + '}'
77 else:
78 if key == '#':
79 yield spacer * current_indent + key + values
80 else:
81 if values is None:
82 yield spacer * current_indent + key + ';'
83 else:
84 yield spacer * current_indent + key + spacer + values + ';'
85
86 def as_string(self):
87 """Return the parsed block as a string."""
88 return '\n'.join(self) + '\n'
89
90
91 # Shortcut functions to respect Python's serialization interface
92 # (like pyyaml, picker or json)
93
94 def loads(source):
95 """Parses from a string.
96
97 :param str souce: The string to parse
98 :returns: The parsed tree
99 :rtype: list
100
101 """
102 return RawNginxParser(source).as_list()
103
104
105 def load(_file):
106 """Parses from a file.
107
108 :param file _file: The file to parse
109 :returns: The parsed tree
110 :rtype: list
111
112 """
113 return loads(_file.read())
114
115
116 def dumps(blocks, indentation=4):
117 """Dump to a string.
118
119 :param list block: The parsed tree
120 :param int indentation: The number of spaces to indent
121 :rtype: str
122
123 """
124 return RawNginxDumper(blocks, indentation).as_string()
125
126
127 def dump(blocks, _file, indentation=4):
128 """Dump to a file.
129
130 :param list block: The parsed tree
131 :param file _file: The file to dump to
132 :param int indentation: The number of spaces to indent
133 :rtype: NoneType
134
135 """
136 return _file.write(dumps(blocks, indentation))
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
--- a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
+++ b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py
@@ -37,7 +37,7 @@
+ Group(ZeroOrMore(Group(comment | assignment) | block))
+ right_bracket)
- script = OneOrMore(Group(comment | assignment) | block) + stringEnd
+ script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd
def __init__(self, source):
self.source = source
| {"golden_diff": "diff --git a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n--- a/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n+++ b/letsencrypt-nginx/letsencrypt_nginx/nginxparser.py\n@@ -37,7 +37,7 @@\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n \n- script = OneOrMore(Group(comment | assignment) | block) + stringEnd\n+ script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd\n \n def __init__(self, source):\n self.source = source\n", "issue": "nginx plugin destroys config\nI have a config file called webp.conf in /etc/nginx/conf.d/ which works great.\nAfter running letsencrypt -d example.org run the webp.conf is broken because it's missing a closing }\nhttps://pastebin.mozilla.org/8837365\nLine 18 gets removed.\n\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n left_bracket = Literal(\"{\").suppress()\n right_bracket = Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n space = White().suppress()\n key = Word(alphanums + \"_/\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = Literal('#') + restOfLine()\n assignment = (key + Optional(space + value, default=None) + semicolon)\n location_statement = Optional(space + modifier) + Optional(space + location)\n if_statement = Literal(\"if\") + space + Regex(r\"\\(.+\\)\") + space\n block = Forward()\n\n block << Group(\n (Group(key + location_statement) ^ Group(if_statement))\n + left_bracket\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n\n script = OneOrMore(Group(comment | assignment) | block) + stringEnd\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks, indentation=4):\n self.blocks = blocks\n self.indentation = indentation\n\n def __iter__(self, blocks=None, current_indent=0, spacer=' '):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for key, values in blocks:\n indentation = spacer * current_indent\n if isinstance(key, list):\n if current_indent:\n yield ''\n yield indentation + spacer.join(key) + ' {'\n\n for parameter in values:\n dumped = self.__iter__([parameter], current_indent + self.indentation)\n for line in dumped:\n yield line\n\n yield indentation + '}'\n else:\n if key == '#':\n yield spacer * current_indent + key + values\n else:\n if values is None:\n yield spacer * current_indent + key + ';'\n 
else:\n yield spacer * current_indent + key + spacer + values + ';'\n\n def as_string(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return '\\n'.join(self) + '\\n'\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return RawNginxParser(source).as_list()\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks, indentation=4):\n \"\"\"Dump to a string.\n\n :param list block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return RawNginxDumper(blocks, indentation).as_string()\n\n\ndef dump(blocks, _file, indentation=4):\n \"\"\"Dump to a file.\n\n :param list block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks, indentation))\n", "path": "letsencrypt-nginx/letsencrypt_nginx/nginxparser.py"}], "after_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n left_bracket = Literal(\"{\").suppress()\n right_bracket = Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n space = White().suppress()\n key = Word(alphanums + \"_/\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = Literal('#') + restOfLine()\n assignment = (key + Optional(space + value, default=None) + semicolon)\n location_statement = Optional(space + modifier) + Optional(space + location)\n if_statement = Literal(\"if\") + space + Regex(r\"\\(.+\\)\") + space\n block = Forward()\n\n block << Group(\n (Group(key + location_statement) ^ Group(if_statement))\n + left_bracket\n + Group(ZeroOrMore(Group(comment | assignment) | block))\n + right_bracket)\n\n script = OneOrMore(Group(comment | assignment) ^ block) + stringEnd\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks, indentation=4):\n self.blocks = blocks\n self.indentation = indentation\n\n def __iter__(self, blocks=None, current_indent=0, spacer=' '):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for key, values in blocks:\n indentation = spacer * current_indent\n if isinstance(key, 
list):\n if current_indent:\n yield ''\n yield indentation + spacer.join(key) + ' {'\n\n for parameter in values:\n dumped = self.__iter__([parameter], current_indent + self.indentation)\n for line in dumped:\n yield line\n\n yield indentation + '}'\n else:\n if key == '#':\n yield spacer * current_indent + key + values\n else:\n if values is None:\n yield spacer * current_indent + key + ';'\n else:\n yield spacer * current_indent + key + spacer + values + ';'\n\n def as_string(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return '\\n'.join(self) + '\\n'\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return RawNginxParser(source).as_list()\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks, indentation=4):\n \"\"\"Dump to a string.\n\n :param list block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return RawNginxDumper(blocks, indentation).as_string()\n\n\ndef dump(blocks, _file, indentation=4):\n \"\"\"Dump to a file.\n\n :param list block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks, indentation))\n", "path": "letsencrypt-nginx/letsencrypt_nginx/nginxparser.py"}]} | 1,638 | 151 |
gh_patches_debug_16124 | rasdani/github-patches | git_diff | getnikola__nikola-3159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Responsive youtube embed
I'm trying to set an embedded youtube link to 100% width in a .rst file. Is this possible?
I've tried:
```
.. youtube:: 3XsQCkF1SrE
:align: center
:width: 100%
```
--- END ISSUE ---
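For context on why the percentage value does not get through: a docutils directive declares its options in `option_spec`, and each option's converter decides which strings are accepted. `directives.positive_int` converts the value with `int()` and therefore rejects `100%` as an invalid option value, while `directives.unchanged` hands the raw string to the directive untouched. A minimal sketch of the difference follows; this is a hypothetical directive, not Nikola's actual plugin:
```
# Hypothetical directive showing how option converters constrain option values.
from docutils.parsers.rst import Directive, directives


class Embed(Directive):
    required_arguments = 1
    option_spec = {
        "height": directives.positive_int,  # accepts only plain positive integers, e.g. 315
        "width": directives.unchanged,      # accepts any string, e.g. "560" or "100%"
    }

    def run(self):
        width = self.options.get("width", "560")    # may be "100%" when using `unchanged`
        height = self.options.get("height", 315)
        return []                                   # a real directive would emit nodes here
```
Whether a `100%` width then actually renders responsively depends on the HTML/CSS the directive emits, which is a separate question from option parsing.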
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/compile/rest/youtube.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2018 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """YouTube directive for reStructuredText."""
28
29 from docutils import nodes
30 from docutils.parsers.rst import Directive, directives
31 from nikola.plugins.compile.rest import _align_choice, _align_options_base
32
33 from nikola.plugin_categories import RestExtension
34
35
36 class Plugin(RestExtension):
37 """Plugin for the youtube directive."""
38
39 name = "rest_youtube"
40
41 def set_site(self, site):
42 """Set Nikola site."""
43 self.site = site
44 directives.register_directive('youtube', Youtube)
45 return super(Plugin, self).set_site(site)
46
47
48 CODE = """\
49 <div class="youtube-video{align}">
50 <iframe width="{width}" height="{height}"
51 src="https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent"
52 frameborder="0" allow="encrypted-media" allowfullscreen
53 ></iframe>
54 </div>"""
55
56
57 class Youtube(Directive):
58 """reST extension for inserting youtube embedded videos.
59
60 Usage:
61 .. youtube:: lyViVmaBQDg
62 :height: 400
63 :width: 600
64
65 """
66
67 has_content = True
68 required_arguments = 1
69 option_spec = {
70 "width": directives.positive_int,
71 "height": directives.positive_int,
72 "align": _align_choice
73 }
74
75 def run(self):
76 """Run the youtube directive."""
77 self.check_content()
78 options = {
79 'yid': self.arguments[0],
80 'width': 560,
81 'height': 315,
82 }
83 options.update(self.options)
84 if self.options.get('align') in _align_options_base:
85 options['align'] = ' align-' + self.options['align']
86 else:
87 options['align'] = ''
88 return [nodes.raw('', CODE.format(**options), format='html')]
89
90 def check_content(self):
91 """Check if content exists."""
92 if self.content: # pragma: no cover
93 raise self.warning("This directive does not accept content. The "
94 "'key=value' format for options is deprecated, "
95 "use ':key: value' instead")
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/compile/rest/youtube.py b/nikola/plugins/compile/rest/youtube.py
--- a/nikola/plugins/compile/rest/youtube.py
+++ b/nikola/plugins/compile/rest/youtube.py
@@ -67,8 +67,8 @@
has_content = True
required_arguments = 1
option_spec = {
- "width": directives.positive_int,
- "height": directives.positive_int,
+ "width": directives.unchanged,
+ "height": directives.unchanged,
"align": _align_choice
}
@@ -80,7 +80,7 @@
'width': 560,
'height': 315,
}
- options.update(self.options)
+ options.update({k: v for k, v in self.options.items() if v})
if self.options.get('align') in _align_options_base:
options['align'] = ' align-' + self.options['align']
else:
| {"golden_diff": "diff --git a/nikola/plugins/compile/rest/youtube.py b/nikola/plugins/compile/rest/youtube.py\n--- a/nikola/plugins/compile/rest/youtube.py\n+++ b/nikola/plugins/compile/rest/youtube.py\n@@ -67,8 +67,8 @@\n has_content = True\n required_arguments = 1\n option_spec = {\n- \"width\": directives.positive_int,\n- \"height\": directives.positive_int,\n+ \"width\": directives.unchanged,\n+ \"height\": directives.unchanged,\n \"align\": _align_choice\n }\n \n@@ -80,7 +80,7 @@\n 'width': 560,\n 'height': 315,\n }\n- options.update(self.options)\n+ options.update({k: v for k, v in self.options.items() if v})\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n", "issue": "Responsive youtube embed\nI'm trying to set an embedded youtube link to 100% width in a .rst file. Is this possible?\r\n\r\nI've tried:\r\n\r\n```\r\n.. youtube:: 3XsQCkF1SrE\r\n :align: center\r\n :width: 100%\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"YouTube directive for reStructuredText.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom nikola.plugins.compile.rest import _align_choice, _align_options_base\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for the youtube directive.\"\"\"\n\n name = \"rest_youtube\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n directives.register_directive('youtube', Youtube)\n return super(Plugin, self).set_site(site)\n\n\nCODE = \"\"\"\\\n<div class=\"youtube-video{align}\">\n<iframe width=\"{width}\" height=\"{height}\"\nsrc=\"https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent\"\nframeborder=\"0\" allow=\"encrypted-media\" allowfullscreen\n></iframe>\n</div>\"\"\"\n\n\nclass Youtube(Directive):\n \"\"\"reST extension for inserting youtube embedded videos.\n\n Usage:\n .. 
youtube:: lyViVmaBQDg\n :height: 400\n :width: 600\n\n \"\"\"\n\n has_content = True\n required_arguments = 1\n option_spec = {\n \"width\": directives.positive_int,\n \"height\": directives.positive_int,\n \"align\": _align_choice\n }\n\n def run(self):\n \"\"\"Run the youtube directive.\"\"\"\n self.check_content()\n options = {\n 'yid': self.arguments[0],\n 'width': 560,\n 'height': 315,\n }\n options.update(self.options)\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n options['align'] = ''\n return [nodes.raw('', CODE.format(**options), format='html')]\n\n def check_content(self):\n \"\"\"Check if content exists.\"\"\"\n if self.content: # pragma: no cover\n raise self.warning(\"This directive does not accept content. The \"\n \"'key=value' format for options is deprecated, \"\n \"use ':key: value' instead\")\n", "path": "nikola/plugins/compile/rest/youtube.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2018 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"YouTube directive for reStructuredText.\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive, directives\nfrom nikola.plugins.compile.rest import _align_choice, _align_options_base\n\nfrom nikola.plugin_categories import RestExtension\n\n\nclass Plugin(RestExtension):\n \"\"\"Plugin for the youtube directive.\"\"\"\n\n name = \"rest_youtube\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.site = site\n directives.register_directive('youtube', Youtube)\n return super(Plugin, self).set_site(site)\n\n\nCODE = \"\"\"\\\n<div class=\"youtube-video{align}\">\n<iframe width=\"{width}\" height=\"{height}\"\nsrc=\"https://www.youtube-nocookie.com/embed/{yid}?rel=0&wmode=transparent\"\nframeborder=\"0\" allow=\"encrypted-media\" allowfullscreen\n></iframe>\n</div>\"\"\"\n\n\nclass Youtube(Directive):\n \"\"\"reST extension for inserting youtube embedded videos.\n\n Usage:\n .. 
youtube:: lyViVmaBQDg\n :height: 400\n :width: 600\n\n \"\"\"\n\n has_content = True\n required_arguments = 1\n option_spec = {\n \"width\": directives.unchanged,\n \"height\": directives.unchanged,\n \"align\": _align_choice\n }\n\n def run(self):\n \"\"\"Run the youtube directive.\"\"\"\n self.check_content()\n options = {\n 'yid': self.arguments[0],\n 'width': 560,\n 'height': 315,\n }\n options.update({k: v for k, v in self.options.items() if v})\n if self.options.get('align') in _align_options_base:\n options['align'] = ' align-' + self.options['align']\n else:\n options['align'] = ''\n return [nodes.raw('', CODE.format(**options), format='html')]\n\n def check_content(self):\n \"\"\"Check if content exists.\"\"\"\n if self.content: # pragma: no cover\n raise self.warning(\"This directive does not accept content. The \"\n \"'key=value' format for options is deprecated, \"\n \"use ':key: value' instead\")\n", "path": "nikola/plugins/compile/rest/youtube.py"}]} | 1,238 | 224 |
gh_patches_debug_334 | rasdani/github-patches | git_diff | searx__searx-2391 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SUGGESTION: Contacting the instance's maintainer(s)
Hello, so I use searx, but I personally think that there should be some way to contact the maintainer(s) of a public instance (email, for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s). 
--- END ISSUE ---
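One minimal way to act on this, sketched against the brand constants shown below, would be a single additional setting that the templates could render as a contact link. The constant name and the address here are placeholders chosen for illustration, not something the suggestion itself specifies:
```
# Sketch only: one extra constant in searx/brand.py; name and address are placeholders.
GIT_URL = 'https://github.com/searx/searx'
GIT_BRANCH = 'master'
ISSUE_URL = 'https://github.com/searx/searx/issues'
SEARX_URL = 'https://searx.me'
DOCS_URL = 'https://searx.github.io/searx'
PUBLIC_INSTANCES = 'https://searx.space'
CONTACT_URL = 'mailto:[email protected]'  # placeholder maintainer contact
```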
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/brand.py`
Content:
```
1 GIT_URL = 'https://github.com/searx/searx'
2 GIT_BRANCH = 'master'
3 ISSUE_URL = 'https://github.com/searx/searx/issues'
4 SEARX_URL = 'https://searx.me'
5 DOCS_URL = 'https://searx.github.io/searx'
6 PUBLIC_INSTANCES = 'https://searx.space'
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/brand.py b/searx/brand.py
--- a/searx/brand.py
+++ b/searx/brand.py
@@ -4,3 +4,4 @@
SEARX_URL = 'https://searx.me'
DOCS_URL = 'https://searx.github.io/searx'
PUBLIC_INSTANCES = 'https://searx.space'
+CONTACT_URL = 'mailto:[email protected]'
| {"golden_diff": "diff --git a/searx/brand.py b/searx/brand.py\n--- a/searx/brand.py\n+++ b/searx/brand.py\n@@ -4,3 +4,4 @@\n SEARX_URL = 'https://searx.me'\n DOCS_URL = 'https://searx.github.io/searx'\n PUBLIC_INSTANCES = 'https://searx.space'\n+CONTACT_URL = 'mailto:[email protected]'\n", "issue": "SUGGESTION: Contacting the instance's maintainer(s)\nHello, so I use searx, but I personally think that there should be any way to contact the maintainer(s) of a public instance (email for example). It is harder to trust this awesome service if there is no way to contact the maintainer(s). \r\n\n", "before_files": [{"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\n", "path": "searx/brand.py"}], "after_files": [{"content": "GIT_URL = 'https://github.com/searx/searx'\nGIT_BRANCH = 'master'\nISSUE_URL = 'https://github.com/searx/searx/issues'\nSEARX_URL = 'https://searx.me'\nDOCS_URL = 'https://searx.github.io/searx'\nPUBLIC_INSTANCES = 'https://searx.space'\nCONTACT_URL = 'mailto:[email protected]'\n", "path": "searx/brand.py"}]} | 417 | 98 |
gh_patches_debug_5157 | rasdani/github-patches | git_diff | python__peps-2090 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"PEP numbers never change" verbiage is not in PEP 1
## Problem
The introduction to PEP 0 references PEP 1 as a source for
> PEP numbers are assigned by the PEP editors, and once assigned are never changed [1].
But PEP 1 doesn't say anything about PEP numbers never changing.
## Research
While skimming PEP 1, I found information about the PEP editor assigning a number:
> Once the PEP is ready for the repository, a PEP editor will:
> Assign a PEP number (almost always just the next available number, but sometimes it's a special/joke number, like 666 or 3141). (Clarification: For Python 3, numbers in the 3000s were used for Py3k-specific proposals. But now that all new features go into Python 3 only, the process is back to using numbers in the 100s again. Remember that numbers below 100 are meta-PEPs.)
and
> The PEP editors are individuals responsible for managing the administrative and editorial aspects of the PEP workflow (e.g. assigning PEP numbers and changing their status). See PEP Editor Responsibilities & Workflow for details.
But I didn't find any reference to that number never changing.
## Proposal:
Can we change PEP 0's introduction so that the reference is specific to assigning numbers?
```
PEP numbers are assigned by the PEP editors[1], and once assigned are never changed.
```
## Link
https://github.com/python/peps/blob/40ef5625b7d42655f49090ffd2c0860ecf8d1d9f/pep0/constants.py#L22-L27
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pep0/constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 text_type = str
3 title_length = 55
4 author_length = 40
5 table_separator = "== ==== " + "="*title_length + " " + "="*author_length
6 column_format = (
7 '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'
8 ).format(title_length=title_length)
9
10 header = """\
11 PEP: 0
12 Title: Index of Python Enhancement Proposals (PEPs)
13 Version: N/A
14 Last-Modified: %s
15 Author: python-dev <[email protected]>
16 Status: Active
17 Type: Informational
18 Content-Type: text/x-rst
19 Created: 13-Jul-2000
20 """
21
22 intro = """\
23 This PEP contains the index of all Python Enhancement Proposals,
24 known as PEPs. PEP numbers are assigned by the PEP editors, and
25 once assigned are never changed [1_]. The version control history [2_] of
26 the PEP texts represent their historical record.
27 """
28
29 references = """\
30 .. [1] PEP 1: PEP Purpose and Guidelines
31 .. [2] View PEP history online: https://github.com/python/peps
32 """
33
34 footer = """\
35 ..
36 Local Variables:
37 mode: indented-text
38 indent-tabs-mode: nil
39 sentence-end-double-space: t
40 fill-column: 70
41 coding: utf-8
42 End:\
43 """
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pep0/constants.py b/pep0/constants.py
--- a/pep0/constants.py
+++ b/pep0/constants.py
@@ -21,8 +21,8 @@
intro = """\
This PEP contains the index of all Python Enhancement Proposals,
-known as PEPs. PEP numbers are assigned by the PEP editors, and
-once assigned are never changed [1_]. The version control history [2_] of
+known as PEPs. PEP numbers are assigned by the PEP editors[1_], and
+once assigned are never changed. The version control history [2_] of
the PEP texts represent their historical record.
"""
| {"golden_diff": "diff --git a/pep0/constants.py b/pep0/constants.py\n--- a/pep0/constants.py\n+++ b/pep0/constants.py\n@@ -21,8 +21,8 @@\n \n intro = \"\"\"\\\n This PEP contains the index of all Python Enhancement Proposals,\n-known as PEPs. PEP numbers are assigned by the PEP editors, and\n-once assigned are never changed [1_]. The version control history [2_] of\n+known as PEPs. PEP numbers are assigned by the PEP editors[1_], and\n+once assigned are never changed. The version control history [2_] of\n the PEP texts represent their historical record.\n \"\"\"\n", "issue": "\"PEP numbers never change\" verbiage is not in PEP 1\n## Problem\r\n\r\nThe introduction to PEP 0 references PEP 1 as a source for\r\n\r\n> PEP numbers are assigned by the PEP editors, and once assigned are never changed [1].\r\n\r\nBut PEP 1 doesn't say anything about PEP numbers never changing.\r\n\r\n## Research\r\n\r\nWhile skimming PEP 1, I found information about the PEP editor assigning a number:\r\n\r\n> Once the PEP is ready for the repository, a PEP editor will:\r\nAssign a PEP number (almost always just the next available number, but sometimes it's a special/joke number, like 666 or 3141). (Clarification: For Python 3, numbers in the 3000s were used for Py3k-specific proposals. But now that all new features go into Python 3 only, the process is back to using numbers in the 100s again. Remember that numbers below 100 are meta-PEPs.)\r\n\r\nand\r\n\r\n> The PEP editors are individuals responsible for managing the administrative and editorial aspects of the PEP workflow (e.g. assigning PEP numbers and changing their status). See PEP Editor Responsibilities & Workflow for details.\r\n\r\nBut I didn't find any reference to that number never changing. \r\n\r\n## Proposal:\r\n\r\nCan we change PEP 0's introduction so that the reference is specific to assigning numbers?\r\n\r\n```\r\nPEP numbers are assigned by the PEP editors[1], and once assigned are never changed.\r\n```\r\n\r\n## Link\r\n\r\nhttps://github.com/python/peps/blob/40ef5625b7d42655f49090ffd2c0860ecf8d1d9f/pep0/constants.py#L22-L27\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\ntext_type = str\ntitle_length = 55\nauthor_length = 40\ntable_separator = \"== ==== \" + \"=\"*title_length + \" \" + \"=\"*author_length\ncolumn_format = (\n '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'\n).format(title_length=title_length)\n\nheader = \"\"\"\\\nPEP: 0\nTitle: Index of Python Enhancement Proposals (PEPs)\nVersion: N/A\nLast-Modified: %s\nAuthor: python-dev <[email protected]>\nStatus: Active\nType: Informational\nContent-Type: text/x-rst\nCreated: 13-Jul-2000\n\"\"\"\n\nintro = \"\"\"\\\nThis PEP contains the index of all Python Enhancement Proposals,\nknown as PEPs. PEP numbers are assigned by the PEP editors, and\nonce assigned are never changed [1_]. The version control history [2_] of\nthe PEP texts represent their historical record.\n\"\"\"\n\nreferences = \"\"\"\\\n.. [1] PEP 1: PEP Purpose and Guidelines\n.. 
[2] View PEP history online: https://github.com/python/peps\n\"\"\"\n\nfooter = \"\"\"\f\\\n..\n Local Variables:\n mode: indented-text\n indent-tabs-mode: nil\n sentence-end-double-space: t\n fill-column: 70\n coding: utf-8\n End:\\\n\"\"\"\n", "path": "pep0/constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\ntext_type = str\ntitle_length = 55\nauthor_length = 40\ntable_separator = \"== ==== \" + \"=\"*title_length + \" \" + \"=\"*author_length\ncolumn_format = (\n '%(type)1s%(status)1s %(number)4s %(title)-{title_length}s %(authors)-s'\n).format(title_length=title_length)\n\nheader = \"\"\"\\\nPEP: 0\nTitle: Index of Python Enhancement Proposals (PEPs)\nVersion: N/A\nLast-Modified: %s\nAuthor: python-dev <[email protected]>\nStatus: Active\nType: Informational\nContent-Type: text/x-rst\nCreated: 13-Jul-2000\n\"\"\"\n\nintro = \"\"\"\\\nThis PEP contains the index of all Python Enhancement Proposals,\nknown as PEPs. PEP numbers are assigned by the PEP editors[1_], and\nonce assigned are never changed. The version control history [2_] of\nthe PEP texts represent their historical record.\n\"\"\"\n\nreferences = \"\"\"\\\n.. [1] PEP 1: PEP Purpose and Guidelines\n.. [2] View PEP history online: https://github.com/python/peps\n\"\"\"\n\nfooter = \"\"\"\f\\\n..\n Local Variables:\n mode: indented-text\n indent-tabs-mode: nil\n sentence-end-double-space: t\n fill-column: 70\n coding: utf-8\n End:\\\n\"\"\"\n", "path": "pep0/constants.py"}]} | 1,055 | 157 |
gh_patches_debug_13686 | rasdani/github-patches | git_diff | cobbler__cobbler-3649 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SELinux issue when service is restarted
This issue was initially reported at
https://bugzilla.redhat.com/show_bug.cgi?id=1754430
There is a SELinux denial when the cobblerd service is restarted because of the permissions of the web.ss file and the other webui_sessions files.
I'm not sure whether this is 3.0.x only or whether it also exists in 2.8.x, but there is a need to understand why cobblerd (uid root) tries to read these files...
Of course it can probably be fixed by using 640 permissions on these files (to be tested), but that just works around the problem.
--- END ISSUE ---
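For context on the workaround mentioned above: restricting such a file usually means creating it with a 0640 mode and giving group ownership to the web server account, so only root can write and only that group can read. The sketch below shows the conventional Python calls for that; the path, the mode and the "apache" group name are illustrative assumptions, not Cobbler's confirmed fix:
```
# Sketch: create a token file readable only by root and the web server group.
# The path, the 0o640 mode and the "apache" group name are assumptions.
import grp
import os

TOKEN_PATH = "/var/lib/cobbler/web.ss"

def write_token(data: bytes, group: str = "apache") -> None:
    # os.open sets the permission bits at creation time (subject to umask).
    fd = os.open(TOKEN_PATH, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o640)
    with os.fdopen(fd, "wb") as fh:
        fh.write(data)
    # Keep root as owner; the web server only needs group read access.
    os.chown(TOKEN_PATH, 0, grp.getgrnam(group).gr_gid)
```
Whether 0640 plus the right group actually silences the SELinux denial also depends on the file's security context, so the "(to be tested)" caveat above still applies.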
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cobbler/cobblerd.py`
Content:
```
1 """
2 Cobbler daemon for logging remote syslog traffic during automatic installation
3
4 Copyright 2007-2009, Red Hat, Inc and Others
5 Michael DeHaan <michael.dehaan AT gmail>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 02110-1301 USA
21 """
22
23 import binascii
24 import os
25 import pwd
26 import time
27
28 from cobbler import remote
29 from cobbler import utils
30 from cobbler.api import CobblerAPI
31
32
33 def core(cobbler_api: CobblerAPI):
34 """
35 Starts Cobbler.
36
37 :param cobbler_api: The cobbler_api instance which is used for this method.
38 """
39 settings = cobbler_api.settings()
40 xmlrpc_port = settings.xmlrpc_port
41
42 regen_ss_file()
43 do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)
44
45
46 def regen_ss_file():
47 """
48 This is only used for Kerberos auth at the moment. It identifies XMLRPC requests from Apache that have already been
49 cleared by Kerberos.
50 """
51 ssfile = "/var/lib/cobbler/web.ss"
52 with open("/dev/urandom", 'rb') as fd:
53 data = fd.read(512)
54
55 with open(ssfile, 'wb', 0o660) as fd:
56 fd.write(binascii.hexlify(data))
57
58 http_user = "apache"
59 family = utils.get_family()
60 if family == "debian":
61 http_user = "www-data"
62 elif family == "suse":
63 http_user = "wwwrun"
64 os.lchown("/var/lib/cobbler/web.ss", pwd.getpwnam(http_user)[2], -1)
65
66
67 def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):
68 """
69 This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.
70
71 :param cobbler_api: The cobbler_api instance which is used for this method.
72 :param settings: The Cobbler settings instance which is used for this method.
73 :param port: The port where the xmlrpc api should run on.
74 """
75 xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)
76 server = remote.CobblerXMLRPCServer(('127.0.0.1', port))
77 server.logRequests = 0 # don't print stuff
78 xinterface.logger.debug("XMLRPC running on %s" % port)
79 server.register_instance(xinterface)
80
81 while True:
82 try:
83 print("SERVING!")
84 server.serve_forever()
85 except IOError:
86 # interrupted? try to serve again
87 time.sleep(0.5)
88
89
90 if __name__ == "__main__":
91 core(CobblerAPI())
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cobbler/cobblerd.py b/cobbler/cobblerd.py
--- a/cobbler/cobblerd.py
+++ b/cobbler/cobblerd.py
@@ -52,7 +52,7 @@
with open("/dev/urandom", 'rb') as fd:
data = fd.read(512)
- with open(ssfile, 'wb', 0o660) as fd:
+ with open(ssfile, 'wb', 0o640) as fd:
fd.write(binascii.hexlify(data))
http_user = "apache"
@@ -61,7 +61,7 @@
http_user = "www-data"
elif family == "suse":
http_user = "wwwrun"
- os.lchown("/var/lib/cobbler/web.ss", pwd.getpwnam(http_user)[2], -1)
+ os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])
def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):
| {"golden_diff": "diff --git a/cobbler/cobblerd.py b/cobbler/cobblerd.py\n--- a/cobbler/cobblerd.py\n+++ b/cobbler/cobblerd.py\n@@ -52,7 +52,7 @@\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n \n- with open(ssfile, 'wb', 0o660) as fd:\n+ with open(ssfile, 'wb', 0o640) as fd:\n fd.write(binascii.hexlify(data))\n \n http_user = \"apache\"\n@@ -61,7 +61,7 @@\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n- os.lchown(\"/var/lib/cobbler/web.ss\", pwd.getpwnam(http_user)[2], -1)\n+ os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])\n \n \n def do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n", "issue": "SELinux issue when service is restarted\nThis issue was initially reported at\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=1754430\r\n\r\nThere is a SELinux denial when the cobblerd service is restarted because of the permissions of the web.ss and others webui_sessions files.\r\n\r\nI'm not sure to understand if this is 3.0.x only or also only exists in 2.8.x, but for me there is a need to understand why cobblerd (uid root) tries to read theses files...\r\nOf course it can probably be fixed by using 640 perm on theses files. (to be tested) but it just workaround the problem.\n", "before_files": [{"content": "\"\"\"\nCobbler daemon for logging remote syslog traffic during automatic installation\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport binascii\nimport os\nimport pwd\nimport time\n\nfrom cobbler import remote\nfrom cobbler import utils\nfrom cobbler.api import CobblerAPI\n\n\ndef core(cobbler_api: CobblerAPI):\n \"\"\"\n Starts Cobbler.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n \"\"\"\n settings = cobbler_api.settings()\n xmlrpc_port = settings.xmlrpc_port\n\n regen_ss_file()\n do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)\n\n\ndef regen_ss_file():\n \"\"\"\n This is only used for Kerberos auth at the moment. 
It identifies XMLRPC requests from Apache that have already been\n cleared by Kerberos.\n \"\"\"\n ssfile = \"/var/lib/cobbler/web.ss\"\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n\n with open(ssfile, 'wb', 0o660) as fd:\n fd.write(binascii.hexlify(data))\n\n http_user = \"apache\"\n family = utils.get_family()\n if family == \"debian\":\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n os.lchown(\"/var/lib/cobbler/web.ss\", pwd.getpwnam(http_user)[2], -1)\n\n\ndef do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n \"\"\"\n This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n :param settings: The Cobbler settings instance which is used for this method.\n :param port: The port where the xmlrpc api should run on.\n \"\"\"\n xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)\n server = remote.CobblerXMLRPCServer(('127.0.0.1', port))\n server.logRequests = 0 # don't print stuff\n xinterface.logger.debug(\"XMLRPC running on %s\" % port)\n server.register_instance(xinterface)\n\n while True:\n try:\n print(\"SERVING!\")\n server.serve_forever()\n except IOError:\n # interrupted? try to serve again\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n core(CobblerAPI())\n", "path": "cobbler/cobblerd.py"}], "after_files": [{"content": "\"\"\"\nCobbler daemon for logging remote syslog traffic during automatic installation\n\nCopyright 2007-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport binascii\nimport os\nimport pwd\nimport time\n\nfrom cobbler import remote\nfrom cobbler import utils\nfrom cobbler.api import CobblerAPI\n\n\ndef core(cobbler_api: CobblerAPI):\n \"\"\"\n Starts Cobbler.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n \"\"\"\n settings = cobbler_api.settings()\n xmlrpc_port = settings.xmlrpc_port\n\n regen_ss_file()\n do_xmlrpc_rw(cobbler_api, settings, xmlrpc_port)\n\n\ndef regen_ss_file():\n \"\"\"\n This is only used for Kerberos auth at the moment. 
It identifies XMLRPC requests from Apache that have already been\n cleared by Kerberos.\n \"\"\"\n ssfile = \"/var/lib/cobbler/web.ss\"\n with open(\"/dev/urandom\", 'rb') as fd:\n data = fd.read(512)\n\n with open(ssfile, 'wb', 0o640) as fd:\n fd.write(binascii.hexlify(data))\n\n http_user = \"apache\"\n family = utils.get_family()\n if family == \"debian\":\n http_user = \"www-data\"\n elif family == \"suse\":\n http_user = \"wwwrun\"\n os.lchown(ssfile, 0, pwd.getpwnam(http_user)[3])\n\n\ndef do_xmlrpc_rw(cobbler_api: CobblerAPI, settings, port):\n \"\"\"\n This trys to bring up the Cobbler xmlrpc_api and restart it if it fails.\n\n :param cobbler_api: The cobbler_api instance which is used for this method.\n :param settings: The Cobbler settings instance which is used for this method.\n :param port: The port where the xmlrpc api should run on.\n \"\"\"\n xinterface = remote.ProxiedXMLRPCInterface(cobbler_api, remote.CobblerXMLRPCInterface)\n server = remote.CobblerXMLRPCServer(('127.0.0.1', port))\n server.logRequests = 0 # don't print stuff\n xinterface.logger.debug(\"XMLRPC running on %s\" % port)\n server.register_instance(xinterface)\n\n while True:\n try:\n print(\"SERVING!\")\n server.serve_forever()\n except IOError:\n # interrupted? try to serve again\n time.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n core(CobblerAPI())\n", "path": "cobbler/cobblerd.py"}]} | 1,340 | 244 |
gh_patches_debug_25206 | rasdani/github-patches | git_diff | ansible__ansible-modules-extras-1049 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
About option of win_package
I found two issues.
I think the Product-ID parameter is not "product_id"; shouldn't it be "productid"?
Also, it seems the parameter is actually required (the "required" column should say "yes").
```
fatal: [10.1.1.6]: FAILED! => {"changed": false, "failed": true, "msg": "Missing required argument: productid"
```
Therefore, the example below from the win_package documentation is mistaken about "ProductId":
```
# Playbook example
- name: Install the vc thingy
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `windows/win_package.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2014, Trond Hindenes <[email protected]>, and others
5 #
6 # This file is part of Ansible
7 #
8 # Ansible is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # Ansible is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the GNU General Public License
19 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
20
21 # this is a windows documentation stub. actual code lives in the .ps1
22 # file of the same name
23
24 DOCUMENTATION = '''
25 ---
26 module: win_package
27 version_added: "1.7"
28 short_description: Installs/Uninstalls a installable package, either from local file system or url
29 description:
30 - Installs or uninstalls a package
31 options:
32 path:
33 description:
34 - Location of the package to be installed (either on file system, network share or url)
35 required: true
36 default: null
37 aliases: []
38 name:
39 description:
40 - name of the package. Just for logging reasons, will use the value of path if name isn't specified
41 required: false
42 default: null
43 aliases: []
44 product_id:
45 description:
46 - product id of the installed package (used for checking if already installed)
47 required: false
48 default: null
49 aliases: []
50 arguments:
51 description:
52 - Any arguments the installer needs
53 default: null
54 aliases: []
55 state:
56 description:
57 - Install or Uninstall
58 choices:
59 - present
60 - absent
61 default: present
62 aliases: [ensure]
63 user_name:
64 description:
65 - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
66 default: null
67 aliases: []
68 user_password:
69 description:
70 - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
71 default: null
72 aliases: []
73 author: Trond Hindenes
74 '''
75
76 EXAMPLES = '''
77 # Playbook example
78 - name: Install the vc thingy
79 win_package:
80 name="Microsoft Visual C thingy"
81 path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
82 ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
83 Arguments="/install /passive /norestart"
84
85
86 '''
87
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/windows/win_package.py b/windows/win_package.py
--- a/windows/win_package.py
+++ b/windows/win_package.py
@@ -25,6 +25,7 @@
---
module: win_package
version_added: "1.7"
+author: Trond Hindenes
short_description: Installs/Uninstalls a installable package, either from local file system or url
description:
- Installs or uninstalls a package
@@ -44,9 +45,9 @@
product_id:
description:
- product id of the installed package (used for checking if already installed)
- required: false
+ required: true
default: null
- aliases: []
+ aliases: [productid]
arguments:
description:
- Any arguments the installer needs
@@ -79,7 +80,7 @@
win_package:
name="Microsoft Visual C thingy"
path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
- ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
+ Product_Id="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
Arguments="/install /passive /norestart"
| {"golden_diff": "diff --git a/windows/win_package.py b/windows/win_package.py\n--- a/windows/win_package.py\n+++ b/windows/win_package.py\n@@ -25,6 +25,7 @@\n ---\n module: win_package\n version_added: \"1.7\"\n+author: Trond Hindenes\n short_description: Installs/Uninstalls a installable package, either from local file system or url\n description:\n - Installs or uninstalls a package\n@@ -44,9 +45,9 @@\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n- required: false\n+ required: true\n default: null\n- aliases: []\n+ aliases: [productid]\n arguments:\n description:\n - Any arguments the installer needs\n@@ -79,7 +80,7 @@\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n- ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n+ Product_Id=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n", "issue": "About option of win_package\nI found two issue.\n\nI think Product-ID parameter is not \"product_id\" , is it \"productid\"?\nAlso , it seems the required columns is \"yes\".\n\n```\nfatal: [10.1.1.6]: FAILED! => {\"changed\": false, \"failed\": true, \"msg\": \"Missing required argument: productid\"\n```\n\nTherefore , it take a mistake about \"ProductId\" below an example on document of win_package:\n\n```\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Trond Hindenes <[email protected]>, and others\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# this is a windows documentation stub. actual code lives in the .ps1\n# file of the same name\n\nDOCUMENTATION = '''\n---\nmodule: win_package\nversion_added: \"1.7\"\nshort_description: Installs/Uninstalls a installable package, either from local file system or url\ndescription:\n - Installs or uninstalls a package\noptions:\n path:\n description:\n - Location of the package to be installed (either on file system, network share or url)\n required: true\n default: null\n aliases: []\n name:\n description:\n - name of the package. 
Just for logging reasons, will use the value of path if name isn't specified\n required: false\n default: null\n aliases: []\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n required: false\n default: null\n aliases: []\n arguments:\n description:\n - Any arguments the installer needs\n default: null\n aliases: []\n state:\n description:\n - Install or Uninstall\n choices:\n - present\n - absent\n default: present\n aliases: [ensure]\n user_name:\n description:\n - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.\n default: null\n aliases: []\n user_password:\n description:\n - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.\n default: null\n aliases: []\nauthor: Trond Hindenes\n'''\n\nEXAMPLES = '''\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n ProductId=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n\n\n'''\n\n", "path": "windows/win_package.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2014, Trond Hindenes <[email protected]>, and others\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\n# this is a windows documentation stub. actual code lives in the .ps1\n# file of the same name\n\nDOCUMENTATION = '''\n---\nmodule: win_package\nversion_added: \"1.7\"\nauthor: Trond Hindenes\nshort_description: Installs/Uninstalls a installable package, either from local file system or url\ndescription:\n - Installs or uninstalls a package\noptions:\n path:\n description:\n - Location of the package to be installed (either on file system, network share or url)\n required: true\n default: null\n aliases: []\n name:\n description:\n - name of the package. Just for logging reasons, will use the value of path if name isn't specified\n required: false\n default: null\n aliases: []\n product_id:\n description:\n - product id of the installed package (used for checking if already installed)\n required: true\n default: null\n aliases: [productid]\n arguments:\n description:\n - Any arguments the installer needs\n default: null\n aliases: []\n state:\n description:\n - Install or Uninstall\n choices:\n - present\n - absent\n default: present\n aliases: [ensure]\n user_name:\n description:\n - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. 
Also specify user_password for this to function properly.\n default: null\n aliases: []\n user_password:\n description:\n - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.\n default: null\n aliases: []\nauthor: Trond Hindenes\n'''\n\nEXAMPLES = '''\n# Playbook example\n - name: Install the vc thingy\n win_package:\n name=\"Microsoft Visual C thingy\"\n path=\"http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe\"\n Product_Id=\"{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}\"\n Arguments=\"/install /passive /norestart\"\n\n\n'''\n\n", "path": "windows/win_package.py"}]} | 1,388 | 345 |
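The diff just above makes `product_id` mandatory and accepts `productid` as an alias, which is what the error message in the issue was asking for. win_package itself is implemented in PowerShell (the Python file is only a documentation stub), so the snippet below is purely an illustrative sketch of how that contract would read in a Python-based Ansible module; the option names mirror the documentation above, but none of this is the actual win_package implementation:

```python
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='str', required=True),
            name=dict(type='str'),
            # Required, with the legacy spelling accepted as an alias.
            product_id=dict(type='str', required=True, aliases=['productid']),
            arguments=dict(type='str'),
            state=dict(type='str', default='present',
                       choices=['present', 'absent'], aliases=['ensure']),
        ),
    )
    module.exit_json(changed=False,
                     msg="would handle package %s" % module.params['product_id'])


if __name__ == '__main__':
    main()
```

With the alias in place, both the `product_id=` and the older `productid=` spellings validate, while omitting the value fails early with a clear "missing required argument" message instead of the confusing one quoted in the issue.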
gh_patches_debug_2029 | rasdani/github-patches | git_diff | netbox-community__netbox-15568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in Tag model
### Deployment Type
Self-hosted
### NetBox Version
v3.7.4
### Python Version
3.8
### Steps to Reproduce
Typo in help_text where "this" is mistakenly repeated.
https://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40
### Expected Behavior
The object type(s) to which this tag can be applied.
### Observed Behavior
The object type(s) to which this this tag can be applied.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/extras/models/tags.py`
Content:
```
1 from django.conf import settings
2 from django.db import models
3 from django.urls import reverse
4 from django.utils.text import slugify
5 from django.utils.translation import gettext_lazy as _
6 from taggit.models import TagBase, GenericTaggedItemBase
7
8 from netbox.models import ChangeLoggedModel
9 from netbox.models.features import CloningMixin, ExportTemplatesMixin
10 from utilities.choices import ColorChoices
11 from utilities.fields import ColorField
12
13 __all__ = (
14 'Tag',
15 'TaggedItem',
16 )
17
18
19 #
20 # Tags
21 #
22
23 class Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):
24 id = models.BigAutoField(
25 primary_key=True
26 )
27 color = ColorField(
28 verbose_name=_('color'),
29 default=ColorChoices.COLOR_GREY
30 )
31 description = models.CharField(
32 verbose_name=_('description'),
33 max_length=200,
34 blank=True,
35 )
36 object_types = models.ManyToManyField(
37 to='contenttypes.ContentType',
38 related_name='+',
39 blank=True,
40 help_text=_("The object type(s) to which this this tag can be applied.")
41 )
42
43 clone_fields = (
44 'color', 'description', 'object_types',
45 )
46
47 class Meta:
48 ordering = ['name']
49 verbose_name = _('tag')
50 verbose_name_plural = _('tags')
51
52 def get_absolute_url(self):
53 return reverse('extras:tag', args=[self.pk])
54
55 @property
56 def docs_url(self):
57 return f'{settings.STATIC_URL}docs/models/extras/tag/'
58
59 def slugify(self, tag, i=None):
60 # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)
61 slug = slugify(tag, allow_unicode=True)
62 if i is not None:
63 slug += "_%d" % i
64 return slug
65
66
67 class TaggedItem(GenericTaggedItemBase):
68 tag = models.ForeignKey(
69 to=Tag,
70 related_name="%(app_label)s_%(class)s_items",
71 on_delete=models.CASCADE
72 )
73
74 _netbox_private = True
75
76 class Meta:
77 indexes = [models.Index(fields=["content_type", "object_id"])]
78 verbose_name = _('tagged item')
79 verbose_name_plural = _('tagged items')
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py
--- a/netbox/extras/models/tags.py
+++ b/netbox/extras/models/tags.py
@@ -37,7 +37,7 @@
to='contenttypes.ContentType',
related_name='+',
blank=True,
- help_text=_("The object type(s) to which this this tag can be applied.")
+ help_text=_("The object type(s) to which this tag can be applied.")
)
clone_fields = (
| {"golden_diff": "diff --git a/netbox/extras/models/tags.py b/netbox/extras/models/tags.py\n--- a/netbox/extras/models/tags.py\n+++ b/netbox/extras/models/tags.py\n@@ -37,7 +37,7 @@\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n- help_text=_(\"The object type(s) to which this this tag can be applied.\")\n+ help_text=_(\"The object type(s) to which this tag can be applied.\")\n )\n \n clone_fields = (\n", "issue": "Typo in Tag model\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.4\n\n### Python Version\n\n3.8\n\n### Steps to Reproduce\n\nTypo in help_text where \"this\" is mistakenly repeated.\r\n\r\nhttps://github.com/netbox-community/netbox/blob/69c0aac1051015660133b2ae3c86607dabd8084b/netbox/extras/models/tags.py#L40\n\n### Expected Behavior\n\nThe object type(s) to which this tag can be applied.\n\n### Observed Behavior\n\nThe object type(s) to which this this tag can be applied.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, ChangeLoggedModel, TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.models import TagBase, GenericTaggedItemBase\n\nfrom netbox.models import ChangeLoggedModel\nfrom netbox.models.features import CloningMixin, ExportTemplatesMixin\nfrom utilities.choices import ColorChoices\nfrom utilities.fields import ColorField\n\n__all__ = (\n 'Tag',\n 'TaggedItem',\n)\n\n\n#\n# Tags\n#\n\nclass Tag(CloningMixin, ExportTemplatesMixin, 
ChangeLoggedModel, TagBase):\n id = models.BigAutoField(\n primary_key=True\n )\n color = ColorField(\n verbose_name=_('color'),\n default=ColorChoices.COLOR_GREY\n )\n description = models.CharField(\n verbose_name=_('description'),\n max_length=200,\n blank=True,\n )\n object_types = models.ManyToManyField(\n to='contenttypes.ContentType',\n related_name='+',\n blank=True,\n help_text=_(\"The object type(s) to which this tag can be applied.\")\n )\n\n clone_fields = (\n 'color', 'description', 'object_types',\n )\n\n class Meta:\n ordering = ['name']\n verbose_name = _('tag')\n verbose_name_plural = _('tags')\n\n def get_absolute_url(self):\n return reverse('extras:tag', args=[self.pk])\n\n @property\n def docs_url(self):\n return f'{settings.STATIC_URL}docs/models/extras/tag/'\n\n def slugify(self, tag, i=None):\n # Allow Unicode in Tag slugs (avoids empty slugs for Tags with all-Unicode names)\n slug = slugify(tag, allow_unicode=True)\n if i is not None:\n slug += \"_%d\" % i\n return slug\n\n\nclass TaggedItem(GenericTaggedItemBase):\n tag = models.ForeignKey(\n to=Tag,\n related_name=\"%(app_label)s_%(class)s_items\",\n on_delete=models.CASCADE\n )\n\n _netbox_private = True\n\n class Meta:\n indexes = [models.Index(fields=[\"content_type\", \"object_id\"])]\n verbose_name = _('tagged item')\n verbose_name_plural = _('tagged items')\n", "path": "netbox/extras/models/tags.py"}]} | 1,049 | 113 |
gh_patches_debug_2293 | rasdani/github-patches | git_diff | inventree__InvenTree-4285 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Increase worker-timeout to account for install times
I might have another instance of the same worker-timeout-loop during startup to add to the issue. My docker production installation at InvenTree:latest is running on a Raspberry Pi 3B+.
The loop occured after I had added the `inventree-brother-plugin` to `plugins.txt` - the initial plugin installation took too long during startup so that the worker processes timed out and were constantly restartet.
My "solution" was to increase the gunicorn timeout variable in the `.env` file to
```
# Options for gunicorn server
INVENTREE_GUNICORN_TIMEOUT=60
```
but maybe actions like pip installs should somehow generally not count against the worker timeout? (I'm not sure about the technical internals on this one at the moment...)
_Originally posted by @simonkuehling in https://github.com/inventree/InvenTree/issues/4180#issuecomment-1410348943_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/gunicorn.conf.py`
Content:
```
1 """Gunicorn configuration for InvenTree."""
2
3 import logging
4 import multiprocessing
5 import os
6
7 # Logger configuration
8 logger = logging.getLogger('inventree')
9 accesslog = '-'
10 errorlog = '-'
11 loglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()
12 capture_output = True
13
14 # Worker configuration
15 # TODO: Implement support for gevent
16 # worker_class = 'gevent' # Allow multi-threading support
17 worker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)
18 threads = 4
19
20
21 # Worker timeout (default = 30 seconds)
22 timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)
23
24 # Number of worker processes
25 workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)
26
27 if workers is not None:
28 try:
29 workers = int(workers)
30 except ValueError:
31 workers = None
32
33 if workers is None:
34 workers = multiprocessing.cpu_count() * 2 + 1
35
36 logger.info(f"Starting gunicorn server with {workers} workers")
37
38 max_requests = 1000
39 max_requests_jitter = 50
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/gunicorn.conf.py b/docker/gunicorn.conf.py
--- a/docker/gunicorn.conf.py
+++ b/docker/gunicorn.conf.py
@@ -18,8 +18,8 @@
threads = 4
-# Worker timeout (default = 30 seconds)
-timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)
+# Worker timeout (default = 90 seconds)
+timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)
# Number of worker processes
workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)
| {"golden_diff": "diff --git a/docker/gunicorn.conf.py b/docker/gunicorn.conf.py\n--- a/docker/gunicorn.conf.py\n+++ b/docker/gunicorn.conf.py\n@@ -18,8 +18,8 @@\n threads = 4\n \n \n-# Worker timeout (default = 30 seconds)\n-timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)\n+# Worker timeout (default = 90 seconds)\n+timeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)\n \n # Number of worker processes\n workers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n", "issue": "[BUG] Increase worker-timeout to account for install times\n I might have another instance of the same worker-timeout-loop during startup to add to the issue. My docker production installation at InvenTree:latest is running on a Raspberry Pi 3B+.\r\nThe loop occured after I had added the `inventree-brother-plugin` to `plugins.txt` - the initial plugin installation took too long during startup so that the worker processes timed out and were constantly restartet.\r\n\r\nMy \"solution\" was to increase the gunicorn timeout variable in the `.env` file to\r\n```\r\n# Options for gunicorn server\r\nINVENTREE_GUNICORN_TIMEOUT=60\r\n```\r\nbut maybe actions like pip installs should somehow generally not count against the worker timeout? (I'm not sure about the technical internals on this one at the moment...)\r\n\r\n_Originally posted by @simonkuehling in https://github.com/inventree/InvenTree/issues/4180#issuecomment-1410348943_\r\n \n", "before_files": [{"content": "\"\"\"Gunicorn configuration for InvenTree.\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\n\n# Logger configuration\nlogger = logging.getLogger('inventree')\naccesslog = '-'\nerrorlog = '-'\nloglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()\ncapture_output = True\n\n# Worker configuration\n# TODO: Implement support for gevent\n# worker_class = 'gevent' # Allow multi-threading support\nworker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)\nthreads = 4\n\n\n# Worker timeout (default = 30 seconds)\ntimeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 30)\n\n# Number of worker processes\nworkers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n\nif workers is not None:\n try:\n workers = int(workers)\n except ValueError:\n workers = None\n\nif workers is None:\n workers = multiprocessing.cpu_count() * 2 + 1\n\nlogger.info(f\"Starting gunicorn server with {workers} workers\")\n\nmax_requests = 1000\nmax_requests_jitter = 50\n", "path": "docker/gunicorn.conf.py"}], "after_files": [{"content": "\"\"\"Gunicorn configuration for InvenTree.\"\"\"\n\nimport logging\nimport multiprocessing\nimport os\n\n# Logger configuration\nlogger = logging.getLogger('inventree')\naccesslog = '-'\nerrorlog = '-'\nloglevel = os.environ.get('INVENTREE_LOG_LEVEL', 'warning').lower()\ncapture_output = True\n\n# Worker configuration\n# TODO: Implement support for gevent\n# worker_class = 'gevent' # Allow multi-threading support\nworker_tmp_dir = '/dev/shm' # Write temp file to RAM (faster)\nthreads = 4\n\n\n# Worker timeout (default = 90 seconds)\ntimeout = os.environ.get('INVENTREE_GUNICORN_TIMEOUT', 90)\n\n# Number of worker processes\nworkers = os.environ.get('INVENTREE_GUNICORN_WORKERS', None)\n\nif workers is not None:\n try:\n workers = int(workers)\n except ValueError:\n workers = None\n\nif workers is None:\n workers = multiprocessing.cpu_count() * 2 + 1\n\nlogger.info(f\"Starting gunicorn server with {workers} workers\")\n\nmax_requests = 1000\nmax_requests_jitter = 50\n", "path": "docker/gunicorn.conf.py"}]} | 809 | 
138 |
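The patch above only raises the default from 30 to 90 seconds. The same file already coerces `INVENTREE_GUNICORN_WORKERS` defensively with a try/except, and the timeout could be read the same way so a malformed value in `.env` falls back cleanly rather than being passed through as a string; a small sketch, assuming the same variable name:

```python
import os

# Worker timeout (default = 90 seconds); fall back if the value is not an integer.
_DEFAULT_TIMEOUT = 90
try:
    timeout = int(os.environ.get('INVENTREE_GUNICORN_TIMEOUT', _DEFAULT_TIMEOUT))
except ValueError:
    timeout = _DEFAULT_TIMEOUT
```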
gh_patches_debug_35648 | rasdani/github-patches | git_diff | searxng__searxng-2747 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[FEATURE REQUEST] language filtering and safe search with odysee
**Is your feature request related to a problem? Please describe.**
No
**Describe the solution you'd like**
We can use, for example, language=de in the search URL. en, de-DE, and en-US also seem to work. There is no list of supported languages afaik, we just need to try things out one by one.
for safe search Moderate/Strict we should use nsfw=false in the URL
**Additional context**
The information that you need for this is here: https://github.com/searx/searx/issues/2504
----
Related
- https://github.com/searxng/searxng/pull/2656
- https://github.com/searxng/searxng/issues/590
- [lbr command line](https://gitlab.com/gardenappl/lbt/-/blob/main/lbt?ref_type=heads)
- [LBRY SDK ](https://github.com/lbryio/lbry-sdk/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/odysee.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Odysee_ is a decentralised video hosting platform.
4
5 .. _Odysee: https://github.com/OdyseeTeam/odysee-frontend
6 """
7
8 import time
9 from urllib.parse import urlencode
10 from datetime import datetime
11
12 # Engine metadata
13 about = {
14 "website": "https://odysee.com/",
15 "wikidata_id": "Q102046570",
16 "official_api_documentation": None,
17 "use_official_api": False,
18 "require_api_key": False,
19 "results": "JSON",
20 }
21
22 # Engine configuration
23 paging = True
24 results_per_page = 20
25 categories = ['videos']
26
27 # Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)
28 base_url = "https://lighthouse.odysee.tv/search"
29
30
31 def request(query, params):
32 start_index = (params["pageno"] - 1) * results_per_page
33 query_params = {
34 "s": query,
35 "size": results_per_page,
36 "from": start_index,
37 "include": "channel,thumbnail_url,title,description,duration,release_time",
38 "mediaType": "video",
39 }
40
41 params["url"] = f"{base_url}?{urlencode(query_params)}"
42 return params
43
44
45 # Format the video duration
46 def format_duration(duration):
47 seconds = int(duration)
48 length = time.gmtime(seconds)
49 if length.tm_hour:
50 return time.strftime("%H:%M:%S", length)
51 return time.strftime("%M:%S", length)
52
53
54 def response(resp):
55 data = resp.json()
56 results = []
57
58 for item in data:
59 name = item["name"]
60 claim_id = item["claimId"]
61 title = item["title"]
62 thumbnail_url = item["thumbnail_url"]
63 description = item["description"] or ""
64 channel = item["channel"]
65 release_time = item["release_time"]
66 duration = item["duration"]
67
68 release_date = datetime.strptime(release_time.split("T")[0], "%Y-%m-%d")
69 formatted_date = datetime.utcfromtimestamp(release_date.timestamp())
70
71 url = f"https://odysee.com/{name}:{claim_id}"
72 iframe_url = f"https://odysee.com/$/embed/{name}:{claim_id}"
73 odysee_thumbnail = f"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}"
74 formatted_duration = format_duration(duration)
75
76 results.append(
77 {
78 "title": title,
79 "url": url,
80 "content": description,
81 "author": channel,
82 "publishedDate": formatted_date,
83 "length": formatted_duration,
84 "thumbnail": odysee_thumbnail,
85 "iframe_src": iframe_url,
86 "template": "videos.html",
87 }
88 )
89
90 return results
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/odysee.py b/searx/engines/odysee.py
--- a/searx/engines/odysee.py
+++ b/searx/engines/odysee.py
@@ -9,6 +9,14 @@
from urllib.parse import urlencode
from datetime import datetime
+import babel
+
+from searx.network import get
+from searx.locales import language_tag
+from searx.enginelib.traits import EngineTraits
+
+traits: EngineTraits
+
# Engine metadata
about = {
"website": "https://odysee.com/",
@@ -21,6 +29,7 @@
# Engine configuration
paging = True
+time_range_support = True
results_per_page = 20
categories = ['videos']
@@ -29,6 +38,13 @@
def request(query, params):
+ time_range_dict = {
+ "day": "today",
+ "week": "thisweek",
+ "month": "thismonth",
+ "year": "thisyear",
+ }
+
start_index = (params["pageno"] - 1) * results_per_page
query_params = {
"s": query,
@@ -38,6 +54,13 @@
"mediaType": "video",
}
+ lang = traits.get_language(params['searxng_locale'], None)
+ if lang is not None:
+ query_params['language'] = lang
+
+ if params['time_range'] in time_range_dict:
+ query_params['time_filter'] = time_range_dict[params['time_range']]
+
params["url"] = f"{base_url}?{urlencode(query_params)}"
return params
@@ -88,3 +111,35 @@
)
return results
+
+
+def fetch_traits(engine_traits: EngineTraits):
+ """
+ Fetch languages from Odysee's source code.
+ """
+
+ resp = get(
+ 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long
+ timeout=60,
+ )
+
+ if not resp.ok:
+ print("ERROR: can't determine languages from Odysee")
+ return
+
+ for line in resp.text.split("\n")[1:-4]:
+ lang_tag = line.strip().split(": ")[0].replace("'", "")
+
+ try:
+ sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep="-"))
+ except babel.UnknownLocaleError:
+ print("ERROR: %s is unknown by babel" % lang_tag)
+ continue
+
+ conflict = engine_traits.languages.get(sxng_tag)
+ if conflict:
+ if conflict != lang_tag:
+ print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, lang_tag))
+ continue
+
+ engine_traits.languages[sxng_tag] = lang_tag
| {"golden_diff": "diff --git a/searx/engines/odysee.py b/searx/engines/odysee.py\n--- a/searx/engines/odysee.py\n+++ b/searx/engines/odysee.py\n@@ -9,6 +9,14 @@\n from urllib.parse import urlencode\n from datetime import datetime\n \n+import babel\n+\n+from searx.network import get\n+from searx.locales import language_tag\n+from searx.enginelib.traits import EngineTraits\n+\n+traits: EngineTraits\n+\n # Engine metadata\n about = {\n \"website\": \"https://odysee.com/\",\n@@ -21,6 +29,7 @@\n \n # Engine configuration\n paging = True\n+time_range_support = True\n results_per_page = 20\n categories = ['videos']\n \n@@ -29,6 +38,13 @@\n \n \n def request(query, params):\n+ time_range_dict = {\n+ \"day\": \"today\",\n+ \"week\": \"thisweek\",\n+ \"month\": \"thismonth\",\n+ \"year\": \"thisyear\",\n+ }\n+\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n@@ -38,6 +54,13 @@\n \"mediaType\": \"video\",\n }\n \n+ lang = traits.get_language(params['searxng_locale'], None)\n+ if lang is not None:\n+ query_params['language'] = lang\n+\n+ if params['time_range'] in time_range_dict:\n+ query_params['time_filter'] = time_range_dict[params['time_range']]\n+\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n \n@@ -88,3 +111,35 @@\n )\n \n return results\n+\n+\n+def fetch_traits(engine_traits: EngineTraits):\n+ \"\"\"\n+ Fetch languages from Odysee's source code.\n+ \"\"\"\n+\n+ resp = get(\n+ 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long\n+ timeout=60,\n+ )\n+\n+ if not resp.ok:\n+ print(\"ERROR: can't determine languages from Odysee\")\n+ return\n+\n+ for line in resp.text.split(\"\\n\")[1:-4]:\n+ lang_tag = line.strip().split(\": \")[0].replace(\"'\", \"\")\n+\n+ try:\n+ sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep=\"-\"))\n+ except babel.UnknownLocaleError:\n+ print(\"ERROR: %s is unknown by babel\" % lang_tag)\n+ continue\n+\n+ conflict = engine_traits.languages.get(sxng_tag)\n+ if conflict:\n+ if conflict != lang_tag:\n+ print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, lang_tag))\n+ continue\n+\n+ engine_traits.languages[sxng_tag] = lang_tag\n", "issue": "[FEATURE REQUEST] language filtering and safe search with odysee\n**Is your feature request related to a problem? Please describe.**\r\nNo\r\n\r\n**Describe the solution you'd like**\r\nWe can use, for example, language=de in the search URL. en, de-DE, and en-US also seem to work. There is no list of supported languages afaik, we just need to try things out one by one.\r\n\r\nfor safe search Moderate/Strict we should use nsfw=false in the URL\r\n\r\n**Additional context**\r\nThe information that you need for this is here: https://github.com/searx/searx/issues/2504\r\n\r\n\r\n----\r\nRelated\r\n\r\n- https://github.com/searxng/searxng/pull/2656\r\n- https://github.com/searxng/searxng/issues/590\r\n- [lbr command line](https://gitlab.com/gardenappl/lbt/-/blob/main/lbt?ref_type=heads)\r\n- [LBRY SDK ](https://github.com/lbryio/lbry-sdk/)\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Odysee_ is a decentralised video hosting platform.\n\n.. 
_Odysee: https://github.com/OdyseeTeam/odysee-frontend\n\"\"\"\n\nimport time\nfrom urllib.parse import urlencode\nfrom datetime import datetime\n\n# Engine metadata\nabout = {\n \"website\": \"https://odysee.com/\",\n \"wikidata_id\": \"Q102046570\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\nresults_per_page = 20\ncategories = ['videos']\n\n# Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)\nbase_url = \"https://lighthouse.odysee.tv/search\"\n\n\ndef request(query, params):\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n \"size\": results_per_page,\n \"from\": start_index,\n \"include\": \"channel,thumbnail_url,title,description,duration,release_time\",\n \"mediaType\": \"video\",\n }\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n seconds = int(duration)\n length = time.gmtime(seconds)\n if length.tm_hour:\n return time.strftime(\"%H:%M:%S\", length)\n return time.strftime(\"%M:%S\", length)\n\n\ndef response(resp):\n data = resp.json()\n results = []\n\n for item in data:\n name = item[\"name\"]\n claim_id = item[\"claimId\"]\n title = item[\"title\"]\n thumbnail_url = item[\"thumbnail_url\"]\n description = item[\"description\"] or \"\"\n channel = item[\"channel\"]\n release_time = item[\"release_time\"]\n duration = item[\"duration\"]\n\n release_date = datetime.strptime(release_time.split(\"T\")[0], \"%Y-%m-%d\")\n formatted_date = datetime.utcfromtimestamp(release_date.timestamp())\n\n url = f\"https://odysee.com/{name}:{claim_id}\"\n iframe_url = f\"https://odysee.com/$/embed/{name}:{claim_id}\"\n odysee_thumbnail = f\"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}\"\n formatted_duration = format_duration(duration)\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": channel,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": odysee_thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n", "path": "searx/engines/odysee.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Odysee_ is a decentralised video hosting platform.\n\n.. 
_Odysee: https://github.com/OdyseeTeam/odysee-frontend\n\"\"\"\n\nimport time\nfrom urllib.parse import urlencode\nfrom datetime import datetime\n\nimport babel\n\nfrom searx.network import get\nfrom searx.locales import language_tag\nfrom searx.enginelib.traits import EngineTraits\n\ntraits: EngineTraits\n\n# Engine metadata\nabout = {\n \"website\": \"https://odysee.com/\",\n \"wikidata_id\": \"Q102046570\",\n \"official_api_documentation\": None,\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"JSON\",\n}\n\n# Engine configuration\npaging = True\ntime_range_support = True\nresults_per_page = 20\ncategories = ['videos']\n\n# Search URL (Note: lighthouse.lbry.com/search works too, and may be faster at times)\nbase_url = \"https://lighthouse.odysee.tv/search\"\n\n\ndef request(query, params):\n time_range_dict = {\n \"day\": \"today\",\n \"week\": \"thisweek\",\n \"month\": \"thismonth\",\n \"year\": \"thisyear\",\n }\n\n start_index = (params[\"pageno\"] - 1) * results_per_page\n query_params = {\n \"s\": query,\n \"size\": results_per_page,\n \"from\": start_index,\n \"include\": \"channel,thumbnail_url,title,description,duration,release_time\",\n \"mediaType\": \"video\",\n }\n\n lang = traits.get_language(params['searxng_locale'], None)\n if lang is not None:\n query_params['language'] = lang\n\n if params['time_range'] in time_range_dict:\n query_params['time_filter'] = time_range_dict[params['time_range']]\n\n params[\"url\"] = f\"{base_url}?{urlencode(query_params)}\"\n return params\n\n\n# Format the video duration\ndef format_duration(duration):\n seconds = int(duration)\n length = time.gmtime(seconds)\n if length.tm_hour:\n return time.strftime(\"%H:%M:%S\", length)\n return time.strftime(\"%M:%S\", length)\n\n\ndef response(resp):\n data = resp.json()\n results = []\n\n for item in data:\n name = item[\"name\"]\n claim_id = item[\"claimId\"]\n title = item[\"title\"]\n thumbnail_url = item[\"thumbnail_url\"]\n description = item[\"description\"] or \"\"\n channel = item[\"channel\"]\n release_time = item[\"release_time\"]\n duration = item[\"duration\"]\n\n release_date = datetime.strptime(release_time.split(\"T\")[0], \"%Y-%m-%d\")\n formatted_date = datetime.utcfromtimestamp(release_date.timestamp())\n\n url = f\"https://odysee.com/{name}:{claim_id}\"\n iframe_url = f\"https://odysee.com/$/embed/{name}:{claim_id}\"\n odysee_thumbnail = f\"https://thumbnails.odycdn.com/optimize/s:390:0/quality:85/plain/{thumbnail_url}\"\n formatted_duration = format_duration(duration)\n\n results.append(\n {\n \"title\": title,\n \"url\": url,\n \"content\": description,\n \"author\": channel,\n \"publishedDate\": formatted_date,\n \"length\": formatted_duration,\n \"thumbnail\": odysee_thumbnail,\n \"iframe_src\": iframe_url,\n \"template\": \"videos.html\",\n }\n )\n\n return results\n\n\ndef fetch_traits(engine_traits: EngineTraits):\n \"\"\"\n Fetch languages from Odysee's source code.\n \"\"\"\n\n resp = get(\n 'https://raw.githubusercontent.com/OdyseeTeam/odysee-frontend/master/ui/constants/supported_browser_languages.js', # pylint: disable=line-too-long\n timeout=60,\n )\n\n if not resp.ok:\n print(\"ERROR: can't determine languages from Odysee\")\n return\n\n for line in resp.text.split(\"\\n\")[1:-4]:\n lang_tag = line.strip().split(\": \")[0].replace(\"'\", \"\")\n\n try:\n sxng_tag = language_tag(babel.Locale.parse(lang_tag, sep=\"-\"))\n except babel.UnknownLocaleError:\n print(\"ERROR: %s is unknown by babel\" % lang_tag)\n continue\n\n conflict = 
engine_traits.languages.get(sxng_tag)\n if conflict:\n if conflict != lang_tag:\n print(\"CONFLICT: babel %s --> %s, %s\" % (sxng_tag, conflict, lang_tag))\n continue\n\n engine_traits.languages[sxng_tag] = lang_tag\n", "path": "searx/engines/odysee.py"}]} | 1,306 | 677 |
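Read together, the new `language` and `time_filter` handling in the diff boils down to a few extra query parameters. The sketch below reproduces the URL construction in isolation; the locale-to-language mapping is stubbed with a plain dict here because the real engine resolves it through `EngineTraits` populated by `fetch_traits()` from Odysee's repository:

```python
from urllib.parse import urlencode

base_url = "https://lighthouse.odysee.tv/search"
results_per_page = 20
time_range_dict = {"day": "today", "week": "thisweek", "month": "thismonth", "year": "thisyear"}

# Stand-in for traits.get_language(); the actual mapping is fetched at build time.
languages = {"de-DE": "de", "en-US": "en"}


def build_url(query, pageno=1, searxng_locale="de-DE", time_range="week"):
    params = {
        "s": query,
        "size": results_per_page,
        "from": (pageno - 1) * results_per_page,
        "include": "channel,thumbnail_url,title,description,duration,release_time",
        "mediaType": "video",
    }
    lang = languages.get(searxng_locale)
    if lang is not None:
        params["language"] = lang
    if time_range in time_range_dict:
        params["time_filter"] = time_range_dict[time_range]
    return f"{base_url}?{urlencode(params)}"


print(build_url("model railways"))
```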
gh_patches_debug_22417 | rasdani/github-patches | git_diff | pre-commit__pre-commit-537 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unicode error: python 2 + merge conflict + non-ascii commit message
The important part of the stack:
```
File "...python2.7/site-packages/pre_commit/commands/run.py", line 52, in get_filenames
return getter(include_expr, exclude_expr)
File "...python2.7/site-packages/pre_commit/util.py", line 46, in wrapper
ret = wrapper._cache[key] = func(*args)
File "...python2.7/site-packages/pre_commit/git.py", line 98, in wrapper
for filename in all_file_list_strategy()
File "...python2.7/site-packages/pre_commit/util.py", line 46, in wrapper
ret = wrapper._cache[key] = func(*args)
File "...python2.7/site-packages/pre_commit/git.py", line 64, in get_conflicted_files
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
File "...python2.7/site-packages/pre_commit/git.py", line 54, in parse_merge_msg_for_conflicts
if line.startswith(('\t', '#\t'))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 37: ordinal not in range(128)
```
An easy fix: https://github.com/pre-commit/pre-commit/blob/e3b14c35f782ed464e3f96b44e8509048187689f/pre_commit/git.py#L63
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import functools
4 import logging
5 import os.path
6 import re
7 import sys
8
9 from pre_commit.errors import FatalError
10 from pre_commit.util import CalledProcessError
11 from pre_commit.util import cmd_output
12 from pre_commit.util import memoize_by_cwd
13
14
15 logger = logging.getLogger('pre_commit')
16
17
18 def get_root():
19 try:
20 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
21 except CalledProcessError:
22 raise FatalError(
23 'git failed. Is it installed, and are you in a Git repository '
24 'directory?'
25 )
26
27
28 def get_git_dir(git_root):
29 return os.path.normpath(os.path.join(
30 git_root,
31 cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),
32 ))
33
34
35 def get_remote_url(git_root):
36 ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
37 return ret.strip()
38
39
40 def is_in_merge_conflict():
41 git_dir = get_git_dir('.')
42 return (
43 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
44 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
45 )
46
47
48 def parse_merge_msg_for_conflicts(merge_msg):
49 # Conflicted files start with tabs
50 return [
51 line.lstrip('#').strip()
52 for line in merge_msg.splitlines()
53 # '#\t' for git 2.4.1
54 if line.startswith(('\t', '#\t'))
55 ]
56
57
58 @memoize_by_cwd
59 def get_conflicted_files():
60 logger.info('Checking merge-conflict files only.')
61 # Need to get the conflicted files from the MERGE_MSG because they could
62 # have resolved the conflict by choosing one side or the other
63 merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
64 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
65
66 # This will get the rest of the changes made after the merge.
67 # If they resolved the merge conflict by choosing a mesh of both sides
68 # this will also include the conflicted files
69 tree_hash = cmd_output('git', 'write-tree')[1].strip()
70 merge_diff_filenames = cmd_output(
71 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',
72 )[1].splitlines()
73 return set(merge_conflict_filenames) | set(merge_diff_filenames)
74
75
76 @memoize_by_cwd
77 def get_staged_files():
78 return cmd_output(
79 'git', 'diff', '--staged', '--name-only',
80 # Everything except for D
81 '--diff-filter=ACMRTUXB'
82 )[1].splitlines()
83
84
85 @memoize_by_cwd
86 def get_all_files():
87 return cmd_output('git', 'ls-files')[1].splitlines()
88
89
90 def get_files_matching(all_file_list_strategy):
91 @functools.wraps(all_file_list_strategy)
92 @memoize_by_cwd
93 def wrapper(include_expr, exclude_expr):
94 include_regex = re.compile(include_expr)
95 exclude_regex = re.compile(exclude_expr)
96 return {
97 filename
98 for filename in all_file_list_strategy()
99 if (
100 include_regex.search(filename) and
101 not exclude_regex.search(filename) and
102 os.path.lexists(filename)
103 )
104 }
105 return wrapper
106
107
108 get_staged_files_matching = get_files_matching(get_staged_files)
109 get_all_files_matching = get_files_matching(get_all_files)
110 get_conflicted_files_matching = get_files_matching(get_conflicted_files)
111
112
113 def check_for_cygwin_mismatch():
114 """See https://github.com/pre-commit/pre-commit/issues/354"""
115 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
116 is_cygwin_python = sys.platform == 'cygwin'
117 toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
118 is_cygwin_git = toplevel.startswith('/')
119
120 if is_cygwin_python ^ is_cygwin_git:
121 exe_type = {True: '(cygwin)', False: '(windows)'}
122 logger.warn(
123 'pre-commit has detected a mix of cygwin python / git\n'
124 'This combination is not supported, it is likely you will '
125 'receive an error later in the program.\n'
126 'Make sure to use cygwin git+python while using cygwin\n'
127 'These can be installed through the cygwin installer.\n'
128 ' - python {}\n'
129 ' - git {}\n'.format(
130 exe_type[is_cygwin_python],
131 exe_type[is_cygwin_git],
132 )
133 )
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -48,10 +48,10 @@
def parse_merge_msg_for_conflicts(merge_msg):
# Conflicted files start with tabs
return [
- line.lstrip('#').strip()
+ line.lstrip(b'#').strip().decode('UTF-8')
for line in merge_msg.splitlines()
# '#\t' for git 2.4.1
- if line.startswith(('\t', '#\t'))
+ if line.startswith((b'\t', b'#\t'))
]
@@ -60,7 +60,7 @@
logger.info('Checking merge-conflict files only.')
# Need to get the conflicted files from the MERGE_MSG because they could
# have resolved the conflict by choosing one side or the other
- merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
# This will get the rest of the changes made after the merge.
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -48,10 +48,10 @@\n def parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n- line.lstrip('#').strip()\n+ line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n- if line.startswith(('\\t', '#\\t'))\n+ if line.startswith((b'\\t', b'#\\t'))\n ]\n \n \n@@ -60,7 +60,7 @@\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n- merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n \n # This will get the rest of the changes made after the merge.\n", "issue": "Unicode error: python 2 + merge conflict + non-ascii commit message\nThe important part of the stack:\r\n\r\n```\r\n File \"...python2.7/site-packages/pre_commit/commands/run.py\", line 52, in get_filenames\r\n return getter(include_expr, exclude_expr)\r\n File \"...python2.7/site-packages/pre_commit/util.py\", line 46, in wrapper\r\n ret = wrapper._cache[key] = func(*args)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 98, in wrapper\r\n for filename in all_file_list_strategy()\r\n File \"...python2.7/site-packages/pre_commit/util.py\", line 46, in wrapper\r\n ret = wrapper._cache[key] = func(*args)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 64, in get_conflicted_files\r\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\r\n File \"...python2.7/site-packages/pre_commit/git.py\", line 54, in parse_merge_msg_for_conflicts\r\n if line.startswith(('\\t', '#\\t'))\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 37: ordinal not in range(128)\r\n```\r\n\r\nAn easy fix: https://github.com/pre-commit/pre-commit/blob/e3b14c35f782ed464e3f96b44e8509048187689f/pre_commit/git.py#L63\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\nimport sys\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return {\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n }\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python],\n exe_type[is_cygwin_git],\n )\n )\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os.path\nimport re\nimport 
sys\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'git failed. Is it installed, and are you in a Git repository '\n 'directory?'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb').read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output(\n 'git', 'diff', '--staged', '--name-only',\n # Everything except for D\n '--diff-filter=ACMRTUXB'\n )[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return {\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.lexists(filename)\n )\n }\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the 
program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python],\n exe_type[is_cygwin_git],\n )\n )\n", "path": "pre_commit/git.py"}]} | 1,939 | 277 |
gh_patches_debug_25875 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2292 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid query parameters cause internal server error
```
File "akvo/rest/filters.py", line 61, in filter_queryset
queryset = getattr(queryset, param)(**args_or_kwargs)
```
In the new API filtering, you get an internal server error when the query parameters are invalid, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}.
We should catch this and return the error message in a dict with `detail` as the key, e.g. `{detail: 'Related Field got invalid lookup: incorrect'}`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/filters.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo Reporting is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 import ast
8
9 from django.db.models import Q
10
11 from rest_framework import filters
12
13
14 class RSRGenericFilterBackend(filters.BaseFilterBackend):
15
16 def filter_queryset(self, request, queryset, view):
17 """
18 Return a queryset possibly filtered by query param values.
19 The filter looks for the query param keys filter and exclude
20 For each of these query param the value is evaluated using ast.literal_eval() and used as
21 kwargs in queryset.filter and queryset.exclude respectively.
22
23 Example URLs:
24 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}
25 http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}
26
27 It's also possible to specify models to be included in select_related() and
28 prefetch_related() calls on the queryset, but specifying these in lists of strings as the
29 values for the query sting params select_relates and prefetch_related.
30
31 Example:
32 http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']
33
34 Finally limited support for filtering on multiple arguments using logical OR between
35 those expressions is available. To use this supply two or more query string keywords on the
36 form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q
37 object. All those Q objects created are used in a queryset.filter() call concatenated using
38 the | operator.
39 """
40 def eval_query_value(request, key):
41 """
42 Use ast.literal_eval() to evaluate a query string value as a python data type object
43 :param request: the django request object
44 :param param: the query string param key
45 :return: a python data type object, or None if literal_eval() fails
46 """
47 value = request.QUERY_PARAMS.get(key, None)
48 try:
49 return ast.literal_eval(value)
50 except ValueError:
51 return None
52
53 qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']
54
55 # evaluate each query string param, and apply the queryset method with the same name
56 for param in qs_params:
57 args_or_kwargs = eval_query_value(request, param)
58 if args_or_kwargs:
59 # filter and exclude are called with a dict kwarg, the _related methods with a list
60 if param in ['filter', 'exclude',]:
61 queryset = getattr(queryset, param)(**args_or_kwargs)
62 else:
63 queryset = getattr(queryset, param)(*args_or_kwargs)
64
65 # support for Q expressions, limited to OR-concatenated filtering
66 if request.QUERY_PARAMS.get('q_filter1', None):
67 i = 1
68 q_queries = []
69 while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):
70 query_arg = eval_query_value(request, 'q_filter{}'.format(i))
71 if query_arg:
72 q_queries += [query_arg]
73 i += 1
74
75 q_expr = Q(**q_queries[0])
76 for query in q_queries[1:]:
77 q_expr = q_expr | Q(**query)
78
79 queryset = queryset.filter(q_expr)
80
81 return queryset
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py
--- a/akvo/rest/filters.py
+++ b/akvo/rest/filters.py
@@ -7,8 +7,10 @@
import ast
from django.db.models import Q
+from django.core.exceptions import FieldError
-from rest_framework import filters
+from rest_framework import filters, status
+from rest_framework.exceptions import APIException
class RSRGenericFilterBackend(filters.BaseFilterBackend):
@@ -57,10 +59,14 @@
args_or_kwargs = eval_query_value(request, param)
if args_or_kwargs:
# filter and exclude are called with a dict kwarg, the _related methods with a list
- if param in ['filter', 'exclude',]:
- queryset = getattr(queryset, param)(**args_or_kwargs)
- else:
- queryset = getattr(queryset, param)(*args_or_kwargs)
+ try:
+ if param in ['filter', 'exclude',]:
+ queryset = getattr(queryset, param)(**args_or_kwargs)
+ else:
+ queryset = getattr(queryset, param)(*args_or_kwargs)
+
+ except FieldError as e:
+ raise APIException("Error in request: {message}".format(message=e.message))
# support for Q expressions, limited to OR-concatenated filtering
if request.QUERY_PARAMS.get('q_filter1', None):
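For context on the resulting behaviour: mapping the `FieldError` onto DRF's `APIException` makes the framework render a JSON body with a `detail` key instead of an unhandled traceback. Below is a minimal standalone sketch of the same catch-and-wrap pattern; the two exception classes are stand-ins so the snippet runs without Django/DRF installed.
```python
# Stand-ins for django.core.exceptions.FieldError and
# rest_framework.exceptions.APIException (illustration only).
class FieldError(Exception):
    pass


class APIException(Exception):
    pass


def apply_filter(queryset, filter_kwargs):
    # Pretend this is queryset.filter(**filter_kwargs) hitting an unknown lookup.
    raise FieldError("Related Field got invalid lookup: incorrect")


try:
    apply_filter(object(), {"incorrect": "code"})
except FieldError as e:
    # The patched backend raises APIException(...) at this point; DRF would then
    # return an error body shaped like the dict printed here.
    print({"detail": "Error in request: {}".format(e)})
```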
| {"golden_diff": "diff --git a/akvo/rest/filters.py b/akvo/rest/filters.py\n--- a/akvo/rest/filters.py\n+++ b/akvo/rest/filters.py\n@@ -7,8 +7,10 @@\n import ast\n \n from django.db.models import Q\n+from django.core.exceptions import FieldError\n \n-from rest_framework import filters\n+from rest_framework import filters, status\n+from rest_framework.exceptions import APIException\n \n \n class RSRGenericFilterBackend(filters.BaseFilterBackend):\n@@ -57,10 +59,14 @@\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n- if param in ['filter', 'exclude',]:\n- queryset = getattr(queryset, param)(**args_or_kwargs)\n- else:\n- queryset = getattr(queryset, param)(*args_or_kwargs)\n+ try:\n+ if param in ['filter', 'exclude',]:\n+ queryset = getattr(queryset, param)(**args_or_kwargs)\n+ else:\n+ queryset = getattr(queryset, param)(*args_or_kwargs)\n+\n+ except FieldError as e:\n+ raise APIException(\"Error in request: {message}\".format(message=e.message))\n \n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n", "issue": "Invalid query parameters cause internal server error\n```\n File \"akvo/rest/filters.py\", line 61, in filter_queryset\n queryset = getattr(queryset, param)(**args_or_kwargs)\n```\n\nIn the new API filtering, you get an internal server error when the query is not correct, e.g.: http://rsr.akvo.org/rest/v1/project/?filter={'incorrect':'code'}. \n\nWe should catch this and return the error message in a dict with detail as key. E.g. `{detail: 'Related Field got invalid lookup: incorrect'}`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport ast\n\nfrom django.db.models import Q\n\nfrom rest_framework import filters\n\n\nclass RSRGenericFilterBackend(filters.BaseFilterBackend):\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a queryset possibly filtered by query param values.\n The filter looks for the query param keys filter and exclude\n For each of these query param the value is evaluated using ast.literal_eval() and used as\n kwargs in queryset.filter and queryset.exclude respectively.\n\n Example URLs:\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}\n\n It's also possible to specify models to be included in select_related() and\n prefetch_related() calls on the queryset, but specifying these in lists of strings as the\n values for the query sting params select_relates and prefetch_related.\n\n Example:\n http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']\n\n Finally limited support for filtering on multiple arguments using logical OR between\n those expressions is available. To use this supply two or more query string keywords on the\n form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q\n object. 
All those Q objects created are used in a queryset.filter() call concatenated using\n the | operator.\n \"\"\"\n def eval_query_value(request, key):\n \"\"\"\n Use ast.literal_eval() to evaluate a query string value as a python data type object\n :param request: the django request object\n :param param: the query string param key\n :return: a python data type object, or None if literal_eval() fails\n \"\"\"\n value = request.QUERY_PARAMS.get(key, None)\n try:\n return ast.literal_eval(value)\n except ValueError:\n return None\n\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']\n\n # evaluate each query string param, and apply the queryset method with the same name\n for param in qs_params:\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n if param in ['filter', 'exclude',]:\n queryset = getattr(queryset, param)(**args_or_kwargs)\n else:\n queryset = getattr(queryset, param)(*args_or_kwargs)\n\n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n i = 1\n q_queries = []\n while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):\n query_arg = eval_query_value(request, 'q_filter{}'.format(i))\n if query_arg:\n q_queries += [query_arg]\n i += 1\n\n q_expr = Q(**q_queries[0])\n for query in q_queries[1:]:\n q_expr = q_expr | Q(**query)\n\n queryset = queryset.filter(q_expr)\n\n return queryset\n", "path": "akvo/rest/filters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo Reporting is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nimport ast\n\nfrom django.db.models import Q\nfrom django.core.exceptions import FieldError\n\nfrom rest_framework import filters, status\nfrom rest_framework.exceptions import APIException\n\n\nclass RSRGenericFilterBackend(filters.BaseFilterBackend):\n\n def filter_queryset(self, request, queryset, view):\n \"\"\"\n Return a queryset possibly filtered by query param values.\n The filter looks for the query param keys filter and exclude\n For each of these query param the value is evaluated using ast.literal_eval() and used as\n kwargs in queryset.filter and queryset.exclude respectively.\n\n Example URLs:\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water','currency':'EUR'}\n http://rsr.akvo.org/rest/v1/project/?filter={'title__icontains':'water'}&exclude={'currency':'EUR'}\n\n It's also possible to specify models to be included in select_related() and\n prefetch_related() calls on the queryset, but specifying these in lists of strings as the\n values for the query sting params select_relates and prefetch_related.\n\n Example:\n http://rsr.akvo.org/rest/v1/project/?filter={'partners__in':[42,43]}&prefetch_related=['partners']\n\n Finally limited support for filtering on multiple arguments using logical OR between\n those expressions is available. To use this supply two or more query string keywords on the\n form q_filter1, q_filter2... where the value is a dict that can be used as a kwarg in a Q\n object. 
All those Q objects created are used in a queryset.filter() call concatenated using\n the | operator.\n \"\"\"\n def eval_query_value(request, key):\n \"\"\"\n Use ast.literal_eval() to evaluate a query string value as a python data type object\n :param request: the django request object\n :param param: the query string param key\n :return: a python data type object, or None if literal_eval() fails\n \"\"\"\n value = request.QUERY_PARAMS.get(key, None)\n try:\n return ast.literal_eval(value)\n except ValueError:\n return None\n\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']\n\n # evaluate each query string param, and apply the queryset method with the same name\n for param in qs_params:\n args_or_kwargs = eval_query_value(request, param)\n if args_or_kwargs:\n # filter and exclude are called with a dict kwarg, the _related methods with a list\n try:\n if param in ['filter', 'exclude',]:\n queryset = getattr(queryset, param)(**args_or_kwargs)\n else:\n queryset = getattr(queryset, param)(*args_or_kwargs)\n\n except FieldError as e:\n raise APIException(\"Error in request: {message}\".format(message=e.message))\n\n # support for Q expressions, limited to OR-concatenated filtering\n if request.QUERY_PARAMS.get('q_filter1', None):\n i = 1\n q_queries = []\n while request.QUERY_PARAMS.get('q_filter{}'.format(i), None):\n query_arg = eval_query_value(request, 'q_filter{}'.format(i))\n if query_arg:\n q_queries += [query_arg]\n i += 1\n\n q_expr = Q(**q_queries[0])\n for query in q_queries[1:]:\n q_expr = q_expr | Q(**query)\n\n queryset = queryset.filter(q_expr)\n\n return queryset\n", "path": "akvo/rest/filters.py"}]} | 1,303 | 311 |
gh_patches_debug_38763 | rasdani/github-patches | git_diff | Gallopsled__pwntools-227 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
atexit module can't handle multiple instances
The `atexit` module uses a dictionary to keep track of the routines to call. This means that no routine can be registered more than once, for example with different arguments.
Additionally, this means that `pwnlib.atexit` does not guarantee order.
Per the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:
_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._
However, allowing a routine to be registered multiple times breaks `unregister` in its current incarnation. The suggested remedy is to have each `register` call return a unique ID that is then passed to `unregister`, rather than passing the function itself to `unregister`.
--- END ISSUE ---
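A condensed sketch of the failure mode described in the issue: with a dict keyed by the function object (the same shape as the `register` in the module below), a second registration of the same routine silently replaces the first. The `farewell` handler is made up for illustration.
```python
_handlers = {}


def register(func, *args, **kwargs):
    # One slot per function object, so repeated registrations collide.
    _handlers[func] = (args, kwargs)
    return func


def farewell(name):
    print("goodbye,", name)


register(farewell, "alice")
register(farewell, "bob")  # silently overwrites the ("alice",) registration

for func, (args, kwargs) in _handlers.items():
    func(*args, **kwargs)  # prints only "goodbye, bob"; order is whatever the dict yields
```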
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/atexit.py`
Content:
```
1 """
2 Replacement for the Python standard library's atexit.py.
3
4 Whereas the standard :mod:`atexit` module only defines :func:`atexit.register`,
5 this replacement module also defines :func:`unregister`.
6
7 This module also fixes a the issue that exceptions raised by an exit handler is
8 printed twice when the standard :mod:`atexit` is used.
9 """
10
11 import sys, traceback
12
13 __all__ = ['register', 'unregister']
14
15 _handlers = {}
16
17 def register(func, *args, **kwargs):
18 """register(func, *args, **kwargs)
19
20 Registers a function to be called when an unhandled exception occurs. The
21 function will be called with positional arguments `args` and keyword
22 arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
23
24 If `func` is already registered then `args` and `kwargs` will be updated.
25
26 This function can be used as a decorator::
27
28 def f():
29 ...
30 atexit.register(f)
31
32 is equivalent to this::
33
34 @atexit.register
35 def f():
36 ...
37
38 """
39 _handlers[func] = (args, kwargs)
40 return func
41
42 def unregister(func):
43 """unregister(func)
44
45 Remove `func` from the collection of registered functions. If `func` isn't
46 registered this is a no-op.
47 """
48 if func in _handlers:
49 del _handlers[func]
50
51 def _run_handlers():
52 """_run_handlers()
53
54 Run registered exit handlers. The order is arbitrary.
55
56 If a handler raises an exception, it will be printed but nothing else
57 happens, i.e. other handlers will be run and `sys.excepthook` will not be
58 called for that reason.
59 """
60 for func, (args, kwargs) in _handlers.items():
61 try:
62 func(*args, **kwargs)
63 except SystemExit:
64 pass
65 except:
66 # extract the current exception and rewind the traceback to where it
67 # originated
68 typ, val, tb = sys.exc_info()
69 traceback.print_exception(typ, val, tb.tb_next)
70
71 # if there's already an exitfunc registered be sure to run that too
72 if hasattr(sys, "exitfunc"):
73 register(sys.exitfunc)
74
75 sys.exitfunc = _run_handlers
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py
--- a/pwnlib/atexit.py
+++ b/pwnlib/atexit.py
@@ -8,58 +8,76 @@
printed twice when the standard :mod:`atexit` is used.
"""
-import sys, traceback
+import sys, traceback, threading
+from .context import context
__all__ = ['register', 'unregister']
+_lock = threading.Lock()
+_ident = 0
_handlers = {}
def register(func, *args, **kwargs):
"""register(func, *args, **kwargs)
- Registers a function to be called when an unhandled exception occurs. The
- function will be called with positional arguments `args` and keyword
- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.
+ Registers a function to be called on program termination. The function will
+ be called with positional arguments `args` and keyword arguments `kwargs`,
+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will
+ be the one used when the handler is run.
- If `func` is already registered then `args` and `kwargs` will be updated.
+ E.g. to suppress logging output from an exit-handler one could write::
- This function can be used as a decorator::
+ with context.local(log_level = 'error'):
+ atexit.register(handler)
- def f():
- ...
- atexit.register(f)
+ An identifier is returned which can be used to unregister the exit-handler.
- is equivalent to this::
+ This function can be used as a decorator::
@atexit.register
- def f():
+ def handler():
...
- """
- _handlers[func] = (args, kwargs)
- return func
+ Notice however that this will bind ``handler`` to the identifier and not the
+ actual exit-handler. The exit-handler can then be unregistered with::
+
+ atexit.unregister(handler)
-def unregister(func):
- """unregister(func)
+ This function is thread safe.
- Remove `func` from the collection of registered functions. If `func` isn't
- registered this is a no-op.
"""
- if func in _handlers:
- del _handlers[func]
+ global _ident
+ with _lock:
+ ident = _ident
+ _ident += 1
+ _handlers[ident] = (func, args, kwargs, vars(context))
+ return ident
+
+def unregister(ident):
+ """unregister(ident)
+
+ Remove the exit-handler identified by `ident` from the list of registered
+ handlers. If `ident` isn't registered this is a no-op.
+ """
+ if ident in _handlers:
+ del _handlers[ident]
def _run_handlers():
"""_run_handlers()
- Run registered exit handlers. The order is arbitrary.
+ Run registered exit-handlers. They run in the reverse order of which they
+ were registered.
If a handler raises an exception, it will be printed but nothing else
happens, i.e. other handlers will be run and `sys.excepthook` will not be
called for that reason.
"""
- for func, (args, kwargs) in _handlers.items():
+ context.clear()
+ for _ident, (func, args, kwargs, ctx) in \
+ sorted(_handlers.items(), reverse = True):
try:
- func(*args, **kwargs)
+ with context.local(**ctx):
+ func(*args, **kwargs)
except SystemExit:
pass
except:
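With this patch, `register` hands back an identifier and `unregister` takes that identifier, so the same routine can be registered several times and handlers run in reverse registration order. A usage sketch, assuming the patched `pwnlib.atexit` is importable; the handler and paths are made up.
```python
from pwnlib import atexit


def save_log(path):
    print("flushing log to", path)


# Two independent registrations of the same routine, each with its own arguments.
first = atexit.register(save_log, "/tmp/a.log")
second = atexit.register(save_log, "/tmp/b.log")

# Dropping one registration by its identifier leaves the other intact;
# at interpreter exit the remaining handlers run newest-first.
atexit.unregister(second)
```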
| {"golden_diff": "diff --git a/pwnlib/atexit.py b/pwnlib/atexit.py\n--- a/pwnlib/atexit.py\n+++ b/pwnlib/atexit.py\n@@ -8,58 +8,76 @@\n printed twice when the standard :mod:`atexit` is used.\n \"\"\"\n \n-import sys, traceback\n+import sys, traceback, threading\n+from .context import context\n \n __all__ = ['register', 'unregister']\n \n+_lock = threading.Lock()\n+_ident = 0\n _handlers = {}\n \n def register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n \n- Registers a function to be called when an unhandled exception occurs. The\n- function will be called with positional arguments `args` and keyword\n- arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n+ Registers a function to be called on program termination. The function will\n+ be called with positional arguments `args` and keyword arguments `kwargs`,\n+ i.e. ``func(*args, **kwargs)``. The current `context` is recorded and will\n+ be the one used when the handler is run.\n \n- If `func` is already registered then `args` and `kwargs` will be updated.\n+ E.g. to suppress logging output from an exit-handler one could write::\n \n- This function can be used as a decorator::\n+ with context.local(log_level = 'error'):\n+ atexit.register(handler)\n \n- def f():\n- ...\n- atexit.register(f)\n+ An identifier is returned which can be used to unregister the exit-handler.\n \n- is equivalent to this::\n+ This function can be used as a decorator::\n \n @atexit.register\n- def f():\n+ def handler():\n ...\n \n- \"\"\"\n- _handlers[func] = (args, kwargs)\n- return func\n+ Notice however that this will bind ``handler`` to the identifier and not the\n+ actual exit-handler. The exit-handler can then be unregistered with::\n+\n+ atexit.unregister(handler)\n \n-def unregister(func):\n- \"\"\"unregister(func)\n+ This function is thread safe.\n \n- Remove `func` from the collection of registered functions. If `func` isn't\n- registered this is a no-op.\n \"\"\"\n- if func in _handlers:\n- del _handlers[func]\n+ global _ident\n+ with _lock:\n+ ident = _ident\n+ _ident += 1\n+ _handlers[ident] = (func, args, kwargs, vars(context))\n+ return ident\n+\n+def unregister(ident):\n+ \"\"\"unregister(ident)\n+\n+ Remove the exit-handler identified by `ident` from the list of registered\n+ handlers. If `ident` isn't registered this is a no-op.\n+ \"\"\"\n+ if ident in _handlers:\n+ del _handlers[ident]\n \n def _run_handlers():\n \"\"\"_run_handlers()\n \n- Run registered exit handlers. The order is arbitrary.\n+ Run registered exit-handlers. They run in the reverse order of which they\n+ were registered.\n \n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n- for func, (args, kwargs) in _handlers.items():\n+ context.clear()\n+ for _ident, (func, args, kwargs, ctx) in \\\n+ sorted(_handlers.items(), reverse = True):\n try:\n- func(*args, **kwargs)\n+ with context.local(**ctx):\n+ func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n", "issue": "atexit module can't handle multiple instances\nThe `atexit` module uses a dictionary to keep track of the routines to call. 
This means that no routines can be registered more than once, for example with different arguments.\n\nAdditionally, this means that `pwnlib.atexit` does not guarantee order.\n\nPer the [Python documentation](https://docs.python.org/2/library/atexit.html#module-atexit) for `atexit`:\n\n_atexit runs these functions in the reverse order in which they were registered; if you register A, B, and C, at interpreter termination time they will be run in the order C, B, A._\n\nHowever, multiple registration of routines breaks `unregister` in its current incarnation. The suggested remedy for this is to return a unique ID for each `register` call that is passed to `unregister`, rather than passing in the function to `unregister`.\n\n", "before_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\n\nimport sys, traceback\n\n__all__ = ['register', 'unregister']\n\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called when an unhandled exception occurs. The\n function will be called with positional arguments `args` and keyword\n arguments `kwargs`, i.e. ``func(*args, **kwargs)``.\n\n If `func` is already registered then `args` and `kwargs` will be updated.\n\n This function can be used as a decorator::\n\n def f():\n ...\n atexit.register(f)\n\n is equivalent to this::\n\n @atexit.register\n def f():\n ...\n\n \"\"\"\n _handlers[func] = (args, kwargs)\n return func\n\ndef unregister(func):\n \"\"\"unregister(func)\n\n Remove `func` from the collection of registered functions. If `func` isn't\n registered this is a no-op.\n \"\"\"\n if func in _handlers:\n del _handlers[func]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit handlers. The order is arbitrary.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n for func, (args, kwargs) in _handlers.items():\n try:\n func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}], "after_files": [{"content": "\"\"\"\nReplacement for the Python standard library's atexit.py.\n\nWhereas the standard :mod:`atexit` module only defines :func:`atexit.register`,\nthis replacement module also defines :func:`unregister`.\n\nThis module also fixes a the issue that exceptions raised by an exit handler is\nprinted twice when the standard :mod:`atexit` is used.\n\"\"\"\n\nimport sys, traceback, threading\nfrom .context import context\n\n__all__ = ['register', 'unregister']\n\n_lock = threading.Lock()\n_ident = 0\n_handlers = {}\n\ndef register(func, *args, **kwargs):\n \"\"\"register(func, *args, **kwargs)\n\n Registers a function to be called on program termination. The function will\n be called with positional arguments `args` and keyword arguments `kwargs`,\n i.e. 
``func(*args, **kwargs)``. The current `context` is recorded and will\n be the one used when the handler is run.\n\n E.g. to suppress logging output from an exit-handler one could write::\n\n with context.local(log_level = 'error'):\n atexit.register(handler)\n\n An identifier is returned which can be used to unregister the exit-handler.\n\n This function can be used as a decorator::\n\n @atexit.register\n def handler():\n ...\n\n Notice however that this will bind ``handler`` to the identifier and not the\n actual exit-handler. The exit-handler can then be unregistered with::\n\n atexit.unregister(handler)\n\n This function is thread safe.\n\n \"\"\"\n global _ident\n with _lock:\n ident = _ident\n _ident += 1\n _handlers[ident] = (func, args, kwargs, vars(context))\n return ident\n\ndef unregister(ident):\n \"\"\"unregister(ident)\n\n Remove the exit-handler identified by `ident` from the list of registered\n handlers. If `ident` isn't registered this is a no-op.\n \"\"\"\n if ident in _handlers:\n del _handlers[ident]\n\ndef _run_handlers():\n \"\"\"_run_handlers()\n\n Run registered exit-handlers. They run in the reverse order of which they\n were registered.\n\n If a handler raises an exception, it will be printed but nothing else\n happens, i.e. other handlers will be run and `sys.excepthook` will not be\n called for that reason.\n \"\"\"\n context.clear()\n for _ident, (func, args, kwargs, ctx) in \\\n sorted(_handlers.items(), reverse = True):\n try:\n with context.local(**ctx):\n func(*args, **kwargs)\n except SystemExit:\n pass\n except:\n # extract the current exception and rewind the traceback to where it\n # originated\n typ, val, tb = sys.exc_info()\n traceback.print_exception(typ, val, tb.tb_next)\n\n# if there's already an exitfunc registered be sure to run that too\nif hasattr(sys, \"exitfunc\"):\n register(sys.exitfunc)\n\nsys.exitfunc = _run_handlers\n", "path": "pwnlib/atexit.py"}]} | 1,090 | 835 |
gh_patches_debug_8954 | rasdani/github-patches | git_diff | mne-tools__mne-bids-1028 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
complete `.zenodo.json` metadata
Our `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):
https://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4
I feel like adding the contributors that are currently not listed to that file in a PR, and making that a "custom" (to add new contributors also to the zenodo metadata). I think that's a nice "reward" for contributions. Any objections?
--- END ISSUE ---
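As an illustration of how that convention could be checked automatically, here is a hypothetical helper (not part of MNE-BIDS), assuming the usual `.zenodo.json` layout with a `creators` list of `{"name": ...}` records.
```python
import json


def missing_zenodo_creators(contributors, zenodo_path=".zenodo.json"):
    """Return contributor names that have no entry in the Zenodo metadata."""
    with open(zenodo_path, "r", encoding="utf-8") as fid:
        listed = {creator["name"] for creator in json.load(fid)["creators"]}
    return sorted(set(contributors) - listed)


# Hypothetical usage with made-up names:
# print(missing_zenodo_creators(["Doe, Jane", "Smith, John"]))
```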
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """Setup MNE-BIDS."""
2 import sys
3
4 from setuptools import setup
5
6 # Give setuptools a hint to complain if it's too old a version
7 SETUP_REQUIRES = ["setuptools >= 46.4.0"]
8 # This enables setuptools to install wheel on-the-fly
9 SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
10
11 version = None
12 with open('mne_bids/__init__.py', 'r') as fid:
13 for line in fid:
14 line = line.strip()
15 if line.startswith('__version__ = '):
16 version = line.split(' = ')[1].split('#')[0].strip('\'')
17 break
18 if version is None:
19 raise RuntimeError('Could not determine version')
20
21
22 if __name__ == "__main__":
23 setup(
24 version=version,
25 setup_requires=SETUP_REQUIRES,
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
"""Setup MNE-BIDS."""
+import os
import sys
from setuptools import setup
@@ -9,7 +10,7 @@
SETUP_REQUIRES += ["wheel"] if "bdist_wheel" in sys.argv else []
version = None
-with open('mne_bids/__init__.py', 'r') as fid:
+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:
for line in fid:
line = line.strip()
if line.startswith('__version__ = '):
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n \"\"\"Setup MNE-BIDS.\"\"\"\n+import os\n import sys\n \n from setuptools import setup\n@@ -9,7 +10,7 @@\n SETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n \n version = None\n-with open('mne_bids/__init__.py', 'r') as fid:\n+with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n", "issue": "complete `.zenodo.json` metadata\nOur `.zenodo.json` file controls which contributors get listed in the [Zenodo code archive](https://zenodo.org/record/6359371):\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/13d2b975393f5ba55f4b3a7a8bee4a164c08fa1a/.zenodo.json#L1-L4\r\n\r\nI feel like adding the contributors that are currently not listed to that file in a PR, and making that a \"custom\" (to add new contributors also to the zenodo metadata). I think that's a nice \"reward\" for contributions. Any objections?\n", "before_files": [{"content": "\"\"\"Setup MNE-BIDS.\"\"\"\nimport sys\n\nfrom setuptools import setup\n\n# Give setuptools a hint to complain if it's too old a version\nSETUP_REQUIRES = [\"setuptools >= 46.4.0\"]\n# This enables setuptools to install wheel on-the-fly\nSETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n\nversion = None\nwith open('mne_bids/__init__.py', 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n version = line.split(' = ')[1].split('#')[0].strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\nif __name__ == \"__main__\":\n setup(\n version=version,\n setup_requires=SETUP_REQUIRES,\n )\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nimport sys\n\nfrom setuptools import setup\n\n# Give setuptools a hint to complain if it's too old a version\nSETUP_REQUIRES = [\"setuptools >= 46.4.0\"]\n# This enables setuptools to install wheel on-the-fly\nSETUP_REQUIRES += [\"wheel\"] if \"bdist_wheel\" in sys.argv else []\n\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in fid:\n line = line.strip()\n if line.startswith('__version__ = '):\n version = line.split(' = ')[1].split('#')[0].strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\nif __name__ == \"__main__\":\n setup(\n version=version,\n setup_requires=SETUP_REQUIRES,\n )\n", "path": "setup.py"}]} | 643 | 146 |
gh_patches_debug_60760 | rasdani/github-patches | git_diff | coala__coala-bears-310 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GitCommitBear: body_line_length shall be 72 and not 73
The standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bears/vcs/git/GitCommitBear.py`
Content:
```
1 import re
2 import shutil
3 import os
4
5 from coalib.bears.GlobalBear import GlobalBear
6 from coalib.misc.ContextManagers import change_directory
7 from coalib.misc.Shell import run_shell_command
8 from coalib.results.Result import Result
9
10
11 class GitCommitBear(GlobalBear):
12
13 @classmethod
14 def check_prerequisites(cls):
15 if shutil.which("git") is None:
16 return "git is not installed."
17 else:
18 return True
19
20 def run(self,
21 shortlog_length: int=50,
22 body_line_length: int=73,
23 force_body: bool=False,
24 allow_empty_commit_message: bool=False,
25 shortlog_regex: str="",
26 shortlog_trailing_period: bool=None):
27 """
28 Checks the current git commit message at HEAD.
29
30 This bear ensures that the shortlog and body do not exceed a given
31 line-length and that a newline lies between them.
32
33 :param shortlog_length: The maximum length of the shortlog.
34 The shortlog is the first line of
35 the commit message. The newline
36 character at end does not count to
37 the length.
38 :param body_line_length: The maximum line-length of the body.
39 The newline character at each line
40 end does not count to the length.
41 :param force_body: Whether a body shall exist or not.
42 :param allow_empty_commit_message: Whether empty commit messages are
43 allowed or not.
44 :param shortlog_regex: A regex to check the shortlog with.
45 A full match of this regex is then
46 required. Passing an empty string
47 disable the regex-check.
48 :param shortlog_trailing_period: Whether a dot shall be enforced at
49 the end of the shortlog line.
50 Providing ``None`` means
51 "doesn't care".
52 """
53 with change_directory(self.get_config_dir() or os.getcwd()):
54 stdout, stderr = run_shell_command("git log -1 --pretty=%B")
55
56 if stderr:
57 self.err("git:", repr(stderr))
58 return
59
60 stdout = stdout.rstrip("\n").splitlines()
61
62 if len(stdout) == 0:
63 if not allow_empty_commit_message:
64 yield Result(self, "HEAD commit has no message.")
65 return
66
67 yield from self.check_shortlog(shortlog_length,
68 shortlog_regex,
69 shortlog_trailing_period,
70 stdout[0])
71 yield from self.check_body(body_line_length, force_body, stdout[1:])
72
73 def check_shortlog(self,
74 shortlog_length,
75 regex,
76 shortlog_trailing_period,
77 shortlog):
78 """
79 Checks the given shortlog.
80
81 :param shortlog_length: The maximum length of the shortlog.
82 The newline character at end does not
83 count to the length.
84 :param regex: A regex to check the shortlog with.
85 :param shortlog_trailing_period: Whether a dot shall be enforced at end
86 end or not (or ``None`` for "don't
87 care").
88 :param shortlog: The shortlog message string.
89 """
90 diff = len(shortlog) - shortlog_length
91 if diff > 0:
92 yield Result(self,
93 "Shortlog of HEAD commit is {} character(s) longer "
94 "than the limit ({} > {}).".format(
95 diff, len(shortlog), shortlog_length))
96
97 if (shortlog[-1] != ".") == shortlog_trailing_period:
98 yield Result(self,
99 "Shortlog of HEAD commit contains no period at end."
100 if shortlog_trailing_period else
101 "Shortlog of HEAD commit contains a period at end.")
102
103 if regex != "":
104 match = re.match(regex, shortlog)
105 # fullmatch() inside re-module exists sadly since 3.4, but we
106 # support 3.3 so we need to check that the regex matched completely
107 # ourselves.
108 if not match or match.end() != len(shortlog):
109 yield Result(
110 self,
111 "Shortlog of HEAD commit does not match given regex.")
112
113 def check_body(self, body_line_length, force_body, body):
114 """
115 Checks the given commit body.
116
117 :param body_line_length: The maximum line-length of the body. The
118 newline character at each line end does not
119 count to the length.
120 :param force_body: Whether a body shall exist or not.
121 :param body: The commit body splitted by lines.
122 """
123 if len(body) == 0:
124 if force_body:
125 yield Result(self, "No commit message body at HEAD.")
126 return
127
128 if body[0] != "":
129 yield Result(self, "No newline between shortlog and body at HEAD.")
130 return
131
132 if any(len(line) > body_line_length for line in body[1:]):
133 yield Result(self, "Body of HEAD commit contains too long lines.")
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py
--- a/bears/vcs/git/GitCommitBear.py
+++ b/bears/vcs/git/GitCommitBear.py
@@ -19,7 +19,7 @@
def run(self,
shortlog_length: int=50,
- body_line_length: int=73,
+ body_line_length: int=72,
force_body: bool=False,
allow_empty_commit_message: bool=False,
shortlog_regex: str="",
| {"golden_diff": "diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py\n--- a/bears/vcs/git/GitCommitBear.py\n+++ b/bears/vcs/git/GitCommitBear.py\n@@ -19,7 +19,7 @@\n \n def run(self,\n shortlog_length: int=50,\n- body_line_length: int=73,\n+ body_line_length: int=72,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n", "issue": "GitCommitBear: body_line_length shall be 72 and not 73\nThe standard git commit guidelines suggest wrapping the body at 72 chars. So just replace the default of 73 with 72.\n\n", "before_files": [{"content": "import re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=73,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n :param shortlog_regex: A regex to check the shortlog with.\n A full match of this regex is then\n required. 
Passing an empty string\n disable the regex-check.\n :param shortlog_trailing_period: Whether a dot shall be enforced at\n the end of the shortlog line.\n Providing ``None`` means\n \"doesn't care\".\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length,\n shortlog_regex,\n shortlog_trailing_period,\n stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self,\n shortlog_length,\n regex,\n shortlog_trailing_period,\n shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog: The shortlog message string.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of HEAD commit is {} character(s) longer \"\n \"than the limit ({} > {}).\".format(\n diff, len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if regex != \"\":\n match = re.match(regex, shortlog)\n # fullmatch() inside re-module exists sadly since 3.4, but we\n # support 3.3 so we need to check that the regex matched completely\n # ourselves.\n if not match or match.end() != len(shortlog):\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. 
The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}], "after_files": [{"content": "import re\nimport shutil\nimport os\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.ContextManagers import change_directory\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=72,\n force_body: bool=False,\n allow_empty_commit_message: bool=False,\n shortlog_regex: str=\"\",\n shortlog_trailing_period: bool=None):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n :param shortlog_regex: A regex to check the shortlog with.\n A full match of this regex is then\n required. 
Passing an empty string\n disable the regex-check.\n :param shortlog_trailing_period: Whether a dot shall be enforced at\n the end of the shortlog line.\n Providing ``None`` means\n \"doesn't care\".\n \"\"\"\n with change_directory(self.get_config_dir() or os.getcwd()):\n stdout, stderr = run_shell_command(\"git log -1 --pretty=%B\")\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n stdout = stdout.rstrip(\"\\n\").splitlines()\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length,\n shortlog_regex,\n shortlog_trailing_period,\n stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self,\n shortlog_length,\n regex,\n shortlog_trailing_period,\n shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog.\n The newline character at end does not\n count to the length.\n :param regex: A regex to check the shortlog with.\n :param shortlog_trailing_period: Whether a dot shall be enforced at end\n end or not (or ``None`` for \"don't\n care\").\n :param shortlog: The shortlog message string.\n \"\"\"\n diff = len(shortlog) - shortlog_length\n if diff > 0:\n yield Result(self,\n \"Shortlog of HEAD commit is {} character(s) longer \"\n \"than the limit ({} > {}).\".format(\n diff, len(shortlog), shortlog_length))\n\n if (shortlog[-1] != \".\") == shortlog_trailing_period:\n yield Result(self,\n \"Shortlog of HEAD commit contains no period at end.\"\n if shortlog_trailing_period else\n \"Shortlog of HEAD commit contains a period at end.\")\n\n if regex != \"\":\n match = re.match(regex, shortlog)\n # fullmatch() inside re-module exists sadly since 3.4, but we\n # support 3.3 so we need to check that the regex matched completely\n # ourselves.\n if not match or match.end() != len(shortlog):\n yield Result(\n self,\n \"Shortlog of HEAD commit does not match given regex.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}]} | 1,663 | 126 |
gh_patches_debug_8972 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2367 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] pyro.distributions.InverseGamma.sample does not work on GPU
### Issue Description
`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:
```
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
```
I think the problem is in `__init__`:
```python
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
```
The argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.
### Environment
Ubuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0
### Code Snippet
```python
>>> import torch
>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution
>>> from pyro.distributions.transforms import PowerTransform
>>>
>>> concentration = torch.tensor(1.0).to("cuda")
>>> rate = torch.tensor(1.0).to("cuda")
>>>
>>> # InverseGamma.sample fails with an error
>>> InverseGamma(concentration, rate).sample()
RuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.
>>>
>>> # The equivalent TransformedDistribution is fine
>>> TransformedDistribution(
... Gamma(concentration, rate),
... PowerTransform(torch.tensor(-1.0).to("cuda")),
... ).sample()
tensor(0.5707, device='cuda:0')
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/inverse_gamma.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from torch.distributions import constraints
5 from torch.distributions.transforms import PowerTransform
6 from pyro.distributions.torch import Gamma, TransformedDistribution
7
8
9 class InverseGamma(TransformedDistribution):
10 r"""
11 Creates an inverse-gamma distribution parameterized by
12 `concentration` and `rate`.
13
14 X ~ Gamma(concentration, rate)
15 Y = 1/X ~ InverseGamma(concentration, rate)
16
17 :param torch.Tensor concentration: the concentration parameter (i.e. alpha).
18 :param torch.Tensor rate: the rate parameter (i.e. beta).
19 """
20 arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}
21 support = constraints.positive
22 has_rsample = True
23
24 def __init__(self, concentration, rate, validate_args=None):
25 base_dist = Gamma(concentration, rate)
26 super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
27
28 def expand(self, batch_shape, _instance=None):
29 new = self._get_checked_instance(InverseGamma, _instance)
30 return super().expand(batch_shape, _instance=new)
31
32 @property
33 def concentration(self):
34 return self.base_dist.concentration
35
36 @property
37 def rate(self):
38 return self.base_dist.rate
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py
--- a/pyro/distributions/inverse_gamma.py
+++ b/pyro/distributions/inverse_gamma.py
@@ -23,7 +23,8 @@
def __init__(self, concentration, rate, validate_args=None):
base_dist = Gamma(concentration, rate)
- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)
+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),
+ validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(InverseGamma, _instance)
| {"golden_diff": "diff --git a/pyro/distributions/inverse_gamma.py b/pyro/distributions/inverse_gamma.py\n--- a/pyro/distributions/inverse_gamma.py\n+++ b/pyro/distributions/inverse_gamma.py\n@@ -23,7 +23,8 @@\n \n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n- super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n+ super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),\n+ validate_args=validate_args)\n \n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n", "issue": "[bug] pyro.distributions.InverseGamma.sample does not work on GPU\n### Issue Description\r\n`pyro.distributions.InverseGamma.sample` fails with the following error when its arguments are on the GPU:\r\n```\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n```\r\n\r\nI think the problem is in `__init__`:\r\n```python\r\n def __init__(self, concentration, rate, validate_args=None):\r\n base_dist = Gamma(concentration, rate)\r\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\r\n```\r\nThe argument to `PowerTransform` should probably be something like `-torch.ones_like(rate)`.\r\n\r\n### Environment\r\nUbuntu 16.04, Python 3.7, PyTorch 1.4.0, Pyro 1.3.0\r\n\r\n### Code Snippet\r\n```python\r\n>>> import torch\r\n>>> from pyro.distributions import Gamma, InverseGamma, TransformedDistribution\r\n>>> from pyro.distributions.transforms import PowerTransform\r\n>>>\r\n>>> concentration = torch.tensor(1.0).to(\"cuda\")\r\n>>> rate = torch.tensor(1.0).to(\"cuda\")\r\n>>> \r\n>>> # InverseGamma.sample fails with an error\r\n>>> InverseGamma(concentration, rate).sample()\r\nRuntimeError: iter.device(arg).is_cuda() INTERNAL ASSERT FAILED at /pytorch/aten/src/ATen/native/cuda/Loops.cuh:197, please report a bug to PyTorch.\r\n>>> \r\n>>> # The equivalent TransformedDistribution is fine\r\n>>> TransformedDistribution(\r\n... Gamma(concentration, rate),\r\n... PowerTransform(torch.tensor(-1.0).to(\"cuda\")),\r\n... ).sample()\r\ntensor(0.5707, device='cuda:0')\r\n```\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import PowerTransform\nfrom pyro.distributions.torch import Gamma, TransformedDistribution\n\n\nclass InverseGamma(TransformedDistribution):\n r\"\"\"\n Creates an inverse-gamma distribution parameterized by\n `concentration` and `rate`.\n\n X ~ Gamma(concentration, rate)\n Y = 1/X ~ InverseGamma(concentration, rate)\n\n :param torch.Tensor concentration: the concentration parameter (i.e. alpha).\n :param torch.Tensor rate: the rate parameter (i.e. 
beta).\n \"\"\"\n arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}\n support = constraints.positive\n has_rsample = True\n\n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n super().__init__(base_dist, PowerTransform(-1.0), validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def concentration(self):\n return self.base_dist.concentration\n\n @property\n def rate(self):\n return self.base_dist.rate\n", "path": "pyro/distributions/inverse_gamma.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom torch.distributions import constraints\nfrom torch.distributions.transforms import PowerTransform\nfrom pyro.distributions.torch import Gamma, TransformedDistribution\n\n\nclass InverseGamma(TransformedDistribution):\n r\"\"\"\n Creates an inverse-gamma distribution parameterized by\n `concentration` and `rate`.\n\n X ~ Gamma(concentration, rate)\n Y = 1/X ~ InverseGamma(concentration, rate)\n\n :param torch.Tensor concentration: the concentration parameter (i.e. alpha).\n :param torch.Tensor rate: the rate parameter (i.e. beta).\n \"\"\"\n arg_constraints = {'concentration': constraints.positive, 'rate': constraints.positive}\n support = constraints.positive\n has_rsample = True\n\n def __init__(self, concentration, rate, validate_args=None):\n base_dist = Gamma(concentration, rate)\n super().__init__(base_dist, PowerTransform(-base_dist.rate.new_ones(())),\n validate_args=validate_args)\n\n def expand(self, batch_shape, _instance=None):\n new = self._get_checked_instance(InverseGamma, _instance)\n return super().expand(batch_shape, _instance=new)\n\n @property\n def concentration(self):\n return self.base_dist.concentration\n\n @property\n def rate(self):\n return self.base_dist.rate\n", "path": "pyro/distributions/inverse_gamma.py"}]} | 1,053 | 160 |
gh_patches_debug_7918 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-75 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CIFAR10 run_fl.py misses license header
https://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/cifar10/run_fl.py`
Content:
```
1 import argparse
2 import os
3 import time
4
5 from nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner
6
7
8 def main():
9 parser = argparse.ArgumentParser()
10 parser.add_argument("--run_number", type=int, default=100, help="FL run number to start at.")
11 parser.add_argument("--admin_dir", type=str, default="./admin/", help="Path to admin directory.")
12 parser.add_argument("--username", type=str, default="[email protected]", help="Admin username")
13 parser.add_argument("--app", type=str, default="cifar10_fedavg", help="App to be deployed")
14 parser.add_argument("--port", type=int, default=8003, help="The admin server port")
15 parser.add_argument("--poc", action='store_true', help="Whether admin uses POC mode.")
16 parser.add_argument("--min_clients", type=int, default=8, help="Minimum number of clients.")
17 args = parser.parse_args()
18
19 host = ""
20 port = args.port
21
22 assert os.path.isdir(args.admin_dir), f"admin directory does not exist at {args.admin_dir}"
23
24 # Set up certificate names and admin folders
25 upload_dir = os.path.join(args.admin_dir, "transfer")
26 if not os.path.isdir(upload_dir):
27 os.makedirs(upload_dir)
28 download_dir = os.path.join(args.admin_dir, "download")
29 if not os.path.isdir(download_dir):
30 os.makedirs(download_dir)
31
32 run_number = args.run_number
33
34 # Initialize the runner
35 runner = FLAdminAPIRunner(
36 host=host,
37 port=port,
38 username=args.username,
39 admin_dir=args.admin_dir,
40 poc=args.poc,
41 debug=False,
42 )
43
44 # Run
45 start = time.time()
46 runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,
47 timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours
48 print("Total training time", time.time() - start)
49
50
51 if __name__ == "__main__":
52 main()
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py
--- a/examples/cifar10/run_fl.py
+++ b/examples/cifar10/run_fl.py
@@ -1,3 +1,17 @@
+# Copyright (c) 2021, NVIDIA CORPORATION.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import argparse
import os
import time
| {"golden_diff": "diff --git a/examples/cifar10/run_fl.py b/examples/cifar10/run_fl.py\n--- a/examples/cifar10/run_fl.py\n+++ b/examples/cifar10/run_fl.py\n@@ -1,3 +1,17 @@\n+# Copyright (c) 2021, NVIDIA CORPORATION.\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# http://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n import argparse\n import os\n import time\n", "issue": "CIFAR10 run_fl.py misses license header\nhttps://github.com/NVIDIA/NVFlare/blob/d784e7be9742b4d1bcfa0f389ea063fec984fe50/examples/cifar10/run_fl.py#L1\n", "before_files": [{"content": "import argparse\nimport os\nimport time\n\nfrom nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_number\", type=int, default=100, help=\"FL run number to start at.\")\n parser.add_argument(\"--admin_dir\", type=str, default=\"./admin/\", help=\"Path to admin directory.\")\n parser.add_argument(\"--username\", type=str, default=\"[email protected]\", help=\"Admin username\")\n parser.add_argument(\"--app\", type=str, default=\"cifar10_fedavg\", help=\"App to be deployed\")\n parser.add_argument(\"--port\", type=int, default=8003, help=\"The admin server port\")\n parser.add_argument(\"--poc\", action='store_true', help=\"Whether admin uses POC mode.\")\n parser.add_argument(\"--min_clients\", type=int, default=8, help=\"Minimum number of clients.\")\n args = parser.parse_args()\n\n host = \"\"\n port = args.port\n\n assert os.path.isdir(args.admin_dir), f\"admin directory does not exist at {args.admin_dir}\"\n\n # Set up certificate names and admin folders\n upload_dir = os.path.join(args.admin_dir, \"transfer\")\n if not os.path.isdir(upload_dir):\n os.makedirs(upload_dir)\n download_dir = os.path.join(args.admin_dir, \"download\")\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n run_number = args.run_number\n\n # Initialize the runner\n runner = FLAdminAPIRunner(\n host=host,\n port=port,\n username=args.username,\n admin_dir=args.admin_dir,\n poc=args.poc,\n debug=False,\n )\n\n # Run\n start = time.time()\n runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,\n timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours\n print(\"Total training time\", time.time() - start)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/cifar10/run_fl.py"}], "after_files": [{"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nimport argparse\nimport os\nimport time\n\nfrom nvflare.fuel.hci.client.fl_admin_api_runner import FLAdminAPIRunner\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run_number\", type=int, default=100, help=\"FL run number to start at.\")\n parser.add_argument(\"--admin_dir\", type=str, default=\"./admin/\", help=\"Path to admin directory.\")\n parser.add_argument(\"--username\", type=str, default=\"[email protected]\", help=\"Admin username\")\n parser.add_argument(\"--app\", type=str, default=\"cifar10_fedavg\", help=\"App to be deployed\")\n parser.add_argument(\"--port\", type=int, default=8003, help=\"The admin server port\")\n parser.add_argument(\"--poc\", action='store_true', help=\"Whether admin uses POC mode.\")\n parser.add_argument(\"--min_clients\", type=int, default=8, help=\"Minimum number of clients.\")\n args = parser.parse_args()\n\n host = \"\"\n port = args.port\n\n assert os.path.isdir(args.admin_dir), f\"admin directory does not exist at {args.admin_dir}\"\n\n # Set up certificate names and admin folders\n upload_dir = os.path.join(args.admin_dir, \"transfer\")\n if not os.path.isdir(upload_dir):\n os.makedirs(upload_dir)\n download_dir = os.path.join(args.admin_dir, \"download\")\n if not os.path.isdir(download_dir):\n os.makedirs(download_dir)\n\n run_number = args.run_number\n\n # Initialize the runner\n runner = FLAdminAPIRunner(\n host=host,\n port=port,\n username=args.username,\n admin_dir=args.admin_dir,\n poc=args.poc,\n debug=False,\n )\n\n # Run\n start = time.time()\n runner.run(run_number, args.app, restart_all_first=False, shutdown_on_error=True, shutdown_at_end=True,\n timeout=7200, min_clients=args.min_clients) # will time out if not completed in 2 hours\n print(\"Total training time\", time.time() - start)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/cifar10/run_fl.py"}]} | 888 | 202 |
gh_patches_debug_29982 | rasdani/github-patches | git_diff | astronomer__astro-sdk-453 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change `export_file` to return `File` object
**Context**
In order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.
* Astro-SDK version: 0.9..1
* Request by: @jlaneve
* Analysed by @dimberman
**Problem**
At the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:
```
output_file = File(path="/tmp/saved_df.csv")
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
export = aql.export_file(
input_data=table,
output_file=output_file,
if_exists="replace",
)
res_df = aql.load_file(input_file=output_file)
export >> res_df
```
**Desired behaviour**
```
with sample_dag:
table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
exported_file = aql.export_file(
input_data=table,
output_file=File(path="/tmp/saved_df.csv"),
if_exists="replace",
)
res_df = aql.load_file(input_file=exported_file)
```
**Acceptance criteria**
* Change `export_file` so it returns the `File` instance, as opposed to `None`
Since there is no documentation about this task, we don't need to update the documentation for it. Creating documentation for this feature should be part of another issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/astro/sql/operators/export_file.py`
Content:
```
1 from typing import Optional, Union
2
3 import pandas as pd
4 from airflow.models import BaseOperator
5 from airflow.models.xcom_arg import XComArg
6
7 from astro.constants import ExportExistsStrategy
8 from astro.databases import create_database
9 from astro.files import File
10 from astro.sql.table import Table
11 from astro.utils.task_id_helper import get_task_id
12
13
14 class ExportFile(BaseOperator):
15 """Write SQL table to csv/parquet on local/S3/GCS.
16
17 :param input_data: Table to convert to file
18 :param output_file: File object containing the path to the file and connection id.
19 :param if_exists: Overwrite file if exists. Default False.
20 """
21
22 template_fields = ("input_data", "output_file")
23
24 def __init__(
25 self,
26 input_data: Union[Table, pd.DataFrame],
27 output_file: File,
28 if_exists: ExportExistsStrategy = "exception",
29 **kwargs,
30 ) -> None:
31 super().__init__(**kwargs)
32 self.output_file = output_file
33 self.input_data = input_data
34 self.if_exists = if_exists
35 self.kwargs = kwargs
36
37 def execute(self, context: dict) -> None:
38 """Write SQL table to csv/parquet on local/S3/GCS.
39
40 Infers SQL database type based on connection.
41 """
42 # Infer db type from `input_conn_id`.
43 if isinstance(self.input_data, Table):
44 database = create_database(self.input_data.conn_id)
45 self.input_data = database.populate_table_metadata(self.input_data)
46 df = database.export_table_to_pandas_dataframe(self.input_data)
47 elif isinstance(self.input_data, pd.DataFrame):
48 df = self.input_data
49 else:
50 raise ValueError(
51 f"Expected input_table to be Table or dataframe. Got {type(self.input_data)}"
52 )
53 # Write file if overwrite == True or if file doesn't exist.
54 if self.if_exists == "replace" or not self.output_file.exists():
55 self.output_file.create_from_dataframe(df)
56 else:
57 raise FileExistsError(f"{self.output_file.path} file already exists.")
58
59
60 def export_file(
61 input_data: Union[Table, pd.DataFrame],
62 output_file: File,
63 if_exists: ExportExistsStrategy = "exception",
64 task_id: Optional[str] = None,
65 **kwargs,
66 ) -> XComArg:
67 """Convert SaveFile into a function. Returns XComArg.
68
69 Returns an XComArg object.
70
71 :param output_file: Path and conn_id
72 :param input_data: Input table / dataframe
73 :param if_exists: Overwrite file if exists. Default "exception"
74 :param task_id: task id, optional
75 """
76
77 task_id = (
78 task_id if task_id is not None else get_task_id("export_file", output_file.path)
79 )
80
81 return ExportFile(
82 task_id=task_id,
83 output_file=output_file,
84 input_data=input_data,
85 if_exists=if_exists,
86 ).output
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py
--- a/src/astro/sql/operators/export_file.py
+++ b/src/astro/sql/operators/export_file.py
@@ -34,7 +34,7 @@
self.if_exists = if_exists
self.kwargs = kwargs
- def execute(self, context: dict) -> None:
+ def execute(self, context: dict) -> File:
"""Write SQL table to csv/parquet on local/S3/GCS.
Infers SQL database type based on connection.
@@ -53,6 +53,7 @@
# Write file if overwrite == True or if file doesn't exist.
if self.if_exists == "replace" or not self.output_file.exists():
self.output_file.create_from_dataframe(df)
+ return self.output_file
else:
raise FileExistsError(f"{self.output_file.path} file already exists.")
@@ -66,7 +67,20 @@
) -> XComArg:
"""Convert SaveFile into a function. Returns XComArg.
- Returns an XComArg object.
+ Returns an XComArg object of type File which matches the output_file parameter.
+
+ This will allow users to perform further actions with the exported file.
+
+ e.g.
+
+ with sample_dag:
+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)
+ exported_file = aql.export_file(
+ input_data=table,
+ output_file=File(path="/tmp/saved_df.csv"),
+ if_exists="replace",
+ )
+ res_df = aql.load_file(input_file=exported_file)
:param output_file: Path and conn_id
:param input_data: Input table / dataframe
| {"golden_diff": "diff --git a/src/astro/sql/operators/export_file.py b/src/astro/sql/operators/export_file.py\n--- a/src/astro/sql/operators/export_file.py\n+++ b/src/astro/sql/operators/export_file.py\n@@ -34,7 +34,7 @@\n self.if_exists = if_exists\n self.kwargs = kwargs\n \n- def execute(self, context: dict) -> None:\n+ def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n \n Infers SQL database type based on connection.\n@@ -53,6 +53,7 @@\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n+ return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n \n@@ -66,7 +67,20 @@\n ) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n \n- Returns an XComArg object.\n+ Returns an XComArg object of type File which matches the output_file parameter.\n+\n+ This will allow users to perform further actions with the exported file.\n+\n+ e.g.\n+\n+ with sample_dag:\n+ table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n+ exported_file = aql.export_file(\n+ input_data=table,\n+ output_file=File(path=\"/tmp/saved_df.csv\"),\n+ if_exists=\"replace\",\n+ )\n+ res_df = aql.load_file(input_file=exported_file)\n \n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n", "issue": "Change `export_file` to return `File` object\n**Context**\r\n\r\nIn order to allow users to perform subsequent actions on an exported file (while maintaining a functional structure), we should allow the `export_file` function to return a file object.\r\n\r\n* Astro-SDK version: 0.9..1\r\n* Request by: @jlaneve\r\n* Analysed by @dimberman \r\n\r\n**Problem**\r\n\r\nAt the moment a user who wants to use the `output_file` object would need to explicitly set dependencies like this:\r\n\r\n```\r\n output_file = File(path=\"/tmp/saved_df.csv\")\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n export = aql.export_file(\r\n input_data=table,\r\n output_file=output_file,\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=output_file)\r\n export >> res_df\r\n```\r\n\r\n**Desired behaviour**\r\n\r\n```\r\n with sample_dag:\r\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\r\n exported_file = aql.export_file(\r\n input_data=table,\r\n output_file=File(path=\"/tmp/saved_df.csv\"),\r\n if_exists=\"replace\",\r\n )\r\n res_df = aql.load_file(input_file=exported_file)\r\n```\r\n\r\n**Acceptance criteria**\r\n* Change `export_file` so it returns the `File` instance, as opposed to `None`\r\n\r\nSince there is no documentation about this task, we don't need to update the documentation for it. 
To create documentation for this feature should be part of another issue.\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> None:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object.\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nimport pandas as pd\nfrom airflow.models import BaseOperator\nfrom airflow.models.xcom_arg import XComArg\n\nfrom astro.constants import ExportExistsStrategy\nfrom astro.databases import create_database\nfrom astro.files import File\nfrom astro.sql.table import Table\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass ExportFile(BaseOperator):\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n :param input_data: Table to convert to file\n :param output_file: File object containing the path to the file and connection id.\n :param if_exists: Overwrite file if exists. 
Default False.\n \"\"\"\n\n template_fields = (\"input_data\", \"output_file\")\n\n def __init__(\n self,\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_file = output_file\n self.input_data = input_data\n self.if_exists = if_exists\n self.kwargs = kwargs\n\n def execute(self, context: dict) -> File:\n \"\"\"Write SQL table to csv/parquet on local/S3/GCS.\n\n Infers SQL database type based on connection.\n \"\"\"\n # Infer db type from `input_conn_id`.\n if isinstance(self.input_data, Table):\n database = create_database(self.input_data.conn_id)\n self.input_data = database.populate_table_metadata(self.input_data)\n df = database.export_table_to_pandas_dataframe(self.input_data)\n elif isinstance(self.input_data, pd.DataFrame):\n df = self.input_data\n else:\n raise ValueError(\n f\"Expected input_table to be Table or dataframe. Got {type(self.input_data)}\"\n )\n # Write file if overwrite == True or if file doesn't exist.\n if self.if_exists == \"replace\" or not self.output_file.exists():\n self.output_file.create_from_dataframe(df)\n return self.output_file\n else:\n raise FileExistsError(f\"{self.output_file.path} file already exists.\")\n\n\ndef export_file(\n input_data: Union[Table, pd.DataFrame],\n output_file: File,\n if_exists: ExportExistsStrategy = \"exception\",\n task_id: Optional[str] = None,\n **kwargs,\n) -> XComArg:\n \"\"\"Convert SaveFile into a function. Returns XComArg.\n\n Returns an XComArg object of type File which matches the output_file parameter.\n\n This will allow users to perform further actions with the exported file.\n\n e.g.\n\n with sample_dag:\n table = aql.load_file(input_file=File(path=data_path), output_table=test_table)\n exported_file = aql.export_file(\n input_data=table,\n output_file=File(path=\"/tmp/saved_df.csv\"),\n if_exists=\"replace\",\n )\n res_df = aql.load_file(input_file=exported_file)\n\n :param output_file: Path and conn_id\n :param input_data: Input table / dataframe\n :param if_exists: Overwrite file if exists. Default \"exception\"\n :param task_id: task id, optional\n \"\"\"\n\n task_id = (\n task_id if task_id is not None else get_task_id(\"export_file\", output_file.path)\n )\n\n return ExportFile(\n task_id=task_id,\n output_file=output_file,\n input_data=input_data,\n if_exists=if_exists,\n ).output\n", "path": "src/astro/sql/operators/export_file.py"}]} | 1,415 | 395 |
gh_patches_debug_39269 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1019 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_file_stream support
node-telegram-bot-api has added a `getFileStream` method, which allows accessing the file stream without downloading it to a folder. It'll be useful. For example, changing a group photo from a photo uploaded by users.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/files/file.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains an object that represents a Telegram File."""
20 from os.path import basename
21
22 from future.backports.urllib import parse as urllib_parse
23
24 from telegram import TelegramObject
25
26
27 class File(TelegramObject):
28 """
29 This object represents a file ready to be downloaded. The file can be downloaded with
30 :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the
31 link expires, a new one can be requested by calling getFile.
32
33 Note:
34 Maximum file size to download is 20 MB
35
36 Attributes:
37 file_id (:obj:`str`): Unique identifier for this file.
38 file_size (:obj:`str`): Optional. File size.
39 file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.
40
41 Args:
42 file_id (:obj:`str`): Unique identifier for this file.
43 file_size (:obj:`int`, optional): Optional. File size, if known.
44 file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.
45 bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.
46 **kwargs (:obj:`dict`): Arbitrary keyword arguments.
47
48 """
49
50 def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):
51 # Required
52 self.file_id = str(file_id)
53
54 # Optionals
55 self.file_size = file_size
56 self.file_path = file_path
57
58 self.bot = bot
59
60 self._id_attrs = (self.file_id,)
61
62 @classmethod
63 def de_json(cls, data, bot):
64 if not data:
65 return None
66
67 return cls(bot=bot, **data)
68
69 def download(self, custom_path=None, out=None, timeout=None):
70 """
71 Download this file. By default, the file is saved in the current working directory with its
72 original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be
73 saved to that path instead. If :attr:`out` is defined, the file contents will be saved to
74 that object using the ``out.write`` method.
75
76 Note:
77 `custom_path` and `out` are mutually exclusive.
78
79 Args:
80 custom_path (:obj:`str`, optional): Custom path.
81 out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
82 applicable.
83 timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
84 the read timeout from the server (instead of the one specified during creation of
85 the connection pool).
86
87 Raises:
88 ValueError: If both ``custom_path`` and ``out`` are passed.
89
90 """
91 if custom_path is not None and out is not None:
92 raise ValueError('custom_path and out are mutually exclusive')
93
94 # Convert any UTF-8 char into a url encoded ASCII string.
95 sres = urllib_parse.urlsplit(self.file_path)
96 url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
97 sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
98
99 if out:
100 buf = self.bot.request.retrieve(url)
101 out.write(buf)
102
103 else:
104 if custom_path:
105 filename = custom_path
106 else:
107 filename = basename(self.file_path)
108
109 self.bot.request.download(url, filename, timeout=timeout)
110
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/files/file.py b/telegram/files/file.py
--- a/telegram/files/file.py
+++ b/telegram/files/file.py
@@ -74,32 +74,34 @@
that object using the ``out.write`` method.
Note:
- `custom_path` and `out` are mutually exclusive.
+ :attr:`custom_path` and :attr:`out` are mutually exclusive.
Args:
custom_path (:obj:`str`, optional): Custom path.
- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if
- applicable.
+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for
+ writing in binary mode, if applicable.
timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as
the read timeout from the server (instead of the one specified during creation of
the connection pool).
+ Returns:
+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.
+ Otherwise, returns the filename downloaded to.
+
Raises:
- ValueError: If both ``custom_path`` and ``out`` are passed.
+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.
"""
if custom_path is not None and out is not None:
raise ValueError('custom_path and out are mutually exclusive')
# Convert any UTF-8 char into a url encoded ASCII string.
- sres = urllib_parse.urlsplit(self.file_path)
- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(
- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+ url = self._get_encoded_url()
if out:
buf = self.bot.request.retrieve(url)
out.write(buf)
-
+ return out
else:
if custom_path:
filename = custom_path
@@ -107,3 +109,27 @@
filename = basename(self.file_path)
self.bot.request.download(url, filename, timeout=timeout)
+ return filename
+
+ def _get_encoded_url(self):
+ """Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string."""
+ sres = urllib_parse.urlsplit(self.file_path)
+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(
+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))
+
+ def download_as_bytearray(self, buf=None):
+ """Download this file and return it as a bytearray.
+
+ Args:
+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.
+
+ Returns:
+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly
+ allocated :obj:`bytearray`.
+
+ """
+ if buf is None:
+ buf = bytearray()
+
+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))
+ return buf
| {"golden_diff": "diff --git a/telegram/files/file.py b/telegram/files/file.py\n--- a/telegram/files/file.py\n+++ b/telegram/files/file.py\n@@ -74,32 +74,34 @@\n that object using the ``out.write`` method.\n \n Note:\n- `custom_path` and `out` are mutually exclusive.\n+ :attr:`custom_path` and :attr:`out` are mutually exclusive.\n \n Args:\n custom_path (:obj:`str`, optional): Custom path.\n- out (:obj:`object`, optional): A file-like object. Must be opened in binary mode, if\n- applicable.\n+ out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for\n+ writing in binary mode, if applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n \n+ Returns:\n+ :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.\n+ Otherwise, returns the filename downloaded to.\n+\n Raises:\n- ValueError: If both ``custom_path`` and ``out`` are passed.\n+ ValueError: If both :attr:`custom_path` and :attr:`out` are passed.\n \n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n \n # Convert any UTF-8 char into a url encoded ASCII string.\n- sres = urllib_parse.urlsplit(self.file_path)\n- url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n- sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+ url = self._get_encoded_url()\n \n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n-\n+ return out\n else:\n if custom_path:\n filename = custom_path\n@@ -107,3 +109,27 @@\n filename = basename(self.file_path)\n \n self.bot.request.download(url, filename, timeout=timeout)\n+ return filename\n+\n+ def _get_encoded_url(self):\n+ \"\"\"Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string.\"\"\"\n+ sres = urllib_parse.urlsplit(self.file_path)\n+ return urllib_parse.urlunsplit(urllib_parse.SplitResult(\n+ sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n+\n+ def download_as_bytearray(self, buf=None):\n+ \"\"\"Download this file and return it as a bytearray.\n+\n+ Args:\n+ buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.\n+\n+ Returns:\n+ :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly\n+ allocated :obj:`bytearray`.\n+\n+ \"\"\"\n+ if buf is None:\n+ buf = bytearray()\n+\n+ buf.extend(self.bot.request.retrieve(self._get_encoded_url()))\n+ return buf\n", "issue": "get_file_stream support\nnode-telegram-bot-api has added `getFileStream` method , which allows accessing the file stream without downloading it to a folder. It'll be useful.For example, changing a group photo from a photo uploaded by users.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram File.\"\"\"\nfrom os.path import basename\n\nfrom future.backports.urllib import parse as urllib_parse\n\nfrom telegram import TelegramObject\n\n\nclass File(TelegramObject):\n \"\"\"\n This object represents a file ready to be downloaded. The file can be downloaded with\n :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the\n link expires, a new one can be requested by calling getFile.\n\n Note:\n Maximum file size to download is 20 MB\n\n Attributes:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`str`): Optional. File size.\n file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.\n\n Args:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`int`, optional): Optional. File size, if known.\n file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.\n bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n \"\"\"\n\n def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):\n # Required\n self.file_id = str(file_id)\n\n # Optionals\n self.file_size = file_size\n self.file_path = file_path\n\n self.bot = bot\n\n self._id_attrs = (self.file_id,)\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n return cls(bot=bot, **data)\n\n def download(self, custom_path=None, out=None, timeout=None):\n \"\"\"\n Download this file. By default, the file is saved in the current working directory with its\n original filename as reported by Telegram. If a :attr:`custom_path` is supplied, it will be\n saved to that path instead. If :attr:`out` is defined, the file contents will be saved to\n that object using the ``out.write`` method.\n\n Note:\n `custom_path` and `out` are mutually exclusive.\n\n Args:\n custom_path (:obj:`str`, optional): Custom path.\n out (:obj:`object`, optional): A file-like object. 
Must be opened in binary mode, if\n applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n\n Raises:\n ValueError: If both ``custom_path`` and ``out`` are passed.\n\n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n\n # Convert any UTF-8 char into a url encoded ASCII string.\n sres = urllib_parse.urlsplit(self.file_path)\n url = urllib_parse.urlunsplit(urllib_parse.SplitResult(\n sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n\n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n\n else:\n if custom_path:\n filename = custom_path\n else:\n filename = basename(self.file_path)\n\n self.bot.request.download(url, filename, timeout=timeout)\n", "path": "telegram/files/file.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram File.\"\"\"\nfrom os.path import basename\n\nfrom future.backports.urllib import parse as urllib_parse\n\nfrom telegram import TelegramObject\n\n\nclass File(TelegramObject):\n \"\"\"\n This object represents a file ready to be downloaded. The file can be downloaded with\n :attr:`download`. It is guaranteed that the link will be valid for at least 1 hour. When the\n link expires, a new one can be requested by calling getFile.\n\n Note:\n Maximum file size to download is 20 MB\n\n Attributes:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`str`): Optional. File size.\n file_path (:obj:`str`): Optional. File path. Use :attr:`download` to get the file.\n\n Args:\n file_id (:obj:`str`): Unique identifier for this file.\n file_size (:obj:`int`, optional): Optional. File size, if known.\n file_path (:obj:`str`, optional): File path. Use :attr:`download` to get the file.\n bot (:obj:`telegram.Bot`, optional): Bot to use with shortcut method.\n **kwargs (:obj:`dict`): Arbitrary keyword arguments.\n\n \"\"\"\n\n def __init__(self, file_id, bot=None, file_size=None, file_path=None, **kwargs):\n # Required\n self.file_id = str(file_id)\n\n # Optionals\n self.file_size = file_size\n self.file_path = file_path\n\n self.bot = bot\n\n self._id_attrs = (self.file_id,)\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n return cls(bot=bot, **data)\n\n def download(self, custom_path=None, out=None, timeout=None):\n \"\"\"\n Download this file. By default, the file is saved in the current working directory with its\n original filename as reported by Telegram. 
If a :attr:`custom_path` is supplied, it will be\n saved to that path instead. If :attr:`out` is defined, the file contents will be saved to\n that object using the ``out.write`` method.\n\n Note:\n :attr:`custom_path` and :attr:`out` are mutually exclusive.\n\n Args:\n custom_path (:obj:`str`, optional): Custom path.\n out (:obj:`io.BufferedWriter`, optional): A file-like object. Must be opened for\n writing in binary mode, if applicable.\n timeout (:obj:`int` | :obj:`float`, optional): If this value is specified, use it as\n the read timeout from the server (instead of the one specified during creation of\n the connection pool).\n\n Returns:\n :obj:`str` | :obj:`io.BufferedWriter`: The same object as :attr:`out` if specified.\n Otherwise, returns the filename downloaded to.\n\n Raises:\n ValueError: If both :attr:`custom_path` and :attr:`out` are passed.\n\n \"\"\"\n if custom_path is not None and out is not None:\n raise ValueError('custom_path and out are mutually exclusive')\n\n # Convert any UTF-8 char into a url encoded ASCII string.\n url = self._get_encoded_url()\n\n if out:\n buf = self.bot.request.retrieve(url)\n out.write(buf)\n return out\n else:\n if custom_path:\n filename = custom_path\n else:\n filename = basename(self.file_path)\n\n self.bot.request.download(url, filename, timeout=timeout)\n return filename\n\n def _get_encoded_url(self):\n \"\"\"Convert any UTF-8 char in :obj:`File.file_path` into a url encoded ASCII string.\"\"\"\n sres = urllib_parse.urlsplit(self.file_path)\n return urllib_parse.urlunsplit(urllib_parse.SplitResult(\n sres.scheme, sres.netloc, urllib_parse.quote(sres.path), sres.query, sres.fragment))\n\n def download_as_bytearray(self, buf=None):\n \"\"\"Download this file and return it as a bytearray.\n\n Args:\n buf (:obj:`bytearray`, optional): Extend the given bytearray with the downloaded data.\n\n Returns:\n :obj:`bytearray`: The same object as :attr:`buf` if it was specified. Otherwise a newly\n allocated :obj:`bytearray`.\n\n \"\"\"\n if buf is None:\n buf = bytearray()\n\n buf.extend(self.bot.request.retrieve(self._get_encoded_url()))\n return buf\n", "path": "telegram/files/file.py"}]} | 1,491 | 705 |
gh_patches_debug_32392 | rasdani/github-patches | git_diff | Qiskit__qiskit-2387 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`set_qiskit_logger()` is hard to use
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
A utility method [`set_qiskit_logger()`](https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.
It has room for improvement, namely:
- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file or module to log.
- One cannot customize the LOG_LEVEL.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/tools/logging.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Utilities for logging."""
16
17 import logging
18 from logging.config import dictConfig
19
20
21 class SimpleInfoFormatter(logging.Formatter):
22 """Custom Formatter that uses a simple format for INFO."""
23 _style_info = logging._STYLES['%'][0]('%(message)s')
24
25 def formatMessage(self, record):
26 if record.levelno == logging.INFO:
27 return self._style_info.format(record)
28 return logging.Formatter.formatMessage(self, record)
29
30
31 QISKIT_LOGGING_CONFIG = {
32 'version': 1,
33 'disable_existing_loggers': False,
34 'formatters': {
35 'f': {
36 '()': SimpleInfoFormatter,
37 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
38 },
39 },
40 'handlers': {
41 'h': {
42 'class': 'logging.StreamHandler',
43 'formatter': 'f'
44 }
45 },
46 'loggers': {
47 'qiskit': {
48 'handlers': ['h'],
49 'level': logging.INFO,
50 },
51 }
52 }
53
54
55 def set_qiskit_logger():
56 """Update 'qiskit' logger configuration using a SDK default one.
57
58 Update the configuration of the 'qiskit' logger using the default SDK
59 configuration provided by `QISKIT_LOGGING_CONFIG`:
60
61 * console logging using a custom format for levels != INFO.
62 * console logging with simple format for level INFO.
63 * set logger level to INFO.
64
65 Warning:
66 This function modifies the configuration of the standard logging system
67 for the 'qiskit.*' loggers, and might interfere with custom logger
68 configurations.
69 """
70 dictConfig(QISKIT_LOGGING_CONFIG)
71
72
73 def unset_qiskit_logger():
74 """Remove the handlers for the 'qiskit' logger."""
75 qiskit_logger = logging.getLogger('qiskit')
76 for handler in qiskit_logger.handlers:
77 qiskit_logger.removeHandler(handler)
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py
deleted file mode 100644
--- a/qiskit/tools/logging.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# This code is part of Qiskit.
-#
-# (C) Copyright IBM 2017.
-#
-# This code is licensed under the Apache License, Version 2.0. You may
-# obtain a copy of this license in the LICENSE.txt file in the root directory
-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
-#
-# Any modifications or derivative works of this code must retain this
-# copyright notice, and modified files need to carry a notice indicating
-# that they have been altered from the originals.
-
-"""Utilities for logging."""
-
-import logging
-from logging.config import dictConfig
-
-
-class SimpleInfoFormatter(logging.Formatter):
- """Custom Formatter that uses a simple format for INFO."""
- _style_info = logging._STYLES['%'][0]('%(message)s')
-
- def formatMessage(self, record):
- if record.levelno == logging.INFO:
- return self._style_info.format(record)
- return logging.Formatter.formatMessage(self, record)
-
-
-QISKIT_LOGGING_CONFIG = {
- 'version': 1,
- 'disable_existing_loggers': False,
- 'formatters': {
- 'f': {
- '()': SimpleInfoFormatter,
- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'
- },
- },
- 'handlers': {
- 'h': {
- 'class': 'logging.StreamHandler',
- 'formatter': 'f'
- }
- },
- 'loggers': {
- 'qiskit': {
- 'handlers': ['h'],
- 'level': logging.INFO,
- },
- }
-}
-
-
-def set_qiskit_logger():
- """Update 'qiskit' logger configuration using a SDK default one.
-
- Update the configuration of the 'qiskit' logger using the default SDK
- configuration provided by `QISKIT_LOGGING_CONFIG`:
-
- * console logging using a custom format for levels != INFO.
- * console logging with simple format for level INFO.
- * set logger level to INFO.
-
- Warning:
- This function modifies the configuration of the standard logging system
- for the 'qiskit.*' loggers, and might interfere with custom logger
- configurations.
- """
- dictConfig(QISKIT_LOGGING_CONFIG)
-
-
-def unset_qiskit_logger():
- """Remove the handlers for the 'qiskit' logger."""
- qiskit_logger = logging.getLogger('qiskit')
- for handler in qiskit_logger.handlers:
- qiskit_logger.removeHandler(handler)
| {"golden_diff": "diff --git a/qiskit/tools/logging.py b/qiskit/tools/logging.py\ndeleted file mode 100644\n--- a/qiskit/tools/logging.py\n+++ /dev/null\n@@ -1,77 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\n-# This code is part of Qiskit.\n-#\n-# (C) Copyright IBM 2017.\n-#\n-# This code is licensed under the Apache License, Version 2.0. You may\n-# obtain a copy of this license in the LICENSE.txt file in the root directory\n-# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n-#\n-# Any modifications or derivative works of this code must retain this\n-# copyright notice, and modified files need to carry a notice indicating\n-# that they have been altered from the originals.\n-\n-\"\"\"Utilities for logging.\"\"\"\n-\n-import logging\n-from logging.config import dictConfig\n-\n-\n-class SimpleInfoFormatter(logging.Formatter):\n- \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n- _style_info = logging._STYLES['%'][0]('%(message)s')\n-\n- def formatMessage(self, record):\n- if record.levelno == logging.INFO:\n- return self._style_info.format(record)\n- return logging.Formatter.formatMessage(self, record)\n-\n-\n-QISKIT_LOGGING_CONFIG = {\n- 'version': 1,\n- 'disable_existing_loggers': False,\n- 'formatters': {\n- 'f': {\n- '()': SimpleInfoFormatter,\n- 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n- },\n- },\n- 'handlers': {\n- 'h': {\n- 'class': 'logging.StreamHandler',\n- 'formatter': 'f'\n- }\n- },\n- 'loggers': {\n- 'qiskit': {\n- 'handlers': ['h'],\n- 'level': logging.INFO,\n- },\n- }\n-}\n-\n-\n-def set_qiskit_logger():\n- \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n-\n- Update the configuration of the 'qiskit' logger using the default SDK\n- configuration provided by `QISKIT_LOGGING_CONFIG`:\n-\n- * console logging using a custom format for levels != INFO.\n- * console logging with simple format for level INFO.\n- * set logger level to INFO.\n-\n- Warning:\n- This function modifies the configuration of the standard logging system\n- for the 'qiskit.*' loggers, and might interfere with custom logger\n- configurations.\n- \"\"\"\n- dictConfig(QISKIT_LOGGING_CONFIG)\n-\n-\n-def unset_qiskit_logger():\n- \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n- qiskit_logger = logging.getLogger('qiskit')\n- for handler in qiskit_logger.handlers:\n- qiskit_logger.removeHandler(handler)\n", "issue": "`set_qiskit_logger()` is hard to use\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\nA utility method `[set_qiskit_logger()]`(https://github.com/Qiskit/qiskit-terra/blob/cac7c6a2631d76ef3e811a5c943a2b9101fed240/qiskit/_logging.py#L48) exists to log qiskit-terra's execution flow.\r\n\r\nIt has room for improvement, namely:\r\n- It turns on logging everywhere, thus generating a lot of noise. One should be able to specify a certain file to or module to log.\r\n- One cannot customize the LOG_LEVEL.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Utilities for logging.\"\"\"\n\nimport logging\nfrom logging.config import dictConfig\n\n\nclass SimpleInfoFormatter(logging.Formatter):\n \"\"\"Custom Formatter that uses a simple format for INFO.\"\"\"\n _style_info = logging._STYLES['%'][0]('%(message)s')\n\n def formatMessage(self, record):\n if record.levelno == logging.INFO:\n return self._style_info.format(record)\n return logging.Formatter.formatMessage(self, record)\n\n\nQISKIT_LOGGING_CONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'f': {\n '()': SimpleInfoFormatter,\n 'format': '%(asctime)s:%(name)s:%(levelname)s: %(message)s'\n },\n },\n 'handlers': {\n 'h': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'f'\n }\n },\n 'loggers': {\n 'qiskit': {\n 'handlers': ['h'],\n 'level': logging.INFO,\n },\n }\n}\n\n\ndef set_qiskit_logger():\n \"\"\"Update 'qiskit' logger configuration using a SDK default one.\n\n Update the configuration of the 'qiskit' logger using the default SDK\n configuration provided by `QISKIT_LOGGING_CONFIG`:\n\n * console logging using a custom format for levels != INFO.\n * console logging with simple format for level INFO.\n * set logger level to INFO.\n\n Warning:\n This function modifies the configuration of the standard logging system\n for the 'qiskit.*' loggers, and might interfere with custom logger\n configurations.\n \"\"\"\n dictConfig(QISKIT_LOGGING_CONFIG)\n\n\ndef unset_qiskit_logger():\n \"\"\"Remove the handlers for the 'qiskit' logger.\"\"\"\n qiskit_logger = logging.getLogger('qiskit')\n for handler in qiskit_logger.handlers:\n qiskit_logger.removeHandler(handler)\n", "path": "qiskit/tools/logging.py"}], "after_files": [{"content": null, "path": "qiskit/tools/logging.py"}]} | 1,142 | 656 |
gh_patches_debug_6305 | rasdani/github-patches | git_diff | scipy__scipy-5920 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"master" fails to build with MKL
Recently, when I try to build the "master" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:
``` .Python
ValueError: list.remove(x): x not in list
```
The error traces to this block of code <a href="https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.
I don't know what happened recently that caused this setup to break (it has never happened before now), but is anyone else getting this issue?
Setup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scipy/sparse/linalg/dsolve/setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import division, print_function, absolute_import
3
4 from os.path import join, dirname
5 import sys
6 import os
7 import glob
8
9
10 def configuration(parent_package='',top_path=None):
11 from numpy.distutils.misc_util import Configuration
12 from numpy.distutils.system_info import get_info
13 from scipy._build_utils import get_sgemv_fix
14 from scipy._build_utils import numpy_nodepr_api
15
16 config = Configuration('dsolve',parent_package,top_path)
17 config.add_data_dir('tests')
18
19 lapack_opt = get_info('lapack_opt',notfound_action=2)
20 if sys.platform == 'win32':
21 superlu_defs = [('NO_TIMER',1)]
22 else:
23 superlu_defs = []
24 superlu_defs.append(('USE_VENDOR_BLAS',1))
25
26 superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')
27
28 sources = list(glob.glob(join(superlu_src, '*.c')))
29 headers = list(glob.glob(join(superlu_src, '*.h')))
30 if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
31 # when using MSVC + MKL, lsame is already in MKL
32 sources.remove(join(superlu_src, 'lsame.c'))
33
34 config.add_library('superlu_src',
35 sources=sources,
36 macros=superlu_defs,
37 include_dirs=[superlu_src],
38 )
39
40 # Extension
41 ext_sources = ['_superlumodule.c',
42 '_superlu_utils.c',
43 '_superluobject.c']
44 ext_sources += get_sgemv_fix(lapack_opt)
45
46 config.add_extension('_superlu',
47 sources=ext_sources,
48 libraries=['superlu_src'],
49 depends=(sources + headers),
50 extra_info=lapack_opt,
51 **numpy_nodepr_api
52 )
53
54 return config
55
56 if __name__ == '__main__':
57 from numpy.distutils.core import setup
58 setup(**configuration(top_path='').todict())
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py
--- a/scipy/sparse/linalg/dsolve/setup.py
+++ b/scipy/sparse/linalg/dsolve/setup.py
@@ -27,9 +27,6 @@
sources = list(glob.glob(join(superlu_src, '*.c')))
headers = list(glob.glob(join(superlu_src, '*.h')))
- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):
- # when using MSVC + MKL, lsame is already in MKL
- sources.remove(join(superlu_src, 'lsame.c'))
config.add_library('superlu_src',
sources=sources,
| {"golden_diff": "diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py\n--- a/scipy/sparse/linalg/dsolve/setup.py\n+++ b/scipy/sparse/linalg/dsolve/setup.py\n@@ -27,9 +27,6 @@\n \n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n- if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n- # when using MSVC + MKL, lsame is already in MKL\n- sources.remove(join(superlu_src, 'lsame.c'))\n \n config.add_library('superlu_src',\n sources=sources,\n", "issue": "\"master\" fails to build with MKL\nRecently, when I try to build the \"master\" branch, I keep getting this error from `linalg/solve/dsolve/setup.py`:\n\n``` .Python\nValueError: list.remove(x): x not in list\n```\n\nThe error traces to this block of code <a href=\"https://github.com/scipy/scipy/blob/master/scipy/sparse/linalg/dsolve/setup.py#L30-L32\">here</a>, for it seems that the `lsame.c` file does not exist in the list of sources. Sure enough, when I remove this check, `scipy` builds successfully and tests pass.\n\nI don't know what happened recently that caused this setup to break (it has never happened before until now), but is anyone else getting this issue?\n\nSetup: Python 3.5.1, Windows 7, `numpy` 1.10.4, MKL\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\nimport sys\nimport os\nimport glob\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n from scipy._build_utils import get_sgemv_fix\n from scipy._build_utils import numpy_nodepr_api\n\n config = Configuration('dsolve',parent_package,top_path)\n config.add_data_dir('tests')\n\n lapack_opt = get_info('lapack_opt',notfound_action=2)\n if sys.platform == 'win32':\n superlu_defs = [('NO_TIMER',1)]\n else:\n superlu_defs = []\n superlu_defs.append(('USE_VENDOR_BLAS',1))\n\n superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')\n\n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n if os.name == 'nt' and ('FPATH' in os.environ or 'MKLROOT' in os.environ):\n # when using MSVC + MKL, lsame is already in MKL\n sources.remove(join(superlu_src, 'lsame.c'))\n\n config.add_library('superlu_src',\n sources=sources,\n macros=superlu_defs,\n include_dirs=[superlu_src],\n )\n\n # Extension\n ext_sources = ['_superlumodule.c',\n '_superlu_utils.c',\n '_superluobject.c']\n ext_sources += get_sgemv_fix(lapack_opt)\n\n config.add_extension('_superlu',\n sources=ext_sources,\n libraries=['superlu_src'],\n depends=(sources + headers),\n extra_info=lapack_opt,\n **numpy_nodepr_api\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/sparse/linalg/dsolve/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, print_function, absolute_import\n\nfrom os.path import join, dirname\nimport sys\nimport os\nimport glob\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n from scipy._build_utils import get_sgemv_fix\n from scipy._build_utils import numpy_nodepr_api\n\n config = Configuration('dsolve',parent_package,top_path)\n config.add_data_dir('tests')\n\n 
lapack_opt = get_info('lapack_opt',notfound_action=2)\n if sys.platform == 'win32':\n superlu_defs = [('NO_TIMER',1)]\n else:\n superlu_defs = []\n superlu_defs.append(('USE_VENDOR_BLAS',1))\n\n superlu_src = join(dirname(__file__), 'SuperLU', 'SRC')\n\n sources = list(glob.glob(join(superlu_src, '*.c')))\n headers = list(glob.glob(join(superlu_src, '*.h')))\n\n config.add_library('superlu_src',\n sources=sources,\n macros=superlu_defs,\n include_dirs=[superlu_src],\n )\n\n # Extension\n ext_sources = ['_superlumodule.c',\n '_superlu_utils.c',\n '_superluobject.c']\n ext_sources += get_sgemv_fix(lapack_opt)\n\n config.add_extension('_superlu',\n sources=ext_sources,\n libraries=['superlu_src'],\n depends=(sources + headers),\n extra_info=lapack_opt,\n **numpy_nodepr_api\n )\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n", "path": "scipy/sparse/linalg/dsolve/setup.py"}]} | 1,015 | 170 |
gh_patches_debug_624 | rasdani/github-patches | git_diff | codespell-project__codespell-89 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Makefile is broken
The makefile is no longer working, since there is no longer a codespell.py
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 # adapted from mne-python
4
5 import os
6 from os import path as op
7
8 try:
9 import setuptools # noqa to allow --develop
10 except Exception:
11 pass
12 from distutils.core import setup
13
14 from codespell_lib import __version__
15
16 DISTNAME = 'codespell'
17 DESCRIPTION = """Codespell"""
18 MAINTAINER = 'Lucas De Marchi'
19 MAINTAINER_EMAIL = '[email protected]'
20 URL = 'https://github.com/lucasdemarchi/codespell/'
21 LICENSE = 'GPL v2'
22 DOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'
23 with open('README.rst', 'r') as f:
24 LONG_DESCRIPTION = f.read()
25
26 if __name__ == "__main__":
27 if os.path.exists('MANIFEST'):
28 os.remove('MANIFEST')
29
30 setup(name=DISTNAME,
31 maintainer=MAINTAINER,
32 include_package_data=True,
33 maintainer_email=MAINTAINER_EMAIL,
34 description=DESCRIPTION,
35 license=LICENSE,
36 url=URL,
37 version=__version__,
38 download_url=DOWNLOAD_URL,
39 long_description=LONG_DESCRIPTION,
40 zip_safe=False,
41 classifiers=['Intended Audience :: Developers',
42 'License :: OSI Approved',
43 'Programming Language :: Python',
44 'Topic :: Software Development',
45 'Operating System :: Microsoft :: Windows',
46 'Operating System :: POSIX',
47 'Operating System :: Unix',
48 'Operating System :: MacOS'],
49 platforms='any',
50 packages=[
51 'codespell_lib', 'codespell_lib.tests',
52 'codespell_lib.data',
53 ],
54 package_data={'codespell_lib': [
55 op.join('data', 'dictionary.txt'),
56 op.join('data', 'linux-kernel.exclude'),
57 ]},
58 scripts=['bin/codespell.py'])
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,4 +55,4 @@
op.join('data', 'dictionary.txt'),
op.join('data', 'linux-kernel.exclude'),
]},
- scripts=['bin/codespell.py'])
+ scripts=['bin/codespell'])
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,4 +55,4 @@\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n- scripts=['bin/codespell.py'])\n+ scripts=['bin/codespell'])\n", "issue": "Makefile is broken\nThe makefile is no longer working, since there is no longer a codespell.py\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\n# adapted from mne-python\n\nimport os\nfrom os import path as op\n\ntry:\n import setuptools # noqa to allow --develop\nexcept Exception:\n pass\nfrom distutils.core import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/lucasdemarchi/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n packages=[\n 'codespell_lib', 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n scripts=['bin/codespell.py'])\n", "path": "setup.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\n# adapted from mne-python\n\nimport os\nfrom os import path as op\n\ntry:\n import setuptools # noqa to allow --develop\nexcept Exception:\n pass\nfrom distutils.core import setup\n\nfrom codespell_lib import __version__\n\nDISTNAME = 'codespell'\nDESCRIPTION = \"\"\"Codespell\"\"\"\nMAINTAINER = 'Lucas De Marchi'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://github.com/lucasdemarchi/codespell/'\nLICENSE = 'GPL v2'\nDOWNLOAD_URL = 'https://github.com/lucasdemarchi/codespell/'\nwith open('README.rst', 'r') as f:\n LONG_DESCRIPTION = f.read()\n\nif __name__ == \"__main__\":\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n include_package_data=True,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=__version__,\n download_url=DOWNLOAD_URL,\n long_description=LONG_DESCRIPTION,\n zip_safe=False,\n classifiers=['Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS'],\n platforms='any',\n packages=[\n 'codespell_lib', 'codespell_lib.tests',\n 'codespell_lib.data',\n ],\n package_data={'codespell_lib': [\n op.join('data', 'dictionary.txt'),\n op.join('data', 'linux-kernel.exclude'),\n ]},\n scripts=['bin/codespell'])\n", "path": "setup.py"}]} | 779 | 76 |
gh_patches_debug_28536 | rasdani/github-patches | git_diff | systemd__mkosi-499 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include man page in Python package
When building the man page with
```
pandoc -t man -o mkosi.1 mkosi.md
```
the output looks rather unpleasing:
```
mkosi ‐ Build Legacy‐Free OS Images mkosi [options...] build
mkosi [options...] clean mkosi [options...] summary mkosi [op‐
tions...] shell [command line...] mkosi [options...] boot [com‐
mand line...] mkosi [options...] qemu mkosi is a tool for easily
building legacy‐free OS images. It’s a fancy wrapper around dnf
‐‐installroot, debootstrap, pacstrap and zypper that may generate
disk images with a number of bells and whistles. The following
output formats are supported: Raw GPT disk image, with ext4 as
[...]
```
We should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkosi/__main__.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2 # PYTHON_ARGCOMPLETE_OK
3 import os
4 import sys
5
6 from . import parse_args, complete_step, run_verb, die, MkosiException
7
8
9 try:
10 args = parse_args()
11
12 for job_name, a in args.items():
13 # Change working directory if --directory is passed
14 if a.directory:
15 work_dir = a.directory
16 if os.path.isdir(work_dir):
17 os.chdir(work_dir)
18 else:
19 die(f"Error: {work_dir} is not a directory!")
20 with complete_step(f"Processing {job_name}"):
21 run_verb(a)
22 except MkosiException:
23 sys.exit(1)
24
```
Path: `setup.py`
Content:
```
1 #!/usr/bin/python3
2 # SPDX-License-Identifier: LGPL-2.1+
3
4 from setuptools import setup, Command
5
6 class BuildManpage(Command):
7 description = ('builds the manpage')
8 user_options = []
9
10 def initialize_options(self):
11 pass
12 def finalize_options(self):
13 pass
14
15 def run(self):
16 self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
17
18
19 setup(
20 name="mkosi",
21 version="5",
22 description="Create legacy-free OS images",
23 url="https://github.com/systemd/mkosi",
24 maintainer="mkosi contributors",
25 maintainer_email="[email protected]",
26 license="LGPLv2+",
27 python_requires=">=3.6",
28 packages = ["mkosi"],
29 cmdclass = { "man": BuildManpage },
30 entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
31 )
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkosi/__main__.py b/mkosi/__main__.py
--- a/mkosi/__main__.py
+++ b/mkosi/__main__.py
@@ -6,18 +6,23 @@
from . import parse_args, complete_step, run_verb, die, MkosiException
-try:
- args = parse_args()
+def main() -> None:
+ try:
+ args = parse_args()
- for job_name, a in args.items():
- # Change working directory if --directory is passed
- if a.directory:
- work_dir = a.directory
- if os.path.isdir(work_dir):
- os.chdir(work_dir)
- else:
- die(f"Error: {work_dir} is not a directory!")
- with complete_step(f"Processing {job_name}"):
- run_verb(a)
-except MkosiException:
- sys.exit(1)
+ for job_name, a in args.items():
+ # Change working directory if --directory is passed
+ if a.directory:
+ work_dir = a.directory
+ if os.path.isdir(work_dir):
+ os.chdir(work_dir)
+ else:
+ die(f"Error: {work_dir} is not a directory!")
+ with complete_step(f"Processing {job_name}"):
+ run_verb(a)
+ except MkosiException:
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,7 +13,7 @@
pass
def run(self):
- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])
+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])
setup(
@@ -27,5 +27,6 @@
python_requires=">=3.6",
packages = ["mkosi"],
cmdclass = { "man": BuildManpage },
- entry_points = {"console_scripts": ["mkosi=mkosi.__main__"]},
+ data_files = [('share/man/man1', ["man/mkosi.1"])],
+ entry_points = {"console_scripts": ["mkosi=mkosi.__main__:main"]},
)
| {"golden_diff": "diff --git a/mkosi/__main__.py b/mkosi/__main__.py\n--- a/mkosi/__main__.py\n+++ b/mkosi/__main__.py\n@@ -6,18 +6,23 @@\n from . import parse_args, complete_step, run_verb, die, MkosiException\n \n \n-try:\n- args = parse_args()\n+def main() -> None:\n+ try:\n+ args = parse_args()\n \n- for job_name, a in args.items():\n- # Change working directory if --directory is passed\n- if a.directory:\n- work_dir = a.directory\n- if os.path.isdir(work_dir):\n- os.chdir(work_dir)\n- else:\n- die(f\"Error: {work_dir} is not a directory!\")\n- with complete_step(f\"Processing {job_name}\"):\n- run_verb(a)\n-except MkosiException:\n- sys.exit(1)\n+ for job_name, a in args.items():\n+ # Change working directory if --directory is passed\n+ if a.directory:\n+ work_dir = a.directory\n+ if os.path.isdir(work_dir):\n+ os.chdir(work_dir)\n+ else:\n+ die(f\"Error: {work_dir} is not a directory!\")\n+ with complete_step(f\"Processing {job_name}\"):\n+ run_verb(a)\n+ except MkosiException:\n+ sys.exit(1)\n+\n+\n+if __name__ == \"__main__\":\n+ main()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,7 +13,7 @@\n pass\n \n def run(self):\n- self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n+ self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])\n \n \n setup(\n@@ -27,5 +27,6 @@\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n- entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n+ data_files = [('share/man/man1', [\"man/mkosi.1\"])],\n+ entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__:main\"]},\n )\n", "issue": "Include man page in Python package\nWhen building the man page with\r\n```\r\npandoc -t man -o mkosi.1 mkosi.md\r\n```\r\nthe output looks rather unpleasing:\r\n```\r\nmkosi \u2010 Build Legacy\u2010Free OS Images mkosi [options...] build\r\nmkosi [options...] clean mkosi [options...] summary mkosi [op\u2010\r\ntions...] shell [command line...] mkosi [options...] boot [com\u2010\r\nmand line...] mkosi [options...] qemu mkosi is a tool for easily\r\nbuilding legacy\u2010free OS images. It\u2019s a fancy wrapper around dnf\r\n\u2010\u2010installroot, debootstrap, pacstrap and zypper that may generate\r\ndisk images with a number of bells and whistles. The following\r\noutput formats are supported: Raw GPT disk image, with ext4 as\r\n[...]\r\n```\r\n\r\nWe should fix that and maybe generate the man page automatically via a commit hook, so that the man page can be included (and installed) when doing a pip install.\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\nimport os\nimport sys\n\nfrom . 
import parse_args, complete_step, run_verb, die, MkosiException\n\n\ntry:\n args = parse_args()\n\n for job_name, a in args.items():\n # Change working directory if --directory is passed\n if a.directory:\n work_dir = a.directory\n if os.path.isdir(work_dir):\n os.chdir(work_dir)\n else:\n die(f\"Error: {work_dir} is not a directory!\")\n with complete_step(f\"Processing {job_name}\"):\n run_verb(a)\nexcept MkosiException:\n sys.exit(1)\n", "path": "mkosi/__main__.py"}, {"content": "#!/usr/bin/python3\n# SPDX-License-Identifier: LGPL-2.1+\n\nfrom setuptools import setup, Command\n\nclass BuildManpage(Command):\n description = ('builds the manpage')\n user_options = []\n\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n\n def run(self):\n self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'mkosi.1', 'mkosi.md'])\n\n\nsetup(\n name=\"mkosi\",\n version=\"5\",\n description=\"Create legacy-free OS images\",\n url=\"https://github.com/systemd/mkosi\",\n maintainer=\"mkosi contributors\",\n maintainer_email=\"[email protected]\",\n license=\"LGPLv2+\",\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__\"]},\n)\n", "path": "setup.py"}], "after_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n# PYTHON_ARGCOMPLETE_OK\nimport os\nimport sys\n\nfrom . import parse_args, complete_step, run_verb, die, MkosiException\n\n\ndef main() -> None:\n try:\n args = parse_args()\n\n for job_name, a in args.items():\n # Change working directory if --directory is passed\n if a.directory:\n work_dir = a.directory\n if os.path.isdir(work_dir):\n os.chdir(work_dir)\n else:\n die(f\"Error: {work_dir} is not a directory!\")\n with complete_step(f\"Processing {job_name}\"):\n run_verb(a)\n except MkosiException:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mkosi/__main__.py"}, {"content": "#!/usr/bin/python3\n# SPDX-License-Identifier: LGPL-2.1+\n\nfrom setuptools import setup, Command\n\nclass BuildManpage(Command):\n description = ('builds the manpage')\n user_options = []\n\n def initialize_options(self):\n pass\n def finalize_options(self):\n pass\n\n def run(self):\n self.spawn(['pandoc', '-t', 'man', '-s', '-o', 'man/mkosi.1', 'mkosi.md'])\n\n\nsetup(\n name=\"mkosi\",\n version=\"5\",\n description=\"Create legacy-free OS images\",\n url=\"https://github.com/systemd/mkosi\",\n maintainer=\"mkosi contributors\",\n maintainer_email=\"[email protected]\",\n license=\"LGPLv2+\",\n python_requires=\">=3.6\",\n packages = [\"mkosi\"],\n cmdclass = { \"man\": BuildManpage },\n data_files = [('share/man/man1', [\"man/mkosi.1\"])],\n entry_points = {\"console_scripts\": [\"mkosi=mkosi.__main__:main\"]},\n)\n", "path": "setup.py"}]} | 972 | 540 |
gh_patches_debug_419 | rasdani/github-patches | git_diff | bokeh__bokeh-8651 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
grid_axis_alignment_no_toolbar is broken in Windows (32-bit)
`examples/integration/layout/grid_axis_alignment_no_toolbar`:

All axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/integration/layout/grid_axis_alignment_no_toolbar.py`
Content:
```
1 from __future__ import absolute_import
2
3 import numpy as np
4
5 from bokeh.plotting import figure, save
6 from bokeh.layouts import gridplot
7
8 coeffs = [10**0, 10**3, 10**6, 10**9]
9 V = np.arange(10)
10
11 figs = []
12
13 for ycoeff in coeffs:
14 row = []
15 for xcoeff in coeffs:
16 fig = figure(plot_height=200, plot_width=200)
17 fig.xaxis[0].formatter.use_scientific = False
18 fig.yaxis[0].formatter.use_scientific = False
19 fig.xaxis[0].major_label_orientation = "vertical"
20 fig.yaxis[0].major_label_orientation = "horizontal"
21 fig.scatter(V*xcoeff, V*ycoeff)
22 row.append(fig)
23 figs.append(row)
24
25 grid = gridplot(figs, toolbar_location=None)
26
27 save(grid)
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py
+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py
@@ -6,7 +6,7 @@
from bokeh.layouts import gridplot
coeffs = [10**0, 10**3, 10**6, 10**9]
-V = np.arange(10)
+V = np.arange(10, dtype="int64")
figs = []
| {"golden_diff": "diff --git a/examples/integration/layout/grid_axis_alignment_no_toolbar.py b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n--- a/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n+++ b/examples/integration/layout/grid_axis_alignment_no_toolbar.py\n@@ -6,7 +6,7 @@\n from bokeh.layouts import gridplot\n \n coeffs = [10**0, 10**3, 10**6, 10**9]\n-V = np.arange(10)\n+V = np.arange(10, dtype=\"int64\")\n \n figs = []\n", "issue": "grid_axis_alignment_no_toolbar is broken in Windows (32-bit)\n`examples/integration/layout/grid_axis_alignment_no_toolbar`:\r\n\r\n\r\n\r\nAll axes should be in 0 to 10, 10^3, 10^6, 10^9 ranges. All major browsers are affected the same way. I suspect this may be due to 32-bit system architecture. It would be good if someone with 64-bit windows could run this example for comparison.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10)\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n", "path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, save\nfrom bokeh.layouts import gridplot\n\ncoeffs = [10**0, 10**3, 10**6, 10**9]\nV = np.arange(10, dtype=\"int64\")\n\nfigs = []\n\nfor ycoeff in coeffs:\n row = []\n for xcoeff in coeffs:\n fig = figure(plot_height=200, plot_width=200)\n fig.xaxis[0].formatter.use_scientific = False\n fig.yaxis[0].formatter.use_scientific = False\n fig.xaxis[0].major_label_orientation = \"vertical\"\n fig.yaxis[0].major_label_orientation = \"horizontal\"\n fig.scatter(V*xcoeff, V*ycoeff)\n row.append(fig)\n figs.append(row)\n\ngrid = gridplot(figs, toolbar_location=None)\n\nsave(grid)\n", "path": "examples/integration/layout/grid_axis_alignment_no_toolbar.py"}]} | 674 | 124 |
gh_patches_debug_17856 | rasdani/github-patches | git_diff | mdn__kuma-5636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary SQL select for react home landing page
The React landing page view extracts the default filters every time, but the result is never used.
<img width="1502" alt="Screen Shot 2019-08-13 at 2 44 39 PM" src="https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/landing/views.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django.http import HttpResponse
5 from django.shortcuts import redirect, render
6 from django.views import static
7 from django.views.decorators.cache import never_cache
8 from django.views.generic import RedirectView
9
10 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
11 from kuma.core.utils import is_wiki
12 from kuma.feeder.models import Bundle
13 from kuma.feeder.sections import SECTION_HACKS
14 from kuma.search.models import Filter
15
16 from .utils import favicon_url
17
18
19 @shared_cache_control
20 def contribute_json(request):
21 return static.serve(request, 'contribute.json', document_root=settings.ROOT)
22
23
24 @shared_cache_control
25 def home(request):
26 """Home page."""
27 if is_wiki(request):
28 return render_home(request, 'landing/homepage.html')
29 return render_home(request, 'landing/react_homepage.html')
30
31
32 def render_home(request, template_name):
33 """Render the home page with the template named "template_name"."""
34 updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
35 default_filters = Filter.objects.default_filters()
36 context = {
37 'updates': updates,
38 'default_filters': default_filters,
39 }
40 return render(request, template_name, context)
41
42
43 @ensure_wiki_domain
44 @never_cache
45 def maintenance_mode(request):
46 if settings.MAINTENANCE_MODE:
47 return render(request, 'landing/maintenance-mode.html')
48 else:
49 return redirect('home')
50
51
52 @ensure_wiki_domain
53 @shared_cache_control
54 def promote_buttons(request):
55 """Bug 646192: MDN affiliate buttons"""
56 return render(request, 'landing/promote_buttons.html')
57
58
59 ROBOTS_ALLOWED_TXT = '''\
60 User-agent: *
61 Sitemap: https://developer.mozilla.org/sitemap.xml
62
63 Disallow: /admin/
64 Disallow: /api/
65 Disallow: /*/dashboards/*
66 Disallow: /*docs/feeds
67 Disallow: /*docs/templates
68 Disallow: /*docs*Template:
69 Disallow: /*docs/all
70 Disallow: /*docs/tag*
71 Disallow: /*docs/needs-review*
72 Disallow: /*docs/localization-tag*
73 Disallow: /*docs/with-errors
74 Disallow: /*docs/without-parent
75 Disallow: /*docs/top-level
76 Disallow: /*docs/new
77 Disallow: /*docs/get-documents
78 Disallow: /*docs/submit_akismet_spam
79 Disallow: /*docs/load*
80 Disallow: /*docs/Experiment:*
81 Disallow: /*$api
82 Disallow: /*$compare
83 Disallow: /*$revision
84 Disallow: /*$history
85 Disallow: /*$children
86 Disallow: /*$flag
87 Disallow: /*$locales
88 Disallow: /*$toc
89 Disallow: /*$move
90 Disallow: /*$quick-review
91 Disallow: /*$samples
92 Disallow: /*$revert
93 Disallow: /*$repair_breadcrumbs
94 Disallow: /*$delete
95 Disallow: /*$restore
96 Disallow: /*$purge
97 Disallow: /*$subscribe
98 Disallow: /*$subscribe_to_tree
99 Disallow: /*$vote
100 Disallow: /*docs.json
101 Disallow: /*docs/ckeditor_config.js
102 Disallow: /*/files/
103 Disallow: /media
104 Disallow: /*move-requested
105 Disallow: /*preview-wiki-content
106 Disallow: /*profiles*/edit
107 Disallow: /skins
108 Disallow: /*type=feed
109 Disallow: /*users/
110 ''' + '\n'.join('Disallow: /{locale}/search'.format(locale=locale)
111 for locale in settings.ENABLED_LOCALES)
112
113 ROBOTS_GO_AWAY_TXT = '''\
114 User-Agent: *
115 Disallow: /
116 '''
117
118
119 @shared_cache_control
120 def robots_txt(request):
121 """Serve robots.txt that allows or forbids robots."""
122 host = request.get_host()
123 if host in settings.ALLOW_ROBOTS_DOMAINS:
124 robots = ""
125 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
126 robots = ROBOTS_ALLOWED_TXT
127 else:
128 robots = ROBOTS_GO_AWAY_TXT
129 return HttpResponse(robots, content_type='text/plain')
130
131
132 class FaviconRedirect(RedirectView):
133 """Redirect to the favicon in the static img folder (bug 1402497)"""
134
135 def get_redirect_url(self, *args, **kwargs):
136 return favicon_url()
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -24,19 +24,16 @@
@shared_cache_control
def home(request):
"""Home page."""
+ context = {}
+ # Need for both wiki and react homepage
+ context['updates'] = list(
+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
+
+ # The default template name
+ template_name = 'landing/react_homepage.html'
if is_wiki(request):
- return render_home(request, 'landing/homepage.html')
- return render_home(request, 'landing/react_homepage.html')
-
-
-def render_home(request, template_name):
- """Render the home page with the template named "template_name"."""
- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
- default_filters = Filter.objects.default_filters()
- context = {
- 'updates': updates,
- 'default_filters': default_filters,
- }
+ template_name = 'landing/homepage.html'
+ context['default_filters'] = Filter.objects.default_filters()
return render(request, template_name, context)
| {"golden_diff": "diff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -24,19 +24,16 @@\n @shared_cache_control\n def home(request):\n \"\"\"Home page.\"\"\"\n+ context = {}\n+ # Need for both wiki and react homepage\n+ context['updates'] = list(\n+ Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n+\n+ # The default template name\n+ template_name = 'landing/react_homepage.html'\n if is_wiki(request):\n- return render_home(request, 'landing/homepage.html')\n- return render_home(request, 'landing/react_homepage.html')\n-\n-\n-def render_home(request, template_name):\n- \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n- updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n- default_filters = Filter.objects.default_filters()\n- context = {\n- 'updates': updates,\n- 'default_filters': default_filters,\n- }\n+ template_name = 'landing/homepage.html'\n+ context['default_filters'] = Filter.objects.default_filters()\n return render(request, template_name, context)\n", "issue": "Unnecessary SQL select for react home landing page\nThe React landing page view extracts the default filters. Every time but it's never used. \r\n<img width=\"1502\" alt=\"Screen Shot 2019-08-13 at 2 44 39 PM\" src=\"https://user-images.githubusercontent.com/26739/62968514-34a08a00-bdd9-11e9-92fb-c584683c7085.png\">\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, 'contribute.json', document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n if is_wiki(request):\n return render_home(request, 'landing/homepage.html')\n return render_home(request, 'landing/react_homepage.html')\n\n\ndef render_home(request, template_name):\n \"\"\"Render the home page with the template named \"template_name\".\"\"\"\n updates = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n default_filters = Filter.objects.default_filters()\n context = {\n 'updates': updates,\n 'default_filters': default_filters,\n }\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, 'landing/maintenance-mode.html')\n else:\n return redirect('home')\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, 'landing/promote_buttons.html')\n\n\nROBOTS_ALLOWED_TXT = '''\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: /*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: 
/*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n''' + '\\n'.join('Disallow: /{locale}/search'.format(locale=locale)\n for locale in settings.ENABLED_LOCALES)\n\nROBOTS_GO_AWAY_TXT = '''\\\nUser-Agent: *\nDisallow: /\n'''\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type='text/plain')\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, 'contribute.json', document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n context = {}\n # Need for both wiki and react homepage\n context['updates'] = list(\n Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n\n # The default template name\n template_name = 'landing/react_homepage.html'\n if is_wiki(request):\n template_name = 'landing/homepage.html'\n context['default_filters'] = Filter.objects.default_filters()\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, 'landing/maintenance-mode.html')\n else:\n return redirect('home')\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, 'landing/promote_buttons.html')\n\n\nROBOTS_ALLOWED_TXT = '''\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: 
/*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: /*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n''' + '\\n'.join('Disallow: /{locale}/search'.format(locale=locale)\n for locale in settings.ENABLED_LOCALES)\n\nROBOTS_GO_AWAY_TXT = '''\\\nUser-Agent: *\nDisallow: /\n'''\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type='text/plain')\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}]} | 1,645 | 275 |
gh_patches_debug_41801 | rasdani/github-patches | git_diff | pydantic__pydantic-1620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add encoding to `read_env_file()`
# Feature Request
Output of `import pydantic.utils; print(pydantic.utils.version_info())`:
```
pydantic version: 1.5.1
pydantic compiled: True
python version: 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]
platform: Windows-7-6.1.7601-SP1
optional deps. installed: ['email-validator']
```
## Description
Hi, there's a known problem on Windows with parsing dotenv files - https://github.com/pypa/pipenv/issues/1963. `python-dotenv` would parse files with the default encoding (cp1251 for Cyrillic Windows). As a result we get `Р›РёСЃС‚ 1` instead of `Лист 1`.
It looks like [this](https://github.com/samuelcolvin/pydantic/blob/960b24a5aab7ae0631bfbfbe0047b4d8600c6012/pydantic/env_settings.py#L111) function needs to fetch the encoding from the `Config` class somehow.
<!-- Where possible please include a self-contained code snippet describing your feature request: -->
## Example
`.env` file (UTF-8):
```
foo=Лист 1
```
Code snippet:
```py
import pydantic
class Settings(pydantic.BaseSettings):
foo: str
class Config:
env_file_encoding = 'utf-8'
settings = Settings(_env_file='.env')
print(settings)
# foo='Лист 1'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydantic/env_settings.py`
Content:
```
1 import os
2 import warnings
3 from pathlib import Path
4 from typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union
5
6 from .fields import ModelField
7 from .main import BaseModel, Extra
8 from .typing import display_as_type
9 from .utils import deep_update, sequence_like
10
11 env_file_sentinel = str(object())
12
13
14 class SettingsError(ValueError):
15 pass
16
17
18 class BaseSettings(BaseModel):
19 """
20 Base class for settings, allowing values to be overridden by environment variables.
21
22 This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),
23 Heroku and any 12 factor app design.
24 """
25
26 def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:
27 # Uses something other than `self` the first arg to allow "self" as a settable attribute
28 super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))
29
30 def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:
31 return deep_update(self._build_environ(_env_file), init_kwargs)
32
33 def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:
34 """
35 Build environment variables suitable for passing to the Model.
36 """
37 d: Dict[str, Optional[str]] = {}
38
39 if self.__config__.case_sensitive:
40 env_vars: Mapping[str, Optional[str]] = os.environ
41 else:
42 env_vars = {k.lower(): v for k, v in os.environ.items()}
43
44 env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file
45 if env_file is not None:
46 env_path = Path(env_file)
47 if env_path.is_file():
48 env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}
49
50 for field in self.__fields__.values():
51 env_val: Optional[str] = None
52 for env_name in field.field_info.extra['env_names']:
53 env_val = env_vars.get(env_name)
54 if env_val is not None:
55 break
56
57 if env_val is None:
58 continue
59
60 if field.is_complex():
61 try:
62 env_val = self.__config__.json_loads(env_val) # type: ignore
63 except ValueError as e:
64 raise SettingsError(f'error parsing JSON for "{env_name}"') from e
65 d[field.alias] = env_val
66 return d
67
68 class Config:
69 env_prefix = ''
70 env_file = None
71 validate_all = True
72 extra = Extra.forbid
73 arbitrary_types_allowed = True
74 case_sensitive = False
75
76 @classmethod
77 def prepare_field(cls, field: ModelField) -> None:
78 env_names: Union[List[str], AbstractSet[str]]
79 env = field.field_info.extra.get('env')
80 if env is None:
81 if field.has_alias:
82 warnings.warn(
83 'aliases are no longer used by BaseSettings to define which environment variables to read. '
84 'Instead use the "env" field setting. '
85 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',
86 FutureWarning,
87 )
88 env_names = {cls.env_prefix + field.name}
89 elif isinstance(env, str):
90 env_names = {env}
91 elif isinstance(env, (set, frozenset)):
92 env_names = env
93 elif sequence_like(env):
94 env_names = list(env)
95 else:
96 raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')
97
98 if not cls.case_sensitive:
99 env_names = env_names.__class__(n.lower() for n in env_names)
100 field.field_info.extra['env_names'] = env_names
101
102 __config__: Config # type: ignore
103
104
105 def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
106 try:
107 from dotenv import dotenv_values
108 except ImportError as e:
109 raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
110
111 file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)
112 if not case_sensitive:
113 return {k.lower(): v for k, v in file_vars.items()}
114 else:
115 return file_vars
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydantic/env_settings.py b/pydantic/env_settings.py
--- a/pydantic/env_settings.py
+++ b/pydantic/env_settings.py
@@ -23,14 +23,28 @@
Heroku and any 12 factor app design.
"""
- def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:
+ def __init__(
+ __pydantic_self__,
+ _env_file: Union[Path, str, None] = env_file_sentinel,
+ _env_file_encoding: Optional[str] = None,
+ **values: Any,
+ ) -> None:
# Uses something other than `self` the first arg to allow "self" as a settable attribute
- super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))
-
- def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:
- return deep_update(self._build_environ(_env_file), init_kwargs)
-
- def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:
+ super().__init__(
+ **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)
+ )
+
+ def _build_values(
+ self,
+ init_kwargs: Dict[str, Any],
+ _env_file: Union[Path, str, None] = None,
+ _env_file_encoding: Optional[str] = None,
+ ) -> Dict[str, Any]:
+ return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)
+
+ def _build_environ(
+ self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None
+ ) -> Dict[str, Optional[str]]:
"""
Build environment variables suitable for passing to the Model.
"""
@@ -42,10 +56,16 @@
env_vars = {k.lower(): v for k, v in os.environ.items()}
env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file
+ env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding
if env_file is not None:
env_path = Path(env_file)
if env_path.is_file():
- env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}
+ env_vars = {
+ **read_env_file(
+ env_path, encoding=env_file_encoding, case_sensitive=self.__config__.case_sensitive
+ ),
+ **env_vars,
+ }
for field in self.__fields__.values():
env_val: Optional[str] = None
@@ -68,6 +88,7 @@
class Config:
env_prefix = ''
env_file = None
+ env_file_encoding = None
validate_all = True
extra = Extra.forbid
arbitrary_types_allowed = True
@@ -102,13 +123,13 @@
__config__: Config # type: ignore
-def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
+def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:
try:
from dotenv import dotenv_values
except ImportError as e:
raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e
- file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)
+ file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)
if not case_sensitive:
return {k.lower(): v for k, v in file_vars.items()}
else:
| {"golden_diff": "diff --git a/pydantic/env_settings.py b/pydantic/env_settings.py\n--- a/pydantic/env_settings.py\n+++ b/pydantic/env_settings.py\n@@ -23,14 +23,28 @@\n Heroku and any 12 factor app design.\n \"\"\"\n \n- def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:\n+ def __init__(\n+ __pydantic_self__,\n+ _env_file: Union[Path, str, None] = env_file_sentinel,\n+ _env_file_encoding: Optional[str] = None,\n+ **values: Any,\n+ ) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n- super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))\n-\n- def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:\n- return deep_update(self._build_environ(_env_file), init_kwargs)\n-\n- def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:\n+ super().__init__(\n+ **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)\n+ )\n+\n+ def _build_values(\n+ self,\n+ init_kwargs: Dict[str, Any],\n+ _env_file: Union[Path, str, None] = None,\n+ _env_file_encoding: Optional[str] = None,\n+ ) -> Dict[str, Any]:\n+ return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)\n+\n+ def _build_environ(\n+ self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None\n+ ) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n@@ -42,10 +56,16 @@\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n \n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n+ env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n- env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}\n+ env_vars = {\n+ **read_env_file(\n+ env_path, encoding=env_file_encoding, case_sensitive=self.__config__.case_sensitive\n+ ),\n+ **env_vars,\n+ }\n \n for field in self.__fields__.values():\n env_val: Optional[str] = None\n@@ -68,6 +88,7 @@\n class Config:\n env_prefix = ''\n env_file = None\n+ env_file_encoding = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n@@ -102,13 +123,13 @@\n __config__: Config # type: ignore\n \n \n-def read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n+def read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n \n- file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)\n+ file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n", "issue": "Add encoding to `read_env_file()`\n# Feature Request\r\n\r\nOutput of `import pydantic.utils; print(pydantic.utils.version_info())`:\r\n\r\n```\r\n pydantic version: 1.5.1\r\n pydantic compiled: True\r\n python version: 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\r\n platform: Windows-7-6.1.7601-SP1\r\n optional deps. 
installed: ['email-validator']\r\n```\r\n\r\n## Description\r\n\r\nHi, there's known problem on Windows with parsing dotenv files - https://github.com/pypa/pipenv/issues/1963. `python-dotenv` would parse files with default encoding (cp1251 for Cyrillic Windows). As a result we get `\u0420\u203a\u0420\u0451\u0421\u0403\u0421\u201a 1` instead of `\u041b\u0438\u0441\u0442 1`.\r\n\r\nIt looks like [this](https://github.com/samuelcolvin/pydantic/blob/960b24a5aab7ae0631bfbfbe0047b4d8600c6012/pydantic/env_settings.py#L111) function need to fetch encoding from `Config` class somehow.\r\n\r\n<!-- Where possible please include a self-contained code snippet describing your feature request: -->\r\n\r\n## Example\r\n\r\n`.env` file (UTF-8):\r\n\r\n```\r\nfoo=\u041b\u0438\u0441\u0442 1\r\n```\r\n\r\nCode snippet:\r\n\r\n```py\r\nimport pydantic\r\n\r\nclass Settings(pydantic.BaseSettings):\r\n foo: str\r\n\r\n class Config:\r\n env_file_encoding = 'utf-8'\r\n\r\nsettings = Settings(_env_file='.env')\r\nprint(settings)\r\n# foo='\u041b\u0438\u0441\u0442 1'\r\n```\r\n\n", "before_files": [{"content": "import os\nimport warnings\nfrom pathlib import Path\nfrom typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union\n\nfrom .fields import ModelField\nfrom .main import BaseModel, Extra\nfrom .typing import display_as_type\nfrom .utils import deep_update, sequence_like\n\nenv_file_sentinel = str(object())\n\n\nclass SettingsError(ValueError):\n pass\n\n\nclass BaseSettings(BaseModel):\n \"\"\"\n Base class for settings, allowing values to be overridden by environment variables.\n\n This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),\n Heroku and any 12 factor app design.\n \"\"\"\n\n def __init__(__pydantic_self__, _env_file: Union[Path, str, None] = env_file_sentinel, **values: Any) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n super().__init__(**__pydantic_self__._build_values(values, _env_file=_env_file))\n\n def _build_values(self, init_kwargs: Dict[str, Any], _env_file: Union[Path, str, None] = None) -> Dict[str, Any]:\n return deep_update(self._build_environ(_env_file), init_kwargs)\n\n def _build_environ(self, _env_file: Union[Path, str, None] = None) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n d: Dict[str, Optional[str]] = {}\n\n if self.__config__.case_sensitive:\n env_vars: Mapping[str, Optional[str]] = os.environ\n else:\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n\n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n env_vars = {**read_env_file(env_path, case_sensitive=self.__config__.case_sensitive), **env_vars}\n\n for field in self.__fields__.values():\n env_val: Optional[str] = None\n for env_name in field.field_info.extra['env_names']:\n env_val = env_vars.get(env_name)\n if env_val is not None:\n break\n\n if env_val is None:\n continue\n\n if field.is_complex():\n try:\n env_val = self.__config__.json_loads(env_val) # type: ignore\n except ValueError as e:\n raise SettingsError(f'error parsing JSON for \"{env_name}\"') from e\n d[field.alias] = env_val\n return d\n\n class Config:\n env_prefix = ''\n env_file = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n case_sensitive = False\n\n @classmethod\n def prepare_field(cls, 
field: ModelField) -> None:\n env_names: Union[List[str], AbstractSet[str]]\n env = field.field_info.extra.get('env')\n if env is None:\n if field.has_alias:\n warnings.warn(\n 'aliases are no longer used by BaseSettings to define which environment variables to read. '\n 'Instead use the \"env\" field setting. '\n 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',\n FutureWarning,\n )\n env_names = {cls.env_prefix + field.name}\n elif isinstance(env, str):\n env_names = {env}\n elif isinstance(env, (set, frozenset)):\n env_names = env\n elif sequence_like(env):\n env_names = list(env)\n else:\n raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')\n\n if not cls.case_sensitive:\n env_names = env_names.__class__(n.lower() for n in env_names)\n field.field_info.extra['env_names'] = env_names\n\n __config__: Config # type: ignore\n\n\ndef read_env_file(file_path: Path, *, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n\n file_vars: Dict[str, Optional[str]] = dotenv_values(file_path)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n return file_vars\n", "path": "pydantic/env_settings.py"}], "after_files": [{"content": "import os\nimport warnings\nfrom pathlib import Path\nfrom typing import AbstractSet, Any, Dict, List, Mapping, Optional, Union\n\nfrom .fields import ModelField\nfrom .main import BaseModel, Extra\nfrom .typing import display_as_type\nfrom .utils import deep_update, sequence_like\n\nenv_file_sentinel = str(object())\n\n\nclass SettingsError(ValueError):\n pass\n\n\nclass BaseSettings(BaseModel):\n \"\"\"\n Base class for settings, allowing values to be overridden by environment variables.\n\n This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose),\n Heroku and any 12 factor app design.\n \"\"\"\n\n def __init__(\n __pydantic_self__,\n _env_file: Union[Path, str, None] = env_file_sentinel,\n _env_file_encoding: Optional[str] = None,\n **values: Any,\n ) -> None:\n # Uses something other than `self` the first arg to allow \"self\" as a settable attribute\n super().__init__(\n **__pydantic_self__._build_values(values, _env_file=_env_file, _env_file_encoding=_env_file_encoding)\n )\n\n def _build_values(\n self,\n init_kwargs: Dict[str, Any],\n _env_file: Union[Path, str, None] = None,\n _env_file_encoding: Optional[str] = None,\n ) -> Dict[str, Any]:\n return deep_update(self._build_environ(_env_file, _env_file_encoding), init_kwargs)\n\n def _build_environ(\n self, _env_file: Union[Path, str, None] = None, _env_file_encoding: Optional[str] = None\n ) -> Dict[str, Optional[str]]:\n \"\"\"\n Build environment variables suitable for passing to the Model.\n \"\"\"\n d: Dict[str, Optional[str]] = {}\n\n if self.__config__.case_sensitive:\n env_vars: Mapping[str, Optional[str]] = os.environ\n else:\n env_vars = {k.lower(): v for k, v in os.environ.items()}\n\n env_file = _env_file if _env_file != env_file_sentinel else self.__config__.env_file\n env_file_encoding = _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding\n if env_file is not None:\n env_path = Path(env_file)\n if env_path.is_file():\n env_vars = {\n **read_env_file(\n env_path, encoding=env_file_encoding, 
case_sensitive=self.__config__.case_sensitive\n ),\n **env_vars,\n }\n\n for field in self.__fields__.values():\n env_val: Optional[str] = None\n for env_name in field.field_info.extra['env_names']:\n env_val = env_vars.get(env_name)\n if env_val is not None:\n break\n\n if env_val is None:\n continue\n\n if field.is_complex():\n try:\n env_val = self.__config__.json_loads(env_val) # type: ignore\n except ValueError as e:\n raise SettingsError(f'error parsing JSON for \"{env_name}\"') from e\n d[field.alias] = env_val\n return d\n\n class Config:\n env_prefix = ''\n env_file = None\n env_file_encoding = None\n validate_all = True\n extra = Extra.forbid\n arbitrary_types_allowed = True\n case_sensitive = False\n\n @classmethod\n def prepare_field(cls, field: ModelField) -> None:\n env_names: Union[List[str], AbstractSet[str]]\n env = field.field_info.extra.get('env')\n if env is None:\n if field.has_alias:\n warnings.warn(\n 'aliases are no longer used by BaseSettings to define which environment variables to read. '\n 'Instead use the \"env\" field setting. '\n 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names',\n FutureWarning,\n )\n env_names = {cls.env_prefix + field.name}\n elif isinstance(env, str):\n env_names = {env}\n elif isinstance(env, (set, frozenset)):\n env_names = env\n elif sequence_like(env):\n env_names = list(env)\n else:\n raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set')\n\n if not cls.case_sensitive:\n env_names = env_names.__class__(n.lower() for n in env_names)\n field.field_info.extra['env_names'] = env_names\n\n __config__: Config # type: ignore\n\n\ndef read_env_file(file_path: Path, *, encoding: str = None, case_sensitive: bool = False) -> Dict[str, Optional[str]]:\n try:\n from dotenv import dotenv_values\n except ImportError as e:\n raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e\n\n file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding)\n if not case_sensitive:\n return {k.lower(): v for k, v in file_vars.items()}\n else:\n return file_vars\n", "path": "pydantic/env_settings.py"}]} | 1,914 | 926 |
gh_patches_debug_9183 | rasdani/github-patches | git_diff | PaddlePaddle__models-492 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
model/text_classification: after training the model on the built-in corpus, inference fails with the following error
```text
I1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1
Traceback (most recent call last):
File "infer.py", line 91, in <module>
batch_size=10)
File "infer.py", line 58, in infer
for idx, item in enumerate(test_reader):
TypeError: 'function' object is not iterable
```
The paddle image used is the latest tag from https://hub.docker.com/r/paddlepaddle/paddle/tags/, and the models code is the latest download.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `text_classification/infer.py`
Content:
```
1 import sys
2 import os
3 import gzip
4
5 import paddle.v2 as paddle
6
7 import reader
8 from network_conf import fc_net, convolution_net
9 from utils import logger, load_dict, load_reverse_dict
10
11
12 def infer(topology, data_dir, model_path, word_dict_path, label_dict_path,
13 batch_size):
14 def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):
15 probs = inferer.infer(input=test_batch, field=["value"])
16 assert len(probs) == len(test_batch)
17 for word_ids, prob in zip(test_batch, probs):
18 word_text = " ".join([ids_2_word[id] for id in word_ids[0]])
19 print("%s\t%s\t%s" % (ids_2_label[prob.argmax()],
20 " ".join(["{:0.4f}".format(p)
21 for p in prob]), word_text))
22
23 logger.info("begin to predict...")
24 use_default_data = (data_dir is None)
25
26 if use_default_data:
27 word_dict = paddle.dataset.imdb.word_dict()
28 word_reverse_dict = dict((value, key)
29 for key, value in word_dict.iteritems())
30 label_reverse_dict = {0: "positive", 1: "negative"}
31 test_reader = paddle.dataset.imdb.test(word_dict)
32 else:
33 assert os.path.exists(
34 word_dict_path), "the word dictionary file does not exist"
35 assert os.path.exists(
36 label_dict_path), "the label dictionary file does not exist"
37
38 word_dict = load_dict(word_dict_path)
39 word_reverse_dict = load_reverse_dict(word_dict_path)
40 label_reverse_dict = load_reverse_dict(label_dict_path)
41
42 test_reader = reader.test_reader(data_dir, word_dict)()
43
44 dict_dim = len(word_dict)
45 class_num = len(label_reverse_dict)
46 prob_layer = topology(dict_dim, class_num, is_infer=True)
47
48 # initialize PaddlePaddle
49 paddle.init(use_gpu=False, trainer_count=1)
50
51 # load the trained models
52 parameters = paddle.parameters.Parameters.from_tar(
53 gzip.open(model_path, "r"))
54 inferer = paddle.inference.Inference(
55 output_layer=prob_layer, parameters=parameters)
56
57 test_batch = []
58 for idx, item in enumerate(test_reader):
59 test_batch.append([item[0]])
60 if len(test_batch) == batch_size:
61 _infer_a_batch(inferer, test_batch, word_reverse_dict,
62 label_reverse_dict)
63 test_batch = []
64
65 if len(test_batch):
66 _infer_a_batch(inferer, test_batch, word_reverse_dict,
67 label_reverse_dict)
68 test_batch = []
69
70
71 if __name__ == "__main__":
72 model_path = "models/dnn_params_pass_00000.tar.gz"
73 assert os.path.exists(model_path), "the trained model does not exist."
74
75 nn_type = "dnn"
76 test_dir = None
77 word_dict = None
78 label_dict = None
79
80 if nn_type == "dnn":
81 topology = fc_net
82 elif nn_type == "cnn":
83 topology = convolution_net
84
85 infer(
86 topology=topology,
87 data_dir=test_dir,
88 word_dict_path=word_dict,
89 label_dict_path=label_dict,
90 model_path=model_path,
91 batch_size=10)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/text_classification/infer.py b/text_classification/infer.py
--- a/text_classification/infer.py
+++ b/text_classification/infer.py
@@ -28,7 +28,7 @@
word_reverse_dict = dict((value, key)
for key, value in word_dict.iteritems())
label_reverse_dict = {0: "positive", 1: "negative"}
- test_reader = paddle.dataset.imdb.test(word_dict)
+ test_reader = paddle.dataset.imdb.test(word_dict)()
else:
assert os.path.exists(
word_dict_path), "the word dictionary file does not exist"
| {"golden_diff": "diff --git a/text_classification/infer.py b/text_classification/infer.py\n--- a/text_classification/infer.py\n+++ b/text_classification/infer.py\n@@ -28,7 +28,7 @@\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n- test_reader = paddle.dataset.imdb.test(word_dict)\n+ test_reader = paddle.dataset.imdb.test(word_dict)()\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n", "issue": "model/text_classification \u4f7f\u7528\u5185\u7f6e\u8bed\u6599\u96c6\u8bad\u7ec3\u6a21\u578b\u540e\uff0c\u9884\u6d4b\u65f6\u51fa\u9519\uff0c\u5177\u4f53\u5982\u4e0b\n```text\r\nI1123 09:23:54.320137 84 Util.cpp:166] commandline: --use_gpu=False --trainer_count=1 \r\nTraceback (most recent call last):\r\n File \"infer.py\", line 91, in <module>\r\n batch_size=10)\r\n File \"infer.py\", line 58, in infer\r\n for idx, item in enumerate(test_reader):\r\nTypeError: 'function' object is not iterable\r\n```\r\npaddle\u955c\u50cf\u7528\u7684\u662fhttps://hub.docker.com/r/paddlepaddle/paddle/tags/ latest\u7684\uff0cmodel\u4e0b\u8f7d\u7684\u6700\u65b0\u7684\n", "before_files": [{"content": "import sys\nimport os\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom network_conf import fc_net, convolution_net\nfrom utils import logger, load_dict, load_reverse_dict\n\n\ndef infer(topology, data_dir, model_path, word_dict_path, label_dict_path,\n batch_size):\n def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):\n probs = inferer.infer(input=test_batch, field=[\"value\"])\n assert len(probs) == len(test_batch)\n for word_ids, prob in zip(test_batch, probs):\n word_text = \" \".join([ids_2_word[id] for id in word_ids[0]])\n print(\"%s\\t%s\\t%s\" % (ids_2_label[prob.argmax()],\n \" \".join([\"{:0.4f}\".format(p)\n for p in prob]), word_text))\n\n logger.info(\"begin to predict...\")\n use_default_data = (data_dir is None)\n\n if use_default_data:\n word_dict = paddle.dataset.imdb.word_dict()\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n test_reader = paddle.dataset.imdb.test(word_dict)\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n assert os.path.exists(\n label_dict_path), \"the label dictionary file does not exist\"\n\n word_dict = load_dict(word_dict_path)\n word_reverse_dict = load_reverse_dict(word_dict_path)\n label_reverse_dict = load_reverse_dict(label_dict_path)\n\n test_reader = reader.test_reader(data_dir, word_dict)()\n\n dict_dim = len(word_dict)\n class_num = len(label_reverse_dict)\n prob_layer = topology(dict_dim, class_num, is_infer=True)\n\n # initialize PaddlePaddle\n paddle.init(use_gpu=False, trainer_count=1)\n\n # load the trained models\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(model_path, \"r\"))\n inferer = paddle.inference.Inference(\n output_layer=prob_layer, parameters=parameters)\n\n test_batch = []\n for idx, item in enumerate(test_reader):\n test_batch.append([item[0]])\n if len(test_batch) == batch_size:\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n if len(test_batch):\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n\nif __name__ == \"__main__\":\n model_path = \"models/dnn_params_pass_00000.tar.gz\"\n assert os.path.exists(model_path), 
\"the trained model does not exist.\"\n\n nn_type = \"dnn\"\n test_dir = None\n word_dict = None\n label_dict = None\n\n if nn_type == \"dnn\":\n topology = fc_net\n elif nn_type == \"cnn\":\n topology = convolution_net\n\n infer(\n topology=topology,\n data_dir=test_dir,\n word_dict_path=word_dict,\n label_dict_path=label_dict,\n model_path=model_path,\n batch_size=10)\n", "path": "text_classification/infer.py"}], "after_files": [{"content": "import sys\nimport os\nimport gzip\n\nimport paddle.v2 as paddle\n\nimport reader\nfrom network_conf import fc_net, convolution_net\nfrom utils import logger, load_dict, load_reverse_dict\n\n\ndef infer(topology, data_dir, model_path, word_dict_path, label_dict_path,\n batch_size):\n def _infer_a_batch(inferer, test_batch, ids_2_word, ids_2_label):\n probs = inferer.infer(input=test_batch, field=[\"value\"])\n assert len(probs) == len(test_batch)\n for word_ids, prob in zip(test_batch, probs):\n word_text = \" \".join([ids_2_word[id] for id in word_ids[0]])\n print(\"%s\\t%s\\t%s\" % (ids_2_label[prob.argmax()],\n \" \".join([\"{:0.4f}\".format(p)\n for p in prob]), word_text))\n\n logger.info(\"begin to predict...\")\n use_default_data = (data_dir is None)\n\n if use_default_data:\n word_dict = paddle.dataset.imdb.word_dict()\n word_reverse_dict = dict((value, key)\n for key, value in word_dict.iteritems())\n label_reverse_dict = {0: \"positive\", 1: \"negative\"}\n test_reader = paddle.dataset.imdb.test(word_dict)()\n else:\n assert os.path.exists(\n word_dict_path), \"the word dictionary file does not exist\"\n assert os.path.exists(\n label_dict_path), \"the label dictionary file does not exist\"\n\n word_dict = load_dict(word_dict_path)\n word_reverse_dict = load_reverse_dict(word_dict_path)\n label_reverse_dict = load_reverse_dict(label_dict_path)\n\n test_reader = reader.test_reader(data_dir, word_dict)()\n\n dict_dim = len(word_dict)\n class_num = len(label_reverse_dict)\n prob_layer = topology(dict_dim, class_num, is_infer=True)\n\n # initialize PaddlePaddle\n paddle.init(use_gpu=False, trainer_count=1)\n\n # load the trained models\n parameters = paddle.parameters.Parameters.from_tar(\n gzip.open(model_path, \"r\"))\n inferer = paddle.inference.Inference(\n output_layer=prob_layer, parameters=parameters)\n\n test_batch = []\n for idx, item in enumerate(test_reader):\n test_batch.append([item[0]])\n if len(test_batch) == batch_size:\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n if len(test_batch):\n _infer_a_batch(inferer, test_batch, word_reverse_dict,\n label_reverse_dict)\n test_batch = []\n\n\nif __name__ == \"__main__\":\n model_path = \"models/dnn_params_pass_00000.tar.gz\"\n assert os.path.exists(model_path), \"the trained model does not exist.\"\n\n nn_type = \"dnn\"\n test_dir = None\n word_dict = None\n label_dict = None\n\n if nn_type == \"dnn\":\n topology = fc_net\n elif nn_type == \"cnn\":\n topology = convolution_net\n\n infer(\n topology=topology,\n data_dir=test_dir,\n word_dict_path=word_dict,\n label_dict_path=label_dict,\n model_path=model_path,\n batch_size=10)\n", "path": "text_classification/infer.py"}]} | 1,314 | 132 |
gh_patches_debug_30819 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-2322 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use `show objects in schema` on Snowflake
### Describe the feature
dbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).
Caveats:
- Snowflake's docs indicate that only 10k records can be returned from one of these queries. I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.
 - The existing info schema query gets away with a case-insensitive lookup using `ilike`, but we'll need to quote and case the schema name exactly for this query to succeed.
- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.
- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/snowflake/dbt/adapters/snowflake/impl.py`
Content:
```
1 from typing import Mapping, Any, Optional, List
2
3 import agate
4
5 from dbt.adapters.sql import SQLAdapter
6 from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
7 from dbt.adapters.snowflake import SnowflakeConnectionManager
8 from dbt.adapters.snowflake import SnowflakeRelation
9 from dbt.adapters.snowflake import SnowflakeColumn
10 from dbt.contracts.graph.manifest import Manifest
11 from dbt.exceptions import RuntimeException, DatabaseException
12 from dbt.utils import filter_null_values
13
14
15 class SnowflakeAdapter(SQLAdapter):
16 Relation = SnowflakeRelation
17 Column = SnowflakeColumn
18 ConnectionManager = SnowflakeConnectionManager
19
20 AdapterSpecificConfigs = frozenset(
21 {"transient", "cluster_by", "automatic_clustering", "secure",
22 "copy_grants", "snowflake_warehouse"}
23 )
24
25 @classmethod
26 def date_function(cls):
27 return "CURRENT_TIMESTAMP()"
28
29 @classmethod
30 def _catalog_filter_table(
31 cls, table: agate.Table, manifest: Manifest
32 ) -> agate.Table:
33 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force
34 # the column names to their lowercased forms.
35 lowered = table.rename(
36 column_names=[c.lower() for c in table.column_names]
37 )
38 return super()._catalog_filter_table(lowered, manifest)
39
40 def _make_match_kwargs(self, database, schema, identifier):
41 quoting = self.config.quoting
42 if identifier is not None and quoting["identifier"] is False:
43 identifier = identifier.upper()
44
45 if schema is not None and quoting["schema"] is False:
46 schema = schema.upper()
47
48 if database is not None and quoting["database"] is False:
49 database = database.upper()
50
51 return filter_null_values(
52 {"identifier": identifier, "schema": schema, "database": database}
53 )
54
55 def _get_warehouse(self) -> str:
56 _, table = self.execute(
57 'select current_warehouse() as warehouse',
58 fetch=True
59 )
60 if len(table) == 0 or len(table[0]) == 0:
61 # can this happen?
62 raise RuntimeException(
63 'Could not get current warehouse: no results'
64 )
65 return str(table[0][0])
66
67 def _use_warehouse(self, warehouse: str):
68 """Use the given warehouse. Quotes are never applied."""
69 self.execute('use warehouse {}'.format(warehouse))
70
71 def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:
72 default_warehouse = self.config.credentials.warehouse
73 warehouse = config.get('snowflake_warehouse', default_warehouse)
74 if warehouse == default_warehouse or warehouse is None:
75 return None
76 previous = self._get_warehouse()
77 self._use_warehouse(warehouse)
78 return previous
79
80 def post_model_hook(
81 self, config: Mapping[str, Any], context: Optional[str]
82 ) -> None:
83 if context is not None:
84 self._use_warehouse(context)
85
86 def list_schemas(self, database: str) -> List[str]:
87 try:
88 results = self.execute_macro(
89 LIST_SCHEMAS_MACRO_NAME,
90 kwargs={'database': database}
91 )
92 except DatabaseException as exc:
93 msg = (
94 f'Database error while listing schemas in database '
95 f'"{database}"\n{exc}'
96 )
97 raise RuntimeException(msg)
98 # this uses 'show terse schemas in database', and the column name we
99 # want is 'name'
100
101 return [row['name'] for row in results]
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py
--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py
@@ -3,7 +3,10 @@
import agate
from dbt.adapters.sql import SQLAdapter
-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME
+from dbt.adapters.sql.impl import (
+ LIST_SCHEMAS_MACRO_NAME,
+ LIST_RELATIONS_MACRO_NAME,
+)
from dbt.adapters.snowflake import SnowflakeConnectionManager
from dbt.adapters.snowflake import SnowflakeRelation
from dbt.adapters.snowflake import SnowflakeColumn
@@ -99,3 +102,43 @@
# want is 'name'
return [row['name'] for row in results]
+
+ def list_relations_without_caching(
+ self, information_schema, schema
+ ) -> List[SnowflakeRelation]:
+ kwargs = {'information_schema': information_schema, 'schema': schema}
+ try:
+ results = self.execute_macro(
+ LIST_RELATIONS_MACRO_NAME,
+ kwargs=kwargs
+ )
+ except DatabaseException as exc:
+ # if the schema doesn't exist, we just want to return.
+ # Alternatively, we could query the list of schemas before we start
+ # and skip listing the missing ones, which sounds expensive.
+ if 'Object does not exist' in str(exc):
+ return []
+ raise
+
+ relations = []
+ quote_policy = {
+ 'database': True,
+ 'schema': True,
+ 'identifier': True
+ }
+
+ columns = ['database_name', 'schema_name', 'name', 'kind']
+ for _database, _schema, _identifier, _type in results.select(columns):
+ try:
+ _type = self.Relation.get_relation_type(_type.lower())
+ except ValueError:
+ _type = self.Relation.External
+ relations.append(self.Relation.create(
+ database=_database,
+ schema=_schema,
+ identifier=_identifier,
+ quote_policy=quote_policy,
+ type=_type
+ ))
+
+ return relations
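For context, the new `list_relations_without_caching` expects the `LIST_RELATIONS_MACRO_NAME` macro to run a `show terse objects in <schema>`-style query and then classifies each row in Python (no `result_scan`), falling back to an external relation type when the reported `kind` is unrecognized. A dbt-free sketch of that post-processing step, using made-up rows shaped like the columns the patch selects (`database_name`, `schema_name`, `name`, `kind`):

```python
# Illustrative rows only; real values come from Snowflake's `show terse objects` output.
rows = [
    ("MY_DB", "ANALYTICS", "ORDERS", "TABLE"),
    ("MY_DB", "ANALYTICS", "ORDERS_MV", "VIEW"),          # materialized views also report VIEW
    ("MY_DB", "ANALYTICS", "EVENTS_EXT", "EXTERNAL TABLE"),
]

KNOWN_KINDS = {"table", "view"}

def classify(kind):
    # Mirrors the try/except around Relation.get_relation_type in the patch.
    kind = kind.lower()
    return kind if kind in KNOWN_KINDS else "external"

relations = [
    {"database": db, "schema": schema, "identifier": name, "type": classify(kind)}
    for db, schema, name, kind in rows
]
print(relations)
```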
| {"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -3,7 +3,10 @@\n import agate\n \n from dbt.adapters.sql import SQLAdapter\n-from dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\n+from dbt.adapters.sql.impl import (\n+ LIST_SCHEMAS_MACRO_NAME,\n+ LIST_RELATIONS_MACRO_NAME,\n+)\n from dbt.adapters.snowflake import SnowflakeConnectionManager\n from dbt.adapters.snowflake import SnowflakeRelation\n from dbt.adapters.snowflake import SnowflakeColumn\n@@ -99,3 +102,43 @@\n # want is 'name'\n \n return [row['name'] for row in results]\n+\n+ def list_relations_without_caching(\n+ self, information_schema, schema\n+ ) -> List[SnowflakeRelation]:\n+ kwargs = {'information_schema': information_schema, 'schema': schema}\n+ try:\n+ results = self.execute_macro(\n+ LIST_RELATIONS_MACRO_NAME,\n+ kwargs=kwargs\n+ )\n+ except DatabaseException as exc:\n+ # if the schema doesn't exist, we just want to return.\n+ # Alternatively, we could query the list of schemas before we start\n+ # and skip listing the missing ones, which sounds expensive.\n+ if 'Object does not exist' in str(exc):\n+ return []\n+ raise\n+\n+ relations = []\n+ quote_policy = {\n+ 'database': True,\n+ 'schema': True,\n+ 'identifier': True\n+ }\n+\n+ columns = ['database_name', 'schema_name', 'name', 'kind']\n+ for _database, _schema, _identifier, _type in results.select(columns):\n+ try:\n+ _type = self.Relation.get_relation_type(_type.lower())\n+ except ValueError:\n+ _type = self.Relation.External\n+ relations.append(self.Relation.create(\n+ database=_database,\n+ schema=_schema,\n+ identifier=_identifier,\n+ quote_policy=quote_policy,\n+ type=_type\n+ ))\n+\n+ return relations\n", "issue": "Use `show objects in schema` on Snowflake\n### Describe the feature\r\ndbt should leverage the `show terse objects in schema` query to list the tables and views in a database. Crucially, `show ....` queries _do not require a running warehouse_, so they will not queue in the virtual warehouse (leading to long perceived startup times).\r\n\r\nCaveats:\r\n - Snowflake's docs indicate that only 10k records can be returned from one of these queries. 
I'd like to talk to someone from Snowflake to get an understanding if this limit is still accurate, or if they have near-term plans to lift this restriction.\r\n - The existing info schema query gets away with a case-insensitive lookup using `ilike, but we'll need to quote and case the schema name exactly for this query to succeed.\r\n- Per [the docs](https://docs.snowflake.net/manuals/sql-reference/sql/show-objects.html), materialized views are returned with type `VIEW`, which is kind of unpleasant.\r\n- We should post-process the results in Python instead of using `result_scan`, because selecting from `result_scan` can queue\n", "before_files": [{"content": "from typing import Mapping, Any, Optional, List\n\nimport agate\n\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import LIST_SCHEMAS_MACRO_NAME\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\",\n \"copy_grants\", \"snowflake_warehouse\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}], "after_files": [{"content": "from typing import Mapping, Any, Optional, List\n\nimport agate\n\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import (\n LIST_SCHEMAS_MACRO_NAME,\n LIST_RELATIONS_MACRO_NAME,\n)\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = frozenset(\n {\"transient\", \"cluster_by\", \"automatic_clustering\", \"secure\",\n \"copy_grants\", \"snowflake_warehouse\"}\n )\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n\n def list_relations_without_caching(\n self, information_schema, schema\n ) -> List[SnowflakeRelation]:\n kwargs = {'information_schema': information_schema, 'schema': schema}\n try:\n results = self.execute_macro(\n LIST_RELATIONS_MACRO_NAME,\n kwargs=kwargs\n )\n except DatabaseException as exc:\n # if the schema doesn't exist, we just want to return.\n # Alternatively, we could query the list of schemas before we start\n # and skip listing the missing ones, which sounds expensive.\n if 'Object does not exist' in str(exc):\n return []\n raise\n\n relations = []\n quote_policy = {\n 'database': True,\n 'schema': True,\n 'identifier': True\n }\n\n columns = ['database_name', 'schema_name', 'name', 'kind']\n for _database, _schema, _identifier, _type in results.select(columns):\n try:\n _type = self.Relation.get_relation_type(_type.lower())\n except ValueError:\n _type = self.Relation.External\n relations.append(self.Relation.create(\n database=_database,\n schema=_schema,\n identifier=_identifier,\n quote_policy=quote_policy,\n type=_type\n ))\n\n return relations\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]} | 1,471 | 513 |
gh_patches_debug_32064 | rasdani/github-patches | git_diff | apache__airflow-14274 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
upgrade_check fails db version check
**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15
**Environment**: DEV
- **Cloud provider or hardware configuration**: AWS
- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)
- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux
- **Install tools**:
- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base
**What happened**: Running `airflow upgrade_check` returns the following error:
```
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check
==================================================== STATUS ====================================================
Check for latest versions of apache-airflow and checker...............................................SUCCESS
Remove airflow.AirflowMacroPlugin class...............................................................SUCCESS
/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. Support for direct imports will be dropped entirely in Airflow 2.0.
DeprecationWarning)
Ensure users are not using custom metaclasses in custom operators.....................................SUCCESS
Chain between DAG and operator not allowed............................................................SUCCESS
Connection.conn_type is not nullable..................................................................SUCCESS
Custom Executors now require full path................................................................SUCCESS
Traceback (most recent call last):
File "/home/airflow/.local/bin/airflow", line 37, in <module>
args.func(args)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 118, in run
all_problems = check_upgrade(formatter, rules)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py", line 38, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py", line 56, in check
installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
File "/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py", line 298, in __init__
raise InvalidVersion("Invalid version: '{0}'".format(version))
packaging.version.InvalidVersion: Invalid version: '5.7.26-log'
airflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$
```
**What you expected to happen**: the command runs through and prints helpful messages\r\n\r\n<!-- What do you think went wrong? -->\r\nRunning `session.execute('SELECT VERSION();').scalar()` against the metastore db returns \"5.7.26-log\", which is possibly not a valid value for the Version class `__init__` function because of the \"-log\" suffix?\r\n\r\n```\r\nmysql> select VERSION();\r\n+------------+\r\n| VERSION() |\r\n+------------+\r\n| 5.7.26-log |\r\n+------------+\r\n1 row in set (0.00 sec)\r\n```\r\n\r\n**How to reproduce it**: Run `airflow upgrade_check` again.\r\n\r\n**Anything else we need to know**: \r\nDockerfile snippet:\r\n```\r\nFROM apache/airflow:1.10.14\r\n...\r\nUSER ${AIRFLOW_UID}\r\nRUN pip install --user \\\r\n    airflow-kubernetes-job-operator \\\r\n    apache-airflow-backport-providers-cncf-kubernetes \\\r\n    apache-airflow-backport-providers-ssh \\\r\n    apache-airflow-upgrade-check\r\n```\r\n\r\nHow often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.
<!-- What do you think went wrong? -->
Running `SELECT VERSION();').scalar()` against the metastore db returns "5.7.26-log' which is possibly not a valid value for Version class `__init__` function because of the "-log" ending?
```
mysql> select VERSION();
+------------+
| VERSION() |
+------------+
| 5.7.26-log |
+------------+
1 row in set (0.00 sec)
```
**How to reproduce it**: Run `airflow upgrade_check` again.
**Anything else we need to know**:
Dockerfile snippet:
```
FROM apache/airflow:1.10.14
...
USER ${AIRFLOW_UID}
RUN pip install --user \
airflow-kubernetes-job-operator \
apache-airflow-backport-providers-cncf-kubernetes \
apache-airflow-backport-providers-ssh \
apache-airflow-upgrade-check
```
How often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 from packaging.version import Version
19
20 from airflow.configuration import conf
21 from airflow.upgrade.rules.base_rule import BaseRule
22 from airflow.utils.db import provide_session
23
24
25 class DatabaseVersionCheckRule(BaseRule):
26 title = "Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0"
27
28 description = """\
29 From Airflow 2.0, the following database versions are supported:
30 PostgreSQl - 9.6, 10, 11, 12, 13;
31 MySQL - 5.7, 8;
32 SQLite - 3.15+
33 """
34
35 @provide_session
36 def check(self, session=None):
37
38 more_info = "See link below for more details: https://github.com/apache/airflow#requirements"
39
40 conn_str = conf.get(section="core", key="sql_alchemy_conn")
41
42 if "sqlite" in conn_str:
43 min_req_sqlite_version = Version('3.15')
44 installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
45 if installed_sqlite_version < min_req_sqlite_version:
46 return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
47
48 elif "postgres" in conn_str:
49 min_req_postgres_version = Version('9.6')
50 installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
51 if installed_postgres_version < min_req_postgres_version:
52 return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
53
54 elif "mysql" in conn_str:
55 min_req_mysql_version = Version('5.7')
56 installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
57 if installed_mysql_version < min_req_mysql_version:
58 return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
59
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py
@@ -43,16 +43,23 @@
min_req_sqlite_version = Version('3.15')
installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())
if installed_sqlite_version < min_req_sqlite_version:
- return "From Airflow 2.0, SQLite version below 3.15 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, SQLite version below {} is no longer supported. \n{}".format(
+ min_req_sqlite_version, more_info
+ )
elif "postgres" in conn_str:
min_req_postgres_version = Version('9.6')
installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())
if installed_postgres_version < min_req_postgres_version:
- return "From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, PostgreSQL version below {} is no longer supported. \n{}".format(
+ min_req_postgres_version, more_info
+ )
elif "mysql" in conn_str:
min_req_mysql_version = Version('5.7')
- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())
+ # special treatment is needed here, because MySQL version may include a suffix like '-log'
+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])
if installed_mysql_version < min_req_mysql_version:
- return "From Airflow 2.0, MySQL version below 5.7 is no longer supported. \n" + more_info
+ return "From Airflow 2.0, MySQL version below {} is no longer supported. \n{}".format(
+ min_req_mysql_version, more_info
+ )
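The decisive change is stripping the build suffix (here `-log`) before handing the string to `packaging.version.Version`, which otherwise rejects it as non-PEP 440 — exactly the `InvalidVersion` in the report. A small standalone check (requires only the `packaging` package; the version string is the one from the report):

```python
from packaging.version import InvalidVersion, Version

raw = "5.7.26-log"  # what SELECT VERSION() returned on the reporter's RDS MySQL

try:
    Version(raw)
except InvalidVersion as exc:
    print("unparseable as-is:", exc)

installed = Version(raw.split("-")[0])  # the patch's workaround
print(installed >= Version("5.7"))      # True -> version check passes
```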
| {"golden_diff": "diff --git a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n--- a/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n+++ b/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\n@@ -43,16 +43,23 @@\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n- return \"From Airflow 2.0, SQLite version below 3.15 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, SQLite version below {} is no longer supported. \\n{}\".format(\n+ min_req_sqlite_version, more_info\n+ )\n \n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n- return \"From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, PostgreSQL version below {} is no longer supported. \\n{}\".format(\n+ min_req_postgres_version, more_info\n+ )\n \n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n- installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\n+ # special treatment is needed here, because MySQL version may include a suffix like '-log'\n+ installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])\n if installed_mysql_version < min_req_mysql_version:\n- return \"From Airflow 2.0, MySQL version below 5.7 is no longer supported. \\n\" + more_info\n+ return \"From Airflow 2.0, MySQL version below {} is no longer supported. \\n{}\".format(\n+ min_req_mysql_version, more_info\n+ )\n", "issue": "upgrade_check fails db version check\n**Apache Airflow version**: 1.10.14 with AWS RDS mysql 5.7.26 as metastore db\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`): v1.16.15\r\n\r\n**Environment**: DEV\r\n\r\n- **Cloud provider or hardware configuration**: AWS\r\n- **OS** (e.g. from /etc/os-release): Debian GNU/Linux 10 (buster)\r\n- **Kernel** (e.g. `uname -a`): Linux airflow-scheduler-765f664c56-4bsfq 4.14.186-146.268.amzn2.x86_64 #1 SMP Tue Jul 14 18:16:52 UTC 2020 x86_64 GNU/Linux\r\n- **Install tools**: \r\n- **Others**: Running on K8S as docker container with apache/airflow:1.10.14 as base\r\n\r\n**What happened**: Running `airflow upgrade_check` returns the following error:\r\n```\r\nairflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$ airflow upgrade_check\r\n\r\n==================================================== STATUS ====================================================\r\nCheck for latest versions of apache-airflow and checker...............................................SUCCESS\r\nRemove airflow.AirflowMacroPlugin class...............................................................SUCCESS\r\n/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/helpers.py:442: DeprecationWarning: Importing 'DummyOperator' directly from 'airflow.operators' has been deprecated. Please import from 'airflow.operators.[operator_module]' instead. 
Support for direct imports will be dropped entirely in Airflow 2.0.\r\n DeprecationWarning)\r\nEnsure users are not using custom metaclasses in custom operators.....................................SUCCESS\r\nChain between DAG and operator not allowed............................................................SUCCESS\r\nConnection.conn_type is not nullable..................................................................SUCCESS\r\nCustom Executors now require full path................................................................SUCCESS\r\nTraceback (most recent call last):\r\n File \"/home/airflow/.local/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py\", line 118, in run\r\n all_problems = check_upgrade(formatter, rules)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/checker.py\", line 38, in check_upgrade\r\n rule_status = RuleStatus.from_rule(rule)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/problem.py\", line 44, in from_rule\r\n result = rule.check()\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/utils/db.py\", line 74, in wrapper\r\n return func(*args, **kwargs)\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py\", line 56, in check\r\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\r\n File \"/home/airflow/.local/lib/python3.6/site-packages/packaging/version.py\", line 298, in __init__\r\n raise InvalidVersion(\"Invalid version: '{0}'\".format(version))\r\npackaging.version.InvalidVersion: Invalid version: '5.7.26-log'\r\nairflow@airflow-web-54d6577c8b-g9vcn:/opt/airflow$\r\n```\r\n\r\n**What you expected to happen**: commands runs through and prints helpful messages\r\n\r\n<!-- What do you think went wrong? -->\r\nRunning `SELECT VERSION();').scalar()` against the metastore db returns \"5.7.26-log' which is possibly not a valid value for Version class `__init__` function because of the \"-log\" ending?\r\n\r\n```\r\nmysql> select VERSION();\r\n+------------+\r\n| VERSION() |\r\n+------------+\r\n| 5.7.26-log |\r\n+------------+\r\n1 row in set (0.00 sec)\r\n```\r\n\r\n**How to reproduce it**: Run `airflow upgrade_check` again.\r\n\r\n**Anything else we need to know**: \r\nDockerfile snippet:\r\n```\r\nFROM apache/airflow:1.10.14\r\n...\r\nUSER ${AIRFLOW_UID}\r\nRUN pip install --user \\\r\n airflow-kubernetes-job-operator \\\r\n apache-airflow-backport-providers-cncf-kubernetes \\\r\n apache-airflow-backport-providers-ssh \\\r\n apache-airflow-upgrade-check\r\n```\r\n\r\nHow often does this problem occur? Once? Every time etc?: Every time since last week. Has worked before.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom packaging.version import Version\n\nfrom airflow.configuration import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.db import provide_session\n\n\nclass DatabaseVersionCheckRule(BaseRule):\n title = \"Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0\"\n\n description = \"\"\"\\\nFrom Airflow 2.0, the following database versions are supported:\nPostgreSQl - 9.6, 10, 11, 12, 13;\nMySQL - 5.7, 8;\nSQLite - 3.15+\n \"\"\"\n\n @provide_session\n def check(self, session=None):\n\n more_info = \"See link below for more details: https://github.com/apache/airflow#requirements\"\n\n conn_str = conf.get(section=\"core\", key=\"sql_alchemy_conn\")\n\n if \"sqlite\" in conn_str:\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n return \"From Airflow 2.0, SQLite version below 3.15 is no longer supported. \\n\" + more_info\n\n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n return \"From Airflow 2.0, PostgreSQL version below 9.6 is no longer supported. \\n\" + more_info\n\n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar())\n if installed_mysql_version < min_req_mysql_version:\n return \"From Airflow 2.0, MySQL version below 5.7 is no longer supported. \\n\" + more_info\n", "path": "airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom packaging.version import Version\n\nfrom airflow.configuration import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.db import provide_session\n\n\nclass DatabaseVersionCheckRule(BaseRule):\n title = \"Check versions of PostgreSQL, MySQL, and SQLite to ease upgrade to Airflow 2.0\"\n\n description = \"\"\"\\\nFrom Airflow 2.0, the following database versions are supported:\nPostgreSQl - 9.6, 10, 11, 12, 13;\nMySQL - 5.7, 8;\nSQLite - 3.15+\n \"\"\"\n\n @provide_session\n def check(self, session=None):\n\n more_info = \"See link below for more details: https://github.com/apache/airflow#requirements\"\n\n conn_str = conf.get(section=\"core\", key=\"sql_alchemy_conn\")\n\n if \"sqlite\" in conn_str:\n min_req_sqlite_version = Version('3.15')\n installed_sqlite_version = Version(session.execute('select sqlite_version();').scalar())\n if installed_sqlite_version < min_req_sqlite_version:\n return \"From Airflow 2.0, SQLite version below {} is no longer supported. \\n{}\".format(\n min_req_sqlite_version, more_info\n )\n\n elif \"postgres\" in conn_str:\n min_req_postgres_version = Version('9.6')\n installed_postgres_version = Version(session.execute('SHOW server_version;').scalar())\n if installed_postgres_version < min_req_postgres_version:\n return \"From Airflow 2.0, PostgreSQL version below {} is no longer supported. \\n{}\".format(\n min_req_postgres_version, more_info\n )\n\n elif \"mysql\" in conn_str:\n min_req_mysql_version = Version('5.7')\n # special treatment is needed here, because MySQL version may include a suffix like '-log'\n installed_mysql_version = Version(session.execute('SELECT VERSION();').scalar().split('-')[0])\n if installed_mysql_version < min_req_mysql_version:\n return \"From Airflow 2.0, MySQL version below {} is no longer supported. \\n{}\".format(\n min_req_mysql_version, more_info\n )\n", "path": "airflow/upgrade/rules/postgres_mysql_sqlite_version_upgrade_check.py"}]} | 2,025 | 486 |
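Note on the database version-check entry above: the reported traceback comes from MySQL version strings that carry a build suffix (for example `5.7.26-log`), which `packaging.version.Version` rejects as invalid. The accepted patch strips everything after the first `-` before parsing. A minimal standalone sketch of that idea — the literal version string is just the one quoted in the report:

```python
from packaging.version import Version

raw = "5.7.26-log"                      # as returned by SELECT VERSION();
installed = Version(raw.split("-")[0])  # drop the '-log' suffix before parsing
print(installed >= Version("5.7"))      # True
```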
gh_patches_debug_12748 | rasdani/github-patches | git_diff | searx__searx-233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Requests does not follow on an HTTP 301
We had already an issue when kickass redirected from https to http I seem to remember. Or it was on another engine. And the request wasn't followed so we had an empty response with status_code 301 in the engine.
Anyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.
So why isn't 301 followed ? Is there a way to force it ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/kickass.py`
Content:
```
1 ## Kickass Torrent (Videos, Music, Files)
2 #
3 # @website https://kickass.so
4 # @provide-api no (nothing found)
5 #
6 # @using-api no
7 # @results HTML (using search portal)
8 # @stable yes (HTML can change)
9 # @parse url, title, content, seed, leech, magnetlink
10
11 from urlparse import urljoin
12 from cgi import escape
13 from urllib import quote
14 from lxml import html
15 from operator import itemgetter
16 from searx.engines.xpath import extract_text
17
18 # engine dependent config
19 categories = ['videos', 'music', 'files']
20 paging = True
21
22 # search-url
23 url = 'https://kickass.so/'
24 search_url = url + 'search/{search_term}/{pageno}/'
25
26 # specific xpath variables
27 magnet_xpath = './/a[@title="Torrent magnet link"]'
28 torrent_xpath = './/a[@title="Download torrent file"]'
29 content_xpath = './/span[@class="font11px lightgrey block"]'
30
31
32 # do search-request
33 def request(query, params):
34 params['url'] = search_url.format(search_term=quote(query),
35 pageno=params['pageno'])
36
37 # FIX: SSLError: hostname 'kickass.so'
38 # doesn't match either of '*.kickass.to', 'kickass.to'
39 params['verify'] = False
40
41 return params
42
43
44 # get response from search-request
45 def response(resp):
46 results = []
47
48 dom = html.fromstring(resp.text)
49
50 search_res = dom.xpath('//table[@class="data"]//tr')
51
52 # return empty array if nothing is found
53 if not search_res:
54 return []
55
56 # parse results
57 for result in search_res[1:]:
58 link = result.xpath('.//a[@class="cellMainLink"]')[0]
59 href = urljoin(url, link.attrib['href'])
60 title = extract_text(link)
61 content = escape(extract_text(result.xpath(content_xpath)))
62 seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
63 leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
64 filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
65 filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
66 files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
67
68 # convert seed to int if possible
69 if seed.isdigit():
70 seed = int(seed)
71 else:
72 seed = 0
73
74 # convert leech to int if possible
75 if leech.isdigit():
76 leech = int(leech)
77 else:
78 leech = 0
79
80 # convert filesize to byte if possible
81 try:
82 filesize = float(filesize)
83
84 # convert filesize to byte
85 if filesize_multiplier == 'TB':
86 filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
87 elif filesize_multiplier == 'GB':
88 filesize = int(filesize * 1024 * 1024 * 1024)
89 elif filesize_multiplier == 'MB':
90 filesize = int(filesize * 1024 * 1024)
91 elif filesize_multiplier == 'KB':
92 filesize = int(filesize * 1024)
93 except:
94 filesize = None
95
96 # convert files to int if possible
97 if files.isdigit():
98 files = int(files)
99 else:
100 files = None
101
102 magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
103
104 torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
105 torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
106
107 # append result
108 results.append({'url': href,
109 'title': title,
110 'content': content,
111 'seed': seed,
112 'leech': leech,
113 'filesize': filesize,
114 'files': files,
115 'magnetlink': magnetlink,
116 'torrentfile': torrentfileurl,
117 'template': 'torrent.html'})
118
119 # return results sorted by seeder
120 return sorted(results, key=itemgetter('seed'), reverse=True)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -20,7 +20,7 @@
paging = True
# search-url
-url = 'https://kickass.so/'
+url = 'https://kickass.to/'
search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
@@ -45,6 +45,11 @@
def response(resp):
results = []
+ # check if redirect comparing to the True value,
+ # because resp can be a Mock object, and any attribut name returns something.
+ if resp.is_redirect is True:
+ return results
+
dom = html.fromstring(resp.text)
search_res = dom.xpath('//table[@class="data"]//tr')
| {"golden_diff": "diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py\n--- a/searx/engines/kickass.py\n+++ b/searx/engines/kickass.py\n@@ -20,7 +20,7 @@\n paging = True\n \n # search-url\n-url = 'https://kickass.so/'\n+url = 'https://kickass.to/'\n search_url = url + 'search/{search_term}/{pageno}/'\n \n # specific xpath variables\n@@ -45,6 +45,11 @@\n def response(resp):\n results = []\n \n+ # check if redirect comparing to the True value,\n+ # because resp can be a Mock object, and any attribut name returns something.\n+ if resp.is_redirect is True:\n+ return results\n+\n dom = html.fromstring(resp.text)\n \n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n", "issue": "Requests does not follow on an HTTP 301\nWe had already an issue when kickass redirected from https to http I seem to remember. Or it was on another engine. And the request wasn't followed so we had an empty response with status_code 301 in the engine.\n\nAnyway, that is still the case in kickass if we try to perform a search when the query contains capitalized letters.\n\nSo why isn't 301 followed ? Is there a way to force it ?\n\n", "before_files": [{"content": "## Kickass Torrent (Videos, Music, Files)\n#\n# @website https://kickass.so\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://kickass.so/'\nsearch_url = url + 'search/{search_term}/{pageno}/'\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Torrent magnet link\"]'\ntorrent_xpath = './/a[@title=\"Download torrent file\"]'\ncontent_xpath = './/span[@class=\"font11px lightgrey block\"]'\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(search_term=quote(query),\n pageno=params['pageno'])\n\n # FIX: SSLError: hostname 'kickass.so'\n # doesn't match either of '*.kickass.to', 'kickass.to'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//a[@class=\"cellMainLink\"]')[0]\n href = urljoin(url, link.attrib['href'])\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed = result.xpath('.//td[contains(@class, \"green\")]/text()')[0]\n leech = result.xpath('.//td[contains(@class, \"red\")]/text()')[0]\n filesize = result.xpath('.//td[contains(@class, \"nobr\")]/text()')[0]\n filesize_multiplier = result.xpath('.//td[contains(@class, \"nobr\")]//span/text()')[0]\n files = result.xpath('.//td[contains(@class, \"center\")][2]/text()')[0]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n # convert filesize to byte if possible\n try:\n filesize = float(filesize)\n\n # convert filesize to byte\n if filesize_multiplier == 'TB':\n filesize = int(filesize * 1024 * 1024 * 1024 
* 1024)\n elif filesize_multiplier == 'GB':\n filesize = int(filesize * 1024 * 1024 * 1024)\n elif filesize_multiplier == 'MB':\n filesize = int(filesize * 1024 * 1024)\n elif filesize_multiplier == 'KB':\n filesize = int(filesize * 1024)\n except:\n filesize = None\n\n # convert files to int if possible\n if files.isdigit():\n files = int(files)\n else:\n files = None\n\n magnetlink = result.xpath(magnet_xpath)[0].attrib['href']\n\n torrentfile = result.xpath(torrent_xpath)[0].attrib['href']\n torrentfileurl = quote(torrentfile, safe=\"%/:=&?~#+!$,;'@()*\")\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'filesize': filesize,\n 'files': files,\n 'magnetlink': magnetlink,\n 'torrentfile': torrentfileurl,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/kickass.py"}], "after_files": [{"content": "## Kickass Torrent (Videos, Music, Files)\n#\n# @website https://kickass.so\n# @provide-api no (nothing found)\n#\n# @using-api no\n# @results HTML (using search portal)\n# @stable yes (HTML can change)\n# @parse url, title, content, seed, leech, magnetlink\n\nfrom urlparse import urljoin\nfrom cgi import escape\nfrom urllib import quote\nfrom lxml import html\nfrom operator import itemgetter\nfrom searx.engines.xpath import extract_text\n\n# engine dependent config\ncategories = ['videos', 'music', 'files']\npaging = True\n\n# search-url\nurl = 'https://kickass.to/'\nsearch_url = url + 'search/{search_term}/{pageno}/'\n\n# specific xpath variables\nmagnet_xpath = './/a[@title=\"Torrent magnet link\"]'\ntorrent_xpath = './/a[@title=\"Download torrent file\"]'\ncontent_xpath = './/span[@class=\"font11px lightgrey block\"]'\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(search_term=quote(query),\n pageno=params['pageno'])\n\n # FIX: SSLError: hostname 'kickass.so'\n # doesn't match either of '*.kickass.to', 'kickass.to'\n params['verify'] = False\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n # check if redirect comparing to the True value,\n # because resp can be a Mock object, and any attribut name returns something.\n if resp.is_redirect is True:\n return results\n\n dom = html.fromstring(resp.text)\n\n search_res = dom.xpath('//table[@class=\"data\"]//tr')\n\n # return empty array if nothing is found\n if not search_res:\n return []\n\n # parse results\n for result in search_res[1:]:\n link = result.xpath('.//a[@class=\"cellMainLink\"]')[0]\n href = urljoin(url, link.attrib['href'])\n title = extract_text(link)\n content = escape(extract_text(result.xpath(content_xpath)))\n seed = result.xpath('.//td[contains(@class, \"green\")]/text()')[0]\n leech = result.xpath('.//td[contains(@class, \"red\")]/text()')[0]\n filesize = result.xpath('.//td[contains(@class, \"nobr\")]/text()')[0]\n filesize_multiplier = result.xpath('.//td[contains(@class, \"nobr\")]//span/text()')[0]\n files = result.xpath('.//td[contains(@class, \"center\")][2]/text()')[0]\n\n # convert seed to int if possible\n if seed.isdigit():\n seed = int(seed)\n else:\n seed = 0\n\n # convert leech to int if possible\n if leech.isdigit():\n leech = int(leech)\n else:\n leech = 0\n\n # convert filesize to byte if possible\n try:\n filesize = float(filesize)\n\n # convert filesize to byte\n if filesize_multiplier == 'TB':\n filesize = int(filesize * 1024 * 1024 * 
1024 * 1024)\n elif filesize_multiplier == 'GB':\n filesize = int(filesize * 1024 * 1024 * 1024)\n elif filesize_multiplier == 'MB':\n filesize = int(filesize * 1024 * 1024)\n elif filesize_multiplier == 'KB':\n filesize = int(filesize * 1024)\n except:\n filesize = None\n\n # convert files to int if possible\n if files.isdigit():\n files = int(files)\n else:\n files = None\n\n magnetlink = result.xpath(magnet_xpath)[0].attrib['href']\n\n torrentfile = result.xpath(torrent_xpath)[0].attrib['href']\n torrentfileurl = quote(torrentfile, safe=\"%/:=&?~#+!$,;'@()*\")\n\n # append result\n results.append({'url': href,\n 'title': title,\n 'content': content,\n 'seed': seed,\n 'leech': leech,\n 'filesize': filesize,\n 'files': files,\n 'magnetlink': magnetlink,\n 'torrentfile': torrentfileurl,\n 'template': 'torrent.html'})\n\n # return results sorted by seeder\n return sorted(results, key=itemgetter('seed'), reverse=True)\n", "path": "searx/engines/kickass.py"}]} | 1,570 | 206 |
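For the searx/kickass entry above, the accepted fix has two parts: the engine now targets `https://kickass.to/` directly, and `response()` returns an empty result list when the reply is a redirect instead of a results page. A condensed sketch of the guard — the HTML-parsing body is elided:

```python
def response(resp):
    results = []
    # Compare with True explicitly: in unit tests `resp` may be a Mock,
    # where plain attribute access would always look truthy.
    if resp.is_redirect is True:
        return results
    # ... normal result parsing continues here ...
    return results
```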
gh_patches_debug_21270 | rasdani/github-patches | git_diff | uccser__cs-unplugged-210 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add 'About' page content
Using content from existing website.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/general/urls.py`
Content:
```
1 """URL routing for the general application."""
2
3 from django.conf.urls import url
4
5 from . import views
6
7 urlpatterns = [
8 url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
9 url(r'^$', views.GeneralIndexView.as_view(), name='home'),
10 ]
11
```
Path: `csunplugged/general/views.py`
Content:
```
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = 'general/index.html'
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = 'general/about.html'
17
18
19 def health_check(request):
20 """Return heath check response for Google App Engine.
21
22 Returns a 200 HTTP response for Google App Engine to detect the system
23 is running.
24 """
25 return HttpResponse(status=200)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py
--- a/csunplugged/general/urls.py
+++ b/csunplugged/general/urls.py
@@ -6,5 +6,8 @@
urlpatterns = [
url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),
+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),
+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),
+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),
url(r'^$', views.GeneralIndexView.as_view(), name='home'),
]
diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -16,6 +16,24 @@
template_name = 'general/about.html'
+class GeneralContactView(TemplateView):
+ """View for the contact page that renders from a template."""
+
+ template_name = 'general/contact.html'
+
+
+class GeneralPeopleView(TemplateView):
+ """View for the people page that renders from a template."""
+
+ template_name = 'general/people.html'
+
+
+class GeneralPrinciplesView(TemplateView):
+ """View for the princples page that renders from a template."""
+
+ template_name = 'general/principles.html'
+
+
def health_check(request):
"""Return heath check response for Google App Engine.
| {"golden_diff": "diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py\n--- a/csunplugged/general/urls.py\n+++ b/csunplugged/general/urls.py\n@@ -6,5 +6,8 @@\n \n urlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n+ url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),\n+ url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),\n+ url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n ]\ndiff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -16,6 +16,24 @@\n template_name = 'general/about.html'\n \n \n+class GeneralContactView(TemplateView):\n+ \"\"\"View for the contact page that renders from a template.\"\"\"\n+\n+ template_name = 'general/contact.html'\n+\n+\n+class GeneralPeopleView(TemplateView):\n+ \"\"\"View for the people page that renders from a template.\"\"\"\n+\n+ template_name = 'general/people.html'\n+\n+\n+class GeneralPrinciplesView(TemplateView):\n+ \"\"\"View for the princples page that renders from a template.\"\"\"\n+\n+ template_name = 'general/principles.html'\n+\n+\n def health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n", "issue": "Add 'About' page content\nUsing content from existing website.\n", "before_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = 'general/about.html'\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}], "after_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^about/$', views.GeneralAboutView.as_view(), name='about'),\n url(r'^contact/$', views.GeneralContactView.as_view(), name='contact'),\n url(r'^people/$', views.GeneralPeopleView.as_view(), name='people'),\n url(r'^principles/$', views.GeneralPrinciplesView.as_view(), name='principles'),\n url(r'^$', views.GeneralIndexView.as_view(), name='home'),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = 'general/index.html'\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = 'general/about.html'\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = 'general/contact.html'\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = 'general/people.html'\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = 'general/principles.html'\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]} | 548 | 345 |
gh_patches_debug_12277 | rasdani/github-patches | git_diff | kserve__kserve-2899 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make poetry version plugin to update the pyproject.toml as well not just the build
/kind bug
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
Currently the poetry plugin updates the version during the build, install phases but does not update the pyproject.toml
This makes it harder to easily decipher the version of the package.
**What did you expect to happen:**
Running poetry build or install should set the version of the package in pyproject.toml
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py`
Content:
```
1 import ast
2 import subprocess
3 from pathlib import Path
4 from typing import Any, Dict, Optional
5
6 from cleo.io.io import IO
7 from poetry.plugins.plugin import Plugin
8 from poetry.poetry import Poetry
9 from poetry.core.utils.helpers import module_name
10
11
12 class VersionPlugin(Plugin): # type: ignore
13 def activate(self, poetry: Poetry, io: IO) -> None:
14 poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(
15 "tool", {}
16 ).get("poetry-version-plugin")
17 if poetry_version_config is None:
18 return
19 version_source = poetry_version_config.get("source")
20 version_file_path = poetry_version_config.get("file_path")
21 if not version_source:
22 message = (
23 "<b>poetry-version-plugin</b>: No <b>source</b> configuration found in "
24 "[tool.poetry-version-plugin] in pyproject.toml, not extracting "
25 "dynamic version"
26 )
27 io.write_error_line(message)
28 raise RuntimeError(message)
29
30 # Update version using file
31 if version_source == "file" and version_file_path:
32 with open(version_file_path, "r") as version_file:
33 version = version_file.read().strip()
34 poetry.package._set_version(version)
35 io.write_line(
36 "<b>poetry-version-plugin</b>: New package "
37 "version is updated from given file "
38 f"version is: <b>{version}</b>"
39 )
40
41 if version_source == "init":
42 packages = poetry.local_config.get("packages")
43 if packages:
44 if len(packages) == 1:
45 package_name = packages[0]["include"]
46 else:
47 message = (
48 "<b>poetry-version-plugin</b>: More than one package set, "
49 "cannot extract dynamic version"
50 )
51 io.write_error_line(message)
52 raise RuntimeError(message)
53 else:
54 package_name = module_name(poetry.package.name)
55 init_path = Path(package_name) / "__init__.py"
56 if not init_path.is_file():
57 message = (
58 "<b>poetry-version-plugin</b>: __init__.py file not found at "
59 f"{init_path} cannot extract dynamic version"
60 )
61 io.write_error_line(message)
62 raise RuntimeError(message)
63 else:
64 io.write_line(
65 "<b>poetry-version-plugin</b>: Using __init__.py file at "
66 f"{init_path} for dynamic version"
67 )
68 tree = ast.parse(init_path.read_text())
69 for el in tree.body:
70 if isinstance(el, ast.Assign):
71 if len(el.targets) == 1:
72 target = el.targets[0]
73 if isinstance(target, ast.Name):
74 if target.id == "__version__":
75 value_node = el.value
76 if isinstance(value_node, ast.Constant):
77 version = value_node.value
78 elif isinstance(value_node, ast.Str):
79 version = value_node.s
80 else: # pragma: nocover
81 # This is actually covered by tests, but can't be
82 # reported by Coverage
83 # Ref: https://github.com/nedbat/coveragepy/issues/198
84 continue
85 io.write_line(
86 "<b>poetry-version-plugin</b>: Setting package "
87 "dynamic version to __version__ "
88 f"variable from __init__.py: <b>{version}</b>"
89 )
90 poetry.package._set_version(version)
91 return
92 message = (
93 "<b>poetry-version-plugin</b>: No valid __version__ variable found "
94 "in __init__.py, cannot extract dynamic version"
95 )
96 io.write_error_line(message)
97 raise RuntimeError(message)
98 elif version_source == "git-tag":
99 result = subprocess.run(
100 ["git", "describe", "--exact-match", "--tags", "HEAD"],
101 stdin=subprocess.PIPE,
102 stdout=subprocess.PIPE,
103 universal_newlines=True,
104 )
105 if result.returncode == 0:
106 tag = result.stdout.strip()
107 io.write_line(
108 "<b>poetry-version-plugin</b>: Git tag found, setting "
109 f"dynamic version to: {tag}"
110 )
111 poetry.package._set_version(tag)
112 return
113 else:
114 message = (
115 "<b>poetry-version-plugin</b>: No Git tag found, not "
116 "extracting dynamic version"
117 )
118 io.write_error_line(message)
119 raise RuntimeError(message)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py
@@ -37,6 +37,13 @@
"version is updated from given file "
f"version is: <b>{version}</b>"
)
+ content: dict[str, Any] = poetry.file.read()
+ poetry_content = content["tool"]["poetry"]
+ poetry_content["version"] = version
+ poetry.file.write(content)
+ io.write_line(
+ "<b>poetry-version-plugin</b>: New version updated in toml file"
+ )
if version_source == "init":
packages = poetry.local_config.get("packages")
| {"golden_diff": "diff --git a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n--- a/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n+++ b/python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py\n@@ -37,6 +37,13 @@\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n+ content: dict[str, Any] = poetry.file.read()\n+ poetry_content = content[\"tool\"][\"poetry\"]\n+ poetry_content[\"version\"] = version\n+ poetry.file.write(content)\n+ io.write_line(\n+ \"<b>poetry-version-plugin</b>: New version updated in toml file\"\n+ )\n \n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n", "issue": "Make poetry version plugin to update the pyproject.toml as well not just the build\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n[A clear and concise description of what the bug is.]\r\nCurrently the poetry plugin updates the version during the build, install phases but does not update the pyproject.toml\r\nThis makes it harder to easily decipher the version of the package.\r\n\r\n**What did you expect to happen:**\r\nRunning poetry build or install should set the version of the package in pyproject.toml\r\n\r\n\n", "before_files": [{"content": "import ast\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom cleo.io.io import IO\nfrom poetry.plugins.plugin import Plugin\nfrom poetry.poetry import Poetry\nfrom poetry.core.utils.helpers import module_name\n\n\nclass VersionPlugin(Plugin): # type: ignore\n def activate(self, poetry: Poetry, io: IO) -> None:\n poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(\n \"tool\", {}\n ).get(\"poetry-version-plugin\")\n if poetry_version_config is None:\n return\n version_source = poetry_version_config.get(\"source\")\n version_file_path = poetry_version_config.get(\"file_path\")\n if not version_source:\n message = (\n \"<b>poetry-version-plugin</b>: No <b>source</b> configuration found in \"\n \"[tool.poetry-version-plugin] in pyproject.toml, not extracting \"\n \"dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n\n # Update version using file\n if version_source == \"file\" and version_file_path:\n with open(version_file_path, \"r\") as version_file:\n version = version_file.read().strip()\n poetry.package._set_version(version)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New package \"\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n\n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n if packages:\n if len(packages) == 1:\n package_name = packages[0][\"include\"]\n else:\n message = (\n \"<b>poetry-version-plugin</b>: More than one package set, \"\n \"cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n package_name = module_name(poetry.package.name)\n init_path = Path(package_name) / \"__init__.py\"\n if not init_path.is_file():\n message = (\n \"<b>poetry-version-plugin</b>: __init__.py file not found at \"\n f\"{init_path} cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n io.write_line(\n \"<b>poetry-version-plugin</b>: Using __init__.py file at \"\n f\"{init_path} for dynamic version\"\n )\n tree = ast.parse(init_path.read_text())\n for el in tree.body:\n if 
isinstance(el, ast.Assign):\n if len(el.targets) == 1:\n target = el.targets[0]\n if isinstance(target, ast.Name):\n if target.id == \"__version__\":\n value_node = el.value\n if isinstance(value_node, ast.Constant):\n version = value_node.value\n elif isinstance(value_node, ast.Str):\n version = value_node.s\n else: # pragma: nocover\n # This is actually covered by tests, but can't be\n # reported by Coverage\n # Ref: https://github.com/nedbat/coveragepy/issues/198\n continue\n io.write_line(\n \"<b>poetry-version-plugin</b>: Setting package \"\n \"dynamic version to __version__ \"\n f\"variable from __init__.py: <b>{version}</b>\"\n )\n poetry.package._set_version(version)\n return\n message = (\n \"<b>poetry-version-plugin</b>: No valid __version__ variable found \"\n \"in __init__.py, cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n elif version_source == \"git-tag\":\n result = subprocess.run(\n [\"git\", \"describe\", \"--exact-match\", \"--tags\", \"HEAD\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if result.returncode == 0:\n tag = result.stdout.strip()\n io.write_line(\n \"<b>poetry-version-plugin</b>: Git tag found, setting \"\n f\"dynamic version to: {tag}\"\n )\n poetry.package._set_version(tag)\n return\n else:\n message = (\n \"<b>poetry-version-plugin</b>: No Git tag found, not \"\n \"extracting dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n", "path": "python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py"}], "after_files": [{"content": "import ast\nimport subprocess\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nfrom cleo.io.io import IO\nfrom poetry.plugins.plugin import Plugin\nfrom poetry.poetry import Poetry\nfrom poetry.core.utils.helpers import module_name\n\n\nclass VersionPlugin(Plugin): # type: ignore\n def activate(self, poetry: Poetry, io: IO) -> None:\n poetry_version_config: Optional[Dict[str, Any]] = poetry.pyproject.data.get(\n \"tool\", {}\n ).get(\"poetry-version-plugin\")\n if poetry_version_config is None:\n return\n version_source = poetry_version_config.get(\"source\")\n version_file_path = poetry_version_config.get(\"file_path\")\n if not version_source:\n message = (\n \"<b>poetry-version-plugin</b>: No <b>source</b> configuration found in \"\n \"[tool.poetry-version-plugin] in pyproject.toml, not extracting \"\n \"dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n\n # Update version using file\n if version_source == \"file\" and version_file_path:\n with open(version_file_path, \"r\") as version_file:\n version = version_file.read().strip()\n poetry.package._set_version(version)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New package \"\n \"version is updated from given file \"\n f\"version is: <b>{version}</b>\"\n )\n content: dict[str, Any] = poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n poetry_content[\"version\"] = version\n poetry.file.write(content)\n io.write_line(\n \"<b>poetry-version-plugin</b>: New version updated in toml file\"\n )\n\n if version_source == \"init\":\n packages = poetry.local_config.get(\"packages\")\n if packages:\n if len(packages) == 1:\n package_name = packages[0][\"include\"]\n else:\n message = (\n \"<b>poetry-version-plugin</b>: More than one package set, \"\n \"cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n package_name = 
module_name(poetry.package.name)\n init_path = Path(package_name) / \"__init__.py\"\n if not init_path.is_file():\n message = (\n \"<b>poetry-version-plugin</b>: __init__.py file not found at \"\n f\"{init_path} cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n else:\n io.write_line(\n \"<b>poetry-version-plugin</b>: Using __init__.py file at \"\n f\"{init_path} for dynamic version\"\n )\n tree = ast.parse(init_path.read_text())\n for el in tree.body:\n if isinstance(el, ast.Assign):\n if len(el.targets) == 1:\n target = el.targets[0]\n if isinstance(target, ast.Name):\n if target.id == \"__version__\":\n value_node = el.value\n if isinstance(value_node, ast.Constant):\n version = value_node.value\n elif isinstance(value_node, ast.Str):\n version = value_node.s\n else: # pragma: nocover\n # This is actually covered by tests, but can't be\n # reported by Coverage\n # Ref: https://github.com/nedbat/coveragepy/issues/198\n continue\n io.write_line(\n \"<b>poetry-version-plugin</b>: Setting package \"\n \"dynamic version to __version__ \"\n f\"variable from __init__.py: <b>{version}</b>\"\n )\n poetry.package._set_version(version)\n return\n message = (\n \"<b>poetry-version-plugin</b>: No valid __version__ variable found \"\n \"in __init__.py, cannot extract dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n elif version_source == \"git-tag\":\n result = subprocess.run(\n [\"git\", \"describe\", \"--exact-match\", \"--tags\", \"HEAD\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n )\n if result.returncode == 0:\n tag = result.stdout.strip()\n io.write_line(\n \"<b>poetry-version-plugin</b>: Git tag found, setting \"\n f\"dynamic version to: {tag}\"\n )\n poetry.package._set_version(tag)\n return\n else:\n message = (\n \"<b>poetry-version-plugin</b>: No Git tag found, not \"\n \"extracting dynamic version\"\n )\n io.write_error_line(message)\n raise RuntimeError(message)\n", "path": "python/plugin/poetry-version-plugin/poetry_version_plugin/plugin.py"}]} | 1,573 | 196 |
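The poetry-version-plugin entry above is resolved by persisting the dynamically resolved version into `pyproject.toml` rather than only updating the in-memory package metadata. The core of the change, shown out of context — `poetry` and `version` are the objects already available inside `activate`:

```python
content = poetry.file.read()                    # parsed pyproject.toml document
content["tool"]["poetry"]["version"] = version  # set the resolved version
poetry.file.write(content)                      # write it back to disk
```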
gh_patches_debug_3921 | rasdani/github-patches | git_diff | bokeh__bokeh-5427 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
embed/animated fails with "did not find model"
`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/embed/animated.py`
Content:
```
1 """ To view this example, first start a Bokeh server:
2
3 bokeh serve --allow-websocket-origin=localhost:8000
4
5 And then load the example into the Bokeh server by
6 running the script:
7
8 python animated.py
9
10 in this directory. Finally, start a simple web server
11 by running:
12
13 python -m SimpleHTTPServer (python 2)
14
15 or
16
17 python -m http.server (python 3)
18
19 in this directory. Navigate to
20
21 http://localhost:8000/animated.html
22
23 """
24 from __future__ import print_function
25
26 from numpy import pi, cos, sin, linspace, roll
27
28 from bokeh.client import push_session
29 from bokeh.embed import autoload_server
30 from bokeh.plotting import figure, curdoc
31
32 M = 5
33 N = M*10 + 1
34 r_base = 8
35 theta = linspace(0, 2*pi, N)
36 r_x = linspace(0, 6*pi, N-1)
37 rmin = r_base - cos(r_x) - 1
38 rmax = r_base + sin(r_x) + 1
39
40 colors = ["FFFFCC", "#C7E9B4", "#7FCDBB", "#41B6C4", "#2C7FB8",
41 "#253494", "#2C7FB8", "#41B6C4", "#7FCDBB", "#C7E9B4"] * 5
42
43 # figure() function auto-adds the figure to curdoc()
44 p = figure(x_range=(-11, 11), y_range=(-11, 11))
45 r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],
46 fill_color=colors, line_color="white")
47
48 # open a session which will keep our local doc in sync with server
49 session = push_session(curdoc())
50
51 html = """
52 <html>
53 <head></head>
54 <body>
55 %s
56 </body>
57 </html>
58 """ % autoload_server(p, session_id=session.id)
59
60 with open("animated.html", "w+") as f:
61 f.write(html)
62
63 print(__doc__)
64
65 ds = r.data_source
66
67 def update():
68 rmin = roll(ds.data["inner_radius"], 1)
69 rmax = roll(ds.data["outer_radius"], -1)
70 ds.data.update(inner_radius=rmin, outer_radius=rmax)
71
72 curdoc().add_periodic_callback(update, 30)
73
74 session.loop_until_closed() # run forever
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/embed/animated.py b/examples/embed/animated.py
--- a/examples/embed/animated.py
+++ b/examples/embed/animated.py
@@ -45,6 +45,9 @@
r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],
fill_color=colors, line_color="white")
+# add the plot to curdoc
+curdoc().add_root(p)
+
# open a session which will keep our local doc in sync with server
session = push_session(curdoc())
| {"golden_diff": "diff --git a/examples/embed/animated.py b/examples/embed/animated.py\n--- a/examples/embed/animated.py\n+++ b/examples/embed/animated.py\n@@ -45,6 +45,9 @@\n r = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n \n+# add the plot to curdoc\n+curdoc().add_root(p)\n+\n # open a session which will keep our local doc in sync with server\n session = push_session(curdoc())\n", "issue": "embed/animated fails with \"did not find model\"\n`Error rendering Bokeh items Error: Did not find model ID_HERE in session`.\n\n", "before_files": [{"content": "\"\"\" To view this example, first start a Bokeh server:\n\n bokeh serve --allow-websocket-origin=localhost:8000\n\nAnd then load the example into the Bokeh server by\nrunning the script:\n\n python animated.py\n\nin this directory. Finally, start a simple web server\nby running:\n\n python -m SimpleHTTPServer (python 2)\n\nor\n\n python -m http.server (python 3)\n\nin this directory. Navigate to\n\n http://localhost:8000/animated.html\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom numpy import pi, cos, sin, linspace, roll\n\nfrom bokeh.client import push_session\nfrom bokeh.embed import autoload_server\nfrom bokeh.plotting import figure, curdoc\n\nM = 5\nN = M*10 + 1\nr_base = 8\ntheta = linspace(0, 2*pi, N)\nr_x = linspace(0, 6*pi, N-1)\nrmin = r_base - cos(r_x) - 1\nrmax = r_base + sin(r_x) + 1\n\ncolors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n# figure() function auto-adds the figure to curdoc()\np = figure(x_range=(-11, 11), y_range=(-11, 11))\nr = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n\n# open a session which will keep our local doc in sync with server\nsession = push_session(curdoc())\n\nhtml = \"\"\"\n<html>\n <head></head>\n <body>\n %s\n </body>\n</html>\n\"\"\" % autoload_server(p, session_id=session.id)\n\nwith open(\"animated.html\", \"w+\") as f:\n f.write(html)\n\nprint(__doc__)\n\nds = r.data_source\n\ndef update():\n rmin = roll(ds.data[\"inner_radius\"], 1)\n rmax = roll(ds.data[\"outer_radius\"], -1)\n ds.data.update(inner_radius=rmin, outer_radius=rmax)\n\ncurdoc().add_periodic_callback(update, 30)\n\nsession.loop_until_closed() # run forever\n", "path": "examples/embed/animated.py"}], "after_files": [{"content": "\"\"\" To view this example, first start a Bokeh server:\n\n bokeh serve --allow-websocket-origin=localhost:8000\n\nAnd then load the example into the Bokeh server by\nrunning the script:\n\n python animated.py\n\nin this directory. Finally, start a simple web server\nby running:\n\n python -m SimpleHTTPServer (python 2)\n\nor\n\n python -m http.server (python 3)\n\nin this directory. 
Navigate to\n\n http://localhost:8000/animated.html\n\n\"\"\"\nfrom __future__ import print_function\n\nfrom numpy import pi, cos, sin, linspace, roll\n\nfrom bokeh.client import push_session\nfrom bokeh.embed import autoload_server\nfrom bokeh.plotting import figure, curdoc\n\nM = 5\nN = M*10 + 1\nr_base = 8\ntheta = linspace(0, 2*pi, N)\nr_x = linspace(0, 6*pi, N-1)\nrmin = r_base - cos(r_x) - 1\nrmax = r_base + sin(r_x) + 1\n\ncolors = [\"FFFFCC\", \"#C7E9B4\", \"#7FCDBB\", \"#41B6C4\", \"#2C7FB8\",\n \"#253494\", \"#2C7FB8\", \"#41B6C4\", \"#7FCDBB\", \"#C7E9B4\"] * 5\n\n# figure() function auto-adds the figure to curdoc()\np = figure(x_range=(-11, 11), y_range=(-11, 11))\nr = p.annular_wedge(0, 0, rmin, rmax, theta[:-1], theta[1:],\n fill_color=colors, line_color=\"white\")\n\n# add the plot to curdoc\ncurdoc().add_root(p)\n\n# open a session which will keep our local doc in sync with server\nsession = push_session(curdoc())\n\nhtml = \"\"\"\n<html>\n <head></head>\n <body>\n %s\n </body>\n</html>\n\"\"\" % autoload_server(p, session_id=session.id)\n\nwith open(\"animated.html\", \"w+\") as f:\n f.write(html)\n\nprint(__doc__)\n\nds = r.data_source\n\ndef update():\n rmin = roll(ds.data[\"inner_radius\"], 1)\n rmax = roll(ds.data[\"outer_radius\"], -1)\n ds.data.update(inner_radius=rmin, outer_radius=rmax)\n\ncurdoc().add_periodic_callback(update, 30)\n\nsession.loop_until_closed() # run forever\n", "path": "examples/embed/animated.py"}]} | 997 | 124 |
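In the bokeh embed/animated entry above, the "Did not find model" error is fixed by explicitly registering the plot with the current document before the session is pushed, so that the `autoload_server(p, session_id=session.id)` snippet refers to a model that actually exists in that server session:

```python
curdoc().add_root(p)              # make the plot part of the document...
session = push_session(curdoc())  # ...before pushing it to the Bokeh server
```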
gh_patches_debug_40839 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stop reading Cloud SDK's configuration to obtain the project ID
Instead, we should subprocess out to `gcloud config config-helper --format json`
Example output:
```
{
"configuration": {
"active_configuration": "default",
"properties": {
"core": {
"account": "[ELIDED]",
"disable_usage_reporting": "False",
"project": "[ELIDED]"
}
}
},
"credential": {
"access_token": "[ELIDED]",
"token_expiry": "2017-03-23T23:09:49Z"
},
"sentinels": {
"config_sentinel": "/Users/jonwayne/.config/gcloud/config_sentinel"
}
}
```
Note: we should *not* use the auth token provided here at all. We should keep our existing logic of reading `~/.config/gcloud/application_default_credentials.json` because those credentials are populated exclusively by `gcloud auth application-default login` and intended for Application Default Credentials.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/auth/_cloud_sdk.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Helpers for reading the Google Cloud SDK's configuration."""
16
17 import io
18 import os
19
20 import six
21 from six.moves import configparser
22
23 from google.auth import environment_vars
24 import google.oauth2.credentials
25
26 # The Google OAuth 2.0 token endpoint. Used for authorized user credentials.
27 _GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'
28
29 # The ~/.config subdirectory containing gcloud credentials.
30 _CONFIG_DIRECTORY = 'gcloud'
31 # Windows systems store config at %APPDATA%\gcloud
32 _WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'
33 # The name of the file in the Cloud SDK config that contains default
34 # credentials.
35 _CREDENTIALS_FILENAME = 'application_default_credentials.json'
36 # The config section and key for the project ID in the cloud SDK config.
37 _PROJECT_CONFIG_SECTION = 'core'
38 _PROJECT_CONFIG_KEY = 'project'
39
40
41 def get_config_path():
42 """Returns the absolute path the the Cloud SDK's configuration directory.
43
44 Returns:
45 str: The Cloud SDK config path.
46 """
47 # If the path is explicitly set, return that.
48 try:
49 return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]
50 except KeyError:
51 pass
52
53 # Non-windows systems store this at ~/.config/gcloud
54 if os.name != 'nt':
55 return os.path.join(
56 os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)
57 # Windows systems store config at %APPDATA%\gcloud
58 else:
59 try:
60 return os.path.join(
61 os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],
62 _CONFIG_DIRECTORY)
63 except KeyError:
64 # This should never happen unless someone is really
65 # messing with things, but we'll cover the case anyway.
66 drive = os.environ.get('SystemDrive', 'C:')
67 return os.path.join(
68 drive, '\\', _CONFIG_DIRECTORY)
69
70
71 def get_application_default_credentials_path():
72 """Gets the path to the application default credentials file.
73
74 The path may or may not exist.
75
76 Returns:
77 str: The full path to application default credentials.
78 """
79 config_path = get_config_path()
80 return os.path.join(config_path, _CREDENTIALS_FILENAME)
81
82
83 def _get_active_config(config_path):
84 """Gets the active config for the Cloud SDK.
85
86 Args:
87 config_path (str): The Cloud SDK's config path.
88
89 Returns:
90 str: The active configuration name.
91 """
92 active_config_filename = os.path.join(config_path, 'active_config')
93
94 if not os.path.isfile(active_config_filename):
95 return 'default'
96
97 with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:
98 active_config_name = file_obj.read().strip()
99
100 return active_config_name
101
102
103 def _get_config_file(config_path, config_name):
104 """Returns the full path to a configuration's config file.
105
106 Args:
107 config_path (str): The Cloud SDK's config path.
108 config_name (str): The configuration name.
109
110 Returns:
111 str: The config file path.
112 """
113 return os.path.join(
114 config_path, 'configurations', 'config_{}'.format(config_name))
115
116
117 def get_project_id():
118 """Gets the project ID from the Cloud SDK's configuration.
119
120 Returns:
121 Optional[str]: The project ID.
122 """
123 config_path = get_config_path()
124 active_config = _get_active_config(config_path)
125 config_file = _get_config_file(config_path, active_config)
126
127 if not os.path.isfile(config_file):
128 return None
129
130 config = configparser.RawConfigParser()
131
132 try:
133 config.read(config_file)
134
135 if config.has_section(_PROJECT_CONFIG_SECTION):
136 return config.get(
137 _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)
138
139 except configparser.Error:
140 return None
141
142
143 def load_authorized_user_credentials(info):
144 """Loads an authorized user credential.
145
146 Args:
147 info (Mapping[str, str]): The loaded file's data.
148
149 Returns:
150 google.oauth2.credentials.Credentials: The constructed credentials.
151
152 Raises:
153 ValueError: if the info is in the wrong format or missing data.
154 """
155 keys_needed = set(('refresh_token', 'client_id', 'client_secret'))
156 missing = keys_needed.difference(six.iterkeys(info))
157
158 if missing:
159 raise ValueError(
160 'Authorized user info was not in the expected format, missing '
161 'fields {}.'.format(', '.join(missing)))
162
163 return google.oauth2.credentials.Credentials(
164 None, # No access token, must be refreshed.
165 refresh_token=info['refresh_token'],
166 token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
167 client_id=info['client_id'],
168 client_secret=info['client_secret'])
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py
--- a/google/auth/_cloud_sdk.py
+++ b/google/auth/_cloud_sdk.py
@@ -14,11 +14,11 @@
"""Helpers for reading the Google Cloud SDK's configuration."""
-import io
+import json
import os
+import subprocess
import six
-from six.moves import configparser
from google.auth import environment_vars
import google.oauth2.credentials
@@ -33,9 +33,9 @@
# The name of the file in the Cloud SDK config that contains default
# credentials.
_CREDENTIALS_FILENAME = 'application_default_credentials.json'
-# The config section and key for the project ID in the cloud SDK config.
-_PROJECT_CONFIG_SECTION = 'core'
-_PROJECT_CONFIG_KEY = 'project'
+# The command to get the Cloud SDK configuration
+_CLOUD_SDK_CONFIG_COMMAND = (
+ 'gcloud', 'config', 'config-helper', '--format', 'json')
def get_config_path():
@@ -80,66 +80,6 @@
return os.path.join(config_path, _CREDENTIALS_FILENAME)
-def _get_active_config(config_path):
- """Gets the active config for the Cloud SDK.
-
- Args:
- config_path (str): The Cloud SDK's config path.
-
- Returns:
- str: The active configuration name.
- """
- active_config_filename = os.path.join(config_path, 'active_config')
-
- if not os.path.isfile(active_config_filename):
- return 'default'
-
- with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:
- active_config_name = file_obj.read().strip()
-
- return active_config_name
-
-
-def _get_config_file(config_path, config_name):
- """Returns the full path to a configuration's config file.
-
- Args:
- config_path (str): The Cloud SDK's config path.
- config_name (str): The configuration name.
-
- Returns:
- str: The config file path.
- """
- return os.path.join(
- config_path, 'configurations', 'config_{}'.format(config_name))
-
-
-def get_project_id():
- """Gets the project ID from the Cloud SDK's configuration.
-
- Returns:
- Optional[str]: The project ID.
- """
- config_path = get_config_path()
- active_config = _get_active_config(config_path)
- config_file = _get_config_file(config_path, active_config)
-
- if not os.path.isfile(config_file):
- return None
-
- config = configparser.RawConfigParser()
-
- try:
- config.read(config_file)
-
- if config.has_section(_PROJECT_CONFIG_SECTION):
- return config.get(
- _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)
-
- except configparser.Error:
- return None
-
-
def load_authorized_user_credentials(info):
"""Loads an authorized user credential.
@@ -166,3 +106,28 @@
token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,
client_id=info['client_id'],
client_secret=info['client_secret'])
+
+
+def get_project_id():
+ """Gets the project ID from the Cloud SDK.
+
+ Returns:
+ Optional[str]: The project ID.
+ """
+
+ try:
+ output = subprocess.check_output(
+ _CLOUD_SDK_CONFIG_COMMAND,
+ stderr=subprocess.STDOUT)
+ except (subprocess.CalledProcessError, OSError, IOError):
+ return None
+
+ try:
+ configuration = json.loads(output.decode('utf-8'))
+ except ValueError:
+ return None
+
+ try:
+ return configuration['configuration']['properties']['core']['project']
+ except KeyError:
+ return None
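The patch above replaces config-file parsing with a subprocess call to `gcloud config config-helper --format json` and then walks the returned JSON. Below is a minimal sketch of that lookup using a hypothetical payload; the helper name and sample values are illustrative and not part of the library.

```python
# Hypothetical sample of the config-helper output; the real code obtains it by
# running `gcloud config config-helper --format json` and decoding it with json.loads.
sample_output = {
    "configuration": {
        "active_configuration": "default",
        "properties": {"core": {"project": "example-project-id"}},
    }
}


def extract_project_id(config_helper_output):
    """Return the core/project property, or None when any key is missing."""
    try:
        return config_helper_output["configuration"]["properties"]["core"]["project"]
    except KeyError:
        return None


print(extract_project_id(sample_output))  # -> example-project-id
```

Returning `None` on a missing key keeps the behaviour in line with the old code path, where an absent or unreadable config also produced no project ID.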
| {"golden_diff": "diff --git a/google/auth/_cloud_sdk.py b/google/auth/_cloud_sdk.py\n--- a/google/auth/_cloud_sdk.py\n+++ b/google/auth/_cloud_sdk.py\n@@ -14,11 +14,11 @@\n \n \"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n \n-import io\n+import json\n import os\n+import subprocess\n \n import six\n-from six.moves import configparser\n \n from google.auth import environment_vars\n import google.oauth2.credentials\n@@ -33,9 +33,9 @@\n # The name of the file in the Cloud SDK config that contains default\n # credentials.\n _CREDENTIALS_FILENAME = 'application_default_credentials.json'\n-# The config section and key for the project ID in the cloud SDK config.\n-_PROJECT_CONFIG_SECTION = 'core'\n-_PROJECT_CONFIG_KEY = 'project'\n+# The command to get the Cloud SDK configuration\n+_CLOUD_SDK_CONFIG_COMMAND = (\n+ 'gcloud', 'config', 'config-helper', '--format', 'json')\n \n \n def get_config_path():\n@@ -80,66 +80,6 @@\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n \n \n-def _get_active_config(config_path):\n- \"\"\"Gets the active config for the Cloud SDK.\n-\n- Args:\n- config_path (str): The Cloud SDK's config path.\n-\n- Returns:\n- str: The active configuration name.\n- \"\"\"\n- active_config_filename = os.path.join(config_path, 'active_config')\n-\n- if not os.path.isfile(active_config_filename):\n- return 'default'\n-\n- with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n- active_config_name = file_obj.read().strip()\n-\n- return active_config_name\n-\n-\n-def _get_config_file(config_path, config_name):\n- \"\"\"Returns the full path to a configuration's config file.\n-\n- Args:\n- config_path (str): The Cloud SDK's config path.\n- config_name (str): The configuration name.\n-\n- Returns:\n- str: The config file path.\n- \"\"\"\n- return os.path.join(\n- config_path, 'configurations', 'config_{}'.format(config_name))\n-\n-\n-def get_project_id():\n- \"\"\"Gets the project ID from the Cloud SDK's configuration.\n-\n- Returns:\n- Optional[str]: The project ID.\n- \"\"\"\n- config_path = get_config_path()\n- active_config = _get_active_config(config_path)\n- config_file = _get_config_file(config_path, active_config)\n-\n- if not os.path.isfile(config_file):\n- return None\n-\n- config = configparser.RawConfigParser()\n-\n- try:\n- config.read(config_file)\n-\n- if config.has_section(_PROJECT_CONFIG_SECTION):\n- return config.get(\n- _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n-\n- except configparser.Error:\n- return None\n-\n-\n def load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n \n@@ -166,3 +106,28 @@\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n+\n+\n+def get_project_id():\n+ \"\"\"Gets the project ID from the Cloud SDK.\n+\n+ Returns:\n+ Optional[str]: The project ID.\n+ \"\"\"\n+\n+ try:\n+ output = subprocess.check_output(\n+ _CLOUD_SDK_CONFIG_COMMAND,\n+ stderr=subprocess.STDOUT)\n+ except (subprocess.CalledProcessError, OSError, IOError):\n+ return None\n+\n+ try:\n+ configuration = json.loads(output.decode('utf-8'))\n+ except ValueError:\n+ return None\n+\n+ try:\n+ return configuration['configuration']['properties']['core']['project']\n+ except KeyError:\n+ return None\n", "issue": "Stop reading Cloud SDK's configuration to obtain the project ID\nInstead, we should subprocess out to `gcloud config config-helper --format json`\r\n\r\nExample output:\r\n\r\n```\r\n{\r\n \"configuration\": {\r\n 
\"active_configuration\": \"default\",\r\n \"properties\": {\r\n \"core\": {\r\n \"account\": \"[ELIDED]\",\r\n \"disable_usage_reporting\": \"False\",\r\n \"project\": \"[ELIDED]\"\r\n }\r\n }\r\n },\r\n \"credential\": {\r\n \"access_token\": \"[ELIDED]\",\r\n \"token_expiry\": \"2017-03-23T23:09:49Z\"\r\n },\r\n \"sentinels\": {\r\n \"config_sentinel\": \"/Users/jonwayne/.config/gcloud/config_sentinel\"\r\n }\r\n}\r\n```\r\n\r\nNote: we should *not* use the auth token provided here at all. We should keep our existing logic of reading `~/.config/gcloud/application_default_credentials.json` because those credentials are populated exclusively by `gcloud auth application-default login` and intended for Application Default Credentials.\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport io\nimport os\n\nimport six\nfrom six.moves import configparser\n\nfrom google.auth import environment_vars\nimport google.oauth2.credentials\n\n# The Google OAuth 2.0 token endpoint. Used for authorized user credentials.\n_GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = 'gcloud'\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = 'application_default_credentials.json'\n# The config section and key for the project ID in the cloud SDK config.\n_PROJECT_CONFIG_SECTION = 'core'\n_PROJECT_CONFIG_KEY = 'project'\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != 'nt':\n return os.path.join(\n os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],\n _CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get('SystemDrive', 'C:')\n return os.path.join(\n drive, '\\\\', _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef _get_active_config(config_path):\n \"\"\"Gets the active config for the Cloud SDK.\n\n Args:\n config_path (str): The Cloud SDK's config 
path.\n\n Returns:\n str: The active configuration name.\n \"\"\"\n active_config_filename = os.path.join(config_path, 'active_config')\n\n if not os.path.isfile(active_config_filename):\n return 'default'\n\n with io.open(active_config_filename, 'r', encoding='utf-8') as file_obj:\n active_config_name = file_obj.read().strip()\n\n return active_config_name\n\n\ndef _get_config_file(config_path, config_name):\n \"\"\"Returns the full path to a configuration's config file.\n\n Args:\n config_path (str): The Cloud SDK's config path.\n config_name (str): The configuration name.\n\n Returns:\n str: The config file path.\n \"\"\"\n return os.path.join(\n config_path, 'configurations', 'config_{}'.format(config_name))\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK's configuration.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n config_path = get_config_path()\n active_config = _get_active_config(config_path)\n config_file = _get_config_file(config_path, active_config)\n\n if not os.path.isfile(config_file):\n return None\n\n config = configparser.RawConfigParser()\n\n try:\n config.read(config_file)\n\n if config.has_section(_PROJECT_CONFIG_SECTION):\n return config.get(\n _PROJECT_CONFIG_SECTION, _PROJECT_CONFIG_KEY)\n\n except configparser.Error:\n return None\n\n\ndef load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n\n Args:\n info (Mapping[str, str]): The loaded file's data.\n\n Returns:\n google.oauth2.credentials.Credentials: The constructed credentials.\n\n Raises:\n ValueError: if the info is in the wrong format or missing data.\n \"\"\"\n keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n missing = keys_needed.difference(six.iterkeys(info))\n\n if missing:\n raise ValueError(\n 'Authorized user info was not in the expected format, missing '\n 'fields {}.'.format(', '.join(missing)))\n\n return google.oauth2.credentials.Credentials(\n None, # No access token, must be refreshed.\n refresh_token=info['refresh_token'],\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n", "path": "google/auth/_cloud_sdk.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Helpers for reading the Google Cloud SDK's configuration.\"\"\"\n\nimport json\nimport os\nimport subprocess\n\nimport six\n\nfrom google.auth import environment_vars\nimport google.oauth2.credentials\n\n# The Google OAuth 2.0 token endpoint. 
Used for authorized user credentials.\n_GOOGLE_OAUTH2_TOKEN_ENDPOINT = 'https://accounts.google.com/o/oauth2/token'\n\n# The ~/.config subdirectory containing gcloud credentials.\n_CONFIG_DIRECTORY = 'gcloud'\n# Windows systems store config at %APPDATA%\\gcloud\n_WINDOWS_CONFIG_ROOT_ENV_VAR = 'APPDATA'\n# The name of the file in the Cloud SDK config that contains default\n# credentials.\n_CREDENTIALS_FILENAME = 'application_default_credentials.json'\n# The command to get the Cloud SDK configuration\n_CLOUD_SDK_CONFIG_COMMAND = (\n 'gcloud', 'config', 'config-helper', '--format', 'json')\n\n\ndef get_config_path():\n \"\"\"Returns the absolute path the the Cloud SDK's configuration directory.\n\n Returns:\n str: The Cloud SDK config path.\n \"\"\"\n # If the path is explicitly set, return that.\n try:\n return os.environ[environment_vars.CLOUD_SDK_CONFIG_DIR]\n except KeyError:\n pass\n\n # Non-windows systems store this at ~/.config/gcloud\n if os.name != 'nt':\n return os.path.join(\n os.path.expanduser('~'), '.config', _CONFIG_DIRECTORY)\n # Windows systems store config at %APPDATA%\\gcloud\n else:\n try:\n return os.path.join(\n os.environ[_WINDOWS_CONFIG_ROOT_ENV_VAR],\n _CONFIG_DIRECTORY)\n except KeyError:\n # This should never happen unless someone is really\n # messing with things, but we'll cover the case anyway.\n drive = os.environ.get('SystemDrive', 'C:')\n return os.path.join(\n drive, '\\\\', _CONFIG_DIRECTORY)\n\n\ndef get_application_default_credentials_path():\n \"\"\"Gets the path to the application default credentials file.\n\n The path may or may not exist.\n\n Returns:\n str: The full path to application default credentials.\n \"\"\"\n config_path = get_config_path()\n return os.path.join(config_path, _CREDENTIALS_FILENAME)\n\n\ndef load_authorized_user_credentials(info):\n \"\"\"Loads an authorized user credential.\n\n Args:\n info (Mapping[str, str]): The loaded file's data.\n\n Returns:\n google.oauth2.credentials.Credentials: The constructed credentials.\n\n Raises:\n ValueError: if the info is in the wrong format or missing data.\n \"\"\"\n keys_needed = set(('refresh_token', 'client_id', 'client_secret'))\n missing = keys_needed.difference(six.iterkeys(info))\n\n if missing:\n raise ValueError(\n 'Authorized user info was not in the expected format, missing '\n 'fields {}.'.format(', '.join(missing)))\n\n return google.oauth2.credentials.Credentials(\n None, # No access token, must be refreshed.\n refresh_token=info['refresh_token'],\n token_uri=_GOOGLE_OAUTH2_TOKEN_ENDPOINT,\n client_id=info['client_id'],\n client_secret=info['client_secret'])\n\n\ndef get_project_id():\n \"\"\"Gets the project ID from the Cloud SDK.\n\n Returns:\n Optional[str]: The project ID.\n \"\"\"\n\n try:\n output = subprocess.check_output(\n _CLOUD_SDK_CONFIG_COMMAND,\n stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError, IOError):\n return None\n\n try:\n configuration = json.loads(output.decode('utf-8'))\n except ValueError:\n return None\n\n try:\n return configuration['configuration']['properties']['core']['project']\n except KeyError:\n return None\n", "path": "google/auth/_cloud_sdk.py"}]} | 2,033 | 837 |
gh_patches_debug_23289 | rasdani/github-patches | git_diff | fossasia__open-event-server-6100 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Speakers are queried by role in the after-event mail cron job, but no such role exists
Currently, speakers are queried under the assumption that there is a role called "speaker", but no such role exists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/helpers/scheduled_jobs.py`
Content:
```
1 import datetime
2
3 import pytz
4 from dateutil.relativedelta import relativedelta
5
6 from app.api.helpers.db import safe_query, save_to_db
7 from app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \
8 send_followup_email_for_monthly_fee_payment
9 from app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \
10 send_notif_after_event
11 from app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name
12 from app.api.helpers.utilities import monthdelta
13 from app.models import db
14 from app.models.event import Event
15 from app.models.event_invoice import EventInvoice
16 from app.models.order import Order
17 from app.models.session import Session
18 from app.models.ticket import Ticket
19 from app.models.ticket_fee import get_fee
20 from app.settings import get_settings
21
22
23 def send_after_event_mail():
24 from app import current_app as app
25 with app.app_context():
26 events = Event.query.all()
27 upcoming_events = get_upcoming_events()
28 upcoming_event_links = "<ul>"
29 for upcoming_event in upcoming_events:
30 frontend_url = get_settings()['frontend_url']
31 upcoming_event_links += "<li><a href='{}/events/{}'>{}</a></li>" \
32 .format(frontend_url, upcoming_event.id, upcoming_event.name)
33 upcoming_event_links += "</ul>"
34 for event in events:
35 organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
36 speakers = get_user_event_roles_by_role_name(event.id, 'speaker')
37 current_time = datetime.datetime.now(pytz.timezone(event.timezone))
38 time_difference = current_time - event.ends_at
39 time_difference_minutes = (time_difference.days * 24 * 60) + \
40 (time_difference.seconds / 60)
41 if current_time > event.ends_at and time_difference_minutes < 1440:
42 for speaker in speakers:
43 send_email_after_event(speaker.user.email, event.name, upcoming_event_links)
44 send_notif_after_event(speaker.user, event.name)
45 for organizer in organizers:
46 send_email_after_event(organizer.user.email, event.name, upcoming_event_links)
47 send_notif_after_event(organizer.user, event.name)
48
49
50 def change_session_state_on_event_completion():
51 from app import current_app as app
52 with app.app_context():
53 sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\
54 .filter(Event.ends_at < datetime.datetime.now())
55 for session in sessions_to_be_changed:
56 session.state = 'rejected'
57 save_to_db(session, 'Changed {} session state to rejected'.format(session.title))
58
59
60 def send_event_fee_notification():
61 from app import current_app as app
62 with app.app_context():
63 events = Event.query.all()
64 for event in events:
65 latest_invoice = EventInvoice.query.filter_by(
66 event_id=event.id).order_by(EventInvoice.created_at.desc()).first()
67
68 if latest_invoice:
69 orders = Order.query \
70 .filter_by(event_id=event.id) \
71 .filter_by(status='completed') \
72 .filter(Order.completed_at > latest_invoice.created_at).all()
73 else:
74 orders = Order.query.filter_by(
75 event_id=event.id).filter_by(status='completed').all()
76
77 fee_total = 0
78 for order in orders:
79 for order_ticket in order.tickets:
80 ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')
81 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:
82 fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)
83 fee_total += fee
84
85 if fee_total > 0:
86 organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()
87 new_invoice = EventInvoice(
88 amount=fee_total, event_id=event.id, user_id=organizer.user.id)
89
90 if event.discount_code_id and event.discount_code:
91 r = relativedelta(datetime.utcnow(), event.created_at)
92 if r <= event.discount_code.valid_till:
93 new_invoice.amount = fee_total - \
94 (fee_total * (event.discount_code.value / 100.0))
95 new_invoice.discount_code_id = event.discount_code_id
96
97 save_to_db(new_invoice)
98 prev_month = monthdelta(new_invoice.created_at, 1).strftime(
99 "%b %Y") # Displayed as Aug 2016
100 app_name = get_settings()['app_name']
101 frontend_url = get_settings()['frontend_url']
102 link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)
103 send_email_for_monthly_fee_payment(new_invoice.user.email,
104 event.name,
105 prev_month,
106 new_invoice.amount,
107 app_name,
108 link)
109 send_notif_monthly_fee_payment(new_invoice.user,
110 event.name,
111 prev_month,
112 new_invoice.amount,
113 app_name,
114 link,
115 new_invoice.event_id)
116
117
118 def send_event_fee_notification_followup():
119 from app import current_app as app
120 with app.app_context():
121 incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()
122 for incomplete_invoice in incomplete_invoices:
123 if incomplete_invoice.amount > 0:
124 prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(
125 "%b %Y") # Displayed as Aug 2016
126 app_name = get_settings()['app_name']
127 frontend_url = get_settings()['frontend_url']
128 link = '{}/invoices/{}'.format(frontend_url,
129 incomplete_invoice.identifier)
130 send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,
131 incomplete_invoice.event.name,
132 prev_month,
133 incomplete_invoice.amount,
134 app_name,
135 link)
136 send_followup_notif_monthly_fee_payment(incomplete_invoice.user,
137 incomplete_invoice.event.name,
138 prev_month,
139 incomplete_invoice.amount,
140 app_name,
141 link,
142 incomplete_invoice.event.id)
143
144
145 def expire_pending_tickets():
146 from app import current_app as app
147 with app.app_context():
148 db.session.query(Order).filter(Order.status == 'pending',
149 (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\
150 update({'status': 'expired'})
151 db.session.commit()
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py
--- a/app/api/helpers/scheduled_jobs.py
+++ b/app/api/helpers/scheduled_jobs.py
@@ -14,6 +14,7 @@
from app.models.event import Event
from app.models.event_invoice import EventInvoice
from app.models.order import Order
+from app.models.speaker import Speaker
from app.models.session import Session
from app.models.ticket import Ticket
from app.models.ticket_fee import get_fee
@@ -33,7 +34,7 @@
upcoming_event_links += "</ul>"
for event in events:
organizers = get_user_event_roles_by_role_name(event.id, 'organizer')
- speakers = get_user_event_roles_by_role_name(event.id, 'speaker')
+ speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
current_time = datetime.datetime.now(pytz.timezone(event.timezone))
time_difference = current_time - event.ends_at
time_difference_minutes = (time_difference.days * 24 * 60) + \
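The fix stops looking up a non-existent "speaker" role and queries the `Speaker` model directly. A minimal sketch of that lookup, assuming the Flask application context the real cron job enters via `app.app_context()` (the helper function name is illustrative):

```python
# Illustrative sketch; mirrors the query introduced by the patch above and
# requires the open-event-server app context plus a configured database session.
from app.api.helpers.mail import send_email_after_event
from app.models.speaker import Speaker


def notify_event_speakers(event, upcoming_event_links):
    """Email every non-deleted speaker registered for the given event."""
    speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()
    for speaker in speakers:
        send_email_after_event(speaker.user.email, event.name, upcoming_event_links)
```

Filtering on `deleted_at=None` keeps soft-deleted speakers out of the mailing, something the old role-based query never handled.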
| {"golden_diff": "diff --git a/app/api/helpers/scheduled_jobs.py b/app/api/helpers/scheduled_jobs.py\n--- a/app/api/helpers/scheduled_jobs.py\n+++ b/app/api/helpers/scheduled_jobs.py\n@@ -14,6 +14,7 @@\n from app.models.event import Event\n from app.models.event_invoice import EventInvoice\n from app.models.order import Order\n+from app.models.speaker import Speaker\n from app.models.session import Session\n from app.models.ticket import Ticket\n from app.models.ticket_fee import get_fee\n@@ -33,7 +34,7 @@\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n- speakers = get_user_event_roles_by_role_name(event.id, 'speaker')\n+ speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n", "issue": "Speakers are queried by role in after event mail cron job but no such role exists\nCurrently, speakers are being queried under the assumption that there is a role called speaker, no such role exists.\n", "before_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\n\nfrom app.api.helpers.db import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import get_fee\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = get_user_event_roles_by_role_name(event.id, 'speaker')\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n 
.filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=organizer.user.id)\n\n if event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n", "path": "app/api/helpers/scheduled_jobs.py"}], "after_files": [{"content": "import datetime\n\nimport pytz\nfrom dateutil.relativedelta import relativedelta\n\nfrom app.api.helpers.db 
import safe_query, save_to_db\nfrom app.api.helpers.mail import send_email_after_event, send_email_for_monthly_fee_payment, \\\n send_followup_email_for_monthly_fee_payment\nfrom app.api.helpers.notification import send_notif_monthly_fee_payment, send_followup_notif_monthly_fee_payment, \\\n send_notif_after_event\nfrom app.api.helpers.query import get_upcoming_events, get_user_event_roles_by_role_name\nfrom app.api.helpers.utilities import monthdelta\nfrom app.models import db\nfrom app.models.event import Event\nfrom app.models.event_invoice import EventInvoice\nfrom app.models.order import Order\nfrom app.models.speaker import Speaker\nfrom app.models.session import Session\nfrom app.models.ticket import Ticket\nfrom app.models.ticket_fee import get_fee\nfrom app.settings import get_settings\n\n\ndef send_after_event_mail():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n upcoming_events = get_upcoming_events()\n upcoming_event_links = \"<ul>\"\n for upcoming_event in upcoming_events:\n frontend_url = get_settings()['frontend_url']\n upcoming_event_links += \"<li><a href='{}/events/{}'>{}</a></li>\" \\\n .format(frontend_url, upcoming_event.id, upcoming_event.name)\n upcoming_event_links += \"</ul>\"\n for event in events:\n organizers = get_user_event_roles_by_role_name(event.id, 'organizer')\n speakers = Speaker.query.filter_by(event_id=event.id, deleted_at=None).all()\n current_time = datetime.datetime.now(pytz.timezone(event.timezone))\n time_difference = current_time - event.ends_at\n time_difference_minutes = (time_difference.days * 24 * 60) + \\\n (time_difference.seconds / 60)\n if current_time > event.ends_at and time_difference_minutes < 1440:\n for speaker in speakers:\n send_email_after_event(speaker.user.email, event.name, upcoming_event_links)\n send_notif_after_event(speaker.user, event.name)\n for organizer in organizers:\n send_email_after_event(organizer.user.email, event.name, upcoming_event_links)\n send_notif_after_event(organizer.user, event.name)\n\n\ndef change_session_state_on_event_completion():\n from app import current_app as app\n with app.app_context():\n sessions_to_be_changed = Session.query.join(Event).filter(Session.state == 'pending')\\\n .filter(Event.ends_at < datetime.datetime.now())\n for session in sessions_to_be_changed:\n session.state = 'rejected'\n save_to_db(session, 'Changed {} session state to rejected'.format(session.title))\n\n\ndef send_event_fee_notification():\n from app import current_app as app\n with app.app_context():\n events = Event.query.all()\n for event in events:\n latest_invoice = EventInvoice.query.filter_by(\n event_id=event.id).order_by(EventInvoice.created_at.desc()).first()\n\n if latest_invoice:\n orders = Order.query \\\n .filter_by(event_id=event.id) \\\n .filter_by(status='completed') \\\n .filter(Order.completed_at > latest_invoice.created_at).all()\n else:\n orders = Order.query.filter_by(\n event_id=event.id).filter_by(status='completed').all()\n\n fee_total = 0\n for order in orders:\n for order_ticket in order.tickets:\n ticket = safe_query(db, Ticket, 'id', order_ticket.ticket_id, 'ticket_id')\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency) / 100.0)\n fee_total += fee\n\n if fee_total > 0:\n organizer = get_user_event_roles_by_role_name(event.id, 'organizer').first()\n new_invoice = EventInvoice(\n amount=fee_total, event_id=event.id, user_id=organizer.user.id)\n\n if 
event.discount_code_id and event.discount_code:\n r = relativedelta(datetime.utcnow(), event.created_at)\n if r <= event.discount_code.valid_till:\n new_invoice.amount = fee_total - \\\n (fee_total * (event.discount_code.value / 100.0))\n new_invoice.discount_code_id = event.discount_code_id\n\n save_to_db(new_invoice)\n prev_month = monthdelta(new_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url, new_invoice.identifier)\n send_email_for_monthly_fee_payment(new_invoice.user.email,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link)\n send_notif_monthly_fee_payment(new_invoice.user,\n event.name,\n prev_month,\n new_invoice.amount,\n app_name,\n link,\n new_invoice.event_id)\n\n\ndef send_event_fee_notification_followup():\n from app import current_app as app\n with app.app_context():\n incomplete_invoices = EventInvoice.query.filter(EventInvoice.status != 'completed').all()\n for incomplete_invoice in incomplete_invoices:\n if incomplete_invoice.amount > 0:\n prev_month = monthdelta(incomplete_invoice.created_at, 1).strftime(\n \"%b %Y\") # Displayed as Aug 2016\n app_name = get_settings()['app_name']\n frontend_url = get_settings()['frontend_url']\n link = '{}/invoices/{}'.format(frontend_url,\n incomplete_invoice.identifier)\n send_followup_email_for_monthly_fee_payment(incomplete_invoice.user.email,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link)\n send_followup_notif_monthly_fee_payment(incomplete_invoice.user,\n incomplete_invoice.event.name,\n prev_month,\n incomplete_invoice.amount,\n app_name,\n link,\n incomplete_invoice.event.id)\n\n\ndef expire_pending_tickets():\n from app import current_app as app\n with app.app_context():\n db.session.query(Order).filter(Order.status == 'pending',\n (Order.created_at + datetime.timedelta(minutes=30)) <= datetime.datetime.now()).\\\n update({'status': 'expired'})\n db.session.commit()\n", "path": "app/api/helpers/scheduled_jobs.py"}]} | 2,007 | 231 |
gh_patches_debug_30257 | rasdani/github-patches | git_diff | SeldonIO__MLServer-317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gRPC: Received message larger than max (11059277 vs. 4194304)
We are using MLServer (gRPC) to serve a custom PyTorch ML model and are sending images of size 640x640x3. Because of their size, we get the error below:
> status = StatusCode.RESOURCE_EXHAUSTED
> details = "Received message larger than max (11059277 vs. 4194304)"
> debug_error_string = "{"created":"@1630074151.799330000","description":"Error received from peer ipv4:0.0.0.0:28081","file":"src/core/lib/surface/call.cc","file_line":1070,"grpc_message":"Received message larger than max (11059277 vs. 4194304)","grpc_status":8}"
Setting the following options on the client side did not work:
```
maxMsgLength = 20 * 1024 * 1024
ch_options = [('grpc.max_message_length', maxMsgLength),
('grpc.max_send_message_length', maxMsgLength),
('grpc.max_receive_message_length', maxMsgLength)]
channel = grpc.insecure_channel(f'{host}:{port}', options=ch_options)
```
However, setting it on the server side worked:
```
def _create_server(self):
self._inference_servicer = InferenceServicer(self._data_plane)
self._model_repository_servicer = ModelRepositoryServicer(
self._model_repository_handlers
)
channel_opt = [('grpc.max_send_message_length', 20 * 1024 * 1024), ('grpc.max_receive_message_length', 20 * 1024 * 1024)]
self._server = aio.server(
ThreadPoolExecutor(max_workers=self._settings.grpc_workers), options = channel_opt
)
add_GRPCInferenceServiceServicer_to_server(
self._inference_servicer, self._server
)
```
But this is not a proper solution, since our MLServer runs in a Docker container and is installed via pip.
This should be part of the server config in MLServer.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/grpc/server.py`
Content:
```
1 from grpc import aio
2 from concurrent.futures import ThreadPoolExecutor
3
4 from ..handlers import DataPlane, ModelRepositoryHandlers
5 from ..settings import Settings
6
7 from .servicers import InferenceServicer, ModelRepositoryServicer
8 from .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server
9 from .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server
10
11
12 class GRPCServer:
13 def __init__(
14 self,
15 settings: Settings,
16 data_plane: DataPlane,
17 model_repository_handlers: ModelRepositoryHandlers,
18 ):
19 self._settings = settings
20 self._data_plane = data_plane
21 self._model_repository_handlers = model_repository_handlers
22
23 def _create_server(self):
24 self._inference_servicer = InferenceServicer(self._data_plane)
25 self._model_repository_servicer = ModelRepositoryServicer(
26 self._model_repository_handlers
27 )
28 self._server = aio.server(
29 ThreadPoolExecutor(max_workers=self._settings.grpc_workers)
30 )
31
32 add_GRPCInferenceServiceServicer_to_server(
33 self._inference_servicer, self._server
34 )
35 add_ModelRepositoryServiceServicer_to_server(
36 self._model_repository_servicer, self._server
37 )
38
39 self._server.add_insecure_port(
40 f"{self._settings.host}:{self._settings.grpc_port}"
41 )
42
43 return self._server
44
45 async def start(self):
46 self._create_server()
47
48 await self._server.start()
49 await self._server.wait_for_termination()
50
51 async def stop(self):
52 # TODO: Read from config
53 await self._server.stop(grace=5)
54
```
Path: `mlserver/settings.py`
Content:
```
1 from typing import List, Optional
2 from pydantic import BaseSettings, PyObject
3
4 from .version import __version__
5 from .types import MetadataTensor
6
7 ENV_PREFIX_SETTINGS = "MLSERVER_"
8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_"
9
10
11 class Settings(BaseSettings):
12 class Config:
13 env_prefix = ENV_PREFIX_SETTINGS
14
15 debug: bool = True
16
17 # Model repository folder
18 model_repository_root: str = "."
19 load_models_at_startup: bool = True
20
21 # Server metadata
22 server_name: str = "mlserver"
23 server_version: str = __version__
24 extensions: List[str] = []
25
26 # Server settings
27 host: str = "0.0.0.0"
28 http_port: int = 8080
29 grpc_port: int = 8081
30 grpc_workers: int = 10
31
32
33 class ModelParameters(BaseSettings):
34 """
35 Parameters that apply only to a particular instance of a model.
36 This can include things like model weights.
37 The main difference with respect to ModelSettings is that parameters can
38 change on each instance (e.g. each version) of the model.
39 """
40
41 class Config:
42 env_prefix = ENV_PREFIX_MODEL_SETTINGS
43
44 uri: Optional[str] = None
45 version: Optional[str] = None
46 format: Optional[str] = None
47 extra: Optional[dict] = {}
48
49
50 class ModelSettings(BaseSettings):
51 class Config:
52 env_prefix = ENV_PREFIX_MODEL_SETTINGS
53
54 name: str = ""
55
56 # Model metadata
57 platform: str = ""
58 versions: Optional[List[str]] = []
59 inputs: Optional[List[MetadataTensor]] = []
60 outputs: Optional[List[MetadataTensor]] = []
61
62 # Parallel settings
63 parallel_workers: Optional[int] = 4
64
65 # Custom model class implementation
66 implementation: PyObject = "mlserver.model.MLModel" # type: ignore
67
68 # Model parameters are meant to be set directly by the MLServer runtime.
69 # However, it's also possible to override them manually.
70 parameters: Optional[ModelParameters] = None
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/grpc/server.py b/mlserver/grpc/server.py
--- a/mlserver/grpc/server.py
+++ b/mlserver/grpc/server.py
@@ -1,5 +1,6 @@
from grpc import aio
from concurrent.futures import ThreadPoolExecutor
+from typing import Any, List, Tuple
from ..handlers import DataPlane, ModelRepositoryHandlers
from ..settings import Settings
@@ -25,8 +26,10 @@
self._model_repository_servicer = ModelRepositoryServicer(
self._model_repository_handlers
)
+
self._server = aio.server(
- ThreadPoolExecutor(max_workers=self._settings.grpc_workers)
+ ThreadPoolExecutor(max_workers=self._settings.grpc_workers),
+ options=self._get_options(),
)
add_GRPCInferenceServiceServicer_to_server(
@@ -42,6 +45,19 @@
return self._server
+ def _get_options(self) -> List[Tuple[str, Any]]:
+ options = []
+
+ max_message_length = self._settings.grpc_max_message_length
+ if max_message_length is not None:
+ options += [
+ ("grpc.max_message_length", max_message_length),
+ ("grpc.max_send_message_length", max_message_length),
+ ("grpc.max_receive_message_length", max_message_length),
+ ]
+
+ return options
+
async def start(self):
self._create_server()
diff --git a/mlserver/settings.py b/mlserver/settings.py
--- a/mlserver/settings.py
+++ b/mlserver/settings.py
@@ -28,6 +28,7 @@
http_port: int = 8080
grpc_port: int = 8081
grpc_workers: int = 10
+ grpc_max_message_length: Optional[int] = None
class ModelParameters(BaseSettings):
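Because `Settings` is a pydantic `BaseSettings` subclass with the `MLSERVER_` environment prefix, the new `grpc_max_message_length` field can be raised without code changes. A hedged sketch against the patched package follows; the 20 MiB value is simply the figure from the issue, and how the variable is injected into a Docker deployment is left open.

```python
# Illustrative only: assumes the patched mlserver package is installed.
# BaseSettings picks up MLSERVER_-prefixed environment variables at instantiation.
import os

os.environ["MLSERVER_GRPC_MAX_MESSAGE_LENGTH"] = str(20 * 1024 * 1024)

from mlserver.settings import Settings

settings = Settings()
assert settings.grpc_max_message_length == 20 * 1024 * 1024
```

When the variable is left unset the field stays `None`, `_get_options()` returns an empty list, and gRPC keeps its default 4 MB limit, so existing deployments are unaffected.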
| {"golden_diff": "diff --git a/mlserver/grpc/server.py b/mlserver/grpc/server.py\n--- a/mlserver/grpc/server.py\n+++ b/mlserver/grpc/server.py\n@@ -1,5 +1,6 @@\n from grpc import aio\n from concurrent.futures import ThreadPoolExecutor\n+from typing import Any, List, Tuple\n \n from ..handlers import DataPlane, ModelRepositoryHandlers\n from ..settings import Settings\n@@ -25,8 +26,10 @@\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n+\n self._server = aio.server(\n- ThreadPoolExecutor(max_workers=self._settings.grpc_workers)\n+ ThreadPoolExecutor(max_workers=self._settings.grpc_workers),\n+ options=self._get_options(),\n )\n \n add_GRPCInferenceServiceServicer_to_server(\n@@ -42,6 +45,19 @@\n \n return self._server\n \n+ def _get_options(self) -> List[Tuple[str, Any]]:\n+ options = []\n+\n+ max_message_length = self._settings.grpc_max_message_length\n+ if max_message_length is not None:\n+ options += [\n+ (\"grpc.max_message_length\", max_message_length),\n+ (\"grpc.max_send_message_length\", max_message_length),\n+ (\"grpc.max_receive_message_length\", max_message_length),\n+ ]\n+\n+ return options\n+\n async def start(self):\n self._create_server()\n \ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -28,6 +28,7 @@\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n+ grpc_max_message_length: Optional[int] = None\n \n \n class ModelParameters(BaseSettings):\n", "issue": "gRPC: Received message larger than max (11059277 vs. 4194304)\nWe are using mlserver (gRPC) for serving custom PyTorch ML Model and are sending images of size 640x640x3 . But because of the size we are getting the below error:\r\n\r\n> status = StatusCode.RESOURCE_EXHAUSTED\r\n> \tdetails = \"Received message larger than max (11059277 vs. 4194304)\"\r\n> \tdebug_error_string = \"{\"created\":\"@1630074151.799330000\",\"description\":\"Error received from peer ipv4:0.0.0.0:28081\",\"file\":\"src/core/lib/surface/call.cc\",\"file_line\":1070,\"grpc_message\":\"Received message larger than max (11059277 vs. 
4194304)\",\"grpc_status\":8}\"\r\n\r\nSetting the following on client side did not work:\r\n```\r\nmaxMsgLength = 20 * 1024 * 1024\r\nch_options = [('grpc.max_message_length', maxMsgLength),\r\n ('grpc.max_send_message_length', maxMsgLength),\r\n ('grpc.max_receive_message_length', maxMsgLength)]\r\nchannel = grpc.insecure_channel(f'{host}:{port}', options=ch_options)\r\n\r\n```\r\nHowever setting it on server side worked:\r\n```\r\ndef _create_server(self):\r\n self._inference_servicer = InferenceServicer(self._data_plane)\r\n self._model_repository_servicer = ModelRepositoryServicer(\r\n self._model_repository_handlers\r\n )\r\n channel_opt = [('grpc.max_send_message_length', 20 * 1024 * 1024), ('grpc.max_receive_message_length', 20 * 1024 * 1024)]\r\n self._server = aio.server(\r\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers), options = channel_opt\r\n )\r\n\r\n add_GRPCInferenceServiceServicer_to_server(\r\n self._inference_servicer, self._server\r\n )\r\n\r\n```\r\nBut this is not a proper solution, since our mlserver runs in a docker and does a pip install.\r\nThis should be part of the server config in MLServer.\n", "before_files": [{"content": "from grpc import aio\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\nfrom ..settings import Settings\n\nfrom .servicers import InferenceServicer, ModelRepositoryServicer\nfrom .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server\nfrom .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server\n\n\nclass GRPCServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n\n def _create_server(self):\n self._inference_servicer = InferenceServicer(self._data_plane)\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n self._server = aio.server(\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers)\n )\n\n add_GRPCInferenceServiceServicer_to_server(\n self._inference_servicer, self._server\n )\n add_ModelRepositoryServiceServicer_to_server(\n self._model_repository_servicer, self._server\n )\n\n self._server.add_insecure_port(\n f\"{self._settings.host}:{self._settings.grpc_port}\"\n )\n\n return self._server\n\n async def start(self):\n self._create_server()\n\n await self._server.start()\n await self._server.wait_for_termination()\n\n async def stop(self):\n # TODO: Read from config\n await self._server.stop(grace=5)\n", "path": "mlserver/grpc/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository folder\n model_repository_root: str = \".\"\n load_models_at_startup: bool = True\n\n # Server metadata\n server_name: str = \"mlserver\"\n server_version: str = __version__\n extensions: List[str] = []\n\n # Server settings\n host: str = \"0.0.0.0\"\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can 
include things like model weights.\n The main difference with respect to ModelSettings is that parameters can\n change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n version: Optional[str] = None\n format: Optional[str] = None\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n name: str = \"\"\n\n # Model metadata\n platform: str = \"\"\n versions: Optional[List[str]] = []\n inputs: Optional[List[MetadataTensor]] = []\n outputs: Optional[List[MetadataTensor]] = []\n\n # Parallel settings\n parallel_workers: Optional[int] = 4\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}], "after_files": [{"content": "from grpc import aio\nfrom concurrent.futures import ThreadPoolExecutor\nfrom typing import Any, List, Tuple\n\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\nfrom ..settings import Settings\n\nfrom .servicers import InferenceServicer, ModelRepositoryServicer\nfrom .dataplane_pb2_grpc import add_GRPCInferenceServiceServicer_to_server\nfrom .model_repository_pb2_grpc import add_ModelRepositoryServiceServicer_to_server\n\n\nclass GRPCServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n\n def _create_server(self):\n self._inference_servicer = InferenceServicer(self._data_plane)\n self._model_repository_servicer = ModelRepositoryServicer(\n self._model_repository_handlers\n )\n\n self._server = aio.server(\n ThreadPoolExecutor(max_workers=self._settings.grpc_workers),\n options=self._get_options(),\n )\n\n add_GRPCInferenceServiceServicer_to_server(\n self._inference_servicer, self._server\n )\n add_ModelRepositoryServiceServicer_to_server(\n self._model_repository_servicer, self._server\n )\n\n self._server.add_insecure_port(\n f\"{self._settings.host}:{self._settings.grpc_port}\"\n )\n\n return self._server\n\n def _get_options(self) -> List[Tuple[str, Any]]:\n options = []\n\n max_message_length = self._settings.grpc_max_message_length\n if max_message_length is not None:\n options += [\n (\"grpc.max_message_length\", max_message_length),\n (\"grpc.max_send_message_length\", max_message_length),\n (\"grpc.max_receive_message_length\", max_message_length),\n ]\n\n return options\n\n async def start(self):\n self._create_server()\n\n await self._server.start()\n await self._server.wait_for_termination()\n\n async def stop(self):\n # TODO: Read from config\n await self._server.stop(grace=5)\n", "path": "mlserver/grpc/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository folder\n model_repository_root: str = \".\"\n load_models_at_startup: bool = True\n\n # Server metadata\n server_name: 
str = \"mlserver\"\n server_version: str = __version__\n extensions: List[str] = []\n\n # Server settings\n host: str = \"0.0.0.0\"\n http_port: int = 8080\n grpc_port: int = 8081\n grpc_workers: int = 10\n grpc_max_message_length: Optional[int] = None\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights.\n The main difference with respect to ModelSettings is that parameters can\n change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n version: Optional[str] = None\n format: Optional[str] = None\n extra: Optional[dict] = {}\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n name: str = \"\"\n\n # Model metadata\n platform: str = \"\"\n versions: Optional[List[str]] = []\n inputs: Optional[List[MetadataTensor]] = []\n outputs: Optional[List[MetadataTensor]] = []\n\n # Parallel settings\n parallel_workers: Optional[int] = 4\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n", "path": "mlserver/settings.py"}]} | 1,834 | 399 |
gh_patches_debug_9670 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1500 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Internal server error @ fresh install
### Description
Get "Internal server error" with no description after a fresh install. Have I missed a step, or is this a bug?
### Steps to reproduce
...
### Webserver logs
```bash
Nothing shows up using 'docker-compose logs -f'
webserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)
webserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6
```
### Paperless-ngx version
1.8.0
### Host OS
CentOS 7.9, fully patched with the latest patches
### Installation method
Docker - official image
### Browser
Firefox
### Configuration changes
none
### Other
...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gunicorn.conf.py`
Content:
```
1 import os
2
3 bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
4 workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
5 worker_class = "paperless.workers.ConfigurableWorker"
6 timeout = 120
7
8
9 def pre_fork(server, worker):
10 pass
11
12
13 def pre_exec(server):
14 server.log.info("Forked child, re-executing.")
15
16
17 def when_ready(server):
18 server.log.info("Server is ready. Spawning workers")
19
20
21 def worker_int(worker):
22 worker.log.info("worker received INT or QUIT signal")
23
24 ## get traceback info
25 import threading, sys, traceback
26
27 id2name = {th.ident: th.name for th in threading.enumerate()}
28 code = []
29 for threadId, stack in sys._current_frames().items():
30 code.append("\n# Thread: %s(%d)" % (id2name.get(threadId, ""), threadId))
31 for filename, lineno, name, line in traceback.extract_stack(stack):
32 code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
33 if line:
34 code.append(" %s" % (line.strip()))
35 worker.log.debug("\n".join(code))
36
37
38 def worker_abort(worker):
39 worker.log.info("worker received SIGABRT signal")
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gunicorn.conf.py b/gunicorn.conf.py
--- a/gunicorn.conf.py
+++ b/gunicorn.conf.py
@@ -1,9 +1,17 @@
import os
+# See https://docs.gunicorn.org/en/stable/settings.html for
+# explanations of settings
+
bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
+
workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
worker_class = "paperless.workers.ConfigurableWorker"
timeout = 120
+preload_app = True
+
+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod
+worker_tmp_dir = "/dev/shm"
def pre_fork(server, worker):
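
The patch boils down to two additional gunicorn settings. As a condensed, standalone sketch of the resulting configuration (hook functions omitted): `worker_tmp_dir = "/dev/shm"` keeps gunicorn's worker-heartbeat files on a RAM-backed filesystem, avoiding the blocking `os.fchmod` calls that surface as `WORKER TIMEOUT` on some hosts, and `preload_app = True` loads the application once in the master process before forking workers.

```python
# Condensed sketch of the patched gunicorn.conf.py (hooks omitted).
import os

bind = f'{os.getenv("PAPERLESS_BIND_ADDR", "[::]")}:{os.getenv("PAPERLESS_PORT", 8000)}'
workers = int(os.getenv("PAPERLESS_WEBSERVER_WORKERS", 1))
worker_class = "paperless.workers.ConfigurableWorker"
timeout = 120

# Load the app once in the master before forking workers.
preload_app = True

# Keep heartbeat tempfiles on tmpfs; see the gunicorn FAQ on blocking
# os.fchmod, which otherwise shows up as "WORKER TIMEOUT".
worker_tmp_dir = "/dev/shm"
```
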
| {"golden_diff": "diff --git a/gunicorn.conf.py b/gunicorn.conf.py\n--- a/gunicorn.conf.py\n+++ b/gunicorn.conf.py\n@@ -1,9 +1,17 @@\n import os\n \n+# See https://docs.gunicorn.org/en/stable/settings.html for\n+# explanations of settings\n+\n bind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n+\n workers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\n worker_class = \"paperless.workers.ConfigurableWorker\"\n timeout = 120\n+preload_app = True\n+\n+# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod\n+worker_tmp_dir = \"/dev/shm\"\n \n \n def pre_fork(server, worker):\n", "issue": "[BUG] Internal server error @ fresh install\n### Description\n\nGet \"Internal server error\" with no description after a fresh install. Have I missed a step, or is this a bug?\n\n### Steps to reproduce\n\n...\n\n### Webserver logs\n\n```bash\nNothing shows up using 'docker-compose logs -f'\r\n\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [CRITICAL] WORKER TIMEOUT (pid:47)\r\nwebserver_1 | [2022-08-30 23:21:18 +0000] [44] [WARNING] Worker with pid 47 was terminated due to signal 6\n```\n\n\n### Paperless-ngx version\n\n1.8.0\n\n### Host OS\n\nCentOS 7.9 -fully patched with latest patches\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nFirefox\n\n### Configuration changes\n\nnone\n\n### Other\n\n...\n", "before_files": [{"content": "import os\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}], "after_files": [{"content": "import os\n\n# See https://docs.gunicorn.org/en/stable/settings.html for\n# explanations of settings\n\nbind = f'{os.getenv(\"PAPERLESS_BIND_ADDR\", \"[::]\")}:{os.getenv(\"PAPERLESS_PORT\", 8000)}'\n\nworkers = int(os.getenv(\"PAPERLESS_WEBSERVER_WORKERS\", 1))\nworker_class = \"paperless.workers.ConfigurableWorker\"\ntimeout = 120\npreload_app = True\n\n# https://docs.gunicorn.org/en/stable/faq.html#blocking-os-fchmod\nworker_tmp_dir = \"/dev/shm\"\n\n\ndef pre_fork(server, worker):\n pass\n\n\ndef pre_exec(server):\n server.log.info(\"Forked child, re-executing.\")\n\n\ndef when_ready(server):\n server.log.info(\"Server is ready. 
Spawning workers\")\n\n\ndef worker_int(worker):\n worker.log.info(\"worker received INT or QUIT signal\")\n\n ## get traceback info\n import threading, sys, traceback\n\n id2name = {th.ident: th.name for th in threading.enumerate()}\n code = []\n for threadId, stack in sys._current_frames().items():\n code.append(\"\\n# Thread: %s(%d)\" % (id2name.get(threadId, \"\"), threadId))\n for filename, lineno, name, line in traceback.extract_stack(stack):\n code.append('File: \"%s\", line %d, in %s' % (filename, lineno, name))\n if line:\n code.append(\" %s\" % (line.strip()))\n worker.log.debug(\"\\n\".join(code))\n\n\ndef worker_abort(worker):\n worker.log.info(\"worker received SIGABRT signal\")\n", "path": "gunicorn.conf.py"}]} | 864 | 178 |
gh_patches_debug_39779 | rasdani/github-patches | git_diff | sunpy__sunpy-4477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Normalise images before handing them to skimage.transform.warp
From @ayshih at https://github.com/sunpy/sunpy/pull/4452#issuecomment-686504683:
> #2041 removed the normalization code and cast everything to float64. However, I believe the removal of the normalization code should not have been done. Scikit-image's documentation is quite clear that it always assumes that floats are in the range of [0, 1]. While warp() does appear to work fine when that assumption is violated, we are currently relying on undefined behavior.
> I suggest that the normalization code be re-inserted.
I believe the work here is to normalise floating point input to `skimage.transform.warp` to the range [0, 1] in `sunpy.image.transform.affine_transform`, and then un-normalise it after the transform.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/image/transform.py`
Content:
```
1 """
2 Functions for geometrical image transformation and warping.
3 """
4 import numbers
5 import warnings
6
7 import numpy as np
8 import scipy.ndimage.interpolation
9
10 from sunpy.util.exceptions import SunpyUserWarning
11
12 __all__ = ['affine_transform']
13
14
15 def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
16 recenter=False, missing=0.0, use_scipy=False):
17 """
18 Rotates, shifts and scales an image.
19
20 Will use `skimage.transform.warp` unless scikit-image can't be imported
21 then it will use`scipy.ndimage.affine_transform`.
22
23 Parameters
24 ----------
25 image : `numpy.ndarray`
26 2D image to be rotated.
27 rmatrix : `numpy.ndarray` that is 2x2
28 Linear transformation rotation matrix.
29 order : `int` 0-5, optional
30 Interpolation order to be used, defaults to 3. When using scikit-image this parameter
31 is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
32 When using scipy it is passed into
33 `scipy.ndimage.affine_transform` where it controls the order of the spline.
34 scale : `float`
35 A scale factor for the image with the default being no scaling.
36 image_center : tuple, optional
37 The point in the image to rotate around (axis of rotation).
38 Defaults to the center of the array.
39 recenter : `bool` or array-like, optional
40 Move the axis of rotation to the center of the array or recenter coords.
41 Defaults to `True` i.e., recenter to the center of the array.
42 missing : `float`, optional
43 The value to replace any missing data after the transformation.
44 use_scipy : `bool`, optional
45 Force use of `scipy.ndimage.affine_transform`.
46 Will set all "NaNs" in image to zero before doing the transform.
47 Defaults to `False`, unless scikit-image can't be imported.
48
49 Returns
50 -------
51 `numpy.ndarray`:
52 New rotated, scaled and translated image.
53
54 Notes
55 -----
56 This algorithm uses an affine transformation as opposed to a polynomial
57 geometrical transformation, which by default is `skimage.transform.warp`.
58 One can specify using `scipy.ndimage.affine_transform` as
59 an alternative affine transformation. The two transformations use different
60 algorithms and thus do not give identical output.
61
62 When using for `skimage.transform.warp` with order >= 4 or using
63 `scipy.ndimage.affine_transform` at all, "NaN" values will
64 replaced with zero prior to rotation. No attempt is made to retain the NaN
65 values.
66
67 Input arrays with integer data are cast to float 64 and can be re-cast using
68 `numpy.ndarray.astype` if desired.
69
70 Although this function is analogous to the IDL's ``rot`` function, it does not
71 use the same algorithm as the IDL ``rot`` function.
72 IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
73 method to calculate the inverse mapping of original to target pixel
74 coordinates. This is a polynomial geometrical transformation.
75 Then optionally it uses a bicubic convolution interpolation
76 algorithm to map the original to target pixel values.
77 """
78 rmatrix = rmatrix / scale
79 array_center = (np.array(image.shape)[::-1] - 1) / 2.0
80
81 # Make sure the image center is an array and is where it's supposed to be
82 if image_center is not None:
83 image_center = np.asanyarray(image_center)
84 else:
85 image_center = array_center
86
87 # Determine center of rotation based on use (or not) of the recenter keyword
88 if recenter:
89 rot_center = array_center
90 else:
91 rot_center = image_center
92
93 displacement = np.dot(rmatrix, rot_center)
94 shift = image_center - displacement
95 if not use_scipy:
96 try:
97 import skimage.transform
98 except ImportError:
99 warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
100 ImportWarning)
101 use_scipy = True
102 if use_scipy:
103 if np.any(np.isnan(image)):
104 warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
105 # Transform the image using the scipy affine transform
106 rotated_image = scipy.ndimage.interpolation.affine_transform(
107 np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
108 mode='constant', cval=missing).T
109 else:
110 # Make the rotation matrix 3x3 to include translation of the image
111 skmatrix = np.zeros((3, 3))
112 skmatrix[:2, :2] = rmatrix
113 skmatrix[2, 2] = 1.0
114 skmatrix[:2, 2] = shift
115 tform = skimage.transform.AffineTransform(skmatrix)
116
117 if issubclass(image.dtype.type, numbers.Integral):
118 warnings.warn("Integer input data has been cast to float64, "
119 "which is required for the skikit-image transform.",
120 SunpyUserWarning)
121 adjusted_image = image.astype(np.float64)
122 else:
123 adjusted_image = image.copy()
124 if np.any(np.isnan(adjusted_image)) and order >= 4:
125 warnings.warn("Setting NaNs to 0 for higher-order scikit-image rotation.",
126 SunpyUserWarning)
127 adjusted_image = np.nan_to_num(adjusted_image)
128
129 rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,
130 mode='constant', cval=missing,
131 preserve_range=True)
132
133 return rotated_image
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py
--- a/sunpy/image/transform.py
+++ b/sunpy/image/transform.py
@@ -59,14 +59,16 @@
an alternative affine transformation. The two transformations use different
algorithms and thus do not give identical output.
- When using for `skimage.transform.warp` with order >= 4 or using
- `scipy.ndimage.affine_transform` at all, "NaN" values will
- replaced with zero prior to rotation. No attempt is made to retain the NaN
- values.
+ When using `skimage.transform.warp` with order >= 4 or using
+ `scipy.ndimage.affine_transform` at all, "NaN" values will be replaced with
+ zero prior to rotation. No attempt is made to retain the "NaN" values.
Input arrays with integer data are cast to float 64 and can be re-cast using
`numpy.ndarray.astype` if desired.
+ In the case of `skimage.transform.warp`, the image is normalized to [0, 1]
+ before passing it to the function. It is later rescaled back to the original range.
+
Although this function is analogous to the IDL's ``rot`` function, it does not
use the same algorithm as the IDL ``rot`` function.
IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
@@ -115,8 +117,7 @@
tform = skimage.transform.AffineTransform(skmatrix)
if issubclass(image.dtype.type, numbers.Integral):
- warnings.warn("Integer input data has been cast to float64, "
- "which is required for the skikit-image transform.",
+ warnings.warn("Integer input data has been cast to float64.",
SunpyUserWarning)
adjusted_image = image.astype(np.float64)
else:
@@ -126,8 +127,28 @@
SunpyUserWarning)
adjusted_image = np.nan_to_num(adjusted_image)
+ # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)
+ is_nan_image = np.all(np.isnan(adjusted_image))
+ if is_nan_image:
+ adjusted_missing = missing
+ else:
+ im_min = np.nanmin(adjusted_image)
+ adjusted_image -= im_min
+ im_max = np.nanmax(adjusted_image)
+ if im_max > 0:
+ adjusted_image /= im_max
+ adjusted_missing = (missing - im_min) / im_max
+ else:
+ # The input array is all one value (aside from NaNs), so no scaling is needed
+ adjusted_missing = missing - im_min
+
rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,
- mode='constant', cval=missing,
- preserve_range=True)
+ mode='constant', cval=adjusted_missing)
+
+ # Convert the image back to its original range if it is valid
+ if not is_nan_image:
+ if im_max > 0:
+ rotated_image *= im_max
+ rotated_image += im_min
return rotated_image
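
The core of the patch is a normalise → warp → un-normalise wrapper, so that `skimage.transform.warp` only ever sees floats in [0, 1]. A minimal standalone sketch of that pattern follows; it assumes float input (as in the patch after the float64 cast) and skips the all-NaN handling the real patch adds.

```python
# Minimal sketch: scale into [0, 1], warp, then restore the original range.
import numpy as np
import skimage.transform


def warp_preserving_range(image, tform, order=3, missing=0.0):
    lo = np.nanmin(image)
    scaled = image - lo          # new float array; the input is left untouched
    hi = np.nanmax(scaled)
    if hi > 0:
        scaled /= hi
        cval = (missing - lo) / hi
    else:
        cval = missing - lo      # constant image: only the offset matters
    warped = skimage.transform.warp(scaled, tform, order=order,
                                    mode="constant", cval=cval)
    # Undo the normalisation so callers get data back in the input's units.
    if hi > 0:
        warped *= hi
    return warped + lo
```
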
| {"golden_diff": "diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py\n--- a/sunpy/image/transform.py\n+++ b/sunpy/image/transform.py\n@@ -59,14 +59,16 @@\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n \n- When using for `skimage.transform.warp` with order >= 4 or using\n- `scipy.ndimage.affine_transform` at all, \"NaN\" values will\n- replaced with zero prior to rotation. No attempt is made to retain the NaN\n- values.\n+ When using `skimage.transform.warp` with order >= 4 or using\n+ `scipy.ndimage.affine_transform` at all, \"NaN\" values will be replaced with\n+ zero prior to rotation. No attempt is made to retain the \"NaN\" values.\n \n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n \n+ In the case of `skimage.transform.warp`, the image is normalized to [0, 1]\n+ before passing it to the function. It is later rescaled back to the original range.\n+\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n@@ -115,8 +117,7 @@\n tform = skimage.transform.AffineTransform(skmatrix)\n \n if issubclass(image.dtype.type, numbers.Integral):\n- warnings.warn(\"Integer input data has been cast to float64, \"\n- \"which is required for the skikit-image transform.\",\n+ warnings.warn(\"Integer input data has been cast to float64.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n@@ -126,8 +127,28 @@\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n \n+ # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)\n+ is_nan_image = np.all(np.isnan(adjusted_image))\n+ if is_nan_image:\n+ adjusted_missing = missing\n+ else:\n+ im_min = np.nanmin(adjusted_image)\n+ adjusted_image -= im_min\n+ im_max = np.nanmax(adjusted_image)\n+ if im_max > 0:\n+ adjusted_image /= im_max\n+ adjusted_missing = (missing - im_min) / im_max\n+ else:\n+ # The input array is all one value (aside from NaNs), so no scaling is needed\n+ adjusted_missing = missing - im_min\n+\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n- mode='constant', cval=missing,\n- preserve_range=True)\n+ mode='constant', cval=adjusted_missing)\n+\n+ # Convert the image back to its original range if it is valid\n+ if not is_nan_image:\n+ if im_max > 0:\n+ rotated_image *= im_max\n+ rotated_image += im_min\n \n return rotated_image\n", "issue": "Normalise images before handing them to skimage.transform.warp\nFrom @ayshih at https://github.com/sunpy/sunpy/pull/4452#issuecomment-686504683:\r\n\r\n> #2041 removed the normalization code and cast everything to float64. However, I believe the removal of the normalization code should not have been done. Scikit-image's documentation is quite clear that it always assumes that floats are in the range of [0, 1]. 
While warp() does appear to work fine when that assumption is violated, we are currently relying on undefined behavior.\r\n\r\n> I suggest that the normalization code be re-inserted.\r\n\r\nI believe the work here is to normalise floating point input to `skimage.transform.warp` to the range [0, 1] in `sunpy.image.transform.affine_transform`, and then un-normalise it after the transform.\n", "before_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using for `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.affine_transform` at all, \"NaN\" values will\n replaced with zero prior to rotation. No attempt is made to retain the NaN\n values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. 
This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n if not use_scipy:\n try:\n import skimage.transform\n except ImportError:\n warnings.warn(\"scikit-image could not be imported. Image rotation will use scipy\",\n ImportWarning)\n use_scipy = True\n if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n if issubclass(image.dtype.type, numbers.Integral):\n warnings.warn(\"Integer input data has been cast to float64, \"\n \"which is required for the skikit-image transform.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\",\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=missing,\n preserve_range=True)\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}], "after_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport numbers\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. 
When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.affine_transform` at all, \"NaN\" values will be replaced with\n zero prior to rotation. No attempt is made to retain the \"NaN\" values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n In the case of `skimage.transform.warp`, the image is normalized to [0, 1]\n before passing it to the function. It is later rescaled back to the original range.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n if not use_scipy:\n try:\n import skimage.transform\n except ImportError:\n warnings.warn(\"scikit-image could not be imported. 
Image rotation will use scipy\",\n ImportWarning)\n use_scipy = True\n if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n if issubclass(image.dtype.type, numbers.Integral):\n warnings.warn(\"Integer input data has been cast to float64.\",\n SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\",\n SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n # Scale image to range [0, 1] if it is valid (not made up entirely of NaNs)\n is_nan_image = np.all(np.isnan(adjusted_image))\n if is_nan_image:\n adjusted_missing = missing\n else:\n im_min = np.nanmin(adjusted_image)\n adjusted_image -= im_min\n im_max = np.nanmax(adjusted_image)\n if im_max > 0:\n adjusted_image /= im_max\n adjusted_missing = (missing - im_min) / im_max\n else:\n # The input array is all one value (aside from NaNs), so no scaling is needed\n adjusted_missing = missing - im_min\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=adjusted_missing)\n\n # Convert the image back to its original range if it is valid\n if not is_nan_image:\n if im_max > 0:\n rotated_image *= im_max\n rotated_image += im_min\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}]} | 2,022 | 749 |
gh_patches_debug_31683 | rasdani/github-patches | git_diff | ivy-llc__ivy-19089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bernoulli
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/backends/paddle/experimental/random.py`
Content:
```
1 # global
2 from typing import Optional, Union, Sequence
3 import paddle
4
5 from ivy import with_unsupported_device_and_dtypes
6 from ivy.functional.backends.paddle import backend_version
7 from ivy.utils.exceptions import IvyNotImplementedException
8
9 # local
10 import ivy
11 from paddle.device import core
12
13 # dirichlet
14
15
16 @with_unsupported_device_and_dtypes(
17 {
18 "2.5.1 and below": {
19 "cpu": (
20 "int8",
21 "int16",
22 "uint8",
23 "float16",
24 "complex64",
25 "complex128",
26 "bool",
27 )
28 }
29 },
30 backend_version,
31 )
32 def dirichlet(
33 alpha: Union[paddle.Tensor, float, Sequence[float]],
34 /,
35 *,
36 size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
37 out: Optional[paddle.Tensor] = None,
38 seed: Optional[int] = None,
39 dtype: Optional[paddle.dtype] = None,
40 ) -> paddle.Tensor:
41 size = size if size is not None else len(alpha)
42 dtype = dtype if dtype is not None else paddle.float64
43 if seed is not None:
44 paddle.seed(seed)
45 res = paddle.to_tensor(
46 paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),
47 dtype=dtype,
48 )
49 return res
50
51
52 def beta(
53 alpha: Union[float, paddle.Tensor],
54 beta: Union[float, paddle.Tensor],
55 /,
56 *,
57 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
58 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
59 device: core.Place = None,
60 seed: Optional[int] = None,
61 out: Optional[paddle.Tensor] = None,
62 ) -> paddle.Tensor:
63 raise IvyNotImplementedException()
64
65
66 def gamma(
67 alpha: Union[float, paddle.Tensor],
68 beta: Union[float, paddle.Tensor],
69 /,
70 *,
71 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
72 dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,
73 device: core.Place = None,
74 seed: Optional[int] = None,
75 out: Optional[paddle.Tensor] = None,
76 ) -> paddle.Tensor:
77 raise IvyNotImplementedException()
78
79
80 def poisson(
81 lam: Union[float, paddle.Tensor],
82 *,
83 shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,
84 device: core.Place,
85 dtype: paddle.dtype,
86 seed: Optional[int] = None,
87 fill_value: Optional[Union[float, int]] = 0,
88 out: Optional[paddle.Tensor] = None,
89 ):
90 raise IvyNotImplementedException()
91
92
93 def bernoulli(
94 probs: Union[float, paddle.Tensor],
95 *,
96 logits: Union[float, paddle.Tensor] = None,
97 shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,
98 device: core.Place,
99 dtype: paddle.dtype,
100 seed: Optional[int] = None,
101 out: Optional[paddle.Tensor] = None,
102 ) -> paddle.Tensor:
103 raise IvyNotImplementedException()
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py
--- a/ivy/functional/backends/paddle/experimental/random.py
+++ b/ivy/functional/backends/paddle/experimental/random.py
@@ -1,7 +1,7 @@
# global
from typing import Optional, Union, Sequence
import paddle
-
+from ivy.functional.backends.paddle.device import to_device
from ivy import with_unsupported_device_and_dtypes
from ivy.functional.backends.paddle import backend_version
from ivy.utils.exceptions import IvyNotImplementedException
@@ -9,6 +9,7 @@
# local
import ivy
from paddle.device import core
+from ivy import with_supported_device_and_dtypes
# dirichlet
@@ -90,6 +91,23 @@
raise IvyNotImplementedException()
+# bernoulli
+@with_supported_device_and_dtypes(
+ {
+ "2.5.0 and above": {
+ "cpu": ("float32", "float64"),
+ "gpu": ("bfloat16", "float16", "float32", "float64"),
+ },
+ "2.4.2 and below": {
+ "cpu": (
+ "float32",
+ "float64",
+ ),
+ "gpu": ("float16", "float32", "float64"),
+ },
+ },
+ backend_version,
+)
def bernoulli(
probs: Union[float, paddle.Tensor],
*,
@@ -100,4 +118,14 @@
seed: Optional[int] = None,
out: Optional[paddle.Tensor] = None,
) -> paddle.Tensor:
- raise IvyNotImplementedException()
+ if seed is not None:
+ paddle.seed(seed)
+ if probs is not None:
+ probs = probs
+ elif logits is not None:
+ probs = ivy.softmax(logits)
+ probs = paddle.cast(probs, dtype)
+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs
+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))
+ sample = paddle.bernoulli(probs)
+ return to_device(sample, device)
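
The heart of the new backend function is `paddle.bernoulli`, which draws elementwise 0/1 samples from a tensor of probabilities. A small standalone sketch follows (requires `paddlepaddle`); the softmax step mirrors the patch's handling of `logits`, where `ivy.softmax` rather than a sigmoid is used to obtain probabilities.

```python
# Standalone sketch of the sampling logic the patch introduces.
import paddle
import paddle.nn.functional as F

paddle.seed(0)  # reproducible draws, as the patch does when `seed` is given

probs = paddle.to_tensor([0.1, 0.5, 0.9], dtype="float32")
print(paddle.bernoulli(probs).numpy())       # e.g. [0., 1., 1.]

# When only logits are supplied, the patch first converts them to probabilities.
logits = paddle.to_tensor([[-2.0, 0.0, 2.0]], dtype="float32")
probs_from_logits = F.softmax(logits)        # stand-in for ivy.softmax
print(paddle.bernoulli(probs_from_logits).numpy())
```
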
| {"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/random.py b/ivy/functional/backends/paddle/experimental/random.py\n--- a/ivy/functional/backends/paddle/experimental/random.py\n+++ b/ivy/functional/backends/paddle/experimental/random.py\n@@ -1,7 +1,7 @@\n # global\n from typing import Optional, Union, Sequence\n import paddle\n-\n+from ivy.functional.backends.paddle.device import to_device\n from ivy import with_unsupported_device_and_dtypes\n from ivy.functional.backends.paddle import backend_version\n from ivy.utils.exceptions import IvyNotImplementedException\n@@ -9,6 +9,7 @@\n # local\n import ivy\n from paddle.device import core\n+from ivy import with_supported_device_and_dtypes\n \n # dirichlet\n \n@@ -90,6 +91,23 @@\n raise IvyNotImplementedException()\n \n \n+# bernoulli\n+@with_supported_device_and_dtypes(\n+ {\n+ \"2.5.0 and above\": {\n+ \"cpu\": (\"float32\", \"float64\"),\n+ \"gpu\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n+ },\n+ \"2.4.2 and below\": {\n+ \"cpu\": (\n+ \"float32\",\n+ \"float64\",\n+ ),\n+ \"gpu\": (\"float16\", \"float32\", \"float64\"),\n+ },\n+ },\n+ backend_version,\n+)\n def bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n@@ -100,4 +118,14 @@\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n ) -> paddle.Tensor:\n- raise IvyNotImplementedException()\n+ if seed is not None:\n+ paddle.seed(seed)\n+ if probs is not None:\n+ probs = probs\n+ elif logits is not None:\n+ probs = ivy.softmax(logits)\n+ probs = paddle.cast(probs, dtype)\n+ probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs\n+ probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))\n+ sample = paddle.bernoulli(probs)\n+ return to_device(sample, device)\n", "issue": "bernoulli\n\n", "before_files": [{"content": "# global\nfrom typing import Optional, Union, Sequence\nimport paddle\n\nfrom ivy import with_unsupported_device_and_dtypes\nfrom ivy.functional.backends.paddle import backend_version\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n# local\nimport ivy\nfrom paddle.device import core\n\n# dirichlet\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef dirichlet(\n alpha: Union[paddle.Tensor, float, Sequence[float]],\n /,\n *,\n size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n out: Optional[paddle.Tensor] = None,\n seed: Optional[int] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n size = size if size is not None else len(alpha)\n dtype = dtype if dtype is not None else paddle.float64\n if seed is not None:\n paddle.seed(seed)\n res = paddle.to_tensor(\n paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),\n dtype=dtype,\n )\n return res\n\n\ndef beta(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef gamma(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n 
seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef poisson(\n lam: Union[float, paddle.Tensor],\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n fill_value: Optional[Union[float, int]] = 0,\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\ndef bernoulli(\n probs: Union[float, paddle.Tensor],\n *,\n logits: Union[float, paddle.Tensor] = None,\n shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/random.py"}], "after_files": [{"content": "# global\nfrom typing import Optional, Union, Sequence\nimport paddle\nfrom ivy.functional.backends.paddle.device import to_device\nfrom ivy import with_unsupported_device_and_dtypes\nfrom ivy.functional.backends.paddle import backend_version\nfrom ivy.utils.exceptions import IvyNotImplementedException\n\n# local\nimport ivy\nfrom paddle.device import core\nfrom ivy import with_supported_device_and_dtypes\n\n# dirichlet\n\n\n@with_unsupported_device_and_dtypes(\n {\n \"2.5.1 and below\": {\n \"cpu\": (\n \"int8\",\n \"int16\",\n \"uint8\",\n \"float16\",\n \"complex64\",\n \"complex128\",\n \"bool\",\n )\n }\n },\n backend_version,\n)\ndef dirichlet(\n alpha: Union[paddle.Tensor, float, Sequence[float]],\n /,\n *,\n size: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n out: Optional[paddle.Tensor] = None,\n seed: Optional[int] = None,\n dtype: Optional[paddle.dtype] = None,\n) -> paddle.Tensor:\n size = size if size is not None else len(alpha)\n dtype = dtype if dtype is not None else paddle.float64\n if seed is not None:\n paddle.seed(seed)\n res = paddle.to_tensor(\n paddle.distribution.Dirichlet(concentration=alpha).sample(shape=size),\n dtype=dtype,\n )\n return res\n\n\ndef beta(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef gamma(\n alpha: Union[float, paddle.Tensor],\n beta: Union[float, paddle.Tensor],\n /,\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n dtype: Optional[Union[paddle.dtype, ivy.Dtype]] = None,\n device: core.Place = None,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n raise IvyNotImplementedException()\n\n\ndef poisson(\n lam: Union[float, paddle.Tensor],\n *,\n shape: Optional[Union[ivy.NativeShape, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n fill_value: Optional[Union[float, int]] = 0,\n out: Optional[paddle.Tensor] = None,\n):\n raise IvyNotImplementedException()\n\n\n# bernoulli\n@with_supported_device_and_dtypes(\n {\n \"2.5.0 and above\": {\n \"cpu\": (\"float32\", \"float64\"),\n \"gpu\": (\"bfloat16\", \"float16\", \"float32\", \"float64\"),\n },\n \"2.4.2 and below\": {\n \"cpu\": (\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\", \"float32\", \"float64\"),\n },\n },\n backend_version,\n)\ndef bernoulli(\n probs: 
Union[float, paddle.Tensor],\n *,\n logits: Union[float, paddle.Tensor] = None,\n shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None,\n device: core.Place,\n dtype: paddle.dtype,\n seed: Optional[int] = None,\n out: Optional[paddle.Tensor] = None,\n) -> paddle.Tensor:\n if seed is not None:\n paddle.seed(seed)\n if probs is not None:\n probs = probs\n elif logits is not None:\n probs = ivy.softmax(logits)\n probs = paddle.cast(probs, dtype)\n probs = paddle.unsqueeze(probs, 0) if len(probs.shape) == 0 else probs\n probs = paddle.maximum(probs, paddle.full_like(probs, 1e-6))\n sample = paddle.bernoulli(probs)\n return to_device(sample, device)\n", "path": "ivy/functional/backends/paddle/experimental/random.py"}]} | 1,163 | 521 |
gh_patches_debug_27388 | rasdani/github-patches | git_diff | python-poetry__poetry-2787 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RecursionError when installing mkdocs-material
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Windows 10 Pro 1909 18363.719
- **Poetry version**: 1.0.5
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68
## Issue
Hi,
when running: `poetry install -E docs_material` I get the following error:
```
[RecursionError]
maximum recursion depth exceeded
```
There are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?
Thank you for poetry and your effort :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/utils/extras.py`
Content:
```
1 from typing import Iterator
2 from typing import List
3 from typing import Mapping
4 from typing import Sequence
5
6 from poetry.core.packages import Package
7 from poetry.utils.helpers import canonicalize_name
8
9
10 def get_extra_package_names(
11 packages, # type: Sequence[Package]
12 extras, # type: Mapping[str, List[str]]
13 extra_names, # type: Sequence[str]
14 ): # type: (...) -> Iterator[str]
15 """
16 Returns all package names required by the given extras.
17
18 :param packages: A collection of packages, such as from Repository.packages
19 :param extras: A mapping of `extras` names to lists of package names, as defined
20 in the `extras` section of `poetry.lock`.
21 :param extra_names: A list of strings specifying names of extra groups to resolve.
22 """
23 if not extra_names:
24 return []
25
26 # lookup for packages by name, faster than looping over packages repeatedly
27 packages_by_name = {package.name: package for package in packages}
28
29 # get and flatten names of packages we've opted into as extras
30 extra_package_names = [
31 canonicalize_name(extra_package_name)
32 for extra_name in extra_names
33 for extra_package_name in extras.get(extra_name, ())
34 ]
35
36 def _extra_packages(package_names):
37 """Recursively find dependencies for packages names"""
38 # for each extra pacakge name
39 for package_name in package_names:
40 # Find the actual Package object. A missing key indicates an implicit
41 # dependency (like setuptools), which should be ignored
42 package = packages_by_name.get(canonicalize_name(package_name))
43 if package:
44 yield package.name
45 # Recurse for dependencies
46 for dependency_package_name in _extra_packages(
47 dependency.name for dependency in package.requires
48 ):
49 yield dependency_package_name
50
51 return _extra_packages(extra_package_names)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py
--- a/poetry/utils/extras.py
+++ b/poetry/utils/extras.py
@@ -33,6 +33,9 @@
for extra_package_name in extras.get(extra_name, ())
]
+ # keep record of packages seen during recursion in order to avoid recursion error
+ seen_package_names = set()
+
def _extra_packages(package_names):
"""Recursively find dependencies for packages names"""
# for each extra pacakge name
@@ -41,11 +44,16 @@
# dependency (like setuptools), which should be ignored
package = packages_by_name.get(canonicalize_name(package_name))
if package:
- yield package.name
+ if package.name not in seen_package_names:
+ seen_package_names.add(package.name)
+ yield package.name
# Recurse for dependencies
for dependency_package_name in _extra_packages(
- dependency.name for dependency in package.requires
+ dependency.name
+ for dependency in package.requires
+ if dependency.name not in seen_package_names
):
+ seen_package_names.add(dependency_package_name)
yield dependency_package_name
return _extra_packages(extra_package_names)
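
The fix is essentially cycle detection: a shared `seen_package_names` set guarantees each package is yielded, and descended into, at most once, so mutually dependent packages can no longer recurse until Python's stack limit is hit. A generic, dependency-free sketch of the same idea:

```python
# Generic sketch of the cycle-safe walk the patch introduces.
def walk_dependencies(roots, requires_by_name):
    """Yield each reachable package name once; requires_by_name maps
    a package name to the names of its dependencies."""
    seen = set()

    def _walk(names):
        for name in names:
            if name in seen or name not in requires_by_name:
                continue  # already handled, or an implicit dependency
            seen.add(name)
            yield name
            yield from _walk(requires_by_name[name])

    return _walk(roots)


# A mutual dependency (a <-> b) now terminates instead of overflowing the stack.
graph = {"a": ["b"], "b": ["a"], "c": []}
print(list(walk_dependencies(["a", "c"], graph)))  # ['a', 'b', 'c']
```
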
| {"golden_diff": "diff --git a/poetry/utils/extras.py b/poetry/utils/extras.py\n--- a/poetry/utils/extras.py\n+++ b/poetry/utils/extras.py\n@@ -33,6 +33,9 @@\n for extra_package_name in extras.get(extra_name, ())\n ]\n \n+ # keep record of packages seen during recursion in order to avoid recursion error\n+ seen_package_names = set()\n+\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n@@ -41,11 +44,16 @@\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n- yield package.name\n+ if package.name not in seen_package_names:\n+ seen_package_names.add(package.name)\n+ yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n- dependency.name for dependency in package.requires\n+ dependency.name\n+ for dependency in package.requires\n+ if dependency.name not in seen_package_names\n ):\n+ seen_package_names.add(dependency_package_name)\n yield dependency_package_name\n \n return _extra_packages(extra_package_names)\n", "issue": "RecursionError when installing mkdocs-material\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Windows 10 Pro 1909 18363.719\r\n- **Poetry version**: 1.0.5\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: https://gist.github.com/ndevln/f27b5b64251241d40e1b960ec96b8b68\r\n\r\n## Issue\r\nHi,\r\n\r\nwhen running: `poetry install -E docs_material` I get the following error:\r\n\r\n```\r\n[RecursionError]\r\nmaximum recursion depth exceeded\r\n```\r\n\r\nThere are quite a few similar issues (like #2329), but it seems that this problem is package specific?!?\r\n\r\nThank you for poetry and your effort :)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Iterator\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Sequence\n\nfrom poetry.core.packages import Package\nfrom poetry.utils.helpers import canonicalize_name\n\n\ndef get_extra_package_names(\n packages, # type: Sequence[Package]\n extras, # type: Mapping[str, List[str]]\n extra_names, # type: Sequence[str]\n): # type: (...) 
-> Iterator[str]\n \"\"\"\n Returns all package names required by the given extras.\n\n :param packages: A collection of packages, such as from Repository.packages\n :param extras: A mapping of `extras` names to lists of package names, as defined\n in the `extras` section of `poetry.lock`.\n :param extra_names: A list of strings specifying names of extra groups to resolve.\n \"\"\"\n if not extra_names:\n return []\n\n # lookup for packages by name, faster than looping over packages repeatedly\n packages_by_name = {package.name: package for package in packages}\n\n # get and flatten names of packages we've opted into as extras\n extra_package_names = [\n canonicalize_name(extra_package_name)\n for extra_name in extra_names\n for extra_package_name in extras.get(extra_name, ())\n ]\n\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n for package_name in package_names:\n # Find the actual Package object. A missing key indicates an implicit\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n dependency.name for dependency in package.requires\n ):\n yield dependency_package_name\n\n return _extra_packages(extra_package_names)\n", "path": "poetry/utils/extras.py"}], "after_files": [{"content": "from typing import Iterator\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Sequence\n\nfrom poetry.core.packages import Package\nfrom poetry.utils.helpers import canonicalize_name\n\n\ndef get_extra_package_names(\n packages, # type: Sequence[Package]\n extras, # type: Mapping[str, List[str]]\n extra_names, # type: Sequence[str]\n): # type: (...) -> Iterator[str]\n \"\"\"\n Returns all package names required by the given extras.\n\n :param packages: A collection of packages, such as from Repository.packages\n :param extras: A mapping of `extras` names to lists of package names, as defined\n in the `extras` section of `poetry.lock`.\n :param extra_names: A list of strings specifying names of extra groups to resolve.\n \"\"\"\n if not extra_names:\n return []\n\n # lookup for packages by name, faster than looping over packages repeatedly\n packages_by_name = {package.name: package for package in packages}\n\n # get and flatten names of packages we've opted into as extras\n extra_package_names = [\n canonicalize_name(extra_package_name)\n for extra_name in extra_names\n for extra_package_name in extras.get(extra_name, ())\n ]\n\n # keep record of packages seen during recursion in order to avoid recursion error\n seen_package_names = set()\n\n def _extra_packages(package_names):\n \"\"\"Recursively find dependencies for packages names\"\"\"\n # for each extra pacakge name\n for package_name in package_names:\n # Find the actual Package object. 
A missing key indicates an implicit\n # dependency (like setuptools), which should be ignored\n package = packages_by_name.get(canonicalize_name(package_name))\n if package:\n if package.name not in seen_package_names:\n seen_package_names.add(package.name)\n yield package.name\n # Recurse for dependencies\n for dependency_package_name in _extra_packages(\n dependency.name\n for dependency in package.requires\n if dependency.name not in seen_package_names\n ):\n seen_package_names.add(dependency_package_name)\n yield dependency_package_name\n\n return _extra_packages(extra_package_names)\n", "path": "poetry/utils/extras.py"}]} | 1,049 | 276 |
gh_patches_debug_230 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-2545 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Releasing 1.0
With #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.
This should consist of:
- [x] assembling changelog #2440
- [x] making sure new features are well documented
- [x] publishing beta release
- [x] test beta (perhaps by adding it to the z2jh chart)
- [ ] release 1.0 final
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/_version.py`
Content:
```
1 """JupyterHub version info"""
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4
5 version_info = (
6 1,
7 0,
8 0,
9 "b2", # release (b1, rc1, or "" for final or dev)
10 # "dev", # dev or nothing
11 )
12
13 # pep 440 version: no dot before beta/rc, but before .dev
14 # 0.1.0rc1
15 # 0.1.0a1
16 # 0.1.0b1.dev
17 # 0.1.0.dev
18
19 __version__ = ".".join(map(str, version_info[:3])) + ".".join(version_info[3:])
20
21
22 def _check_version(hub_version, singleuser_version, log):
23 """Compare Hub and single-user server versions"""
24 if not hub_version:
25 log.warning(
26 "Hub has no version header, which means it is likely < 0.8. Expected %s",
27 __version__,
28 )
29 return
30
31 if not singleuser_version:
32 log.warning(
33 "Single-user server has no version header, which means it is likely < 0.8. Expected %s",
34 __version__,
35 )
36 return
37
38 # compare minor X.Y versions
39 if hub_version != singleuser_version:
40 from distutils.version import LooseVersion as V
41
42 hub_major_minor = V(hub_version).version[:2]
43 singleuser_major_minor = V(singleuser_version).version[:2]
44 extra = ""
45 if singleuser_major_minor == hub_major_minor:
46 # patch-level mismatch or lower, log difference at debug-level
47 # because this should be fine
48 log_method = log.debug
49 else:
50 # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.
51 log_method = log.warning
52 extra = " This could cause failure to authenticate and result in redirect loops!"
53 log_method(
54 "jupyterhub version %s != jupyterhub-singleuser version %s." + extra,
55 hub_version,
56 singleuser_version,
57 )
58 else:
59 log.debug(
60 "jupyterhub and jupyterhub-singleuser both on version %s" % hub_version
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py
--- a/jupyterhub/_version.py
+++ b/jupyterhub/_version.py
@@ -6,7 +6,7 @@
1,
0,
0,
- "b2", # release (b1, rc1, or "" for final or dev)
+ # "b2", # release (b1, rc1, or "" for final or dev)
# "dev", # dev or nothing
)
| {"golden_diff": "diff --git a/jupyterhub/_version.py b/jupyterhub/_version.py\n--- a/jupyterhub/_version.py\n+++ b/jupyterhub/_version.py\n@@ -6,7 +6,7 @@\n 1,\n 0,\n 0,\n- \"b2\", # release (b1, rc1, or \"\" for final or dev)\n+ # \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n )\n", "issue": "Releasing 1.0\nWith #2435 rounding out the final thing I think we need for the next release, I think it's time to put together the 1.0 release.\r\n\r\nThis should consist of:\r\n\r\n- [x] assembling changelog #2440\r\n- [x] making sure new features are well documented\r\n- [x] publishing beta release\r\n- [x] test beta (perhaps by adding it to the z2jh chart)\r\n- [ ] release 1.0 final\n", "before_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 0,\n 0,\n \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}], "after_files": [{"content": "\"\"\"JupyterHub version info\"\"\"\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nversion_info = (\n 1,\n 0,\n 0,\n # \"b2\", # release (b1, rc1, or \"\" for final or dev)\n # \"dev\", # dev or nothing\n)\n\n# pep 440 version: no dot before beta/rc, but before .dev\n# 0.1.0rc1\n# 0.1.0a1\n# 0.1.0b1.dev\n# 0.1.0.dev\n\n__version__ = \".\".join(map(str, version_info[:3])) + \".\".join(version_info[3:])\n\n\ndef _check_version(hub_version, singleuser_version, log):\n \"\"\"Compare Hub and single-user server versions\"\"\"\n if not hub_version:\n log.warning(\n \"Hub has no version header, which means it is likely < 0.8. Expected %s\",\n __version__,\n )\n return\n\n if not singleuser_version:\n log.warning(\n \"Single-user server has no version header, which means it is likely < 0.8. 
Expected %s\",\n __version__,\n )\n return\n\n # compare minor X.Y versions\n if hub_version != singleuser_version:\n from distutils.version import LooseVersion as V\n\n hub_major_minor = V(hub_version).version[:2]\n singleuser_major_minor = V(singleuser_version).version[:2]\n extra = \"\"\n if singleuser_major_minor == hub_major_minor:\n # patch-level mismatch or lower, log difference at debug-level\n # because this should be fine\n log_method = log.debug\n else:\n # log warning-level for more significant mismatch, such as 0.8 vs 0.9, etc.\n log_method = log.warning\n extra = \" This could cause failure to authenticate and result in redirect loops!\"\n log_method(\n \"jupyterhub version %s != jupyterhub-singleuser version %s.\" + extra,\n hub_version,\n singleuser_version,\n )\n else:\n log.debug(\n \"jupyterhub and jupyterhub-singleuser both on version %s\" % hub_version\n )\n", "path": "jupyterhub/_version.py"}]} | 980 | 117 |
gh_patches_debug_5303 | rasdani/github-patches | git_diff | googleapis__python-bigquery-498 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Closing a connection tries to close already closed cursors
When closing a connection, the library calls `.close()` on every cursor created for that connection, including cursors that are already closed. While the code still works, it produces a lot of error-level log messages, polluting the logs.
#### Environment details
- OS type and version: Mac OS 10.15.7
- Python version: Python 3.8.2
- pip version: pip 20.3.3
- `google-cloud-bigquery` version: 2.7.0
#### Steps to reproduce
1. Close a cursor
2. Close the connection
3. Error log shows `Exception closing connection <google.cloud.bigquery.dbapi.connection.Connection object at 0x...>`
#### Code example
```python
from contextlib import closing
# using pybigquery
with closing(engine.raw_connection()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(sql)
```
#### Stack trace
```
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/pool/base.py", line 270, in _close_connection
self._dialect.do_close(connection)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 549, in do_close
dbapi_connection.close()
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py", line 258, in with_closed_check
return method(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/connection.py", line 79, in close
cursor_.close()
File "/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py", line 257, in with_closed_check
raise exc_class(exc_msg)
google.cloud.bigquery.dbapi.exceptions.ProgrammingError: Operating on a closed cursor.
```
#### Suggested fix
```python
# google/cloud/bigquery/dbapi/connection.py
class Connection(object):
...
def close(self):
...
for cursor_ in self._cursors_created:
if not cursor_._closed:
cursor_.close()
```
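
As an aside, an equivalent way to express the same guard is to suppress the error rather than check the private `_closed` flag. This is only an illustrative sketch (the helper name `close_all` is invented here), not what the report proposes or what the library ships:

```python
# Illustration only: suppress the "already closed" error instead of pre-checking.
# ProgrammingError is the exception type shown in the traceback above.
from google.cloud.bigquery.dbapi import exceptions


def close_all(cursors):
    for cursor_ in cursors:
        try:
            cursor_.close()
        except exceptions.ProgrammingError:
            pass  # cursor was already closed; nothing left to release
```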
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/dbapi/connection.py`
Content:
```
1 # Copyright 2017 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Connection for the Google BigQuery DB-API."""
16
17 import weakref
18
19 from google.cloud import bigquery
20 from google.cloud.bigquery.dbapi import cursor
21 from google.cloud.bigquery.dbapi import _helpers
22
23
24 @_helpers.raise_on_closed("Operating on a closed connection.")
25 class Connection(object):
26 """DB-API Connection to Google BigQuery.
27
28 Args:
29 client (Optional[google.cloud.bigquery.Client]):
30 A REST API client used to connect to BigQuery. If not passed, a
31 client is created using default options inferred from the environment.
32 bqstorage_client(\
33 Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \
34 ):
35 A client that uses the faster BigQuery Storage API to fetch rows from
36 BigQuery. If not passed, it is created using the same credentials
37 as ``client`` (provided that BigQuery Storage dependencies are installed).
38
39 If both clients are available, ``bqstorage_client`` is used for
40 fetching query results.
41 """
42
43 def __init__(self, client=None, bqstorage_client=None):
44 if client is None:
45 client = bigquery.Client()
46 self._owns_client = True
47 else:
48 self._owns_client = False
49
50 if bqstorage_client is None:
51 # A warning is already raised by the factory if instantiation fails.
52 bqstorage_client = client._create_bqstorage_client()
53 self._owns_bqstorage_client = bqstorage_client is not None
54 else:
55 self._owns_bqstorage_client = False
56
57 self._client = client
58 self._bqstorage_client = bqstorage_client
59
60 self._closed = False
61 self._cursors_created = weakref.WeakSet()
62
63 def close(self):
64 """Close the connection and any cursors created from it.
65
66 Any BigQuery clients explicitly passed to the constructor are *not*
67 closed, only those created by the connection instance itself.
68 """
69 self._closed = True
70
71 if self._owns_client:
72 self._client.close()
73
74 if self._owns_bqstorage_client:
75 # There is no close() on the BQ Storage client itself.
76 self._bqstorage_client._transport.grpc_channel.close()
77
78 for cursor_ in self._cursors_created:
79 cursor_.close()
80
81 def commit(self):
82 """No-op, but for consistency raise an error if connection is closed."""
83
84 def cursor(self):
85 """Return a new cursor object.
86
87 Returns:
88 google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.
89 """
90 new_cursor = cursor.Cursor(self)
91 self._cursors_created.add(new_cursor)
92 return new_cursor
93
94
95 def connect(client=None, bqstorage_client=None):
96 """Construct a DB-API connection to Google BigQuery.
97
98 Args:
99 client (Optional[google.cloud.bigquery.Client]):
100 A REST API client used to connect to BigQuery. If not passed, a
101 client is created using default options inferred from the environment.
102 bqstorage_client(\
103 Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \
104 ):
105 A client that uses the faster BigQuery Storage API to fetch rows from
106 BigQuery. If not passed, it is created using the same credentials
107 as ``client`` (provided that BigQuery Storage dependencies are installed).
108
109 If both clients are available, ``bqstorage_client`` is used for
110 fetching query results.
111
112 Returns:
113 google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.
114 """
115 return Connection(client, bqstorage_client)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/cloud/bigquery/dbapi/connection.py b/google/cloud/bigquery/dbapi/connection.py
--- a/google/cloud/bigquery/dbapi/connection.py
+++ b/google/cloud/bigquery/dbapi/connection.py
@@ -76,7 +76,8 @@
self._bqstorage_client._transport.grpc_channel.close()
for cursor_ in self._cursors_created:
- cursor_.close()
+ if not cursor_._closed:
+ cursor_.close()
def commit(self):
"""No-op, but for consistency raise an error if connection is closed."""
| {"golden_diff": "diff --git a/google/cloud/bigquery/dbapi/connection.py b/google/cloud/bigquery/dbapi/connection.py\n--- a/google/cloud/bigquery/dbapi/connection.py\n+++ b/google/cloud/bigquery/dbapi/connection.py\n@@ -76,7 +76,8 @@\n self._bqstorage_client._transport.grpc_channel.close()\n \n for cursor_ in self._cursors_created:\n- cursor_.close()\n+ if not cursor_._closed:\n+ cursor_.close()\n \n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n", "issue": "Closing a connection tries to close already closed cursors\nWhen closing a connection the library will call `.close()` on every cursor created for that connection, including closed ones. While the code works, it produces a lot of logs of error level, polluting the logs.\r\n\r\n#### Environment details\r\n\r\n - OS type and version: Mac OS 10.15.7\r\n - Python version: Python 3.8.2\r\n - pip version: pip 20.3.3\r\n - `google-cloud-bigquery` version: 2.7.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Close a cursor\r\n 2. Close the connection\r\n 3. Error log shows `Exception closing connection <google.cloud.bigquery.dbapi.connection.Connection object at 0x...>`\r\n\r\n#### Code example\r\n\r\n```python\r\nfrom contextlib import closing\r\n\r\n# using pybigquery\r\nwith closing(engine.raw_connection()) as conn:\r\n with closing(conn.cursor()) as cursor:\r\n cursor.execute(sql)\r\n```\r\n\r\n#### Stack trace\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/sqlalchemy/pool/base.py\", line 270, in _close_connection\r\n self._dialect.do_close(connection)\r\n File \"/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py\", line 549, in do_close\r\n dbapi_connection.close()\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py\", line 258, in with_closed_check\r\n return method(self, *args, **kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/connection.py\", line 79, in close\r\n cursor_.close()\r\n File \"/usr/local/lib/python3.7/site-packages/google/cloud/bigquery/dbapi/_helpers.py\", line 257, in with_closed_check\r\n raise exc_class(exc_msg)\r\ngoogle.cloud.bigquery.dbapi.exceptions.ProgrammingError: Operating on a closed cursor.\r\n```\r\n\r\n#### Suggested fix\r\n\r\n```python\r\n# google/cloud/bigquery/dbapi/connection.py\r\nclass Connection(object):\r\n ...\r\n def close(self):\r\n ...\r\n for cursor_ in self._cursors_created:\r\n if not cursor_._closed:\r\n cursor_.close()\r\n```\n", "before_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Connection for the Google BigQuery DB-API.\"\"\"\n\nimport weakref\n\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery.dbapi import cursor\nfrom google.cloud.bigquery.dbapi import _helpers\n\n\n@_helpers.raise_on_closed(\"Operating on a closed connection.\")\nclass Connection(object):\n \"\"\"DB-API Connection to Google BigQuery.\n\n Args:\n client 
(Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n \"\"\"\n\n def __init__(self, client=None, bqstorage_client=None):\n if client is None:\n client = bigquery.Client()\n self._owns_client = True\n else:\n self._owns_client = False\n\n if bqstorage_client is None:\n # A warning is already raised by the factory if instantiation fails.\n bqstorage_client = client._create_bqstorage_client()\n self._owns_bqstorage_client = bqstorage_client is not None\n else:\n self._owns_bqstorage_client = False\n\n self._client = client\n self._bqstorage_client = bqstorage_client\n\n self._closed = False\n self._cursors_created = weakref.WeakSet()\n\n def close(self):\n \"\"\"Close the connection and any cursors created from it.\n\n Any BigQuery clients explicitly passed to the constructor are *not*\n closed, only those created by the connection instance itself.\n \"\"\"\n self._closed = True\n\n if self._owns_client:\n self._client.close()\n\n if self._owns_bqstorage_client:\n # There is no close() on the BQ Storage client itself.\n self._bqstorage_client._transport.grpc_channel.close()\n\n for cursor_ in self._cursors_created:\n cursor_.close()\n\n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n\n def cursor(self):\n \"\"\"Return a new cursor object.\n\n Returns:\n google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.\n \"\"\"\n new_cursor = cursor.Cursor(self)\n self._cursors_created.add(new_cursor)\n return new_cursor\n\n\ndef connect(client=None, bqstorage_client=None):\n \"\"\"Construct a DB-API connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. 
If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n\n Returns:\n google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.\n \"\"\"\n return Connection(client, bqstorage_client)\n", "path": "google/cloud/bigquery/dbapi/connection.py"}], "after_files": [{"content": "# Copyright 2017 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Connection for the Google BigQuery DB-API.\"\"\"\n\nimport weakref\n\nfrom google.cloud import bigquery\nfrom google.cloud.bigquery.dbapi import cursor\nfrom google.cloud.bigquery.dbapi import _helpers\n\n\n@_helpers.raise_on_closed(\"Operating on a closed connection.\")\nclass Connection(object):\n \"\"\"DB-API Connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. 
If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n \"\"\"\n\n def __init__(self, client=None, bqstorage_client=None):\n if client is None:\n client = bigquery.Client()\n self._owns_client = True\n else:\n self._owns_client = False\n\n if bqstorage_client is None:\n # A warning is already raised by the factory if instantiation fails.\n bqstorage_client = client._create_bqstorage_client()\n self._owns_bqstorage_client = bqstorage_client is not None\n else:\n self._owns_bqstorage_client = False\n\n self._client = client\n self._bqstorage_client = bqstorage_client\n\n self._closed = False\n self._cursors_created = weakref.WeakSet()\n\n def close(self):\n \"\"\"Close the connection and any cursors created from it.\n\n Any BigQuery clients explicitly passed to the constructor are *not*\n closed, only those created by the connection instance itself.\n \"\"\"\n self._closed = True\n\n if self._owns_client:\n self._client.close()\n\n if self._owns_bqstorage_client:\n # There is no close() on the BQ Storage client itself.\n self._bqstorage_client._transport.grpc_channel.close()\n\n for cursor_ in self._cursors_created:\n if not cursor_._closed:\n cursor_.close()\n\n def commit(self):\n \"\"\"No-op, but for consistency raise an error if connection is closed.\"\"\"\n\n def cursor(self):\n \"\"\"Return a new cursor object.\n\n Returns:\n google.cloud.bigquery.dbapi.Cursor: A DB-API cursor that uses this connection.\n \"\"\"\n new_cursor = cursor.Cursor(self)\n self._cursors_created.add(new_cursor)\n return new_cursor\n\n\ndef connect(client=None, bqstorage_client=None):\n \"\"\"Construct a DB-API connection to Google BigQuery.\n\n Args:\n client (Optional[google.cloud.bigquery.Client]):\n A REST API client used to connect to BigQuery. If not passed, a\n client is created using default options inferred from the environment.\n bqstorage_client(\\\n Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient] \\\n ):\n A client that uses the faster BigQuery Storage API to fetch rows from\n BigQuery. If not passed, it is created using the same credentials\n as ``client`` (provided that BigQuery Storage dependencies are installed).\n\n If both clients are available, ``bqstorage_client`` is used for\n fetching query results.\n\n Returns:\n google.cloud.bigquery.dbapi.Connection: A new DB-API connection to BigQuery.\n \"\"\"\n return Connection(client, bqstorage_client)\n", "path": "google/cloud/bigquery/dbapi/connection.py"}]} | 1,904 | 124 |
gh_patches_debug_24087 | rasdani/github-patches | git_diff | web2py__web2py-1960 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't initiate simplejsonrpc service object
When attempting to instantiate a simplejsonrpc service using Python 3, the following happens:
```
>>> from gluon.contrib.simplejsonrpc import ServerProxy
>>> URL = "http://localhost:8000/app/webservice/call/jsonrpc"
>>> service = ServerProxy(URL, verbose=False)
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "/opt/web2py/gluon/contrib/simplejsonrpc.py", line 92, in __init__
type, uri = urllib.splittype(uri)
AttributeError: module 'urllib' has no attribute 'splittype'
```
As far as I could see, the main problem is, among other small issues, that the function `splittype` has been moved to `urllib.request`, so it is no longer found on the top-level `urllib` module.
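
A minimal compatibility sketch of the import this paragraph points towards (the alias name `urllib_compat` is invented here, and `splittype`/`splithost` are legacy helpers, so availability in very recent Python 3 releases may vary):

```python
import sys

PY2 = sys.version_info[0] == 2

if PY2:
    import urllib as urllib_compat          # splittype/splithost live at the top level on Python 2
else:
    import urllib.request as urllib_compat  # on Python 3 they are re-exported from urllib.request

scheme, rest = urllib_compat.splittype("http://localhost:8000/app/webservice/call/jsonrpc")
host, handler = urllib_compat.splithost(rest)
print(scheme, host, handler)  # http localhost:8000 /app/webservice/call/jsonrpc
```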
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gluon/contrib/simplejsonrpc.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This program is free software; you can redistribute it and/or modify
3 # it under the terms of the GNU Lesser General Public License as published by the
4 # Free Software Foundation; either version 3, or (at your option) any later
5 # version.
6 #
7 # This program is distributed in the hope that it will be useful, but
8 # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
9 # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
10 # for more details.
11
12 "Pythonic simple JSON RPC Client implementation"
13 from __future__ import print_function
14
15 __author__ = "Mariano Reingart ([email protected])"
16 __copyright__ = "Copyright (C) 2011 Mariano Reingart"
17 __license__ = "LGPL 3.0"
18 __version__ = "0.05"
19
20 import sys
21 PY2 = sys.version_info[0] == 2
22
23 import urllib
24 if PY2:
25 from xmlrpclib import Transport, SafeTransport
26 from cStringIO import StringIO
27 else:
28 from xmlrpc.client import Transport, SafeTransport
29 from io import StringIO
30 import random
31 import json
32
33
34 class JSONRPCError(RuntimeError):
35 "Error object for remote procedure call fail"
36 def __init__(self, code, message, data=''):
37 value = "%s: %s\n%s" % (code, message, '\n'.join(data))
38 RuntimeError.__init__(self, value)
39 self.code = code
40 self.message = message
41 self.data = data
42
43
44 class JSONDummyParser:
45 "json wrapper for xmlrpclib parser interfase"
46 def __init__(self):
47 self.buf = StringIO()
48
49 def feed(self, data):
50 self.buf.write(data)
51
52 def close(self):
53 return self.buf.getvalue()
54
55
56 class JSONTransportMixin:
57 "json wrapper for xmlrpclib transport interfase"
58
59 def send_content(self, connection, request_body):
60 connection.putheader("Content-Type", "application/json")
61 connection.putheader("Content-Length", str(len(request_body)))
62 connection.endheaders()
63 if request_body:
64 connection.send(request_body)
65 # todo: add gzip compression
66
67 def getparser(self):
68 # get parser and unmarshaller
69 parser = JSONDummyParser()
70 return parser, parser
71
72
73 class JSONTransport(JSONTransportMixin, Transport):
74 pass
75
76
77 class JSONSafeTransport(JSONTransportMixin, SafeTransport):
78 pass
79
80
81 class ServerProxy(object):
82 "JSON RPC Simple Client Service Proxy"
83
84 def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):
85 self.location = uri # server location (url)
86 self.trace = verbose # show debug messages
87 self.exceptions = True # raise errors? (JSONRPCError)
88 self.timeout = None
89 self.json_request = self.json_response = ''
90 self.version = version # '2.0' for jsonrpc2
91
92 type, uri = urllib.splittype(uri)
93 if type not in ("http", "https"):
94 raise IOError("unsupported JSON-RPC protocol")
95 self.__host, self.__handler = urllib.splithost(uri)
96
97 if transport is None:
98 if type == "https":
99 transport = JSONSafeTransport()
100 else:
101 transport = JSONTransport()
102 self.__transport = transport
103 self.__encoding = encoding
104 self.__verbose = verbose
105
106 def __getattr__(self, attr):
107 "pseudo method that can be called"
108 return lambda *args, **vars: self.call(attr, *args, **vars)
109
110 def call(self, method, *args, **vars):
111 "JSON RPC communication (method invocation)"
112
113 # build data sent to the service
114 request_id = random.randint(0, sys.maxsize)
115 data = {'id': request_id, 'method': method, 'params': args or vars, }
116 if self.version:
117 data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600
118 request = json.dumps(data)
119
120 # make HTTP request (retry if connection is lost)
121 response = self.__transport.request(
122 self.__host,
123 self.__handler,
124 request,
125 verbose=self.__verbose
126 )
127
128 # store plain request and response for further debugging
129 self.json_request = request
130 self.json_response = response
131
132 # parse json data coming from service
133 # {'version': '1.1', 'id': id, 'result': result, 'error': None}
134 response = json.loads(response)
135
136 self.error = response.get('error', {})
137 if self.error and self.exceptions:
138 raise JSONRPCError(self.error.get('code', 0),
139 self.error.get('message', ''),
140 self.error.get('data', None))
141 if response['id'] != request_id:
142 raise JSONRPCError(0, "JSON Request ID != Response ID")
143
144 return response.get('result')
145
146
147 ServiceProxy = ServerProxy
148
149
150 if __name__ == "__main__":
151 # basic tests:
152 location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc"
153 client = ServerProxy(location, verbose='--verbose' in sys.argv,)
154 print(client.add(1, 2))
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py
--- a/gluon/contrib/simplejsonrpc.py
+++ b/gluon/contrib/simplejsonrpc.py
@@ -20,11 +20,12 @@
import sys
PY2 = sys.version_info[0] == 2
-import urllib
if PY2:
+ import urllib
from xmlrpclib import Transport, SafeTransport
from cStringIO import StringIO
else:
+ import urllib.request as urllib
from xmlrpc.client import Transport, SafeTransport
from io import StringIO
import random
@@ -47,7 +48,7 @@
self.buf = StringIO()
def feed(self, data):
- self.buf.write(data)
+ self.buf.write(data.decode('utf-8'))
def close(self):
return self.buf.getvalue()
@@ -61,7 +62,7 @@
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders()
if request_body:
- connection.send(request_body)
+ connection.send(str.encode(request_body))
# todo: add gzip compression
def getparser(self):
| {"golden_diff": "diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py\n--- a/gluon/contrib/simplejsonrpc.py\n+++ b/gluon/contrib/simplejsonrpc.py\n@@ -20,11 +20,12 @@\n import sys\n PY2 = sys.version_info[0] == 2\n \n-import urllib\n if PY2:\n+ import urllib\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\n else:\n+ import urllib.request as urllib\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\n import random\n@@ -47,7 +48,7 @@\n self.buf = StringIO()\n \n def feed(self, data):\n- self.buf.write(data)\n+ self.buf.write(data.decode('utf-8'))\n \n def close(self):\n return self.buf.getvalue()\n@@ -61,7 +62,7 @@\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n- connection.send(request_body)\n+ connection.send(str.encode(request_body))\n # todo: add gzip compression\n \n def getparser(self):\n", "issue": "Can't initiate simplejsonrpc service object\nWhen attempting to instatiate a simplejsonrpc service using Python3, the following happens:\r\n```\r\n>>> from gluon.contrib.simplejsonrpc import ServerProxy\r\n>>> URL = \"http://localhost:8000/app/webservice/call/jsonrpc\"\r\n>>> service = ServerProxy(URL, verbose=False)\r\nTraceback (most recent call last):\r\n File \"<console>\", line 1, in <module>\r\n File \"/opt/web2py/gluon/contrib/simplejsonrpc.py\", line 92, in __init__\r\n type, uri = urllib.splittype(uri)\r\nAttributeError: module 'urllib' has no attribute 'splittype'\r\n```\r\nAs far as I could see, the main problem is, among other little issues, that the function splittype has been moved to urllib.request, therefore it is not found.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation; either version 3, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License\n# for more details.\n\n\"Pythonic simple JSON RPC Client implementation\"\nfrom __future__ import print_function\n\n__author__ = \"Mariano Reingart ([email protected])\"\n__copyright__ = \"Copyright (C) 2011 Mariano Reingart\"\n__license__ = \"LGPL 3.0\"\n__version__ = \"0.05\"\n\nimport sys\nPY2 = sys.version_info[0] == 2\n\nimport urllib\nif PY2:\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\nelse:\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\nimport random\nimport json\n\n\nclass JSONRPCError(RuntimeError):\n \"Error object for remote procedure call fail\"\n def __init__(self, code, message, data=''):\n value = \"%s: %s\\n%s\" % (code, message, '\\n'.join(data))\n RuntimeError.__init__(self, value)\n self.code = code\n self.message = message\n self.data = data\n\n\nclass JSONDummyParser:\n \"json wrapper for xmlrpclib parser interfase\"\n def __init__(self):\n self.buf = StringIO()\n\n def feed(self, data):\n self.buf.write(data)\n\n def close(self):\n return self.buf.getvalue()\n\n\nclass JSONTransportMixin:\n \"json wrapper for xmlrpclib transport interfase\"\n\n def send_content(self, connection, request_body):\n connection.putheader(\"Content-Type\", \"application/json\")\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n connection.send(request_body)\n # todo: add gzip compression\n\n def getparser(self):\n # get parser and unmarshaller\n parser = JSONDummyParser()\n return parser, parser\n\n\nclass JSONTransport(JSONTransportMixin, Transport):\n pass\n\n\nclass JSONSafeTransport(JSONTransportMixin, SafeTransport):\n pass\n\n\nclass ServerProxy(object):\n \"JSON RPC Simple Client Service Proxy\"\n\n def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):\n self.location = uri # server location (url)\n self.trace = verbose # show debug messages\n self.exceptions = True # raise errors? 
(JSONRPCError)\n self.timeout = None\n self.json_request = self.json_response = ''\n self.version = version # '2.0' for jsonrpc2\n\n type, uri = urllib.splittype(uri)\n if type not in (\"http\", \"https\"):\n raise IOError(\"unsupported JSON-RPC protocol\")\n self.__host, self.__handler = urllib.splithost(uri)\n\n if transport is None:\n if type == \"https\":\n transport = JSONSafeTransport()\n else:\n transport = JSONTransport()\n self.__transport = transport\n self.__encoding = encoding\n self.__verbose = verbose\n\n def __getattr__(self, attr):\n \"pseudo method that can be called\"\n return lambda *args, **vars: self.call(attr, *args, **vars)\n\n def call(self, method, *args, **vars):\n \"JSON RPC communication (method invocation)\"\n\n # build data sent to the service\n request_id = random.randint(0, sys.maxsize)\n data = {'id': request_id, 'method': method, 'params': args or vars, }\n if self.version:\n data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600\n request = json.dumps(data)\n\n # make HTTP request (retry if connection is lost)\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n\n # store plain request and response for further debugging\n self.json_request = request\n self.json_response = response\n\n # parse json data coming from service\n # {'version': '1.1', 'id': id, 'result': result, 'error': None}\n response = json.loads(response)\n\n self.error = response.get('error', {})\n if self.error and self.exceptions:\n raise JSONRPCError(self.error.get('code', 0),\n self.error.get('message', ''),\n self.error.get('data', None))\n if response['id'] != request_id:\n raise JSONRPCError(0, \"JSON Request ID != Response ID\")\n\n return response.get('result')\n\n\nServiceProxy = ServerProxy\n\n\nif __name__ == \"__main__\":\n # basic tests:\n location = \"http://www.web2py.com.ar/webservices/sample/call/jsonrpc\"\n client = ServerProxy(location, verbose='--verbose' in sys.argv,)\n print(client.add(1, 2))\n", "path": "gluon/contrib/simplejsonrpc.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by the\n# Free Software Foundation; either version 3, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY\n# or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License\n# for more details.\n\n\"Pythonic simple JSON RPC Client implementation\"\nfrom __future__ import print_function\n\n__author__ = \"Mariano Reingart ([email protected])\"\n__copyright__ = \"Copyright (C) 2011 Mariano Reingart\"\n__license__ = \"LGPL 3.0\"\n__version__ = \"0.05\"\n\nimport sys\nPY2 = sys.version_info[0] == 2\n\nif PY2:\n import urllib\n from xmlrpclib import Transport, SafeTransport\n from cStringIO import StringIO\nelse:\n import urllib.request as urllib\n from xmlrpc.client import Transport, SafeTransport\n from io import StringIO\nimport random\nimport json\n\n\nclass JSONRPCError(RuntimeError):\n \"Error object for remote procedure call fail\"\n def __init__(self, code, message, data=''):\n value = \"%s: %s\\n%s\" % (code, message, '\\n'.join(data))\n RuntimeError.__init__(self, value)\n self.code = code\n self.message = message\n self.data = data\n\n\nclass JSONDummyParser:\n \"json wrapper for xmlrpclib parser interfase\"\n def __init__(self):\n self.buf = StringIO()\n\n def feed(self, data):\n self.buf.write(data.decode('utf-8'))\n\n def close(self):\n return self.buf.getvalue()\n\n\nclass JSONTransportMixin:\n \"json wrapper for xmlrpclib transport interfase\"\n\n def send_content(self, connection, request_body):\n connection.putheader(\"Content-Type\", \"application/json\")\n connection.putheader(\"Content-Length\", str(len(request_body)))\n connection.endheaders()\n if request_body:\n connection.send(str.encode(request_body))\n # todo: add gzip compression\n\n def getparser(self):\n # get parser and unmarshaller\n parser = JSONDummyParser()\n return parser, parser\n\n\nclass JSONTransport(JSONTransportMixin, Transport):\n pass\n\n\nclass JSONSafeTransport(JSONTransportMixin, SafeTransport):\n pass\n\n\nclass ServerProxy(object):\n \"JSON RPC Simple Client Service Proxy\"\n\n def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):\n self.location = uri # server location (url)\n self.trace = verbose # show debug messages\n self.exceptions = True # raise errors? 
(JSONRPCError)\n self.timeout = None\n self.json_request = self.json_response = ''\n self.version = version # '2.0' for jsonrpc2\n\n type, uri = urllib.splittype(uri)\n if type not in (\"http\", \"https\"):\n raise IOError(\"unsupported JSON-RPC protocol\")\n self.__host, self.__handler = urllib.splithost(uri)\n\n if transport is None:\n if type == \"https\":\n transport = JSONSafeTransport()\n else:\n transport = JSONTransport()\n self.__transport = transport\n self.__encoding = encoding\n self.__verbose = verbose\n\n def __getattr__(self, attr):\n \"pseudo method that can be called\"\n return lambda *args, **vars: self.call(attr, *args, **vars)\n\n def call(self, method, *args, **vars):\n \"JSON RPC communication (method invocation)\"\n\n # build data sent to the service\n request_id = random.randint(0, sys.maxsize)\n data = {'id': request_id, 'method': method, 'params': args or vars, }\n if self.version:\n data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600\n request = json.dumps(data)\n\n # make HTTP request (retry if connection is lost)\n response = self.__transport.request(\n self.__host,\n self.__handler,\n request,\n verbose=self.__verbose\n )\n\n # store plain request and response for further debugging\n self.json_request = request\n self.json_response = response\n\n # parse json data coming from service\n # {'version': '1.1', 'id': id, 'result': result, 'error': None}\n response = json.loads(response)\n\n self.error = response.get('error', {})\n if self.error and self.exceptions:\n raise JSONRPCError(self.error.get('code', 0),\n self.error.get('message', ''),\n self.error.get('data', None))\n if response['id'] != request_id:\n raise JSONRPCError(0, \"JSON Request ID != Response ID\")\n\n return response.get('result')\n\n\nServiceProxy = ServerProxy\n\n\nif __name__ == \"__main__\":\n # basic tests:\n location = \"http://www.web2py.com.ar/webservices/sample/call/jsonrpc\"\n client = ServerProxy(location, verbose='--verbose' in sys.argv,)\n print(client.add(1, 2))\n", "path": "gluon/contrib/simplejsonrpc.py"}]} | 1,963 | 262 |
gh_patches_debug_57772 | rasdani/github-patches | git_diff | celery__kombu-400 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Kombu app conflicts with Django app with Django 1.7
``` python
Traceback (most recent call last):
File "manage.py", line 13, in <module>
execute_from_command_line(sys.argv)
File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 385, in execute_from_command_line
utility.execute()
File "/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py", line 354, in execute
django.setup()
File "/.../.env/lib/python2.7/site-packages/django/__init__.py", line 21, in setup
apps.populate(settings.INSTALLED_APPS)
File "/.../.env/lib/python2.7/site-packages/django/apps/registry.py", line 89, in populate
"duplicates: %s" % app_config.label)
django.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django
```
More info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)
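
A sketch of the shape such a fix can take — pinning the dotted app path instead of relying on `__name__`, so the derived label stays unique under Django 1.7's app registry. This only illustrates the idea and is not asserted to be the project's final patch:

```python
from django.apps import AppConfig


class KombuAppConfig(AppConfig):
    # Pinning the dotted path keeps the derived label stable
    # ('kombu_transport_django') so it cannot collide with another app's label.
    name = 'kombu.transport.django'
    label = name.replace('.', '_')
    verbose_name = 'Message queue'
```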
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kombu/transport/django/__init__.py`
Content:
```
1 """Kombu transport using the Django database as a message store."""
2 from __future__ import absolute_import
3
4 from django.conf import settings
5 from django.core import exceptions as errors
6
7 from kombu.five import Empty
8 from kombu.transport import virtual
9 from kombu.utils.encoding import bytes_to_str
10 from kombu.utils.json import loads, dumps
11
12
13 from .models import Queue
14
15 try:
16 from django.apps import AppConfig
17 except ImportError: # pragma: no cover
18 pass
19 else:
20 class KombuAppConfig(AppConfig):
21 name = __name__
22 label = name.replace('.', '_')
23 verbose_name = 'Message queue'
24 default_app_config = 'kombu.transport.django.KombuAppConfig'
25
26 VERSION = (1, 0, 0)
27 __version__ = '.'.join(map(str, VERSION))
28
29 POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',
30 getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))
31
32
33 class Channel(virtual.Channel):
34
35 def _new_queue(self, queue, **kwargs):
36 Queue.objects.get_or_create(name=queue)
37
38 def _put(self, queue, message, **kwargs):
39 Queue.objects.publish(queue, dumps(message))
40
41 def basic_consume(self, queue, *args, **kwargs):
42 qinfo = self.state.bindings[queue]
43 exchange = qinfo[0]
44 if self.typeof(exchange).type == 'fanout':
45 return
46 super(Channel, self).basic_consume(queue, *args, **kwargs)
47
48 def _get(self, queue):
49 m = Queue.objects.fetch(queue)
50 if m:
51 return loads(bytes_to_str(m))
52 raise Empty()
53
54 def _size(self, queue):
55 return Queue.objects.size(queue)
56
57 def _purge(self, queue):
58 return Queue.objects.purge(queue)
59
60 def refresh_connection(self):
61 from django import db
62 db.close_connection()
63
64
65 class Transport(virtual.Transport):
66 Channel = Channel
67
68 default_port = 0
69 polling_interval = POLLING_INTERVAL
70 channel_errors = (
71 virtual.Transport.channel_errors + (
72 errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)
73 )
74 driver_type = 'sql'
75 driver_name = 'django'
76
77 def driver_version(self):
78 import django
79 return '.'.join(map(str, django.VERSION))
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py
--- a/kombu/transport/django/__init__.py
+++ b/kombu/transport/django/__init__.py
@@ -18,7 +18,7 @@
pass
else:
class KombuAppConfig(AppConfig):
- name = __name__
+ name = 'kombu.transport.django'
label = name.replace('.', '_')
verbose_name = 'Message queue'
default_app_config = 'kombu.transport.django.KombuAppConfig'
| {"golden_diff": "diff --git a/kombu/transport/django/__init__.py b/kombu/transport/django/__init__.py\n--- a/kombu/transport/django/__init__.py\n+++ b/kombu/transport/django/__init__.py\n@@ -18,7 +18,7 @@\n pass\n else:\n class KombuAppConfig(AppConfig):\n- name = __name__\n+ name = 'kombu.transport.django'\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n", "issue": "Django Kombu app conflicts with Django app with Django 1.7\n``` python\nTraceback (most recent call last):\n File \"manage.py\", line 13, in <module>\n execute_from_command_line(sys.argv)\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 385, in execute_from_command_line\n utility.execute()\n File \"/.../.env/lib/python2.7/site-packages/django/core/management/__init__.py\", line 354, in execute\n django.setup()\n File \"/.../.env/lib/python2.7/site-packages/django/__init__.py\", line 21, in setup\n apps.populate(settings.INSTALLED_APPS)\n File \"/.../.env/lib/python2.7/site-packages/django/apps/registry.py\", line 89, in populate\n \"duplicates: %s\" % app_config.label)\ndjango.core.exceptions.ImproperlyConfigured: Application labels aren't unique, duplicates: django\n```\n\nMore info [here](https://docs.djangoproject.com/en/1.7/releases/1.7/#app-loading-refactor)\n\n", "before_files": [{"content": "\"\"\"Kombu transport using the Django database as a message store.\"\"\"\nfrom __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core import exceptions as errors\n\nfrom kombu.five import Empty\nfrom kombu.transport import virtual\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\n\nfrom .models import Queue\n\ntry:\n from django.apps import AppConfig\nexcept ImportError: # pragma: no cover\n pass\nelse:\n class KombuAppConfig(AppConfig):\n name = __name__\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n\nVERSION = (1, 0, 0)\n__version__ = '.'.join(map(str, VERSION))\n\nPOLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',\n getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))\n\n\nclass Channel(virtual.Channel):\n\n def _new_queue(self, queue, **kwargs):\n Queue.objects.get_or_create(name=queue)\n\n def _put(self, queue, message, **kwargs):\n Queue.objects.publish(queue, dumps(message))\n\n def basic_consume(self, queue, *args, **kwargs):\n qinfo = self.state.bindings[queue]\n exchange = qinfo[0]\n if self.typeof(exchange).type == 'fanout':\n return\n super(Channel, self).basic_consume(queue, *args, **kwargs)\n\n def _get(self, queue):\n m = Queue.objects.fetch(queue)\n if m:\n return loads(bytes_to_str(m))\n raise Empty()\n\n def _size(self, queue):\n return Queue.objects.size(queue)\n\n def _purge(self, queue):\n return Queue.objects.purge(queue)\n\n def refresh_connection(self):\n from django import db\n db.close_connection()\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n default_port = 0\n polling_interval = POLLING_INTERVAL\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)\n )\n driver_type = 'sql'\n driver_name = 'django'\n\n def driver_version(self):\n import django\n return '.'.join(map(str, django.VERSION))\n", "path": "kombu/transport/django/__init__.py"}], "after_files": [{"content": "\"\"\"Kombu transport using the Django database as a message 
store.\"\"\"\nfrom __future__ import absolute_import\n\nfrom django.conf import settings\nfrom django.core import exceptions as errors\n\nfrom kombu.five import Empty\nfrom kombu.transport import virtual\nfrom kombu.utils.encoding import bytes_to_str\nfrom kombu.utils.json import loads, dumps\n\n\nfrom .models import Queue\n\ntry:\n from django.apps import AppConfig\nexcept ImportError: # pragma: no cover\n pass\nelse:\n class KombuAppConfig(AppConfig):\n name = 'kombu.transport.django'\n label = name.replace('.', '_')\n verbose_name = 'Message queue'\n default_app_config = 'kombu.transport.django.KombuAppConfig'\n\nVERSION = (1, 0, 0)\n__version__ = '.'.join(map(str, VERSION))\n\nPOLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL',\n getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0))\n\n\nclass Channel(virtual.Channel):\n\n def _new_queue(self, queue, **kwargs):\n Queue.objects.get_or_create(name=queue)\n\n def _put(self, queue, message, **kwargs):\n Queue.objects.publish(queue, dumps(message))\n\n def basic_consume(self, queue, *args, **kwargs):\n qinfo = self.state.bindings[queue]\n exchange = qinfo[0]\n if self.typeof(exchange).type == 'fanout':\n return\n super(Channel, self).basic_consume(queue, *args, **kwargs)\n\n def _get(self, queue):\n m = Queue.objects.fetch(queue)\n if m:\n return loads(bytes_to_str(m))\n raise Empty()\n\n def _size(self, queue):\n return Queue.objects.size(queue)\n\n def _purge(self, queue):\n return Queue.objects.purge(queue)\n\n def refresh_connection(self):\n from django import db\n db.close_connection()\n\n\nclass Transport(virtual.Transport):\n Channel = Channel\n\n default_port = 0\n polling_interval = POLLING_INTERVAL\n channel_errors = (\n virtual.Transport.channel_errors + (\n errors.ObjectDoesNotExist, errors.MultipleObjectsReturned)\n )\n driver_type = 'sql'\n driver_name = 'django'\n\n def driver_version(self):\n import django\n return '.'.join(map(str, django.VERSION))\n", "path": "kombu/transport/django/__init__.py"}]} | 1,180 | 136 |
gh_patches_debug_32907 | rasdani/github-patches | git_diff | scikit-hep__pyhf-873 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move metadata from setup.py to setup.cfg
# Description
@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move PyPI metadata from `setup.py` to `setup.cfg`. We've known about this for some time but haven't taken the time to do it yet, and it is probably worth doing.
@henryiii Can you comment on how to deal with `project_urls`?
https://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86
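
For reference, `project_urls` can usually be written declaratively in `setup.cfg` under `[metadata]`, one `Name = URL` pair per indented line. The snippet below is only a sketch of that syntax (exact key support depends on the setuptools version in use), not a complete migration:

```ini
[metadata]
name = pyhf
# ... other metadata keys moved over from setup.py ...
project_urls =
    Documentation = https://scikit-hep.org/pyhf/
    Source = https://github.com/scikit-hep/pyhf
    Tracker = https://github.com/scikit-hep/pyhf/issues
```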
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from pathlib import Path
3
4 this_directory = Path(__file__).parent.resolve()
5 with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
6 long_description = readme_rst.read()
7
8 extras_require = {
9 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
10 'torch': ['torch~=1.2'],
11 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
12 'xmlio': ['uproot'],
13 'minuit': ['iminuit'],
14 }
15 extras_require['backends'] = sorted(
16 set(
17 extras_require['tensorflow']
18 + extras_require['torch']
19 + extras_require['jax']
20 + extras_require['minuit']
21 )
22 )
23 extras_require['contrib'] = sorted(set(['matplotlib']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + [
31 'pyflakes',
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'check-manifest',
43 'jupyter',
44 'uproot~=3.3',
45 'graphviz',
46 'jsonpatch',
47 'black',
48 ]
49 )
50 )
51 extras_require['docs'] = sorted(
52 set(
53 [
54 'sphinx',
55 'sphinxcontrib-bibtex',
56 'sphinx-click',
57 'sphinx_rtd_theme',
58 'nbsphinx',
59 'ipywidgets',
60 'sphinx-issues',
61 'sphinx-copybutton>0.2.9',
62 ]
63 )
64 )
65 extras_require['develop'] = sorted(
66 set(
67 extras_require['docs']
68 + extras_require['test']
69 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
70 )
71 )
72 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
73
74
75 setup(
76 name='pyhf',
77 version='0.4.1',
78 description='(partial) pure python histfactory implementation',
79 long_description=long_description,
80 long_description_content_type='text/x-rst',
81 url='https://github.com/scikit-hep/pyhf',
82 project_urls={
83 "Documentation": "https://scikit-hep.org/pyhf/",
84 "Source": "https://github.com/scikit-hep/pyhf",
85 "Tracker": "https://github.com/scikit-hep/pyhf/issues",
86 },
87 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
88 author_email='[email protected], [email protected], [email protected]',
89 license='Apache',
90 keywords='physics fitting numpy scipy tensorflow pytorch jax',
91 classifiers=[
92 "Development Status :: 4 - Beta",
93 "License :: OSI Approved :: Apache Software License",
94 "Intended Audience :: Science/Research",
95 "Topic :: Scientific/Engineering",
96 "Topic :: Scientific/Engineering :: Physics",
97 "Programming Language :: Python :: 3",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 "Programming Language :: Python :: 3.8",
101 ],
102 package_dir={'': 'src'},
103 packages=find_packages(where='src'),
104 include_package_data=True,
105 python_requires=">=3.6",
106 install_requires=[
107 'scipy', # requires numpy, which is required by pyhf and tensorflow
108 'click>=6.0', # for console scripts,
109 'tqdm', # for readxml
110 'jsonschema>=3.2.0', # for utils
111 'jsonpatch',
112 'pyyaml', # for parsing CLI equal-delimited options
113 ],
114 extras_require=extras_require,
115 entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
116 dependency_links=[],
117 use_scm_version=lambda: {'local_scheme': lambda version: ''},
118 )
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,9 +1,4 @@
-from setuptools import setup, find_packages
-from pathlib import Path
-
-this_directory = Path(__file__).parent.resolve()
-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:
- long_description = readme_rst.read()
+from setuptools import setup
extras_require = {
'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
@@ -73,46 +68,6 @@
setup(
- name='pyhf',
- version='0.4.1',
- description='(partial) pure python histfactory implementation',
- long_description=long_description,
- long_description_content_type='text/x-rst',
- url='https://github.com/scikit-hep/pyhf',
- project_urls={
- "Documentation": "https://scikit-hep.org/pyhf/",
- "Source": "https://github.com/scikit-hep/pyhf",
- "Tracker": "https://github.com/scikit-hep/pyhf/issues",
- },
- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
- author_email='[email protected], [email protected], [email protected]',
- license='Apache',
- keywords='physics fitting numpy scipy tensorflow pytorch jax',
- classifiers=[
- "Development Status :: 4 - Beta",
- "License :: OSI Approved :: Apache Software License",
- "Intended Audience :: Science/Research",
- "Topic :: Scientific/Engineering",
- "Topic :: Scientific/Engineering :: Physics",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.6",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- ],
- package_dir={'': 'src'},
- packages=find_packages(where='src'),
- include_package_data=True,
- python_requires=">=3.6",
- install_requires=[
- 'scipy', # requires numpy, which is required by pyhf and tensorflow
- 'click>=6.0', # for console scripts,
- 'tqdm', # for readxml
- 'jsonschema>=3.2.0', # for utils
- 'jsonpatch',
- 'pyyaml', # for parsing CLI equal-delimited options
- ],
extras_require=extras_require,
- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},
- dependency_links=[],
use_scm_version=lambda: {'local_scheme': lambda version: ''},
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,9 +1,4 @@\n-from setuptools import setup, find_packages\n-from pathlib import Path\n-\n-this_directory = Path(__file__).parent.resolve()\n-with open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n- long_description = readme_rst.read()\n+from setuptools import setup\n \n extras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n@@ -73,46 +68,6 @@\n \n \n setup(\n- name='pyhf',\n- version='0.4.1',\n- description='(partial) pure python histfactory implementation',\n- long_description=long_description,\n- long_description_content_type='text/x-rst',\n- url='https://github.com/scikit-hep/pyhf',\n- project_urls={\n- \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n- \"Source\": \"https://github.com/scikit-hep/pyhf\",\n- \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n- },\n- author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n- author_email='[email protected], [email protected], [email protected]',\n- license='Apache',\n- keywords='physics fitting numpy scipy tensorflow pytorch jax',\n- classifiers=[\n- \"Development Status :: 4 - Beta\",\n- \"License :: OSI Approved :: Apache Software License\",\n- \"Intended Audience :: Science/Research\",\n- \"Topic :: Scientific/Engineering\",\n- \"Topic :: Scientific/Engineering :: Physics\",\n- \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.6\",\n- \"Programming Language :: Python :: 3.7\",\n- \"Programming Language :: Python :: 3.8\",\n- ],\n- package_dir={'': 'src'},\n- packages=find_packages(where='src'),\n- include_package_data=True,\n- python_requires=\">=3.6\",\n- install_requires=[\n- 'scipy', # requires numpy, which is required by pyhf and tensorflow\n- 'click>=6.0', # for console scripts,\n- 'tqdm', # for readxml\n- 'jsonschema>=3.2.0', # for utils\n- 'jsonpatch',\n- 'pyyaml', # for parsing CLI equal-delimited options\n- ],\n extras_require=extras_require,\n- entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n- dependency_links=[],\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n )\n", "issue": "Move metadata from setup.py to setup.cfg\n# Description\r\n\r\n@henryiii has documented on the [Scikit-HEP packaging information page](https://scikit-hep.org/developer/packaging#setup-configuration-medium-priority) how one can move from having PyPI metadata be stored in `setup.py` to `setup.cfg`. 
We've known about this for sometime but haven't taken the time to do it yet, but it is probably worth doing.\r\n\r\n@henryiii Can you comment on how to deal with `project_urls`?\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/3e1f157119dbcb4d8db8ffd8c98e16a2d12d0239/setup.py#L82-L86\r\n\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom pathlib import Path\n\nthis_directory = Path(__file__).parent.resolve()\nwith open(Path(this_directory).joinpath('README.rst'), encoding='utf-8') as readme_rst:\n long_description = readme_rst.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n name='pyhf',\n version='0.4.1',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/x-rst',\n url='https://github.com/scikit-hep/pyhf',\n project_urls={\n \"Documentation\": \"https://scikit-hep.org/pyhf/\",\n \"Source\": \"https://github.com/scikit-hep/pyhf\",\n \"Tracker\": \"https://github.com/scikit-hep/pyhf/issues\",\n },\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch jax',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Physics\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=3.6\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'jsonschema>=3.2.0', # for utils\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.cli:cli']},\n dependency_links=[],\n 
use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,640 | 617 |
gh_patches_debug_27479 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1308 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aioredis instrumentation: AttributeError: 'str' object has no attribute 'decode'
**Describe the bug**
In some cases `aioredis` passes `str` commands to its `aioredis.Redis.execute()` method rather than `bytes` commands (for example when executing `aioredis.Redis.ping()`), which the `elastic-apm` instrumentation does not handle.
**Current workaround**
Disable `aioredis` instrumentation (set the environment variable `SKIP_INSTRUMENT_AIOREDIS="true"`) before `elasticapm.instrument()` is called.
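A minimal sketch of that workaround (assuming the flag only needs to be present in the process environment before `instrument()` runs):

```python3
import os

os.environ["SKIP_INSTRUMENT_AIOREDIS"] = "true"  # must be set before instrument()

import elasticapm

elasticapm.instrument()  # aioredis instrumentation is now skipped
```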
**To Reproduce**
```python3
import asyncio
import aioredis
import elasticapm
REDIS_URL = "redis://localhost:6379"
async def main():
    elasticapm.instrument()

    client = await aioredis.create_redis_pool(REDIS_URL)
    elastic_apm_client = elasticapm.Client()

    elastic_apm_client.begin_transaction("redis-ping")
    try:
        assert await client.ping() == b"PONG"
    finally:
        client.close()
        await client.wait_closed()
    elastic_apm_client.end_transaction("redis-ping")


if __name__ == "__main__":
    asyncio.run(main())
```
**Expected result**
No exception is raised.
**Actual result**
```python3
Traceback (most recent call last):
File "<...>/test.py", line 24, in <module>
asyncio.run(main())
File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "<...>/test.py", line 17, in main
assert await client.ping() == b"PONG"
File "<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py", line 114, in ping
return self.execute('PING', *args, encoding=encoding)
File "<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py", line 51, in execute
return self._pool_or_conn.execute(command, *args, **kwargs)
File "<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/base.py", line 210, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/asyncio/aioredis.py", line 46, in call
wrapped_name = args[0].decode()
AttributeError: 'str' object has no attribute 'decode'
```
**Environment**
- OS: Ubuntu 18.04.5 LTS
- Python version: 3.8.9
- Framework and version: `aioredis==1.3.1`
- APM Server version: --
- Agent version: `elasticapm==6.3.3`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/instrumentation/packages/asyncio/aioredis.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from __future__ import absolute_import
32
33 from elasticapm.contrib.asyncio.traces import async_capture_span
34 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
35 from elasticapm.traces import execution_context
36
37
38 class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):
39 name = "aioredis"
40
41 instrument_list = [("aioredis.pool", "ConnectionsPool.execute"),
42 ("aioredis.pool", "ConnectionsPool.execute_pubsub")]
43
44 def call(self, module, method, wrapped, instance, args, kwargs):
45 if len(args) > 0:
46 wrapped_name = args[0].decode()
47 else:
48 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
49
50 with async_capture_span(
51 wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True
52 ) as span:
53 span.context["destination"] = _get_destination_info(instance)
54
55 return wrapped(*args, **kwargs)
56
57
58 class RedisPipelineInstrumentation(AbstractInstrumentedModule):
59 name = "aioredis"
60
61 instrument_list = [("aioredis.commands.transaction", "Pipeline.execute")]
62
63 def call(self, module, method, wrapped, instance, args, kwargs):
64 wrapped_name = self.get_wrapped_name(wrapped, instance, method)
65
66 with async_capture_span(
67 wrapped_name, span_type="db", span_subtype="redis", span_action="query", leaf=True
68 ) as span:
69 span.context["destination"] = _get_destination_info(instance)
70
71 return wrapped(*args, **kwargs)
72
73
74 class RedisConnectionInstrumentation(AbstractInstrumentedModule):
75 name = "aioredis"
76
77 instrument_list = (("aioredis.connection", "RedisConnection.execute"),
78 ("aioredis.pool", "ConnectionsPool.execute_pubsub"))
79
80 def call(self, module, method, wrapped, instance, args, kwargs):
81 span = execution_context.get_span()
82 if span and span.subtype == "aioredis":
83 span.context["destination"] = _get_destination_info(instance)
84 return wrapped(*args, **kwargs)
85
86
87 def _get_destination_info(connection):
88 destination_info = {"service": {"name": "aioredis", "resource": "redis", "type": "db"}}
89
90 if hasattr(connection, "_pool_or_conn"):
91 destination_info["port"] = connection._pool_or_conn.address[1]
92 destination_info["address"] = connection._pool_or_conn.address[0]
93 else:
94 destination_info["port"] = connection.address[1]
95 destination_info["address"] = connection.address[0]
96
97 return destination_info
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/instrumentation/packages/asyncio/aioredis.py b/elasticapm/instrumentation/packages/asyncio/aioredis.py
--- a/elasticapm/instrumentation/packages/asyncio/aioredis.py
+++ b/elasticapm/instrumentation/packages/asyncio/aioredis.py
@@ -38,12 +38,16 @@
class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):
name = "aioredis"
- instrument_list = [("aioredis.pool", "ConnectionsPool.execute"),
- ("aioredis.pool", "ConnectionsPool.execute_pubsub")]
+ instrument_list = [
+ ("aioredis.pool", "ConnectionsPool.execute"),
+ ("aioredis.pool", "ConnectionsPool.execute_pubsub"),
+ ]
def call(self, module, method, wrapped, instance, args, kwargs):
if len(args) > 0:
- wrapped_name = args[0].decode()
+ wrapped_name = args[0]
+ if isinstance(wrapped_name, bytes):
+ wrapped_name = wrapped_name.decode()
else:
wrapped_name = self.get_wrapped_name(wrapped, instance, method)
@@ -74,8 +78,10 @@
class RedisConnectionInstrumentation(AbstractInstrumentedModule):
name = "aioredis"
- instrument_list = (("aioredis.connection", "RedisConnection.execute"),
- ("aioredis.pool", "ConnectionsPool.execute_pubsub"))
+ instrument_list = (
+ ("aioredis.connection", "RedisConnection.execute"),
+ ("aioredis.pool", "ConnectionsPool.execute_pubsub"),
+ )
def call(self, module, method, wrapped, instance, args, kwargs):
span = execution_context.get_span()
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/aioredis.py b/elasticapm/instrumentation/packages/asyncio/aioredis.py\n--- a/elasticapm/instrumentation/packages/asyncio/aioredis.py\n+++ b/elasticapm/instrumentation/packages/asyncio/aioredis.py\n@@ -38,12 +38,16 @@\n class RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n \n- instrument_list = [(\"aioredis.pool\", \"ConnectionsPool.execute\"),\n- (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\")]\n+ instrument_list = [\n+ (\"aioredis.pool\", \"ConnectionsPool.execute\"),\n+ (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n+ ]\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n- wrapped_name = args[0].decode()\n+ wrapped_name = args[0]\n+ if isinstance(wrapped_name, bytes):\n+ wrapped_name = wrapped_name.decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n \n@@ -74,8 +78,10 @@\n class RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n \n- instrument_list = ((\"aioredis.connection\", \"RedisConnection.execute\"),\n- (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"))\n+ instrument_list = (\n+ (\"aioredis.connection\", \"RedisConnection.execute\"),\n+ (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n+ )\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n", "issue": "aioredis instrumentation: AttributeError: 'str' object has no attribute 'decode'\n**Describe the bug**\r\nIn some cases `aioredis` passes `str` commands to its `aioredis.Redis.execute()` method rather than `bytes` commands, like e.g. when executing `aioredis.Redis.ping()`, which is unacceptable by `elastic-apm`. 
\r\n\r\n**Current workaround**\r\nDisable `aioredis` instrumenting (set environment variable `SKIP_INSTRUMENT_AIOREDIS=\"true\"`) prior `elasticapm.instrument()` is called.\r\n\r\n**To Reproduce**\r\n```python3\r\nimport asyncio\r\n\r\nimport aioredis\r\nimport elasticapm\r\n\r\nREDIS_URL = \"redis://localhost:6379\"\r\n\r\n\r\nasync def main():\r\n elasticapm.instrument()\r\n\r\n client = await aioredis.create_redis_pool(REDIS_URL)\r\n elastic_apm_client = elasticapm.Client()\r\n\r\n elastic_apm_client.begin_transaction(\"redis-ping\")\r\n try:\r\n assert await client.ping() == b\"PONG\"\r\n finally:\r\n client.close()\r\n await client.wait_closed()\r\n elastic_apm_client.end_transaction(\"redis-ping\")\r\n\r\nif __name__ == \"__main__\":\r\n asyncio.run(main())\r\n```\r\n\r\n**Expected result**\r\nNo exception is raised.\r\n\r\n**Actual result**\r\n```python3\r\nTraceback (most recent call last):\r\n File \"<...>/test.py\", line 24, in <module>\r\n asyncio.run(main())\r\n File \"/usr/local/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"/usr/local/lib/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\r\n return future.result()\r\n File \"<...>/test.py\", line 17, in main\r\n assert await client.ping() == b\"PONG\"\r\n File \"<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py\", line 114, in ping\r\n return self.execute('PING', *args, encoding=encoding)\r\n File \"<...>/venv/lib/python3.8/site-packages/aioredis/commands/__init__.py\", line 51, in execute\r\n return self._pool_or_conn.execute(command, *args, **kwargs)\r\n File \"<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/base.py\", line 210, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"<...>/venv/lib/python3.8/site-packages/elasticapm/instrumentation/packages/asyncio/aioredis.py\", line 46, in call\r\n wrapped_name = args[0].decode()\r\nAttributeError: 'str' object has no attribute 'decode'\r\n```\r\n\r\n**Environment**\r\n- OS: Ubuntu 18.04.5 LTS\r\n- Python version: 3.8.9\r\n- Framework and version: `aioredis==1.3.1`\r\n- APM Server version: --\r\n- Agent version: `elasticapm==6.3.3`\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\n\n\nclass RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.pool\", \"ConnectionsPool.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = args[0].decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.commands.transaction\", \"Pipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = ((\"aioredis.connection\", \"RedisConnection.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"))\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"aioredis\":\n span.context[\"destination\"] = _get_destination_info(instance)\n return wrapped(*args, **kwargs)\n\n\ndef _get_destination_info(connection):\n destination_info = {\"service\": {\"name\": \"aioredis\", \"resource\": \"redis\", \"type\": \"db\"}}\n\n if hasattr(connection, \"_pool_or_conn\"):\n destination_info[\"port\"] = connection._pool_or_conn.address[1]\n destination_info[\"address\"] = connection._pool_or_conn.address[0]\n else:\n destination_info[\"port\"] = connection.address[1]\n destination_info[\"address\"] = connection.address[0]\n\n return destination_info\n", "path": "elasticapm/instrumentation/packages/asyncio/aioredis.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer 
in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import absolute_import\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import execution_context\n\n\nclass RedisConnectionPoolInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [\n (\"aioredis.pool\", \"ConnectionsPool.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if len(args) > 0:\n wrapped_name = args[0]\n if isinstance(wrapped_name, bytes):\n wrapped_name = wrapped_name.decode()\n else:\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisPipelineInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = [(\"aioredis.commands.transaction\", \"Pipeline.execute\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n wrapped_name = self.get_wrapped_name(wrapped, instance, method)\n\n with async_capture_span(\n wrapped_name, span_type=\"db\", span_subtype=\"redis\", span_action=\"query\", leaf=True\n ) as span:\n span.context[\"destination\"] = _get_destination_info(instance)\n\n return wrapped(*args, **kwargs)\n\n\nclass RedisConnectionInstrumentation(AbstractInstrumentedModule):\n name = \"aioredis\"\n\n instrument_list = (\n (\"aioredis.connection\", \"RedisConnection.execute\"),\n (\"aioredis.pool\", \"ConnectionsPool.execute_pubsub\"),\n )\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n span = execution_context.get_span()\n if span and span.subtype == \"aioredis\":\n span.context[\"destination\"] = _get_destination_info(instance)\n return wrapped(*args, **kwargs)\n\n\ndef _get_destination_info(connection):\n destination_info = {\"service\": {\"name\": \"aioredis\", \"resource\": \"redis\", \"type\": \"db\"}}\n\n if hasattr(connection, \"_pool_or_conn\"):\n destination_info[\"port\"] = connection._pool_or_conn.address[1]\n destination_info[\"address\"] = connection._pool_or_conn.address[0]\n else:\n destination_info[\"port\"] = connection.address[1]\n destination_info[\"address\"] = connection.address[0]\n\n return 
destination_info\n", "path": "elasticapm/instrumentation/packages/asyncio/aioredis.py"}]} | 2,030 | 390 |
gh_patches_debug_31047 | rasdani/github-patches | git_diff | conan-io__conan-center-index-11189 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0
### Package and Environment Details
* Package Name/Version: **libpciaccess/0.16**
* Operating System+version: **Fedora 34 x86_64**
* Compiler+version: **gcc 11.3.1**
* Conan version: **conan 1.49.0**
* Python version: **Python 3.9.13**
### Conan profile
```
Configuration for profile default:
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
[options]
[conf]
[build_requires]
[env]
```
### Steps to reproduce
Simply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.
Expected result:
libpciaccess/0.16 is successfully built and made available in the Conan cache.
Actual result:
Build fails with the following error:
```
libpciaccess/0.16:
libpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed
libpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646
ERROR: libpciaccess/0.16: Error in build() method, line 66
autotools = Autotools(self, build_script_folder=self._source_subfolder)
TypeError: __init__() got an unexpected keyword argument 'build_script_folder'
```
The problem appears to be that the changes merged in #11021, which revert the fix introduced for #10909 in #10910, have not been updated in Conan Center yet. Possibly because the change reverts to a previous version?
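To make the mismatch concrete, here is the failing call from the traceback next to the pattern used in the recipe sources shown below (a sketch assembled from this report only, not from other documentation):

```python
# the call from the traceback above (the recipe revision being built):
autotools = Autotools(self, build_script_folder=self._source_subfolder)

# the pattern in the FILES section below (the folder is passed to configure() instead):
autotools = Autotools(self)
autotools.configure(build_script_folder=self._source_subfolder)
autotools.make()
```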
### Logs
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/libpciaccess/all/conanfile.py`
Content:
```
1 import os
2
3 from conan.tools.gnu import Autotools, AutotoolsToolchain
4 from conans import ConanFile, tools
5 from conans.errors import ConanInvalidConfiguration
6
7 required_conan_version = ">=1.33.0"
8
9
10 class LibPciAccessConan(ConanFile):
11 name = "libpciaccess"
12 description = "Generic PCI access library"
13 topics = ("pci", "xorg")
14 url = "https://github.com/conan-io/conan-center-index"
15 homepage = "https://gitlab.freedesktop.org/xorg/lib/libpciaccess"
16 license = "MIT", "X11"
17
18 settings = "os", "arch", "compiler", "build_type"
19 options = {"shared": [True, False], "fPIC": [True, False]}
20 default_options = {"shared": False, "fPIC": True}
21
22 _source_subfolder = "source_subfolder"
23 _build_subfolder = "build_subfolder"
24
25 def validate(self):
26 def is_supported(settings):
27 if settings.os in ("Linux", "FreeBSD", "SunOS"):
28 return True
29 return settings.os == "Windows" and settings.get_safe("os.subsystem") == "cygwin"
30 if not is_supported(self.settings):
31 raise ConanInvalidConfiguration("Unsupported architecture.")
32
33 def config_options(self):
34 if self.settings.os == "Windows":
35 del self.options.fPIC
36
37 def configure(self):
38 if self.options.shared:
39 del self.options.fPIC
40 del self.settings.compiler.libcxx
41 del self.settings.compiler.cppstd
42
43 def build_requirements(self):
44 self.build_requires("pkgconf/1.7.4")
45 self.build_requires("xorg-macros/1.19.3")
46 self.build_requires("libtool/2.4.6")
47
48 def generate(self):
49 tc = AutotoolsToolchain(self)
50 tc.default_configure_install_args = True
51 tc.generate()
52
53 def source(self):
54 tools.get(**self.conan_data["sources"][self.version],
55 strip_root=True, destination=self._source_subfolder)
56
57 def build(self):
58 # autoreconf
59 self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"),
60 win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)
61
62 autotools = Autotools(self)
63 autotools.configure(build_script_folder=self._source_subfolder)
64 autotools.make()
65
66 def package(self):
67 self.copy(pattern="COPYING", dst="licenses",
68 src=self._source_subfolder)
69
70 autotools = Autotools(self)
71 autotools.install()
72
73 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
74 tools.remove_files_by_mask(os.path.join(
75 self.package_folder, "lib"), "*.la")
76
77 def package_info(self):
78 self.cpp_info.libs = tools.collect_libs(self)
79 self.cpp_info.set_property("pkg_config_name", "pciaccess")
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py
--- a/recipes/libpciaccess/all/conanfile.py
+++ b/recipes/libpciaccess/all/conanfile.py
@@ -1,10 +1,10 @@
import os
-
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
-required_conan_version = ">=1.33.0"
+
+required_conan_version = ">=1.44.0"
class LibPciAccessConan(ConanFile):
@@ -19,8 +19,9 @@
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
- _source_subfolder = "source_subfolder"
- _build_subfolder = "build_subfolder"
+ @property
+ def _source_subfolder(self):
+ return "source_subfolder"
def validate(self):
def is_supported(settings):
@@ -55,7 +56,6 @@
strip_root=True, destination=self._source_subfolder)
def build(self):
- # autoreconf
self.run("{} -fiv".format(tools.get_env("AUTORECONF") or "autoreconf"),
win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)
@@ -75,5 +75,5 @@
self.package_folder, "lib"), "*.la")
def package_info(self):
- self.cpp_info.libs = tools.collect_libs(self)
+ self.cpp_info.libs = ["pciaccess"]
self.cpp_info.set_property("pkg_config_name", "pciaccess")
| {"golden_diff": "diff --git a/recipes/libpciaccess/all/conanfile.py b/recipes/libpciaccess/all/conanfile.py\n--- a/recipes/libpciaccess/all/conanfile.py\n+++ b/recipes/libpciaccess/all/conanfile.py\n@@ -1,10 +1,10 @@\n import os\n-\n from conan.tools.gnu import Autotools, AutotoolsToolchain\n from conans import ConanFile, tools\n from conans.errors import ConanInvalidConfiguration\n \n-required_conan_version = \">=1.33.0\"\n+\n+required_conan_version = \">=1.44.0\"\n \n \n class LibPciAccessConan(ConanFile):\n@@ -19,8 +19,9 @@\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n \n- _source_subfolder = \"source_subfolder\"\n- _build_subfolder = \"build_subfolder\"\n+ @property\n+ def _source_subfolder(self):\n+ return \"source_subfolder\"\n \n def validate(self):\n def is_supported(settings):\n@@ -55,7 +56,6 @@\n strip_root=True, destination=self._source_subfolder)\n \n def build(self):\n- # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n \n@@ -75,5 +75,5 @@\n self.package_folder, \"lib\"), \"*.la\")\n \n def package_info(self):\n- self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.libs = [\"pciaccess\"]\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "issue": "[package] libpciaccess/0.16: TypeError: __init__() got an unexpected keyword argument 'build_script_folder' with conan 1.49.0\n### Package and Environment Details\r\n\r\n* Package Name/Version: **libpciaccess/0.16**\r\n* Operating System+version: **Fedora 34 x86_64**\r\n* Compiler+version: **gcc 11.3.1**\r\n* Conan version: **conan 1.49.0**\r\n* Python version: **Python 3.9.13**\r\n\r\n\r\n### Conan profile\r\n\r\n```\r\nConfiguration for profile default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=11\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\nSimply run `conan install libpciaccess/0.16@ --build` using conan 1.49.0.\r\n\r\nExpected result:\r\nlibpciaccess/0.16 is successfully built and made available in conan cache.\r\n\r\nActual result:\r\nBuild fails with the following error:\r\n\r\n```\r\nlibpciaccess/0.16: \r\nlibpciaccess/0.16: ERROR: Package 'dfbe50feef7f3c6223a476cd5aeadb687084a646' build failed\r\nlibpciaccess/0.16: WARN: Build folder /home/ts/.conan/data/libpciaccess/0.16/_/_/build/dfbe50feef7f3c6223a476cd5aeadb687084a646\r\nERROR: libpciaccess/0.16: Error in build() method, line 66\r\n\tautotools = Autotools(self, build_script_folder=self._source_subfolder)\r\n\tTypeError: __init__() got an unexpected keyword argument 'build_script_folder'\r\n\r\n```\r\nThe problem appears to be that the changes merged in #11021 which reverts the fix introduced for #10909 in #10910 hasn't been updated in the conan center. 
Possibly because it reverts to a previous version?\r\n\r\n### Logs\r\n\r\n_No response_\n", "before_files": [{"content": "import os\n\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass LibPciAccessConan(ConanFile):\n name = \"libpciaccess\"\n description = \"Generic PCI access library\"\n topics = (\"pci\", \"xorg\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://gitlab.freedesktop.org/xorg/lib/libpciaccess\"\n license = \"MIT\", \"X11\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def validate(self):\n def is_supported(settings):\n if settings.os in (\"Linux\", \"FreeBSD\", \"SunOS\"):\n return True\n return settings.os == \"Windows\" and settings.get_safe(\"os.subsystem\") == \"cygwin\"\n if not is_supported(self.settings):\n raise ConanInvalidConfiguration(\"Unsupported architecture.\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n self.build_requires(\"xorg-macros/1.19.3\")\n self.build_requires(\"libtool/2.4.6\")\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.default_configure_install_args = True\n tc.generate()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self._source_subfolder)\n\n def build(self):\n # autoreconf\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.configure(build_script_folder=self._source_subfolder)\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(\n self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "path": "recipes/libpciaccess/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nrequired_conan_version = \">=1.44.0\"\n\n\nclass LibPciAccessConan(ConanFile):\n name = \"libpciaccess\"\n description = \"Generic PCI access library\"\n topics = (\"pci\", \"xorg\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://gitlab.freedesktop.org/xorg/lib/libpciaccess\"\n license = \"MIT\", \"X11\"\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def validate(self):\n def is_supported(settings):\n if 
settings.os in (\"Linux\", \"FreeBSD\", \"SunOS\"):\n return True\n return settings.os == \"Windows\" and settings.get_safe(\"os.subsystem\") == \"cygwin\"\n if not is_supported(self.settings):\n raise ConanInvalidConfiguration(\"Unsupported architecture.\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n self.build_requires(\"xorg-macros/1.19.3\")\n self.build_requires(\"libtool/2.4.6\")\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.default_configure_install_args = True\n tc.generate()\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version],\n strip_root=True, destination=self._source_subfolder)\n\n def build(self):\n self.run(\"{} -fiv\".format(tools.get_env(\"AUTORECONF\") or \"autoreconf\"),\n win_bash=tools.os_info.is_windows, run_environment=True, cwd=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.configure(build_script_folder=self._source_subfolder)\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n\n autotools = Autotools(self)\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.remove_files_by_mask(os.path.join(\n self.package_folder, \"lib\"), \"*.la\")\n\n def package_info(self):\n self.cpp_info.libs = [\"pciaccess\"]\n self.cpp_info.set_property(\"pkg_config_name\", \"pciaccess\")\n", "path": "recipes/libpciaccess/all/conanfile.py"}]} | 1,601 | 404 |
gh_patches_debug_2041 | rasdani/github-patches | git_diff | Pyomo__pyomo-2633 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fixed Vars unpickle as stale
## Summary
I'm not sure if this is a bug, but it seems unexpected: if you pickle a model that has a fixed variable (not stale), the variable comes back as stale when you unpickle it.
### Steps to reproduce the issue
```
from pyomo.environ import *
import pickle
m = ConcreteModel()
m.x = Var(domain=Binary)
m.x.fix(1)
unpickle = pickle.loads(pickle.dumps(m))
m.x.pprint()
unpickle.x.pprint()
```
```
x : Size=1, Index=None
Key : Lower : Value : Upper : Fixed : Stale : Domain
None : 0 : 1 : 1 : True : False : Binary
x : Size=1, Index=None
Key : Lower : Value : Upper : Fixed : Stale : Domain
None : 0 : 1 : 1 : True : True : Binary
```
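The same discrepancy can be checked directly on the variables (assuming the usual `stale` accessor on the variable; the values below just restate the pprint output above):

```
print(m.x.stale)         # False
print(unpickle.x.stale)  # True
```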
### Error Message
It seems like these models should be identical, even up to stale-ness, right?
### Information on your system
Pyomo version: main
Python version: 3.8
Operating system: linux
How Pyomo was installed (PyPI, conda, source): source
Solver (if applicable):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/core/staleflag.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright (c) 2008-2022
5 # National Technology and Engineering Solutions of Sandia, LLC
6 # Under the terms of Contract DE-NA0003525 with National Technology and
7 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
8 # rights in this software.
9 # This software is distributed under the 3-clause BSD License.
10 # ___________________________________________________________________________
11
12 class _StaleFlagManager(object):
13 def __init__(self):
14 self._current = 0
15 self.mark_all_as_stale()
16
17 def stale_mapper(self, encode, value):
18 if encode:
19 return self.is_stale(value)
20 else:
21 if value:
22 return 0
23 else:
24 self.get_flag(0)
25
26 def _get_flag(self, current_flag):
27 """Return the current global stale flag value"""
28 return self._current
29
30 def _get_flag_delayed(self, current_flag):
31 """Implement the "delayed" advancement of the global stale flag value
32
33 This will continue to return the current value of the state flag
34 until the first non-stale variable is updated (that it, it is
35 passed the current stale flag when called). This allows for
36 updating stale variable values without incrementing the global
37 stale flag, but will mark everything as stale as soon as a
38 non-stale variable value is changed.
39
40 """
41 if current_flag == self._current:
42 self._current += 1
43 setattr(self, 'get_flag', getattr(self, '_get_flag'))
44 return self._current
45
46 def is_stale(self, val):
47 """Return ``True`` if the passed value indicated a stale variable"""
48 return val != self._current
49
50 def mark_all_as_stale(self, delayed=False):
51 """Advance the global stale flag, marking all variables as stale
52
53 This is generally called immediately before and after a batch
54 variable update (i.e. loading values from a solver result or
55 stored solution). Before the batch update
56 :meth:`mark_all_as_stale` is called with ``delayed=False``,
57 which immediately marks all variables as stale. After the batch
58 update, :meth:`mark_all_as_stale` is typically called with
59 ``delayed=True``. This allows additional stale variables to be
60 updated without advancing the global flag, but as soon as any
61 non-stale variable has its value changed, then the flag is
62 advanced and all other variables become stale.
63
64 """
65 if delayed:
66 setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))
67 else:
68 setattr(self, 'get_flag', getattr(self, '_get_flag'))
69 self._current += 1
70
71 StaleFlagManager = _StaleFlagManager()
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py
--- a/pyomo/core/staleflag.py
+++ b/pyomo/core/staleflag.py
@@ -21,7 +21,7 @@
if value:
return 0
else:
- self.get_flag(0)
+ return self.get_flag(0)
def _get_flag(self, current_flag):
"""Return the current global stale flag value"""
| {"golden_diff": "diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py\n--- a/pyomo/core/staleflag.py\n+++ b/pyomo/core/staleflag.py\n@@ -21,7 +21,7 @@\n if value:\n return 0\n else:\n- self.get_flag(0)\n+ return self.get_flag(0)\n \n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n", "issue": "Fixed Vars unpickle as stale\n## Summary\r\n\r\nI'm not sure if this is a bug, but it seems unexpected? Anyway, if you pickle a model that has a fixed variable (not stale), when you unpickle it, it comes back as stale.\r\n\r\n### Steps to reproduce the issue\r\n\r\n```\r\nfrom pyomo.environ import *\r\nimport pickle\r\n\r\nm = ConcreteModel()\r\nm.x = Var(domain=Binary)\r\nm.x.fix(1)\r\n\r\nunpickle = pickle.loads(pickle.dumps(m))\r\n\r\nm.x.pprint()\r\nunpickle.x.pprint()\r\n```\r\n\r\n```\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : False : Binary\r\nx : Size=1, Index=None\r\n Key : Lower : Value : Upper : Fixed : Stale : Domain\r\n None : 0 : 1 : 1 : True : True : Binary\r\n```\r\n\r\n### Error Message\r\n\r\nIt seems like these models should be identical, even up to stale-ness, right?\r\n\r\n### Information on your system\r\n\r\nPyomo version: main\r\nPython version: 3.8\r\nOperating system: linux\r\nHow Pyomo was installed (PyPI, conda, source): source \r\nSolver (if applicable):\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. 
After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2008-2022\n# National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nclass _StaleFlagManager(object):\n def __init__(self):\n self._current = 0\n self.mark_all_as_stale()\n\n def stale_mapper(self, encode, value):\n if encode:\n return self.is_stale(value)\n else:\n if value:\n return 0\n else:\n return self.get_flag(0)\n\n def _get_flag(self, current_flag):\n \"\"\"Return the current global stale flag value\"\"\"\n return self._current\n\n def _get_flag_delayed(self, current_flag):\n \"\"\"Implement the \"delayed\" advancement of the global stale flag value\n\n This will continue to return the current value of the state flag\n until the first non-stale variable is updated (that it, it is\n passed the current stale flag when called). This allows for\n updating stale variable values without incrementing the global\n stale flag, but will mark everything as stale as soon as a\n non-stale variable value is changed.\n\n \"\"\"\n if current_flag == self._current:\n self._current += 1\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n return self._current\n\n def is_stale(self, val):\n \"\"\"Return ``True`` if the passed value indicated a stale variable\"\"\"\n return val != self._current\n\n def mark_all_as_stale(self, delayed=False):\n \"\"\"Advance the global stale flag, marking all variables as stale\n\n This is generally called immediately before and after a batch\n variable update (i.e. loading values from a solver result or\n stored solution). Before the batch update\n :meth:`mark_all_as_stale` is called with ``delayed=False``,\n which immediately marks all variables as stale. After the batch\n update, :meth:`mark_all_as_stale` is typically called with\n ``delayed=True``. This allows additional stale variables to be\n updated without advancing the global flag, but as soon as any\n non-stale variable has its value changed, then the flag is\n advanced and all other variables become stale.\n\n \"\"\"\n if delayed:\n setattr(self, 'get_flag', getattr(self, '_get_flag_delayed'))\n else:\n setattr(self, 'get_flag', getattr(self, '_get_flag'))\n self._current += 1\n\nStaleFlagManager = _StaleFlagManager()\n", "path": "pyomo/core/staleflag.py"}]} | 1,314 | 102 |
gh_patches_debug_11964 | rasdani/github-patches | git_diff | urllib3__urllib3-1439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'Failed to parse headers' warning logged when getting message/rfc822 content
I've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:
```py
class TestOkayHeaders(SocketDummyServerTestCase):
def _test_okay_header_parsing(self, header):
self.start_response_handler((
b'HTTP/1.1 200 OK\r\n'
b'Content-Length: 0\r\n'
) + header + b'\r\n\r\n'
)
pool = HTTPConnectionPool(self.host, self.port, retries=False)
self.addCleanup(pool.close)
with LogRecorder() as logs:
pool.request('GET', '/')
for record in logs:
assert 'Failed to parse headers' not in record.msg
def test_header_text_plain(self):
self._test_okay_header_parsing(b'Content-type: text/plain')
def test_header_message_rfc822(self):
self._test_okay_header_parsing(b'Content-type: message/rfc822')
```
The test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:
```py
Failed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]
Traceback (most recent call last):
File "/home/user/git/urllib3/src/urllib3/connectionpool.py", line 396, in _make_request
assert_header_parsing(httplib_response.msg)
File "/home/user/git/urllib3/src/urllib3/util/response.py", line 68, in assert_header_parsing
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
urllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]
```
While retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/util/response.py`
Content:
```
1 from __future__ import absolute_import
2 from ..packages.six.moves import http_client as httplib
3
4 from ..exceptions import HeaderParsingError
5
6
7 def is_fp_closed(obj):
8 """
9 Checks whether a given file-like object is closed.
10
11 :param obj:
12 The file-like object to check.
13 """
14
15 try:
16 # Check `isclosed()` first, in case Python3 doesn't set `closed`.
17 # GH Issue #928
18 return obj.isclosed()
19 except AttributeError:
20 pass
21
22 try:
23 # Check via the official file-like-object way.
24 return obj.closed
25 except AttributeError:
26 pass
27
28 try:
29 # Check if the object is a container for another file-like object that
30 # gets released on exhaustion (e.g. HTTPResponse).
31 return obj.fp is None
32 except AttributeError:
33 pass
34
35 raise ValueError("Unable to determine whether fp is closed.")
36
37
38 def assert_header_parsing(headers):
39 """
40 Asserts whether all headers have been successfully parsed.
41 Extracts encountered errors from the result of parsing headers.
42
43 Only works on Python 3.
44
45 :param headers: Headers to verify.
46 :type headers: `httplib.HTTPMessage`.
47
48 :raises urllib3.exceptions.HeaderParsingError:
49 If parsing errors are found.
50 """
51
52 # This will fail silently if we pass in the wrong kind of parameter.
53 # To make debugging easier add an explicit check.
54 if not isinstance(headers, httplib.HTTPMessage):
55 raise TypeError('expected httplib.Message, got {0}.'.format(
56 type(headers)))
57
58 defects = getattr(headers, 'defects', None)
59 get_payload = getattr(headers, 'get_payload', None)
60
61 unparsed_data = None
62 if get_payload: # Platform-specific: Python 3.
63 unparsed_data = get_payload()
64
65 if defects or unparsed_data:
66 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
67
68
69 def is_response_to_head(response):
70 """
71 Checks whether the request of a response has been a HEAD-request.
72 Handles the quirks of AppEngine.
73
74 :param conn:
75 :type conn: :class:`httplib.HTTPResponse`
76 """
77 # FIXME: Can we do this somehow without accessing private httplib _method?
78 method = response._method
79 if isinstance(method, int): # Platform-specific: Appengine
80 return method == 3
81 return method.upper() == 'HEAD'
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py
--- a/src/urllib3/util/response.py
+++ b/src/urllib3/util/response.py
@@ -59,8 +59,14 @@
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
- if get_payload: # Platform-specific: Python 3.
- unparsed_data = get_payload()
+ if get_payload:
+ # get_payload is actually email.message.Message.get_payload;
+ # we're only interested in the result if it's not a multipart message
+ if not headers.is_multipart():
+ payload = get_payload()
+
+ if isinstance(payload, (bytes, str)):
+ unparsed_data = payload
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
| {"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -59,8 +59,14 @@\n get_payload = getattr(headers, 'get_payload', None)\n \n unparsed_data = None\n- if get_payload: # Platform-specific: Python 3.\n- unparsed_data = get_payload()\n+ if get_payload:\n+ # get_payload is actually email.message.Message.get_payload;\n+ # we're only interested in the result if it's not a multipart message\n+ if not headers.is_multipart():\n+ payload = get_payload()\n+\n+ if isinstance(payload, (bytes, str)):\n+ unparsed_data = payload\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "'Failed to parse headers' warning logged when getting message/rfc822 content\nI've been investigating an [issue](https://github.com/boto/botocore/issues/1551) I've recently discovered when retrieving objects from S3. I've now tracked it to `urllib3`; this test case (which I've added to `urllib3/test/with_dummyserver/test_socketlevel.py`) demonstrates it:\r\n\r\n```py\r\nclass TestOkayHeaders(SocketDummyServerTestCase):\r\n\r\n def _test_okay_header_parsing(self, header):\r\n self.start_response_handler((\r\n b'HTTP/1.1 200 OK\\r\\n'\r\n b'Content-Length: 0\\r\\n'\r\n ) + header + b'\\r\\n\\r\\n'\r\n )\r\n\r\n pool = HTTPConnectionPool(self.host, self.port, retries=False)\r\n self.addCleanup(pool.close)\r\n\r\n with LogRecorder() as logs:\r\n pool.request('GET', '/')\r\n\r\n for record in logs:\r\n assert 'Failed to parse headers' not in record.msg\r\n\r\n def test_header_text_plain(self):\r\n self._test_okay_header_parsing(b'Content-type: text/plain')\r\n\r\n def test_header_message_rfc822(self):\r\n self._test_okay_header_parsing(b'Content-type: message/rfc822')\r\n```\r\n\r\nThe test with `text/plain` passes, while the test with `message/rfc822` fails, and this is logged:\r\n\r\n```py\r\nFailed to parse headers (url=http://localhost:36732/): Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\nTraceback (most recent call last):\r\n File \"/home/user/git/urllib3/src/urllib3/connectionpool.py\", line 396, in _make_request\r\n assert_header_parsing(httplib_response.msg)\r\n File \"/home/user/git/urllib3/src/urllib3/util/response.py\", line 68, in assert_header_parsing\r\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\r\nurllib3.exceptions.HeaderParsingError: Unknown, unparsed data: [<http.client.HTTPMessage object at 0x7f8fab9373c8>]\r\n```\r\n\r\nWhile retrieving content of type `message/rfc822` still works, the warning message being logged is incorrect and unhelpful.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError('expected httplib.Message, got {0}.'.format(\n type(headers)))\n\n defects = getattr(headers, 'defects', None)\n get_payload = getattr(headers, 'get_payload', None)\n\n unparsed_data = None\n if get_payload: # Platform-specific: Python 3.\n unparsed_data = get_payload()\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == 'HEAD'\n", "path": "src/urllib3/util/response.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError('expected httplib.Message, got {0}.'.format(\n type(headers)))\n\n defects = getattr(headers, 'defects', None)\n get_payload = getattr(headers, 'get_payload', None)\n\n unparsed_data = None\n if get_payload:\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == 'HEAD'\n", "path": "src/urllib3/util/response.py"}]} | 1,475 | 204 |
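For context on the patch in the record above, a minimal sketch of the standard-library behavior it relies on (assuming CPython's `email` package; the header string mirrors the test case quoted in the issue, and the concrete message class may differ slightly from `http.client.HTTPMessage`): for a `message/rfc822` content type the parsed payload is a nested message held in a list rather than leftover text, which is why the fix skips multipart payloads and only treats string or bytes payloads as unparsed data.

```python
from email.parser import Parser

# Header block as http.client would hand it to email.parser
raw = "Content-Length: 0\r\nContent-type: message/rfc822\r\n\r\n"
msg = Parser().parsestr(raw)

print(msg.is_multipart())        # True: payload is a list of sub-messages
print(type(msg.get_payload()))   # <class 'list'>, matching the traceback in the issue

# Mirror of the patched assert_header_parsing logic:
unparsed = None
if not msg.is_multipart():
    payload = msg.get_payload()
    if isinstance(payload, (bytes, str)):
        unparsed = payload

print(unparsed)                  # None, so no HeaderParsingError would be raised
```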
gh_patches_debug_2822 | rasdani/github-patches | git_diff | StackStorm__st2-5092 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove obsolete 'bin/st2-check-license'
The project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.
The task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.
This is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.
Bonus points to find other obsolete, outdated, irrelevant scripts in st2 core.
Help wanted!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `st2common/setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright 2020 The StackStorm Authors.
3 # Copyright 2019 Extreme Networks, Inc.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from __future__ import absolute_import
18 import os.path
19
20 from setuptools import setup, find_packages
21
22 from dist_utils import fetch_requirements
23 from dist_utils import apply_vagrant_workaround
24 from dist_utils import get_version_string
25
26 ST2_COMPONENT = 'st2common'
27 BASE_DIR = os.path.dirname(os.path.abspath(__file__))
28 REQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')
29 INIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')
30
31 install_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)
32
33 apply_vagrant_workaround()
34 setup(
35 name=ST2_COMPONENT,
36 version=get_version_string(INIT_FILE),
37 description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),
38 author='StackStorm',
39 author_email='[email protected]',
40 license='Apache License (2.0)',
41 url='https://stackstorm.com/',
42 install_requires=install_reqs,
43 dependency_links=dep_links,
44 test_suite=ST2_COMPONENT,
45 zip_safe=False,
46 include_package_data=True,
47 packages=find_packages(exclude=['setuptools', 'tests']),
48 scripts=[
49 'bin/st2-bootstrap-rmq',
50 'bin/st2-cleanup-db',
51 'bin/st2-register-content',
52 'bin/st2-purge-executions',
53 'bin/st2-purge-trigger-instances',
54 'bin/st2-run-pack-tests',
55 'bin/st2ctl',
56 'bin/st2-generate-symmetric-crypto-key',
57 'bin/st2-self-check',
58 'bin/st2-track-result',
59 'bin/st2-validate-pack-config',
60 'bin/st2-check-license',
61 'bin/st2-pack-install',
62 'bin/st2-pack-download',
63 'bin/st2-pack-setup-virtualenv'
64 ],
65 entry_points={
66 'st2common.metrics.driver': [
67 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',
68 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',
69 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'
70 ],
71 'st2common.rbac.backend': [
72 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'
73 ],
74 }
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/st2common/setup.py b/st2common/setup.py
--- a/st2common/setup.py
+++ b/st2common/setup.py
@@ -57,7 +57,6 @@
'bin/st2-self-check',
'bin/st2-track-result',
'bin/st2-validate-pack-config',
- 'bin/st2-check-license',
'bin/st2-pack-install',
'bin/st2-pack-download',
'bin/st2-pack-setup-virtualenv'
| {"golden_diff": "diff --git a/st2common/setup.py b/st2common/setup.py\n--- a/st2common/setup.py\n+++ b/st2common/setup.py\n@@ -57,7 +57,6 @@\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n- 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n", "issue": "Remove obsolete 'bin/st2-check-license'\nThe project ships https://github.com/StackStorm/st2/blob/master/st2common/bin/st2-check-license which is irrelevant now, considering ongoing EWC features integration in the st2 core.\r\n\r\nThe task is to find all the places: https://github.com/search?q=org%3AStackStorm+st2-check-license&type=code and remove the `st2-check-license` scripts.\r\n\r\nThis is an easy `good first issue` for someone willing to start contributing and exploring the st2 system.\r\nBonus points to find other obsolete, outdated, irrelevant scripts in st2 core.\r\nHelp wanted!\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-check-license',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py"}], "after_files": [{"content": "# -*- 
coding: utf-8 -*-\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport os.path\n\nfrom setuptools import setup, find_packages\n\nfrom dist_utils import fetch_requirements\nfrom dist_utils import apply_vagrant_workaround\nfrom dist_utils import get_version_string\n\nST2_COMPONENT = 'st2common'\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nREQUIREMENTS_FILE = os.path.join(BASE_DIR, 'requirements.txt')\nINIT_FILE = os.path.join(BASE_DIR, 'st2common/__init__.py')\n\ninstall_reqs, dep_links = fetch_requirements(REQUIREMENTS_FILE)\n\napply_vagrant_workaround()\nsetup(\n name=ST2_COMPONENT,\n version=get_version_string(INIT_FILE),\n description='{} StackStorm event-driven automation platform component'.format(ST2_COMPONENT),\n author='StackStorm',\n author_email='[email protected]',\n license='Apache License (2.0)',\n url='https://stackstorm.com/',\n install_requires=install_reqs,\n dependency_links=dep_links,\n test_suite=ST2_COMPONENT,\n zip_safe=False,\n include_package_data=True,\n packages=find_packages(exclude=['setuptools', 'tests']),\n scripts=[\n 'bin/st2-bootstrap-rmq',\n 'bin/st2-cleanup-db',\n 'bin/st2-register-content',\n 'bin/st2-purge-executions',\n 'bin/st2-purge-trigger-instances',\n 'bin/st2-run-pack-tests',\n 'bin/st2ctl',\n 'bin/st2-generate-symmetric-crypto-key',\n 'bin/st2-self-check',\n 'bin/st2-track-result',\n 'bin/st2-validate-pack-config',\n 'bin/st2-pack-install',\n 'bin/st2-pack-download',\n 'bin/st2-pack-setup-virtualenv'\n ],\n entry_points={\n 'st2common.metrics.driver': [\n 'statsd = st2common.metrics.drivers.statsd_driver:StatsdDriver',\n 'noop = st2common.metrics.drivers.noop_driver:NoopDriver',\n 'echo = st2common.metrics.drivers.echo_driver:EchoDriver'\n ],\n 'st2common.rbac.backend': [\n 'noop = st2common.rbac.backends.noop:NoOpRBACBackend'\n ],\n }\n)\n", "path": "st2common/setup.py"}]} | 1,168 | 105 |
gh_patches_debug_33210 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-1462 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] IKEA E1812 shortcut button does not use quirks
**Describe the bug**
I have two IKEA E1812 Tradfri shortcut buttons that started to behave differently. ZHA claims that they have the same firmware (I also bought them together), but they now have different clusters: The one that is not picked up correctly has an extra `in_cluster` with ID 0xfc7c in its device signature. This causes `zhaquirks.ikea.shortcutbtn.IkeaTradfriShortcutBtn` to not pick this button up, producing a *Fail because input cluster mismatch on at least one endpoint* message in the log.
The `zha_event` still fires as it should, so by using the raw event one can work around this, but maybe there's a way to get the quirks to work.
**To Reproduce**
- pair a shortcut button that has the 0xfc7c input cluster
- go to the device page and see that no quirks are applied
- try to add an automation, see that triggers for button press are missing
(Unfortunately I have no clue what caused the two buttons to present different signatures)
**Expected behavior**
The quirks should still apply
**Additional context**
signature of the button with the extra cluster (this is the only difference between my buttons)
**EDIT:** The WindowCovering output cluster is also missing on the weird button
```
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0820",
"in_clusters": [
"0x0000",
"0x0001",
"0x0003",
"0x0009",
"0x0020",
"0x1000",
"0xfc7c"
],
"out_clusters": [
"0x0003",
"0x0004",
"0x0006",
"0x0008",
"0x0019",
"0x1000"
]
}
},
"manufacturer": "IKEA of Sweden",
"model": "TRADFRI SHORTCUT Button",
"class": "zigpy.device.Device"
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/ikea/shortcutbtn.py`
Content:
```
1 """Device handler for IKEA of Sweden TRADFRI shortcut button."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomDevice
4 from zigpy.zcl.clusters.closures import WindowCovering
5 from zigpy.zcl.clusters.general import (
6 Alarms,
7 Basic,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 PollControl,
14 PowerConfiguration,
15 )
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from zhaquirks.const import (
19 ARGS,
20 CLUSTER_ID,
21 COMMAND,
22 COMMAND_MOVE_ON_OFF,
23 COMMAND_OFF,
24 COMMAND_ON,
25 COMMAND_STOP,
26 DEVICE_TYPE,
27 DIM_UP,
28 DOUBLE_PRESS,
29 ENDPOINT_ID,
30 ENDPOINTS,
31 INPUT_CLUSTERS,
32 LONG_PRESS,
33 LONG_RELEASE,
34 MODELS_INFO,
35 OUTPUT_CLUSTERS,
36 PROFILE_ID,
37 SHORT_PRESS,
38 TURN_ON,
39 )
40 from zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster
41
42
43 class IkeaTradfriShortcutBtn(CustomDevice):
44 """Custom device representing IKEA of Sweden TRADFRI shortcut button."""
45
46 signature = {
47 # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
48 # device_version=1
49 # input_clusters=[0, 1, 3, 9, 32, 4096]
50 # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>
51 MODELS_INFO: [(IKEA, "TRADFRI SHORTCUT Button")],
52 ENDPOINTS: {
53 1: {
54 PROFILE_ID: zha.PROFILE_ID,
55 DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
56 INPUT_CLUSTERS: [
57 Basic.cluster_id,
58 PowerConfiguration.cluster_id,
59 Identify.cluster_id,
60 Alarms.cluster_id,
61 PollControl.cluster_id,
62 LightLink.cluster_id,
63 ],
64 OUTPUT_CLUSTERS: [
65 Identify.cluster_id,
66 Groups.cluster_id,
67 OnOff.cluster_id,
68 LevelControl.cluster_id,
69 Ota.cluster_id,
70 WindowCovering.cluster_id,
71 LightLink.cluster_id,
72 ],
73 }
74 },
75 }
76
77 replacement = {
78 ENDPOINTS: {
79 1: {
80 PROFILE_ID: zha.PROFILE_ID,
81 DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
82 INPUT_CLUSTERS: [
83 Basic.cluster_id,
84 PowerConfiguration1CRCluster,
85 Identify.cluster_id,
86 Alarms.cluster_id,
87 PollControl.cluster_id,
88 LightLinkCluster,
89 ],
90 OUTPUT_CLUSTERS: [
91 Identify.cluster_id,
92 Groups.cluster_id,
93 OnOff.cluster_id,
94 LevelControl.cluster_id,
95 Ota.cluster_id,
96 WindowCovering.cluster_id,
97 LightLink.cluster_id,
98 ],
99 }
100 }
101 }
102
103 device_automation_triggers = {
104 (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},
105 (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},
106 (LONG_PRESS, DIM_UP): {
107 COMMAND: COMMAND_MOVE_ON_OFF,
108 CLUSTER_ID: 8,
109 ENDPOINT_ID: 1,
110 ARGS: [0, 83],
111 },
112 (LONG_RELEASE, DIM_UP): {
113 COMMAND: COMMAND_STOP,
114 CLUSTER_ID: 8,
115 ENDPOINT_ID: 1,
116 },
117 }
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/ikea/shortcutbtn.py b/zhaquirks/ikea/shortcutbtn.py
--- a/zhaquirks/ikea/shortcutbtn.py
+++ b/zhaquirks/ikea/shortcutbtn.py
@@ -115,3 +115,79 @@
ENDPOINT_ID: 1,
},
}
+
+
+class IkeaTradfriShortcutBtn2(CustomDevice):
+ """Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster."""
+
+ signature = {
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
+ # device_version=1
+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]
+ # output_clusters=[3, 4, 6, 8, 25, 4096]>
+ MODELS_INFO: [(IKEA, "TRADFRI SHORTCUT Button")],
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ PowerConfiguration.cluster_id,
+ Identify.cluster_id,
+ Alarms.cluster_id,
+ PollControl.cluster_id,
+ LightLink.cluster_id,
+ 0xFC7C, # IKEA Cluster
+ ],
+ OUTPUT_CLUSTERS: [
+ Identify.cluster_id,
+ Groups.cluster_id,
+ OnOff.cluster_id,
+ LevelControl.cluster_id,
+ Ota.cluster_id,
+ LightLink.cluster_id,
+ ],
+ }
+ },
+ }
+
+ replacement = {
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ PowerConfiguration1CRCluster,
+ Identify.cluster_id,
+ Alarms.cluster_id,
+ PollControl.cluster_id,
+ LightLinkCluster,
+ ],
+ OUTPUT_CLUSTERS: [
+ Identify.cluster_id,
+ Groups.cluster_id,
+ OnOff.cluster_id,
+ LevelControl.cluster_id,
+ Ota.cluster_id,
+ LightLink.cluster_id,
+ ],
+ }
+ }
+ }
+
+ device_automation_triggers = {
+ (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},
+ (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},
+ (LONG_PRESS, DIM_UP): {
+ COMMAND: COMMAND_MOVE_ON_OFF,
+ CLUSTER_ID: 8,
+ ENDPOINT_ID: 1,
+ ARGS: [0, 83],
+ },
+ (LONG_RELEASE, DIM_UP): {
+ COMMAND: COMMAND_STOP,
+ CLUSTER_ID: 8,
+ ENDPOINT_ID: 1,
+ },
+ }
| {"golden_diff": "diff --git a/zhaquirks/ikea/shortcutbtn.py b/zhaquirks/ikea/shortcutbtn.py\n--- a/zhaquirks/ikea/shortcutbtn.py\n+++ b/zhaquirks/ikea/shortcutbtn.py\n@@ -115,3 +115,79 @@\n ENDPOINT_ID: 1,\n },\n }\n+\n+\n+class IkeaTradfriShortcutBtn2(CustomDevice):\n+ \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster.\"\"\"\n+\n+ signature = {\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n+ # device_version=1\n+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n+ # output_clusters=[3, 4, 6, 8, 25, 4096]>\n+ MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ PowerConfiguration.cluster_id,\n+ Identify.cluster_id,\n+ Alarms.cluster_id,\n+ PollControl.cluster_id,\n+ LightLink.cluster_id,\n+ 0xFC7C, # IKEA Cluster\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Ota.cluster_id,\n+ LightLink.cluster_id,\n+ ],\n+ }\n+ },\n+ }\n+\n+ replacement = {\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ PowerConfiguration1CRCluster,\n+ Identify.cluster_id,\n+ Alarms.cluster_id,\n+ PollControl.cluster_id,\n+ LightLinkCluster,\n+ ],\n+ OUTPUT_CLUSTERS: [\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ OnOff.cluster_id,\n+ LevelControl.cluster_id,\n+ Ota.cluster_id,\n+ LightLink.cluster_id,\n+ ],\n+ }\n+ }\n+ }\n+\n+ device_automation_triggers = {\n+ (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n+ (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n+ (LONG_PRESS, DIM_UP): {\n+ COMMAND: COMMAND_MOVE_ON_OFF,\n+ CLUSTER_ID: 8,\n+ ENDPOINT_ID: 1,\n+ ARGS: [0, 83],\n+ },\n+ (LONG_RELEASE, DIM_UP): {\n+ COMMAND: COMMAND_STOP,\n+ CLUSTER_ID: 8,\n+ ENDPOINT_ID: 1,\n+ },\n+ }\n", "issue": "[BUG] IKEA E1812 shortcut button does not use quirks\n**Describe the bug**\r\nI have two IKEA E1812 Tradfri shortcut buttons that started to behave differently. ZHA claims that they have the same firmware (I also bought them together), but they now have different clusters: The one that is not picked up correctly has an extra `in_cluster` with ID 0xfc7c in its device signature. 
This causes `zhaquirks.ikea.shortcutbtn.IkeaTradfriShortcutBtn` to not pick this button up, producing a *Fail because input cluster mismatch on at least one endpoint* message in the log.\r\n\r\nThe `zha_event` still fires as it should, so by using the raw event one can work around this, but maybe there's a way to get the quirks to work.\r\n\r\n**To Reproduce**\r\n- pair a shortcut button that has the 0xfc7c input cluster\r\n- go to the device page and see that no quirks are applied\r\n- try to add an automation, see that triggers for button press are missing\r\n\r\n(Unfortunately I have no clue what caused the two buttons to present different signatures)\r\n\r\n**Expected behavior**\r\nThe quirks should still apply\r\n\r\n**Additional context**\r\nsignature of the button with the extra cluster (this is the only difference between my buttons)\r\n**EDIT:** The WindowCovering output cluster is also missing on the weird button\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.EndDevice: 2>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress: 128>, manufacturer_code=4476, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=11264, maximum_outgoing_transfer_size=82, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=True, *is_full_function_device=False, *is_mains_powered=False, *is_receiver_on_when_idle=False, *is_router=False, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0820\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0001\",\r\n \"0x0003\",\r\n \"0x0009\",\r\n \"0x0020\",\r\n \"0x1000\",\r\n \"0xfc7c\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0006\",\r\n \"0x0008\",\r\n \"0x0019\",\r\n \"0x1000\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"IKEA of Sweden\",\r\n \"model\": \"TRADFRI SHORTCUT Button\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI shortcut button.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_MOVE_ON_OFF,\n COMMAND_OFF,\n COMMAND_ON,\n COMMAND_STOP,\n DEVICE_TYPE,\n DIM_UP,\n DOUBLE_PRESS,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n LONG_RELEASE,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_ON,\n)\nfrom zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster\n\n\nclass IkeaTradfriShortcutBtn(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n 
PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n", "path": "zhaquirks/ikea/shortcutbtn.py"}], "after_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI shortcut button.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom zhaquirks.const import (\n ARGS,\n CLUSTER_ID,\n COMMAND,\n COMMAND_MOVE_ON_OFF,\n COMMAND_OFF,\n COMMAND_ON,\n COMMAND_STOP,\n DEVICE_TYPE,\n DIM_UP,\n DOUBLE_PRESS,\n ENDPOINT_ID,\n ENDPOINTS,\n INPUT_CLUSTERS,\n LONG_PRESS,\n LONG_RELEASE,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SHORT_PRESS,\n TURN_ON,\n)\nfrom zhaquirks.ikea import IKEA, LightLinkCluster, PowerConfiguration1CRCluster\n\n\nclass IkeaTradfriShortcutBtn(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, 
CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n\n\nclass IkeaTradfriShortcutBtn2(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI shortcut button with IKEA cluster.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n # output_clusters=[3, 4, 6, 8, 25, 4096]>\n MODELS_INFO: [(IKEA, \"TRADFRI SHORTCUT Button\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n 0xFC7C, # IKEA Cluster\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.NON_COLOR_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration1CRCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLinkCluster,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n LightLink.cluster_id,\n ],\n }\n }\n }\n\n device_automation_triggers = {\n (SHORT_PRESS, TURN_ON): {COMMAND: COMMAND_ON, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (DOUBLE_PRESS, TURN_ON): {COMMAND: COMMAND_OFF, CLUSTER_ID: 6, ENDPOINT_ID: 1},\n (LONG_PRESS, DIM_UP): {\n COMMAND: COMMAND_MOVE_ON_OFF,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n ARGS: [0, 83],\n },\n (LONG_RELEASE, DIM_UP): {\n COMMAND: COMMAND_STOP,\n CLUSTER_ID: 8,\n ENDPOINT_ID: 1,\n },\n }\n", "path": "zhaquirks/ikea/shortcutbtn.py"}]} | 1,979 | 711 |
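For quick reference, a minimal sketch built only from the cluster lists quoted in the record above, contrasting the two hardware variants on endpoint 1: 0xFC7C is the manufacturer-specific IKEA cluster that the added quirk class matches on, and WindowCovering (0x0102) is the output cluster the newer variant drops.

```python
# Endpoint-1 clusters as reported in the issue and the original quirk signature
original_inputs = {0x0000, 0x0001, 0x0003, 0x0009, 0x0020, 0x1000}
new_inputs = original_inputs | {0xFC7C}            # extra IKEA manufacturer cluster

original_outputs = {0x0003, 0x0004, 0x0006, 0x0008, 0x0019, 0x0102, 0x1000}
new_outputs = original_outputs - {0x0102}          # WindowCovering no longer present

print(sorted(hex(c) for c in new_inputs - original_inputs))    # ['0xfc7c']
print(sorted(hex(c) for c in original_outputs - new_outputs))  # ['0x102']
```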
gh_patches_debug_16823 | rasdani/github-patches | git_diff | bids-standard__pybids-505 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
0.9.4 Release
@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/prep_zenodo.py`
Content:
```
1 #!/usr/bin/env python3
2 import git
3 import json
4 from subprocess import run, PIPE, CalledProcessError
5 from pathlib import Path
6 from tempfile import TemporaryDirectory
7
8
9 def decommify(name):
10 return ' '.join(name.split(', ')[::-1])
11
12
13 # List of repositories whose commits should be counted as contributions
14 codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
15
16 # Last shablona commit
17 origin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'
18
19 git_root = Path(git.Repo('.', search_parent_directories=True).working_dir)
20 zenodo_file = git_root / '.zenodo.json'
21
22 zenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}
23
24 orig_creators = zenodo.get('creators', [])
25 creator_map = {decommify(creator['name']): creator
26 for creator in orig_creators}
27
28 shortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)
29 counts = [line.split('\t', 1)[::-1]
30 for line in shortlog.stdout.decode().split('\n') if line]
31
32 # Get additional commit counts from dependencies
33 with TemporaryDirectory() as tmpdir:
34 tmppath = Path(tmpdir)
35 for repo in codependents:
36 try:
37 repo, ref = repo
38 except (TypeError, ValueError):
39 ref = None
40 repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])
41 try:
42 clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)
43 except CalledProcessError as err:
44 raise RuntimeError("Could not clone {}".format(repo)) from err
45
46 if ref is None:
47 tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)
48 # latest tag
49 ref = tag.stdout.decode().strip().rsplit('\n', 1)[1]
50
51 dep_shortlog = run(
52 ['git', '-C', repo_dir, 'shortlog', '-ns', ref],
53 stdout=PIPE)
54 counts.extend(line.split('\t', 1)[::-1]
55 for line in dep_shortlog.stdout.decode().split('\n')
56 if line)
57
58 commit_counts = {}
59 for committer, commits in counts:
60 commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)
61
62 # Stable sort:
63 # Number of commits in reverse order
64 # Ties broken by alphabetical order of first name
65 committers = [committer
66 for committer, _ in sorted(commit_counts.items(),
67 key=lambda x: (-x[1], x[0]))]
68
69 # Tal to the top
70 first_author = 'Tal Yarkoni'
71 if committers[0] != first_author:
72 committers.remove(first_author)
73 committers.insert(0, first_author)
74
75 creators = [
76 creator_map.get(committer, {'name': committer})
77 for committer in committers
78 ]
79
80 zenodo['creators'] = creators
81 zenodo_file.write_text(json.dumps(zenodo, indent=2) + '\n')
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py
--- a/tools/prep_zenodo.py
+++ b/tools/prep_zenodo.py
@@ -9,6 +9,11 @@
def decommify(name):
return ' '.join(name.split(', ')[::-1])
+# Users who have asked not to be cited at this time
+# XXX We should add a shortlog since the most recent tag and explicitly note
+# that a blacklisted user has contributed again recently, and verify they still
+# do not want to be cited.
+blacklist = {'Cecile Madjar'}
# List of repositories whose commits should be counted as contributions
codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]
@@ -75,6 +80,7 @@
creators = [
creator_map.get(committer, {'name': committer})
for committer in committers
+ if committer not in blacklist
]
zenodo['creators'] = creators
| {"golden_diff": "diff --git a/tools/prep_zenodo.py b/tools/prep_zenodo.py\n--- a/tools/prep_zenodo.py\n+++ b/tools/prep_zenodo.py\n@@ -9,6 +9,11 @@\n def decommify(name):\n return ' '.join(name.split(', ')[::-1])\n \n+# Users who have asked not to be cited at this time\n+# XXX We should add a shortlog since the most recent tag and explicitly note\n+# that a blacklisted user has contributed again recently, and verify they still\n+# do not want to be cited.\n+blacklist = {'Cecile Madjar'}\n \n # List of repositories whose commits should be counted as contributions\n codependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n@@ -75,6 +80,7 @@\n creators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n+ if committer not in blacklist\n ]\n \n zenodo['creators'] = creators\n", "issue": "0.9.4 Release\n@tyarkoni We've built up a [changelog](https://github.com/bids-standard/pybids/releases/tag/untagged-aa777a4ea34ce82990cd), so it'd be good to release soon. Any blocking issues?\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2) + 
'\\n')\n", "path": "tools/prep_zenodo.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE, CalledProcessError\nfrom pathlib import Path\nfrom tempfile import TemporaryDirectory\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\n# Users who have asked not to be cited at this time\n# XXX We should add a shortlog since the most recent tag and explicitly note\n# that a blacklisted user has contributed again recently, and verify they still\n# do not want to be cited.\nblacklist = {'Cecile Madjar'}\n\n# List of repositories whose commits should be counted as contributions\ncodependents = [('https://github.com/grabbles/grabbit.git', '0.2.6')]\n\n# Last shablona commit\norigin_commit = 'd72caaf5933907ed699d57faddaec7bfc836ce6f'\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns', f'{origin_commit}..'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\n# Get additional commit counts from dependencies\nwith TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n for repo in codependents:\n try:\n repo, ref = repo\n except (TypeError, ValueError):\n ref = None\n repo_dir = str(tmppath / repo.rsplit('/', 1)[1].split('.', 1)[0])\n try:\n clone = run(['git', 'clone', '-q', repo, repo_dir], check=True)\n except CalledProcessError as err:\n raise RuntimeError(\"Could not clone {}\".format(repo)) from err\n\n if ref is None:\n tag = run(['git', '-C', repo_dir, 'tag'], stdout=PIPE)\n # latest tag\n ref = tag.stdout.decode().strip().rsplit('\\n', 1)[1]\n\n dep_shortlog = run(\n ['git', '-C', repo_dir, 'shortlog', '-ns', ref],\n stdout=PIPE)\n counts.extend(line.split('\\t', 1)[::-1]\n for line in dep_shortlog.stdout.decode().split('\\n')\n if line)\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))]\n\n# Tal to the top\nfirst_author = 'Tal Yarkoni'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n if committer not in blacklist\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, indent=2) + '\\n')\n", "path": "tools/prep_zenodo.py"}]} | 1,197 | 228 |
gh_patches_debug_26541 | rasdani/github-patches | git_diff | OpenMined__PySyft-5732 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add tests for domain's RoleManager
## Description
Add unit tests for grid domain's RoleManager in the module `grid/apps/domain/src/main/core/manager`
## Type of Test
- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)
- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)
- [ ] **Regression test** (e.g. checking that adding or removing a module of code allows other systems to continue to function as intended)
- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)
- [ ] **Performance test** (e.g. checking to see how efficient a system is at performing the intended task)
- [ ] Other...
## Expected Behavior
Ideally, the tests should cover as many methods as possible, within reason.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/grid/apps/domain/src/main/core/manager/role_manager.py`
Content:
```
1 # stdlib
2 from typing import List
3 from typing import Union
4
5 # grid relative
6 from ..database.tables.roles import Role
7 from ..exceptions import RoleNotFoundError
8 from .database_manager import DatabaseManager
9
10
11 class RoleManager(DatabaseManager):
12
13 schema = Role
14
15 def __init__(self, database):
16 self._schema = RoleManager.schema
17 self.db = database
18
19 @property
20 def user_role(self):
21 return self.first(name="User")
22
23 @property
24 def owner_role(self):
25 return self.first(name="Owner")
26
27 @property
28 def compliance_officer_role(self):
29 return self.first(name="Compliance Officer")
30
31 @property
32 def admin_role(self):
33 return self.first(name="Administrator")
34
35 @property
36 def common_roles(self):
37 return self.db.session.query(self._schema).filter_by(
38 can_triage_requests=False,
39 can_edit_settings=False,
40 can_create_users=False,
41 can_create_groups=False,
42 can_upload_data=False,
43 can_edit_roles=False,
44 can_manage_infrastructure=False,
45 )
46
47 @property
48 def org_roles(self):
49 return self.db.session.query(self._schema).except_(self.common_roles)
50
51 def first(self, **kwargs) -> Union[None, List]:
52 result = super().first(**kwargs)
53 if not result:
54 raise RoleNotFoundError
55 return result
56
57 def query(self, **kwargs) -> Union[None, List]:
58 results = super().query(**kwargs)
59 if len(results) == 0:
60 raise RoleNotFoundError
61 return results
62
63 def set(self, role_id, params):
64 if self.contain(id=role_id):
65 self.modify({"id": role_id}, params)
66 else:
67 raise RoleNotFoundError
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py
--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py
+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py
@@ -2,6 +2,8 @@
from typing import List
from typing import Union
+from flask_sqlalchemy import BaseQuery
+
# grid relative
from ..database.tables.roles import Role
from ..exceptions import RoleNotFoundError
@@ -32,8 +34,7 @@
def admin_role(self):
return self.first(name="Administrator")
- @property
- def common_roles(self):
+ def _common_roles(self) -> BaseQuery:
return self.db.session.query(self._schema).filter_by(
can_triage_requests=False,
can_edit_settings=False,
@@ -44,9 +45,13 @@
can_manage_infrastructure=False,
)
+ @property
+ def common_roles(self):
+ return self._common_roles().all()
+
@property
def org_roles(self):
- return self.db.session.query(self._schema).except_(self.common_roles)
+ return self.db.session.query(self._schema).except_(self._common_roles).all()
def first(self, **kwargs) -> Union[None, List]:
result = super().first(**kwargs)
| {"golden_diff": "diff --git a/packages/grid/apps/domain/src/main/core/manager/role_manager.py b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n--- a/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n+++ b/packages/grid/apps/domain/src/main/core/manager/role_manager.py\n@@ -2,6 +2,8 @@\n from typing import List\n from typing import Union\n \n+from flask_sqlalchemy import BaseQuery\n+\n # grid relative\n from ..database.tables.roles import Role\n from ..exceptions import RoleNotFoundError\n@@ -32,8 +34,7 @@\n def admin_role(self):\n return self.first(name=\"Administrator\")\n \n- @property\n- def common_roles(self):\n+ def _common_roles(self) -> BaseQuery:\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n@@ -44,9 +45,13 @@\n can_manage_infrastructure=False,\n )\n \n+ @property\n+ def common_roles(self):\n+ return self._common_roles().all()\n+\n @property\n def org_roles(self):\n- return self.db.session.query(self._schema).except_(self.common_roles)\n+ return self.db.session.query(self._schema).except_(self._common_roles).all()\n \n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n", "issue": "Add tests for domain's RoleManager\n## Description\r\nAdd unit tests for grid domain's RoleManager at the module `grid/apps/domain/src/main/core/manager`\r\n\r\n## Type of Test\r\n- [x] **Unit test** (e.g. checking a loop, method, or function is working as intended)\r\n- [ ] **Integration test** (e.g. checking if a certain group or set of functionality is working as intended)\r\n- [ ] **Regression test** (e.g. checking if by adding or removing a module of code allows other systems to continue to function as intended)\r\n- [ ] **Stress test** (e.g. checking to see how well a system performs under various situations, including heavy usage)\r\n- [ ] **Performance test** (e.g. 
checking to see how efficient a system is as performing the intended task)\r\n- [ ] Other...\r\n\r\n## Expected Behavior\r\nIdeally, the tests should cover as many methods as possible and within reason.\r\n\n", "before_files": [{"content": "# stdlib\nfrom typing import List\nfrom typing import Union\n\n# grid relative\nfrom ..database.tables.roles import Role\nfrom ..exceptions import RoleNotFoundError\nfrom .database_manager import DatabaseManager\n\n\nclass RoleManager(DatabaseManager):\n\n schema = Role\n\n def __init__(self, database):\n self._schema = RoleManager.schema\n self.db = database\n\n @property\n def user_role(self):\n return self.first(name=\"User\")\n\n @property\n def owner_role(self):\n return self.first(name=\"Owner\")\n\n @property\n def compliance_officer_role(self):\n return self.first(name=\"Compliance Officer\")\n\n @property\n def admin_role(self):\n return self.first(name=\"Administrator\")\n\n @property\n def common_roles(self):\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n can_create_users=False,\n can_create_groups=False,\n can_upload_data=False,\n can_edit_roles=False,\n can_manage_infrastructure=False,\n )\n\n @property\n def org_roles(self):\n return self.db.session.query(self._schema).except_(self.common_roles)\n\n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n if not result:\n raise RoleNotFoundError\n return result\n\n def query(self, **kwargs) -> Union[None, List]:\n results = super().query(**kwargs)\n if len(results) == 0:\n raise RoleNotFoundError\n return results\n\n def set(self, role_id, params):\n if self.contain(id=role_id):\n self.modify({\"id\": role_id}, params)\n else:\n raise RoleNotFoundError\n", "path": "packages/grid/apps/domain/src/main/core/manager/role_manager.py"}], "after_files": [{"content": "# stdlib\nfrom typing import List\nfrom typing import Union\n\nfrom flask_sqlalchemy import BaseQuery\n\n# grid relative\nfrom ..database.tables.roles import Role\nfrom ..exceptions import RoleNotFoundError\nfrom .database_manager import DatabaseManager\n\n\nclass RoleManager(DatabaseManager):\n\n schema = Role\n\n def __init__(self, database):\n self._schema = RoleManager.schema\n self.db = database\n\n @property\n def user_role(self):\n return self.first(name=\"User\")\n\n @property\n def owner_role(self):\n return self.first(name=\"Owner\")\n\n @property\n def compliance_officer_role(self):\n return self.first(name=\"Compliance Officer\")\n\n @property\n def admin_role(self):\n return self.first(name=\"Administrator\")\n\n def _common_roles(self) -> BaseQuery:\n return self.db.session.query(self._schema).filter_by(\n can_triage_requests=False,\n can_edit_settings=False,\n can_create_users=False,\n can_create_groups=False,\n can_upload_data=False,\n can_edit_roles=False,\n can_manage_infrastructure=False,\n )\n\n @property\n def common_roles(self):\n return self._common_roles().all()\n\n @property\n def org_roles(self):\n return self.db.session.query(self._schema).except_(self._common_roles).all()\n\n def first(self, **kwargs) -> Union[None, List]:\n result = super().first(**kwargs)\n if not result:\n raise RoleNotFoundError\n return result\n\n def query(self, **kwargs) -> Union[None, List]:\n results = super().query(**kwargs)\n if len(results) == 0:\n raise RoleNotFoundError\n return results\n\n def set(self, role_id, params):\n if self.contain(id=role_id):\n self.modify({\"id\": role_id}, params)\n else:\n raise RoleNotFoundError\n", 
"path": "packages/grid/apps/domain/src/main/core/manager/role_manager.py"}]} | 975 | 308 |
gh_patches_debug_59199 | rasdani/github-patches | git_diff | Nitrate__Nitrate-380 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more information links to package
Add these links to the `project_urls` argument of `setup.py`.
- Source Code: https://github.com/Nitrate/Nitrate
- Issue Tracker: https://github.com/Nitrate/Nitrate/issues
- Documentation: https://nitrate.readthedocs.io/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import sys
4
5 from setuptools import setup, find_packages
6
7
8 with open('VERSION.txt', 'r') as f:
9 pkg_version = f.read().strip()
10
11
12 def get_long_description():
13 with open('README.rst', 'r') as f:
14 return f.read()
15
16
17 install_requires = [
18 'beautifulsoup4 >= 4.1.1',
19 'django >= 1.11,<3.0',
20 'django-contrib-comments == 1.8.0',
21 'django-tinymce == 2.7.0',
22 'django-uuslug == 1.1.8',
23 'html2text',
24 'odfpy >= 0.9.6',
25 'python-bugzilla',
26 'six',
27 'xmltodict',
28 'kobo == 0.9.0'
29 ]
30
31 if sys.version_info.major < 3:
32 install_requires += [
33 'enum34',
34 ]
35
36 extras_require = {
37 'mysql': ['PyMySQL == 0.9.2'],
38 'pgsql': ['psycopg2 == 2.7.5'],
39
40 # Required for tcms.core.contrib.auth.backends.KerberosBackend
41 'krbauth': [
42 'kerberos == 1.2.5'
43 ],
44
45 # Packages for building documentation
46 'docs': [
47 'Sphinx >= 1.1.2',
48 'sphinx_rtd_theme',
49 ],
50
51 # Necessary packages for running tests
52 'tests': [
53 'beautifulsoup4',
54 'coverage',
55 'factory_boy',
56 'flake8',
57 'mock',
58 'pytest',
59 'pytest-cov',
60 'pytest-django',
61 ],
62
63 # Contain tools that assists the development
64 'devtools': [
65 'django-debug-toolbar == 1.7',
66 'tox',
67 'django-extensions',
68 'pygraphviz',
69 'future-breakpoint',
70 ],
71
72 # Required packages required to run async tasks
73 'async': [
74 'celery == 4.2.0',
75 ]
76 }
77
78 setup(
79 name='Nitrate',
80 version=pkg_version,
81 description='Test Case Management System',
82 long_description=get_long_description(),
83 author='Nitrate Team',
84 maintainer='Chenxiong Qi',
85 maintainer_email='[email protected]',
86 url='https://github.com/Nitrate/Nitrate/',
87 license='GPLv2+',
88 keywords='test case',
89 install_requires=install_requires,
90 extras_require=extras_require,
91 packages=find_packages(),
92 include_package_data=True,
93 classifiers=[
94 'Framework :: Django',
95 'Framework :: Django :: 1.11',
96 'Framework :: Django :: 2.0',
97 'Framework :: Django :: 2.1',
98 'Intended Audience :: Developers',
99 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
100 'Programming Language :: Python :: 2',
101 'Programming Language :: Python :: 2.7',
102 'Programming Language :: Python :: 3',
103 'Programming Language :: Python :: 3.6',
104 'Programming Language :: Python :: 3.7',
105 'Topic :: Software Development :: Quality Assurance',
106 'Topic :: Software Development :: Testing',
107 ],
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -105,4 +105,9 @@
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
+ project_urls={
+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',
+ 'Source Code': 'https://github.com/Nitrate/Nitrate',
+ 'Documentation': 'https://nitrate.readthedocs.io/',
+ },
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -105,4 +105,9 @@\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n+ project_urls={\n+ 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n+ 'Source Code': 'https://github.com/Nitrate/Nitrate',\n+ 'Documentation': 'https://nitrate.readthedocs.io/',\n+ },\n )\n", "issue": "Add more information links to package\nAdd these links to `project_urls` argument of `setup.py`.\r\n\r\n- Source Code: https://github.com/Nitrate/Nitrate\r\n- Issue Tracker: https://github.com/Nitrate/Nitrate/issues\r\n- Documentation: https://nitrate.readthedocs.io/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 
'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'six',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nif sys.version_info.major < 3:\n install_requires += [\n 'enum34',\n ]\n\nextras_require = {\n 'mysql': ['PyMySQL == 0.9.2'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.core.contrib.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n packages=find_packages(),\n include_package_data=True,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]} | 1,267 | 116 |
gh_patches_debug_21139 | rasdani/github-patches | git_diff | conan-io__conan-3600 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Listing package content
Maybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)?
TIA
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/local_file_getter.py`
Content:
```
1 import os
2
3 from conans.errors import NotFoundException
4 from conans.model.manifest import discarded_file
5 from conans.model.ref import PackageReference
6 from conans.util.files import load
7
8
9 def get_path(client_cache, conan_ref, package_id, path):
10 """
11 :param client_cache: Conan's client cache
12 :param conan_ref: Specified reference in the conan get command
13 :param package_id: Specified package id (can be None)
14 :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
15 :return: The real path in the local cache for the specified parameters
16 """
17 if package_id is None: # Get the file in the exported files
18 folder = client_cache.export(conan_ref)
19 else:
20 folder = client_cache.package(PackageReference(conan_ref, package_id))
21
22 abs_path = os.path.join(folder, path)
23 if not os.path.exists(abs_path):
24 raise NotFoundException("The specified path doesn't exist")
25 if os.path.isdir(abs_path):
26 return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])
27 else:
28 return load(abs_path)
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py
--- a/conans/client/local_file_getter.py
+++ b/conans/client/local_file_getter.py
@@ -11,13 +11,15 @@
:param client_cache: Conan's client cache
:param conan_ref: Specified reference in the conan get command
:param package_id: Specified package id (can be None)
- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)
+ :param path: Path to a file, subfolder of exports (if only ref)
+ or package (if package_id defined)
:return: The real path in the local cache for the specified parameters
"""
if package_id is None: # Get the file in the exported files
folder = client_cache.export(conan_ref)
else:
- folder = client_cache.package(PackageReference(conan_ref, package_id))
+ folder = client_cache.package(PackageReference(conan_ref, package_id),
+ short_paths=None)
abs_path = os.path.join(folder, path)
if not os.path.exists(abs_path):
| {"golden_diff": "diff --git a/conans/client/local_file_getter.py b/conans/client/local_file_getter.py\n--- a/conans/client/local_file_getter.py\n+++ b/conans/client/local_file_getter.py\n@@ -11,13 +11,15 @@\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n- :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n+ :param path: Path to a file, subfolder of exports (if only ref)\n+ or package (if package_id defined)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n- folder = client_cache.package(PackageReference(conan_ref, package_id))\n+ folder = client_cache.package(PackageReference(conan_ref, package_id),\n+ short_paths=None)\n \n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n", "issue": "Listing package content\nMaybe a stupid question, but is there a quick way to list all the files (artifacts: binaries and header files) contained in a package (i.e. files added through the package method)?\r\nTIA\r\n\n", "before_files": [{"content": "import os\n\nfrom conans.errors import NotFoundException\nfrom conans.model.manifest import discarded_file\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import load\n\n\ndef get_path(client_cache, conan_ref, package_id, path):\n \"\"\"\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n :param path: Path to a file, subfolder of exports (if only ref) or package (if package_id declared as well)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n folder = client_cache.package(PackageReference(conan_ref, package_id))\n\n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n raise NotFoundException(\"The specified path doesn't exist\")\n if os.path.isdir(abs_path):\n return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])\n else:\n return load(abs_path)\n", "path": "conans/client/local_file_getter.py"}], "after_files": [{"content": "import os\n\nfrom conans.errors import NotFoundException\nfrom conans.model.manifest import discarded_file\nfrom conans.model.ref import PackageReference\nfrom conans.util.files import load\n\n\ndef get_path(client_cache, conan_ref, package_id, path):\n \"\"\"\n :param client_cache: Conan's client cache\n :param conan_ref: Specified reference in the conan get command\n :param package_id: Specified package id (can be None)\n :param path: Path to a file, subfolder of exports (if only ref)\n or package (if package_id defined)\n :return: The real path in the local cache for the specified parameters\n \"\"\"\n if package_id is None: # Get the file in the exported files\n folder = client_cache.export(conan_ref)\n else:\n folder = client_cache.package(PackageReference(conan_ref, package_id),\n short_paths=None)\n\n abs_path = os.path.join(folder, path)\n if not os.path.exists(abs_path):\n raise NotFoundException(\"The specified path doesn't exist\")\n if os.path.isdir(abs_path):\n return sorted([path for path in os.listdir(abs_path) if not discarded_file(path)])\n else:\n 
return load(abs_path)\n", "path": "conans/client/local_file_getter.py"}]} | 621 | 270 |
gh_patches_debug_16510 | rasdani/github-patches | git_diff | cupy__cupy-5771 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[RFC] Drop Python 3.6 support in v10
We are now discussing dropping support for Python 3.6 in CuPy v10. Feel free to leave a comment here if you have any concerns.
Background:
* CUDA Python is unlikely to provide a wheel for Python 3.6, although it can be built from source without any issue. CUDA Python currently requires [`-std=c++14`](https://github.com/NVIDIA/cuda-python/blob/427c597959e6fe1409195a30d42fc4a1886bc89a/setup.py#L38), so recent versions of gcc, which are not in RHEL/CentOS 7 by default, are needed. We want to avoid requiring CuPy wheel users to manually install a non-default GCC.
* NumPy dropped Python 3.6 support in June 2020: https://numpy.org/neps/nep-0029-deprecation_policy.html
* Python 3.6 support becomes EOL in December 2021.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import glob
4 import os
5 from setuptools import setup, find_packages
6 import sys
7
8 source_root = os.path.abspath(os.path.dirname(__file__))
9 sys.path.append(os.path.join(source_root, 'install'))
10
11 import cupy_builder # NOQA
12 from cupy_builder import cupy_setup_build # NOQA
13
14 ctx = cupy_builder.Context(source_root)
15 cupy_builder.initialize(ctx)
16 if not cupy_builder.preflight_check(ctx):
17 sys.exit(1)
18
19
20 # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)
21 setup_requires = [
22 'Cython>=0.29.22,<3',
23 'fastrlock>=0.5',
24 ]
25 install_requires = [
26 'numpy>=1.17,<1.24', # see #4773
27 'fastrlock>=0.5',
28 ]
29 extras_require = {
30 'all': [
31 'scipy>=1.4,<1.10', # see #4773
32 'Cython>=0.29.22,<3',
33 'optuna>=2.0',
34 ],
35 'stylecheck': [
36 'autopep8==1.5.5',
37 'flake8==3.8.4',
38 'pbr==5.5.1',
39 'pycodestyle==2.6.0',
40 ],
41 'test': [
42 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.
43 'pytest>=6.2',
44 ],
45 # TODO(kmaehashi): Remove 'jenkins' requirements.
46 'jenkins': [
47 'pytest>=6.2',
48 'pytest-timeout',
49 'pytest-cov',
50 'coveralls',
51 'codecov',
52 'coverage<5', # Otherwise, Python must be built with sqlite
53 ],
54 }
55 tests_require = extras_require['test']
56
57
58 # List of files that needs to be in the distribution (sdist/wheel).
59 # Notes:
60 # - Files only needed in sdist should be added to `MANIFEST.in`.
61 # - The following glob (`**`) ignores items starting with `.`.
62 cupy_package_data = [
63 'cupy/cuda/cupy_thrust.cu',
64 'cupy/cuda/cupy_cub.cu',
65 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback
66 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback
67 'cupy/cuda/cupy_cufft.h', # for cuFFT callback
68 'cupy/cuda/cufft.pxd', # for cuFFT callback
69 'cupy/cuda/cufft.pyx', # for cuFFT callback
70 'cupy/random/cupy_distributions.cu',
71 'cupy/random/cupy_distributions.cuh',
72 ] + [
73 x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)
74 if os.path.isfile(x)
75 ]
76
77 package_data = {
78 'cupy': [
79 os.path.relpath(x, 'cupy') for x in cupy_package_data
80 ],
81 }
82
83 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)
84
85 ext_modules = cupy_setup_build.get_ext_modules(False, ctx)
86 build_ext = cupy_setup_build.custom_build_ext
87
88 # Get __version__ variable
89 with open(os.path.join(source_root, 'cupy', '_version.py')) as f:
90 exec(f.read())
91
92 long_description = None
93 if ctx.long_description_path is not None:
94 with open(ctx.long_description_path) as f:
95 long_description = f.read()
96
97
98 CLASSIFIERS = """\
99 Development Status :: 5 - Production/Stable
100 Intended Audience :: Science/Research
101 Intended Audience :: Developers
102 License :: OSI Approved :: MIT License
103 Programming Language :: Python
104 Programming Language :: Python :: 3
105 Programming Language :: Python :: 3.6
106 Programming Language :: Python :: 3.7
107 Programming Language :: Python :: 3.8
108 Programming Language :: Python :: 3.9
109 Programming Language :: Python :: 3 :: Only
110 Programming Language :: Cython
111 Topic :: Software Development
112 Topic :: Scientific/Engineering
113 Operating System :: POSIX
114 Operating System :: Microsoft :: Windows
115 """
116
117
118 setup(
119 name=ctx.package_name,
120 version=__version__, # NOQA
121 description='CuPy: NumPy & SciPy for GPU',
122 long_description=long_description,
123 author='Seiya Tokui',
124 author_email='[email protected]',
125 maintainer='CuPy Developers',
126 url='https://cupy.dev/',
127 license='MIT License',
128 project_urls={
129 "Bug Tracker": "https://github.com/cupy/cupy/issues",
130 "Documentation": "https://docs.cupy.dev/",
131 "Source Code": "https://github.com/cupy/cupy",
132 },
133 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
134 packages=find_packages(exclude=['install', 'tests']),
135 package_data=package_data,
136 zip_safe=False,
137 python_requires='>=3.6.0',
138 setup_requires=setup_requires,
139 install_requires=install_requires,
140 tests_require=tests_require,
141 extras_require=extras_require,
142 ext_modules=ext_modules,
143 cmdclass={'build_ext': build_ext},
144 )
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -102,7 +102,6 @@
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
@@ -134,7 +133,7 @@
packages=find_packages(exclude=['install', 'tests']),
package_data=package_data,
zip_safe=False,
- python_requires='>=3.6.0',
+ python_requires='>=3.7',
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -102,7 +102,6 @@\n License :: OSI Approved :: MIT License\n Programming Language :: Python\n Programming Language :: Python :: 3\n-Programming Language :: Python :: 3.6\n Programming Language :: Python :: 3.7\n Programming Language :: Python :: 3.8\n Programming Language :: Python :: 3.9\n@@ -134,7 +133,7 @@\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n- python_requires='>=3.6.0',\n+ python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n", "issue": "[RFC] Drop Python 3.6 support in v10\nWe are now discussing to drop support for Python 3.6 in CuPy v10. Feel free to leave a comment here if you have any concerns.\r\n\r\nBackground:\r\n* CUDA Python is unlikely to provide a wheel for Python 3.6, although it can be built from the source without any issue. CUDA Python currently requires [`-std=c++14`](https://github.com/NVIDIA/cuda-python/blob/427c597959e6fe1409195a30d42fc4a1886bc89a/setup.py#L38) so recent versions of gcc, which is not in RHEL/CentOS 7 by default, is needed. We want to avoid requiring CuPy wheel users to manually install non-default GCC.\r\n* NumPy dropped Python 3.6 support in June 2020: https://numpy.org/neps/nep-0029-deprecation_policy.html\r\n* Python 3.6 support become EOL in December 2021.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.17,<1.24', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.4,<1.10', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n # TODO(kmaehashi): Remove 'jenkins' requirements.\n 'jenkins': [\n 'pytest>=6.2',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += 
cupy_setup_build.prepare_wheel_libs(ctx)\n\next_modules = cupy_setup_build.get_ext_modules(False, ctx)\nbuild_ext = cupy_setup_build.custom_build_ext\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nsource_root = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(source_root, 'install'))\n\nimport cupy_builder # NOQA\nfrom cupy_builder import cupy_setup_build # NOQA\n\nctx = cupy_builder.Context(source_root)\ncupy_builder.initialize(ctx)\nif not cupy_builder.preflight_check(ctx):\n sys.exit(1)\n\n\n# TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\nsetup_requires = [\n 'Cython>=0.29.22,<3',\n 'fastrlock>=0.5',\n]\ninstall_requires = [\n 'numpy>=1.17,<1.24', # see #4773\n 'fastrlock>=0.5',\n]\nextras_require = {\n 'all': [\n 'scipy>=1.4,<1.10', # see #4773\n 'Cython>=0.29.22,<3',\n 'optuna>=2.0',\n ],\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n # TODO(kmaehashi): Remove 'jenkins' requirements.\n 'jenkins': [\n 'pytest>=6.2',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\ntests_require = extras_require['test']\n\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 
'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs(ctx)\n\next_modules = cupy_setup_build.get_ext_modules(False, ctx)\nbuild_ext = cupy_setup_build.custom_build_ext\n\n# Get __version__ variable\nwith open(os.path.join(source_root, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nlong_description = None\nif ctx.long_description_path is not None:\n with open(ctx.long_description_path) as f:\n long_description = f.read()\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=ctx.package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy & SciPy for GPU',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n maintainer='CuPy Developers',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.7',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}]} | 2,006 | 177 |
gh_patches_debug_56202 | rasdani/github-patches | git_diff | svthalia__concrexit-3558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Separate promotion permissions in eventadmin inline from the main promotion perms
### What?
Currently, people need the add/change_promotionrequest permission to make promotionrequests for their events. But with this permission they also get full access to all other promotionrequests. So we should make the inline in the eventadmin bypass that check. For all I care, anyone who can change an event can make a promotion request from the eventadmin (by virtue of their 'change_event' permission and being an organizer of the event), without having the add/change_promotionrequest permission, and thus without seeing the main Promotion Requests changelist page.
### Why?
Least privilege principle: many people should be allowed to request promotion for their own events, but don't need to be able to edit unrelated requests. And this way we can have promocie be able to bypass the requirements in #3529, without normal organizers being able to do the same.
### How?
Override has_xxx_permission() on the inline class. Read the InlineModelAdmin docs for guidance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/admin/inlines.py`
Content:
```
1 from django.contrib import admin
2
3 from events import models
4 from pizzas.models import FoodEvent
5 from promotion.models import PromotionRequest
6
7 from .forms import RegistrationInformationFieldForm
8
9
10 class RegistrationInformationFieldInline(admin.TabularInline):
11 """The inline for registration information fields in the Event admin."""
12
13 form = RegistrationInformationFieldForm
14 extra = 0
15 model = models.RegistrationInformationField
16 ordering = ("_order",)
17
18 radio_fields = {"type": admin.VERTICAL}
19
20 def get_formset(self, request, obj=None, **kwargs):
21 formset = super().get_formset(request, obj, **kwargs)
22 if obj is not None:
23 count = obj.registrationinformationfield_set.count()
24 formset.form.declared_fields["order"].initial = count
25 return formset
26
27
28 class PizzaEventInline(admin.StackedInline):
29 """The inline for pizza events in the Event admin."""
30
31 model = FoodEvent
32 extra = 0
33 max_num = 1
34
35
36 class PromotionRequestInline(admin.StackedInline):
37 model = PromotionRequest
38 readonly_fields = (
39 "assigned_to",
40 "status",
41 "drive_folder",
42 )
43 extra = 0
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/admin/inlines.py b/website/events/admin/inlines.py
--- a/website/events/admin/inlines.py
+++ b/website/events/admin/inlines.py
@@ -39,5 +39,19 @@
"assigned_to",
"status",
"drive_folder",
+ "status_updated",
)
+
+ def has_add_permission(self, request, obj=None):
+ return True
+
+ def has_view_permission(self, request, obj=None):
+ return True
+
+ def has_change_permission(self, request, obj=None):
+ return True
+
+ def has_delete_permission(self, request, obj=None):
+ return True
+
extra = 0
| {"golden_diff": "diff --git a/website/events/admin/inlines.py b/website/events/admin/inlines.py\n--- a/website/events/admin/inlines.py\n+++ b/website/events/admin/inlines.py\n@@ -39,5 +39,19 @@\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n+ \"status_updated\",\n )\n+\n+ def has_add_permission(self, request, obj=None):\n+ return True\n+\n+ def has_view_permission(self, request, obj=None):\n+ return True\n+\n+ def has_change_permission(self, request, obj=None):\n+ return True\n+\n+ def has_delete_permission(self, request, obj=None):\n+ return True\n+\n extra = 0\n", "issue": "Separate promotion permissions in eventadmin inline from the main promotion perms\n### What?\r\nCurrently, people need add/change_promotionrequest permission to make promotionrequests for their events. But with this permission they also get full access to all other promotionrequests. So we should make the inline in the eventadmin bypass that check. For all I care, anyone who can change an event can make a promorquest from the eventadmin (by virtue of their 'change_event' permission and being an organizer of the event), without having the add/change_promotionrequest permission, and thus without seeing the main Promotion Requests changelist page.\r\n\r\n### Why?\r\n<!-- A clear and concise motivation why we should consider implementing this. -->\r\nLeast privilege principle: many people should be allowed to request promotion for their own events, but don't need to be able to edit unrelated requests. And this way we can have promocie be able to bypass the requirements in #3529, without normal organizers being able to do the same.\r\n\r\n### How?\r\nOverride has_xxx_permission() on the inline class. Read the inlinemodeladmin docs for guidance.\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom events import models\nfrom pizzas.models import FoodEvent\nfrom promotion.models import PromotionRequest\n\nfrom .forms import RegistrationInformationFieldForm\n\n\nclass RegistrationInformationFieldInline(admin.TabularInline):\n \"\"\"The inline for registration information fields in the Event admin.\"\"\"\n\n form = RegistrationInformationFieldForm\n extra = 0\n model = models.RegistrationInformationField\n ordering = (\"_order\",)\n\n radio_fields = {\"type\": admin.VERTICAL}\n\n def get_formset(self, request, obj=None, **kwargs):\n formset = super().get_formset(request, obj, **kwargs)\n if obj is not None:\n count = obj.registrationinformationfield_set.count()\n formset.form.declared_fields[\"order\"].initial = count\n return formset\n\n\nclass PizzaEventInline(admin.StackedInline):\n \"\"\"The inline for pizza events in the Event admin.\"\"\"\n\n model = FoodEvent\n extra = 0\n max_num = 1\n\n\nclass PromotionRequestInline(admin.StackedInline):\n model = PromotionRequest\n readonly_fields = (\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n )\n extra = 0\n", "path": "website/events/admin/inlines.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom events import models\nfrom pizzas.models import FoodEvent\nfrom promotion.models import PromotionRequest\n\nfrom .forms import RegistrationInformationFieldForm\n\n\nclass RegistrationInformationFieldInline(admin.TabularInline):\n \"\"\"The inline for registration information fields in the Event admin.\"\"\"\n\n form = RegistrationInformationFieldForm\n extra = 0\n model = models.RegistrationInformationField\n ordering = (\"_order\",)\n\n radio_fields = {\"type\": admin.VERTICAL}\n\n def get_formset(self, request, 
obj=None, **kwargs):\n formset = super().get_formset(request, obj, **kwargs)\n if obj is not None:\n count = obj.registrationinformationfield_set.count()\n formset.form.declared_fields[\"order\"].initial = count\n return formset\n\n\nclass PizzaEventInline(admin.StackedInline):\n \"\"\"The inline for pizza events in the Event admin.\"\"\"\n\n model = FoodEvent\n extra = 0\n max_num = 1\n\n\nclass PromotionRequestInline(admin.StackedInline):\n model = PromotionRequest\n readonly_fields = (\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"status_updated\",\n )\n\n def has_add_permission(self, request, obj=None):\n return True\n\n def has_view_permission(self, request, obj=None):\n return True\n\n def has_change_permission(self, request, obj=None):\n return True\n\n def has_delete_permission(self, request, obj=None):\n return True\n\n extra = 0\n", "path": "website/events/admin/inlines.py"}]} | 827 | 158 |
gh_patches_debug_32880 | rasdani/github-patches | git_diff | learningequality__kolibri-7214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Learner activity records partially not visible after upgrade to 0.14b3
# Observed Behaviour
Upgraded from 0.13.3 to 0.14b3. Learner activity records were partially not visible in Class Home -> Class activity and Reports. Downgraded back to 0.13.3 and they are all visible again.
# Expected behavior
All learner activity should be visible, as it was prior to the upgrade
# User-facing consequences
Confusion and fear of data loss.
# Errors and logs
None on screen
# Steps to reproduce
Upgrade from 0.13.3 to 0.14.0-b3 and check learner activity records.
# Context
Ubuntu 18.04.3
Package: 0.14.beta3 .deb
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/query.py`
Content:
```
1 from django.db import connection
2 from django.db.models import Aggregate
3 from django.db.models import CharField
4 from django.db.models import IntegerField
5 from django.db.models import Subquery
6
7 try:
8 from django.contrib.postgres.aggregates import ArrayAgg
9
10 class NotNullArrayAgg(ArrayAgg):
11 def convert_value(self, value, expression, connection, context):
12 if not value:
13 return []
14 return filter(lambda x: x is not None, value)
15
16
17 except ImportError:
18 NotNullArrayAgg = None
19
20
21 class SQCount(Subquery):
22 # Include ALIAS at the end to support Postgres
23 template = "(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)"
24 output_field = IntegerField()
25
26
27 class SQSum(Subquery):
28 # Include ALIAS at the end to support Postgres
29 template = "(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)"
30 output_field = IntegerField()
31
32
33 class GroupConcatSubquery(Subquery):
34 template = "(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)"
35 output_field = CharField()
36
37 def as_postgresql(self, compiler, connection):
38 self.template = (
39 "(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)"
40 )
41 return super(GroupConcatSubquery, self).as_sql(compiler, connection)
42
43
44 class GroupConcat(Aggregate):
45 template = "GROUP_CONCAT(%(field)s)"
46 output_field = CharField()
47
48 def __init__(self, *args, **kwargs):
49 self.result_field = kwargs.pop("result_field", None)
50 super(GroupConcat, self).__init__(*args, **kwargs)
51
52 def convert_value(self, value, expression, connection, context):
53 if not value:
54 return []
55 results = value.split(",")
56 if self.result_field is not None:
57 return map(self.result_field.to_python, results)
58 return results
59
60
61 def get_source_field(model, field_path):
62 # Get the source field from the model so that we can properly coerce values
63 # this is necessary when we are using GroupConcat to return non-string fields.
64 paths = field_path.split("__")
65 while len(paths) > 1:
66 model = model._meta.get_field(paths.pop(0)).related_model
67 return model._meta.get_field(paths[0])
68
69
70 def annotate_array_aggregate(queryset, **kwargs):
71 if connection.vendor == "postgresql" and NotNullArrayAgg is not None:
72 return queryset.annotate(
73 **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}
74 )
75 model = queryset.model
76 # Call values on "pk" to insert a GROUP BY to ensure the GROUP CONCAT
77 # is called by row and not across the entire queryset.
78 return queryset.values("pk").annotate(
79 **{
80 target: GroupConcat(source, result_field=get_source_field(model, source))
81 for target, source in kwargs.items()
82 }
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/query.py b/kolibri/core/query.py
--- a/kolibri/core/query.py
+++ b/kolibri/core/query.py
@@ -8,10 +8,17 @@
from django.contrib.postgres.aggregates import ArrayAgg
class NotNullArrayAgg(ArrayAgg):
+ def __init__(self, *args, **kwargs):
+ self.result_field = kwargs.pop("result_field", None)
+ super(NotNullArrayAgg, self).__init__(*args, **kwargs)
+
def convert_value(self, value, expression, connection, context):
if not value:
return []
- return filter(lambda x: x is not None, value)
+ results = list(filter(lambda x: x is not None, value))
+ if self.result_field is not None:
+ return list(map(self.result_field.to_python, results))
+ return results
except ImportError:
@@ -54,7 +61,7 @@
return []
results = value.split(",")
if self.result_field is not None:
- return map(self.result_field.to_python, results)
+ return list(map(self.result_field.to_python, results))
return results
@@ -68,11 +75,16 @@
def annotate_array_aggregate(queryset, **kwargs):
+ model = queryset.model
if connection.vendor == "postgresql" and NotNullArrayAgg is not None:
return queryset.annotate(
- **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}
+ **{
+ target: NotNullArrayAgg(
+ source, result_field=get_source_field(model, source)
+ )
+ for target, source in kwargs.items()
+ }
)
- model = queryset.model
# Call values on "pk" to insert a GROUP BY to ensure the GROUP CONCAT
# is called by row and not across the entire queryset.
return queryset.values("pk").annotate(
| {"golden_diff": "diff --git a/kolibri/core/query.py b/kolibri/core/query.py\n--- a/kolibri/core/query.py\n+++ b/kolibri/core/query.py\n@@ -8,10 +8,17 @@\n from django.contrib.postgres.aggregates import ArrayAgg\n \n class NotNullArrayAgg(ArrayAgg):\n+ def __init__(self, *args, **kwargs):\n+ self.result_field = kwargs.pop(\"result_field\", None)\n+ super(NotNullArrayAgg, self).__init__(*args, **kwargs)\n+\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n- return filter(lambda x: x is not None, value)\n+ results = list(filter(lambda x: x is not None, value))\n+ if self.result_field is not None:\n+ return list(map(self.result_field.to_python, results))\n+ return results\n \n \n except ImportError:\n@@ -54,7 +61,7 @@\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n- return map(self.result_field.to_python, results)\n+ return list(map(self.result_field.to_python, results))\n return results\n \n \n@@ -68,11 +75,16 @@\n \n \n def annotate_array_aggregate(queryset, **kwargs):\n+ model = queryset.model\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n- **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}\n+ **{\n+ target: NotNullArrayAgg(\n+ source, result_field=get_source_field(model, source)\n+ )\n+ for target, source in kwargs.items()\n+ }\n )\n- model = queryset.model\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n", "issue": "Learner activity records partially not visible after upgrade to 0.14b3\n# Observed Behaviour\n\nUpgraded from 0.13.3 to 0.14b3. Learner activity records were partially not visible in Class Home -> Class activity and Reports. 
Downgraded back to 0.13.3 and they are all visible again.\n\n# Expected behavior\n\nAll learner activity should be visible as prior to upgrade\n\n\n# User-facing consequences\n\n Confusion and fear of data loss.\n\n# Errors and logs\n\nNone on screen\n\n# Steps to reproduce\n\nUpgrade from 0.13.3 to 0.14.0-b3 and check learner activity records.\n\n\n\n# Context\nUbuntu 18.04.3\nPackage : 0.14.beta3 .deb\n\n", "before_files": [{"content": "from django.db import connection\nfrom django.db.models import Aggregate\nfrom django.db.models import CharField\nfrom django.db.models import IntegerField\nfrom django.db.models import Subquery\n\ntry:\n from django.contrib.postgres.aggregates import ArrayAgg\n\n class NotNullArrayAgg(ArrayAgg):\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n return filter(lambda x: x is not None, value)\n\n\nexcept ImportError:\n NotNullArrayAgg = None\n\n\nclass SQCount(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass SQSum(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass GroupConcatSubquery(Subquery):\n template = \"(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = CharField()\n\n def as_postgresql(self, compiler, connection):\n self.template = (\n \"(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)\"\n )\n return super(GroupConcatSubquery, self).as_sql(compiler, connection)\n\n\nclass GroupConcat(Aggregate):\n template = \"GROUP_CONCAT(%(field)s)\"\n output_field = CharField()\n\n def __init__(self, *args, **kwargs):\n self.result_field = kwargs.pop(\"result_field\", None)\n super(GroupConcat, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n return map(self.result_field.to_python, results)\n return results\n\n\ndef get_source_field(model, field_path):\n # Get the source field from the model so that we can properly coerce values\n # this is necessary when we are using GroupConcat to return non-string fields.\n paths = field_path.split(\"__\")\n while len(paths) > 1:\n model = model._meta.get_field(paths.pop(0)).related_model\n return model._meta.get_field(paths[0])\n\n\ndef annotate_array_aggregate(queryset, **kwargs):\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n **{target: NotNullArrayAgg(source) for target, source in kwargs.items()}\n )\n model = queryset.model\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n **{\n target: GroupConcat(source, result_field=get_source_field(model, source))\n for target, source in kwargs.items()\n }\n )\n", "path": "kolibri/core/query.py"}], "after_files": [{"content": "from django.db import connection\nfrom django.db.models import Aggregate\nfrom django.db.models import CharField\nfrom django.db.models import IntegerField\nfrom django.db.models import Subquery\n\ntry:\n from django.contrib.postgres.aggregates import ArrayAgg\n\n class NotNullArrayAgg(ArrayAgg):\n def __init__(self, *args, **kwargs):\n self.result_field = 
kwargs.pop(\"result_field\", None)\n super(NotNullArrayAgg, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = list(filter(lambda x: x is not None, value))\n if self.result_field is not None:\n return list(map(self.result_field.to_python, results))\n return results\n\n\nexcept ImportError:\n NotNullArrayAgg = None\n\n\nclass SQCount(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT COUNT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass SQSum(Subquery):\n # Include ALIAS at the end to support Postgres\n template = \"(SELECT SUM(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = IntegerField()\n\n\nclass GroupConcatSubquery(Subquery):\n template = \"(SELECT GROUP_CONCAT(%(field)s) FROM (%(subquery)s) AS %(field)s__sum)\"\n output_field = CharField()\n\n def as_postgresql(self, compiler, connection):\n self.template = (\n \"(SELECT STRING_AGG(%(field)s, ',') FROM (%(subquery)s) AS %(field)s__sum)\"\n )\n return super(GroupConcatSubquery, self).as_sql(compiler, connection)\n\n\nclass GroupConcat(Aggregate):\n template = \"GROUP_CONCAT(%(field)s)\"\n output_field = CharField()\n\n def __init__(self, *args, **kwargs):\n self.result_field = kwargs.pop(\"result_field\", None)\n super(GroupConcat, self).__init__(*args, **kwargs)\n\n def convert_value(self, value, expression, connection, context):\n if not value:\n return []\n results = value.split(\",\")\n if self.result_field is not None:\n return list(map(self.result_field.to_python, results))\n return results\n\n\ndef get_source_field(model, field_path):\n # Get the source field from the model so that we can properly coerce values\n # this is necessary when we are using GroupConcat to return non-string fields.\n paths = field_path.split(\"__\")\n while len(paths) > 1:\n model = model._meta.get_field(paths.pop(0)).related_model\n return model._meta.get_field(paths[0])\n\n\ndef annotate_array_aggregate(queryset, **kwargs):\n model = queryset.model\n if connection.vendor == \"postgresql\" and NotNullArrayAgg is not None:\n return queryset.annotate(\n **{\n target: NotNullArrayAgg(\n source, result_field=get_source_field(model, source)\n )\n for target, source in kwargs.items()\n }\n )\n # Call values on \"pk\" to insert a GROUP BY to ensure the GROUP CONCAT\n # is called by row and not across the entire queryset.\n return queryset.values(\"pk\").annotate(\n **{\n target: GroupConcat(source, result_field=get_source_field(model, source))\n for target, source in kwargs.items()\n }\n )\n", "path": "kolibri/core/query.py"}]} | 1,264 | 434 |
gh_patches_debug_38923 | rasdani/github-patches | git_diff | goauthentik__authentik-8858 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False version status in admin dashboard for AirGapped environments
**Describe the bug**
In an AirGapped environment with `AUTHENTIK_DISABLE_UPDATE_CHECK=true`, or when the version check has not yet been performed, the version tile on the admin dashboard will always state it's `Up-to-date!`, which may not actually be the case.
**To Reproduce**
Steps to reproduce the behavior:
1. Deploy a fresh Authentik installation on an older version (e.g. `2023.10.7`) and ensure `AUTHENTIK_DISABLE_UPDATE_CHECK=true` is set.
2. Go to the admin dashboard
3. Observe the version tile stating proudly that it's up-to-date.
**Expected behavior**
Not incorrectly stating it's up-to-date, as that would give a false sense of security.
**Screenshots**

**Logs**
N/a
**Version and Deployment (please complete the following information):**
- authentik version: 2024.2.0
- Deployment: docker-compose
**Additional context**
N/a
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/admin/api/version.py`
Content:
```
1 """authentik administration overview"""
2
3 from django.core.cache import cache
4 from drf_spectacular.utils import extend_schema
5 from packaging.version import parse
6 from rest_framework.fields import SerializerMethodField
7 from rest_framework.permissions import IsAuthenticated
8 from rest_framework.request import Request
9 from rest_framework.response import Response
10 from rest_framework.views import APIView
11
12 from authentik import __version__, get_build_hash
13 from authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version
14 from authentik.core.api.utils import PassiveSerializer
15
16
17 class VersionSerializer(PassiveSerializer):
18 """Get running and latest version."""
19
20 version_current = SerializerMethodField()
21 version_latest = SerializerMethodField()
22 build_hash = SerializerMethodField()
23 outdated = SerializerMethodField()
24
25 def get_build_hash(self, _) -> str:
26 """Get build hash, if version is not latest or released"""
27 return get_build_hash()
28
29 def get_version_current(self, _) -> str:
30 """Get current version"""
31 return __version__
32
33 def get_version_latest(self, _) -> str:
34 """Get latest version from cache"""
35 version_in_cache = cache.get(VERSION_CACHE_KEY)
36 if not version_in_cache: # pragma: no cover
37 update_latest_version.delay()
38 return __version__
39 return version_in_cache
40
41 def get_outdated(self, instance) -> bool:
42 """Check if we're running the latest version"""
43 return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))
44
45
46 class VersionView(APIView):
47 """Get running and latest version."""
48
49 permission_classes = [IsAuthenticated]
50 pagination_class = None
51 filter_backends = []
52
53 @extend_schema(responses={200: VersionSerializer(many=False)})
54 def get(self, request: Request) -> Response:
55 """Get running and latest version."""
56 return Response(VersionSerializer(True).data)
57
```
Path: `authentik/admin/tasks.py`
Content:
```
1 """authentik admin tasks"""
2
3 import re
4
5 from django.core.cache import cache
6 from django.core.validators import URLValidator
7 from django.db import DatabaseError, InternalError, ProgrammingError
8 from packaging.version import parse
9 from requests import RequestException
10 from structlog.stdlib import get_logger
11
12 from authentik import __version__, get_build_hash
13 from authentik.admin.apps import PROM_INFO
14 from authentik.events.models import Event, EventAction, Notification
15 from authentik.events.system_tasks import SystemTask, TaskStatus, prefill_task
16 from authentik.lib.config import CONFIG
17 from authentik.lib.utils.http import get_http_session
18 from authentik.root.celery import CELERY_APP
19
20 LOGGER = get_logger()
21 VERSION_CACHE_KEY = "authentik_latest_version"
22 VERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours
23 # Chop of the first ^ because we want to search the entire string
24 URL_FINDER = URLValidator.regex.pattern[1:]
25 LOCAL_VERSION = parse(__version__)
26
27
28 def _set_prom_info():
29 """Set prometheus info for version"""
30 PROM_INFO.info(
31 {
32 "version": __version__,
33 "latest": cache.get(VERSION_CACHE_KEY, ""),
34 "build_hash": get_build_hash(),
35 }
36 )
37
38
39 @CELERY_APP.task(
40 throws=(DatabaseError, ProgrammingError, InternalError),
41 )
42 def clear_update_notifications():
43 """Clear update notifications on startup if the notification was for the version
44 we're running now."""
45 for notification in Notification.objects.filter(event__action=EventAction.UPDATE_AVAILABLE):
46 if "new_version" not in notification.event.context:
47 continue
48 notification_version = notification.event.context["new_version"]
49 if LOCAL_VERSION >= parse(notification_version):
50 notification.delete()
51
52
53 @CELERY_APP.task(bind=True, base=SystemTask)
54 @prefill_task
55 def update_latest_version(self: SystemTask):
56 """Update latest version info"""
57 if CONFIG.get_bool("disable_update_check"):
58 cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
59 self.set_status(TaskStatus.WARNING, "Version check disabled.")
60 return
61 try:
62 response = get_http_session().get(
63 "https://version.goauthentik.io/version.json",
64 )
65 response.raise_for_status()
66 data = response.json()
67 upstream_version = data.get("stable", {}).get("version")
68 cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)
69 self.set_status(TaskStatus.SUCCESSFUL, "Successfully updated latest Version")
70 _set_prom_info()
71 # Check if upstream version is newer than what we're running,
72 # and if no event exists yet, create one.
73 if LOCAL_VERSION < parse(upstream_version):
74 # Event has already been created, don't create duplicate
75 if Event.objects.filter(
76 action=EventAction.UPDATE_AVAILABLE,
77 context__new_version=upstream_version,
78 ).exists():
79 return
80 event_dict = {"new_version": upstream_version}
81 if match := re.search(URL_FINDER, data.get("stable", {}).get("changelog", "")):
82 event_dict["message"] = f"Changelog: {match.group()}"
83 Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()
84 except (RequestException, IndexError) as exc:
85 cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
86 self.set_error(exc)
87
88
89 _set_prom_info()
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/admin/api/version.py b/authentik/admin/api/version.py
--- a/authentik/admin/api/version.py
+++ b/authentik/admin/api/version.py
@@ -10,7 +10,7 @@
from rest_framework.views import APIView
from authentik import __version__, get_build_hash
-from authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version
+from authentik.admin.tasks import VERSION_CACHE_KEY, VERSION_NULL, update_latest_version
from authentik.core.api.utils import PassiveSerializer
@@ -19,6 +19,7 @@
version_current = SerializerMethodField()
version_latest = SerializerMethodField()
+ version_latest_valid = SerializerMethodField()
build_hash = SerializerMethodField()
outdated = SerializerMethodField()
@@ -38,6 +39,10 @@
return __version__
return version_in_cache
+ def get_version_latest_valid(self, _) -> str:
+ """Check if latest version is valid"""
+ return cache.get(VERSION_CACHE_KEY) != VERSION_NULL
+
def get_outdated(self, instance) -> bool:
"""Check if we're running the latest version"""
return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))
diff --git a/authentik/admin/tasks.py b/authentik/admin/tasks.py
--- a/authentik/admin/tasks.py
+++ b/authentik/admin/tasks.py
@@ -18,6 +18,7 @@
from authentik.root.celery import CELERY_APP
LOGGER = get_logger()
+VERSION_NULL = "0.0.0"
VERSION_CACHE_KEY = "authentik_latest_version"
VERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours
# Chop of the first ^ because we want to search the entire string
@@ -55,7 +56,7 @@
def update_latest_version(self: SystemTask):
"""Update latest version info"""
if CONFIG.get_bool("disable_update_check"):
- cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
+ cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)
self.set_status(TaskStatus.WARNING, "Version check disabled.")
return
try:
@@ -82,7 +83,7 @@
event_dict["message"] = f"Changelog: {match.group()}"
Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()
except (RequestException, IndexError) as exc:
- cache.set(VERSION_CACHE_KEY, "0.0.0", VERSION_CACHE_TIMEOUT)
+ cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)
self.set_error(exc)
| {"golden_diff": "diff --git a/authentik/admin/api/version.py b/authentik/admin/api/version.py\n--- a/authentik/admin/api/version.py\n+++ b/authentik/admin/api/version.py\n@@ -10,7 +10,7 @@\n from rest_framework.views import APIView\n \n from authentik import __version__, get_build_hash\n-from authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version\n+from authentik.admin.tasks import VERSION_CACHE_KEY, VERSION_NULL, update_latest_version\n from authentik.core.api.utils import PassiveSerializer\n \n \n@@ -19,6 +19,7 @@\n \n version_current = SerializerMethodField()\n version_latest = SerializerMethodField()\n+ version_latest_valid = SerializerMethodField()\n build_hash = SerializerMethodField()\n outdated = SerializerMethodField()\n \n@@ -38,6 +39,10 @@\n return __version__\n return version_in_cache\n \n+ def get_version_latest_valid(self, _) -> str:\n+ \"\"\"Check if latest version is valid\"\"\"\n+ return cache.get(VERSION_CACHE_KEY) != VERSION_NULL\n+\n def get_outdated(self, instance) -> bool:\n \"\"\"Check if we're running the latest version\"\"\"\n return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))\ndiff --git a/authentik/admin/tasks.py b/authentik/admin/tasks.py\n--- a/authentik/admin/tasks.py\n+++ b/authentik/admin/tasks.py\n@@ -18,6 +18,7 @@\n from authentik.root.celery import CELERY_APP\n \n LOGGER = get_logger()\n+VERSION_NULL = \"0.0.0\"\n VERSION_CACHE_KEY = \"authentik_latest_version\"\n VERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours\n # Chop of the first ^ because we want to search the entire string\n@@ -55,7 +56,7 @@\n def update_latest_version(self: SystemTask):\n \"\"\"Update latest version info\"\"\"\n if CONFIG.get_bool(\"disable_update_check\"):\n- cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n+ cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.WARNING, \"Version check disabled.\")\n return\n try:\n@@ -82,7 +83,7 @@\n event_dict[\"message\"] = f\"Changelog: {match.group()}\"\n Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()\n except (RequestException, IndexError) as exc:\n- cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n+ cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_error(exc)\n", "issue": "False version status in admin dashboard for AirGapped environments\n**Describe the bug**\r\nIn an AirGapped environment with `AUTHENTIK_DISABLE_UPDATE_CHECK=true`, or when the version check has not yet been performed, the version tile on the admin dashboard will always state its `Up-to-date!` which may not actually be the case.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Deploy a fresh Authentik installation on an older version (e.g. `2023.10.7`) and ensure `AUTHENTIK_DISABLE_UPDATE_CHECK=true` is set.\r\n2. Go to the admin dashboard\r\n3. 
Observe the version tile stating proudly that its up-to-date.\r\n\r\n**Expected behavior**\r\nNot incorrectly stating its up-to-date as that would give a false sense of security.\r\n\r\n**Screenshots**\r\n\r\n\r\n**Logs**\r\nN/a\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2024.2.0\r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nN/a\r\n\n", "before_files": [{"content": "\"\"\"authentik administration overview\"\"\"\n\nfrom django.core.cache import cache\nfrom drf_spectacular.utils import extend_schema\nfrom packaging.version import parse\nfrom rest_framework.fields import SerializerMethodField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.tasks import VERSION_CACHE_KEY, update_latest_version\nfrom authentik.core.api.utils import PassiveSerializer\n\n\nclass VersionSerializer(PassiveSerializer):\n \"\"\"Get running and latest version.\"\"\"\n\n version_current = SerializerMethodField()\n version_latest = SerializerMethodField()\n build_hash = SerializerMethodField()\n outdated = SerializerMethodField()\n\n def get_build_hash(self, _) -> str:\n \"\"\"Get build hash, if version is not latest or released\"\"\"\n return get_build_hash()\n\n def get_version_current(self, _) -> str:\n \"\"\"Get current version\"\"\"\n return __version__\n\n def get_version_latest(self, _) -> str:\n \"\"\"Get latest version from cache\"\"\"\n version_in_cache = cache.get(VERSION_CACHE_KEY)\n if not version_in_cache: # pragma: no cover\n update_latest_version.delay()\n return __version__\n return version_in_cache\n\n def get_outdated(self, instance) -> bool:\n \"\"\"Check if we're running the latest version\"\"\"\n return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))\n\n\nclass VersionView(APIView):\n \"\"\"Get running and latest version.\"\"\"\n\n permission_classes = [IsAuthenticated]\n pagination_class = None\n filter_backends = []\n\n @extend_schema(responses={200: VersionSerializer(many=False)})\n def get(self, request: Request) -> Response:\n \"\"\"Get running and latest version.\"\"\"\n return Response(VersionSerializer(True).data)\n", "path": "authentik/admin/api/version.py"}, {"content": "\"\"\"authentik admin tasks\"\"\"\n\nimport re\n\nfrom django.core.cache import cache\nfrom django.core.validators import URLValidator\nfrom django.db import DatabaseError, InternalError, ProgrammingError\nfrom packaging.version import parse\nfrom requests import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.apps import PROM_INFO\nfrom authentik.events.models import Event, EventAction, Notification\nfrom authentik.events.system_tasks import SystemTask, TaskStatus, prefill_task\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger()\nVERSION_CACHE_KEY = \"authentik_latest_version\"\nVERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours\n# Chop of the first ^ because we want to search the entire string\nURL_FINDER = URLValidator.regex.pattern[1:]\nLOCAL_VERSION = parse(__version__)\n\n\ndef _set_prom_info():\n \"\"\"Set prometheus info for version\"\"\"\n PROM_INFO.info(\n {\n \"version\": __version__,\n 
\"latest\": cache.get(VERSION_CACHE_KEY, \"\"),\n \"build_hash\": get_build_hash(),\n }\n )\n\n\n@CELERY_APP.task(\n throws=(DatabaseError, ProgrammingError, InternalError),\n)\ndef clear_update_notifications():\n \"\"\"Clear update notifications on startup if the notification was for the version\n we're running now.\"\"\"\n for notification in Notification.objects.filter(event__action=EventAction.UPDATE_AVAILABLE):\n if \"new_version\" not in notification.event.context:\n continue\n notification_version = notification.event.context[\"new_version\"]\n if LOCAL_VERSION >= parse(notification_version):\n notification.delete()\n\n\n@CELERY_APP.task(bind=True, base=SystemTask)\n@prefill_task\ndef update_latest_version(self: SystemTask):\n \"\"\"Update latest version info\"\"\"\n if CONFIG.get_bool(\"disable_update_check\"):\n cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.WARNING, \"Version check disabled.\")\n return\n try:\n response = get_http_session().get(\n \"https://version.goauthentik.io/version.json\",\n )\n response.raise_for_status()\n data = response.json()\n upstream_version = data.get(\"stable\", {}).get(\"version\")\n cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.SUCCESSFUL, \"Successfully updated latest Version\")\n _set_prom_info()\n # Check if upstream version is newer than what we're running,\n # and if no event exists yet, create one.\n if LOCAL_VERSION < parse(upstream_version):\n # Event has already been created, don't create duplicate\n if Event.objects.filter(\n action=EventAction.UPDATE_AVAILABLE,\n context__new_version=upstream_version,\n ).exists():\n return\n event_dict = {\"new_version\": upstream_version}\n if match := re.search(URL_FINDER, data.get(\"stable\", {}).get(\"changelog\", \"\")):\n event_dict[\"message\"] = f\"Changelog: {match.group()}\"\n Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()\n except (RequestException, IndexError) as exc:\n cache.set(VERSION_CACHE_KEY, \"0.0.0\", VERSION_CACHE_TIMEOUT)\n self.set_error(exc)\n\n\n_set_prom_info()\n", "path": "authentik/admin/tasks.py"}], "after_files": [{"content": "\"\"\"authentik administration overview\"\"\"\n\nfrom django.core.cache import cache\nfrom drf_spectacular.utils import extend_schema\nfrom packaging.version import parse\nfrom rest_framework.fields import SerializerMethodField\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.tasks import VERSION_CACHE_KEY, VERSION_NULL, update_latest_version\nfrom authentik.core.api.utils import PassiveSerializer\n\n\nclass VersionSerializer(PassiveSerializer):\n \"\"\"Get running and latest version.\"\"\"\n\n version_current = SerializerMethodField()\n version_latest = SerializerMethodField()\n version_latest_valid = SerializerMethodField()\n build_hash = SerializerMethodField()\n outdated = SerializerMethodField()\n\n def get_build_hash(self, _) -> str:\n \"\"\"Get build hash, if version is not latest or released\"\"\"\n return get_build_hash()\n\n def get_version_current(self, _) -> str:\n \"\"\"Get current version\"\"\"\n return __version__\n\n def get_version_latest(self, _) -> str:\n \"\"\"Get latest version from cache\"\"\"\n version_in_cache = cache.get(VERSION_CACHE_KEY)\n if not version_in_cache: # pragma: no cover\n 
update_latest_version.delay()\n return __version__\n return version_in_cache\n\n def get_version_latest_valid(self, _) -> str:\n \"\"\"Check if latest version is valid\"\"\"\n return cache.get(VERSION_CACHE_KEY) != VERSION_NULL\n\n def get_outdated(self, instance) -> bool:\n \"\"\"Check if we're running the latest version\"\"\"\n return parse(self.get_version_current(instance)) < parse(self.get_version_latest(instance))\n\n\nclass VersionView(APIView):\n \"\"\"Get running and latest version.\"\"\"\n\n permission_classes = [IsAuthenticated]\n pagination_class = None\n filter_backends = []\n\n @extend_schema(responses={200: VersionSerializer(many=False)})\n def get(self, request: Request) -> Response:\n \"\"\"Get running and latest version.\"\"\"\n return Response(VersionSerializer(True).data)\n", "path": "authentik/admin/api/version.py"}, {"content": "\"\"\"authentik admin tasks\"\"\"\n\nimport re\n\nfrom django.core.cache import cache\nfrom django.core.validators import URLValidator\nfrom django.db import DatabaseError, InternalError, ProgrammingError\nfrom packaging.version import parse\nfrom requests import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik import __version__, get_build_hash\nfrom authentik.admin.apps import PROM_INFO\nfrom authentik.events.models import Event, EventAction, Notification\nfrom authentik.events.system_tasks import SystemTask, TaskStatus, prefill_task\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.root.celery import CELERY_APP\n\nLOGGER = get_logger()\nVERSION_NULL = \"0.0.0\"\nVERSION_CACHE_KEY = \"authentik_latest_version\"\nVERSION_CACHE_TIMEOUT = 8 * 60 * 60 # 8 hours\n# Chop of the first ^ because we want to search the entire string\nURL_FINDER = URLValidator.regex.pattern[1:]\nLOCAL_VERSION = parse(__version__)\n\n\ndef _set_prom_info():\n \"\"\"Set prometheus info for version\"\"\"\n PROM_INFO.info(\n {\n \"version\": __version__,\n \"latest\": cache.get(VERSION_CACHE_KEY, \"\"),\n \"build_hash\": get_build_hash(),\n }\n )\n\n\n@CELERY_APP.task(\n throws=(DatabaseError, ProgrammingError, InternalError),\n)\ndef clear_update_notifications():\n \"\"\"Clear update notifications on startup if the notification was for the version\n we're running now.\"\"\"\n for notification in Notification.objects.filter(event__action=EventAction.UPDATE_AVAILABLE):\n if \"new_version\" not in notification.event.context:\n continue\n notification_version = notification.event.context[\"new_version\"]\n if LOCAL_VERSION >= parse(notification_version):\n notification.delete()\n\n\n@CELERY_APP.task(bind=True, base=SystemTask)\n@prefill_task\ndef update_latest_version(self: SystemTask):\n \"\"\"Update latest version info\"\"\"\n if CONFIG.get_bool(\"disable_update_check\"):\n cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.WARNING, \"Version check disabled.\")\n return\n try:\n response = get_http_session().get(\n \"https://version.goauthentik.io/version.json\",\n )\n response.raise_for_status()\n data = response.json()\n upstream_version = data.get(\"stable\", {}).get(\"version\")\n cache.set(VERSION_CACHE_KEY, upstream_version, VERSION_CACHE_TIMEOUT)\n self.set_status(TaskStatus.SUCCESSFUL, \"Successfully updated latest Version\")\n _set_prom_info()\n # Check if upstream version is newer than what we're running,\n # and if no event exists yet, create one.\n if LOCAL_VERSION < parse(upstream_version):\n # Event has already been created, 
don't create duplicate\n if Event.objects.filter(\n action=EventAction.UPDATE_AVAILABLE,\n context__new_version=upstream_version,\n ).exists():\n return\n event_dict = {\"new_version\": upstream_version}\n if match := re.search(URL_FINDER, data.get(\"stable\", {}).get(\"changelog\", \"\")):\n event_dict[\"message\"] = f\"Changelog: {match.group()}\"\n Event.new(EventAction.UPDATE_AVAILABLE, **event_dict).save()\n except (RequestException, IndexError) as exc:\n cache.set(VERSION_CACHE_KEY, VERSION_NULL, VERSION_CACHE_TIMEOUT)\n self.set_error(exc)\n\n\n_set_prom_info()\n", "path": "authentik/admin/tasks.py"}]} | 1,975 | 587 |
gh_patches_debug_7473 | rasdani/github-patches | git_diff | praw-dev__praw-1327 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PRAW installed by pip is missing the `images` directory and its contents
## Issue Description
PRAW's video submit method uses a placeholder image as a video thumbnail when the user doesn't provide a thumbnail. Here's the relevant code:
https://github.com/praw-dev/praw/blob/54f8b3f998008b81988aac057d33fe38d5ac7739/praw/models/reddit/subreddit.py#L511-L514
That image is at [`praw/images/PRAW logo.png`](https://github.com/praw-dev/praw/blob/master/praw/images/PRAW%20logo.png). Unfortunately the current release on PyPI is missing the file and the entire `images` directory, so the `submit_video` method fails when another thumbnail isn't provided.
It isn't just the wheel on PyPI that is missing the image. The source tarball is as well.
I suspect that a change to [`MANIFEST.in`](https://github.com/praw-dev/praw/blob/master/MANIFEST.in) might solve this problem. Or maybe not, as [this Stack Overflow answer](https://stackoverflow.com/a/25964691/8033766) suggests that `MANIFEST.in` is just for Python 2.6 and earlier.
Adding an `__init__.py` to the `images` folder would probably [make `find_packages()` in `setup.py`](https://setuptools.readthedocs.io/en/latest/setuptools.html#using-find-packages) notice the folder, but this would be a mis-use of an `__init__.py` since that folder is not a Python package.
[This page](https://docs.python.org/3.3/distutils/setupscript.html#installing-additional-files) suggests using the `data_files` argument in `setup.py`, but I tried that and couldn't get it to work ([branch here](https://github.com/jarhill0/praw/tree/image-upload-fix), [PyPI testing push here](https://test.pypi.org/project/praw/)).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """praw setup.py"""
2
3 import re
4 from codecs import open
5 from os import path
6 from setuptools import find_packages, setup
7
8
9 PACKAGE_NAME = "praw"
10 HERE = path.abspath(path.dirname(__file__))
11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp:
12 README = fp.read()
13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp:
14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1)
15
16 extras = {
17 "ci": ["coveralls"],
18 "dev": ["pre-commit"],
19 "lint": ["black", "flake8", "pydocstyle", "sphinx", "sphinx_rtd_theme"],
20 "test": [
21 "betamax >=0.8, <0.9",
22 "betamax-matchers >=0.3.0, <0.5",
23 "betamax-serializers >=0.2, <0.3",
24 "mock >=0.8",
25 "pytest >=2.7.3",
26 ],
27 }
28 extras["dev"] += extras["lint"] + extras["test"]
29
30 setup(
31 name=PACKAGE_NAME,
32 author="Bryce Boe",
33 author_email="[email protected]",
34 python_requires=">=3.5",
35 classifiers=[
36 "Development Status :: 5 - Production/Stable",
37 "Environment :: Console",
38 "Intended Audience :: Developers",
39 "License :: OSI Approved :: BSD License",
40 "Natural Language :: English",
41 "Operating System :: OS Independent",
42 "Programming Language :: Python",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.5",
45 "Programming Language :: Python :: 3.6",
46 "Programming Language :: Python :: 3.7",
47 "Programming Language :: Python :: 3.8",
48 "Topic :: Utilities",
49 ],
50 description=(
51 "PRAW, an acronym for `Python Reddit API Wrapper`, is a "
52 "python package that allows for simple access to "
53 "reddit's API."
54 ),
55 extras_require=extras,
56 install_requires=[
57 "prawcore >=1.0.1, <2.0",
58 "update_checker >=0.16",
59 "websocket-client >=0.54.0",
60 ],
61 keywords="reddit api wrapper",
62 license="Simplified BSD License",
63 long_description=README,
64 package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]},
65 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
66 url="https://praw.readthedocs.org/",
67 version=VERSION,
68 )
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,10 @@
keywords="reddit api wrapper",
license="Simplified BSD License",
long_description=README,
- package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini"]},
+ package_data={
+ "": ["LICENSE.txt"],
+ PACKAGE_NAME: ["*.ini", "images/*.jpg"],
+ },
packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]),
url="https://praw.readthedocs.org/",
version=VERSION,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,7 +61,10 @@\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n- package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\"]},\n+ package_data={\n+ \"\": [\"LICENSE.txt\"],\n+ PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n+ },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n", "issue": "PRAW installed by pip is missing the `images` directory and its contents\n## Issue Description\r\n\r\nPRAW's video submit method uses a placeholder image as a video thumbnail when the user doesn't provide a thumbnail. Here's the relevant code:\r\n\r\nhttps://github.com/praw-dev/praw/blob/54f8b3f998008b81988aac057d33fe38d5ac7739/praw/models/reddit/subreddit.py#L511-L514\r\n\r\nThat image is at [`praw/images/PRAW logo.png`](https://github.com/praw-dev/praw/blob/master/praw/images/PRAW%20logo.png). Unfortunately the current release on PyPI is missing the file and the entire `images` directory, so the `submit_video` method fails when another thumbnail isn't provided.\r\n\r\nIt isn't just the wheel on PyPI that is missing the image. The source tarball is as well.\r\n\r\nI suspect that a change to [`MANIFEST.in`](https://github.com/praw-dev/praw/blob/master/MANIFEST.in) might solve this problem. Or maybe not, as [this Stack Overflow answer](https://stackoverflow.com/a/25964691/8033766) suggests that `MANIFEST.in` is just for Python 2.6 and earlier.\r\n\r\nAdding an `__init__.py` to the `images` folder would probably [make `find_packages()` in `setup.py`](https://setuptools.readthedocs.io/en/latest/setuptools.html#using-find-packages) notice the folder, but this would be a mis-use of an `__init__.py` since that folder is not a Python package.\r\n\r\n[This page](https://docs.python.org/3.3/distutils/setupscript.html#installing-additional-files) suggests using the `data_files` argument in `setup.py`, but I tried that and couldn't get it to work ([branch here](https://github.com/jarhill0/praw/tree/image-upload-fix), [PyPI testing push here](https://test.pypi.org/project/praw/)).\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\"black\", \"flake8\", \"pydocstyle\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"betamax-serializers >=0.2, <0.3\",\n \"mock >=0.8\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.0.1, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\nfrom setuptools import find_packages, setup\n\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"pre-commit\"],\n \"lint\": [\"black\", \"flake8\", \"pydocstyle\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"betamax-serializers >=0.2, <0.3\",\n \"mock >=0.8\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\">=3.5\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a \"\n \"python package that allows for simple access to \"\n \"reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=1.0.1, <2.0\",\n \"update_checker >=0.16\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\n \"\": [\"LICENSE.txt\"],\n PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"],\n },\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n url=\"https://praw.readthedocs.org/\",\n version=VERSION,\n)\n", "path": "setup.py"}]} | 1,420 | 139 |
gh_patches_debug_28202 | rasdani/github-patches | git_diff | open-mmlab__mmpose-493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Seed in sampler
https://github.com/open-mmlab/mmdetection/pull/4665
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmpose/datasets/samplers/distributed_sampler.py`
Content:
```
1 import torch
2 from torch.utils.data import DistributedSampler as _DistributedSampler
3
4
5 class DistributedSampler(_DistributedSampler):
6 """DistributedSampler inheriting from
7 `torch.utils.data.DistributedSampler`.
8
9 In pytorch of lower versions, there is no `shuffle` argument. This child
10 class will port one to DistributedSampler.
11 """
12
13 def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
14 super().__init__(dataset, num_replicas=num_replicas, rank=rank)
15 self.shuffle = shuffle
16
17 def __iter__(self):
18 """Deterministically shuffle based on epoch."""
19 if self.shuffle:
20 g = torch.Generator()
21 g.manual_seed(self.epoch)
22 indices = torch.randperm(len(self.dataset), generator=g).tolist()
23 else:
24 indices = torch.arange(len(self.dataset)).tolist()
25
26 # add extra samples to make it evenly divisible
27 indices += indices[:(self.total_size - len(indices))]
28 assert len(indices) == self.total_size
29
30 # subsample
31 indices = indices[self.rank:self.total_size:self.num_replicas]
32 assert len(indices) == self.num_samples
33 return iter(indices)
34
```
Path: `mmpose/datasets/builder.py`
Content:
```
1 import platform
2 import random
3 from functools import partial
4
5 import numpy as np
6 from mmcv.parallel import collate
7 from mmcv.runner import get_dist_info
8 from mmcv.utils import build_from_cfg
9 from mmcv.utils.parrots_wrapper import _get_dataloader
10
11 from .dataset_wrappers import RepeatDataset
12 from .registry import DATASETS
13 from .samplers import DistributedSampler
14
15 if platform.system() != 'Windows':
16 # https://github.com/pytorch/pytorch/issues/973
17 import resource
18 rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
19 hard_limit = rlimit[1]
20 soft_limit = min(4096, hard_limit)
21 resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
22
23
24 def build_dataset(cfg, default_args=None):
25 """Build a dataset from config dict.
26
27 Args:
28 cfg (dict): Config dict. It should at least contain the key "type".
29 default_args (dict, optional): Default initialization arguments.
30 Default: None.
31
32 Returns:
33 Dataset: The constructed dataset.
34 """
35 if cfg['type'] == 'RepeatDataset':
36 dataset = RepeatDataset(
37 build_dataset(cfg['dataset'], default_args), cfg['times'])
38 else:
39 dataset = build_from_cfg(cfg, DATASETS, default_args)
40 return dataset
41
42
43 def build_dataloader(dataset,
44 samples_per_gpu,
45 workers_per_gpu,
46 num_gpus=1,
47 dist=True,
48 shuffle=True,
49 seed=None,
50 drop_last=True,
51 pin_memory=True,
52 **kwargs):
53 """Build PyTorch DataLoader.
54
55 In distributed training, each GPU/process has a dataloader.
56 In non-distributed training, there is only one dataloader for all GPUs.
57
58 Args:
59 dataset (Dataset): A PyTorch dataset.
60 samples_per_gpu (int): Number of training samples on each GPU, i.e.,
61 batch size of each GPU.
62 workers_per_gpu (int): How many subprocesses to use for data loading
63 for each GPU.
64 num_gpus (int): Number of GPUs. Only used in non-distributed training.
65 dist (bool): Distributed training/test or not. Default: True.
66 shuffle (bool): Whether to shuffle the data at every epoch.
67 Default: True.
68 drop_last (bool): Whether to drop the last incomplete batch in epoch.
69 Default: True
70 pin_memory (bool): Whether to use pin_memory in DataLoader.
71 Default: True
72 kwargs: any keyword argument to be used to initialize DataLoader
73
74 Returns:
75 DataLoader: A PyTorch dataloader.
76 """
77 rank, world_size = get_dist_info()
78 if dist:
79 sampler = DistributedSampler(
80 dataset, world_size, rank, shuffle=shuffle)
81 shuffle = False
82 batch_size = samples_per_gpu
83 num_workers = workers_per_gpu
84 else:
85 sampler = None
86 batch_size = num_gpus * samples_per_gpu
87 num_workers = num_gpus * workers_per_gpu
88
89 init_fn = partial(
90 worker_init_fn, num_workers=num_workers, rank=rank,
91 seed=seed) if seed is not None else None
92
93 _, DataLoader = _get_dataloader()
94 data_loader = DataLoader(
95 dataset,
96 batch_size=batch_size,
97 sampler=sampler,
98 num_workers=num_workers,
99 collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
100 pin_memory=pin_memory,
101 shuffle=shuffle,
102 worker_init_fn=init_fn,
103 drop_last=drop_last,
104 **kwargs)
105
106 return data_loader
107
108
109 def worker_init_fn(worker_id, num_workers, rank, seed):
110 """Init the random seed for various workers."""
111 # The seed of each worker equals to
112 # num_worker * rank + worker_id + user_seed
113 worker_seed = num_workers * rank + worker_id + seed
114 np.random.seed(worker_seed)
115 random.seed(worker_seed)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py
--- a/mmpose/datasets/builder.py
+++ b/mmpose/datasets/builder.py
@@ -77,7 +77,7 @@
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
- dataset, world_size, rank, shuffle=shuffle)
+ dataset, world_size, rank, shuffle=shuffle, seed=seed)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
diff --git a/mmpose/datasets/samplers/distributed_sampler.py b/mmpose/datasets/samplers/distributed_sampler.py
--- a/mmpose/datasets/samplers/distributed_sampler.py
+++ b/mmpose/datasets/samplers/distributed_sampler.py
@@ -10,15 +10,22 @@
class will port one to DistributedSampler.
"""
- def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
- super().__init__(dataset, num_replicas=num_replicas, rank=rank)
- self.shuffle = shuffle
+ def __init__(self,
+ dataset,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ seed=0):
+ super().__init__(
+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+ # for the compatibility from PyTorch 1.3+
+ self.seed = seed if seed is not None else 0
def __iter__(self):
"""Deterministically shuffle based on epoch."""
if self.shuffle:
g = torch.Generator()
- g.manual_seed(self.epoch)
+ g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
| {"golden_diff": "diff --git a/mmpose/datasets/builder.py b/mmpose/datasets/builder.py\n--- a/mmpose/datasets/builder.py\n+++ b/mmpose/datasets/builder.py\n@@ -77,7 +77,7 @@\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n- dataset, world_size, rank, shuffle=shuffle)\n+ dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\ndiff --git a/mmpose/datasets/samplers/distributed_sampler.py b/mmpose/datasets/samplers/distributed_sampler.py\n--- a/mmpose/datasets/samplers/distributed_sampler.py\n+++ b/mmpose/datasets/samplers/distributed_sampler.py\n@@ -10,15 +10,22 @@\n class will port one to DistributedSampler.\n \"\"\"\n \n- def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n- super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n- self.shuffle = shuffle\n+ def __init__(self,\n+ dataset,\n+ num_replicas=None,\n+ rank=None,\n+ shuffle=True,\n+ seed=0):\n+ super().__init__(\n+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n+ # for the compatibility from PyTorch 1.3+\n+ self.seed = seed if seed is not None else 0\n \n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n- g.manual_seed(self.epoch)\n+ g.manual_seed(self.epoch + self.seed)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n", "issue": "Seed in sampler\nhttps://github.com/open-mmlab/mmdetection/pull/4665\n", "before_files": [{"content": "import torch\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\n\nclass DistributedSampler(_DistributedSampler):\n \"\"\"DistributedSampler inheriting from\n `torch.utils.data.DistributedSampler`.\n\n In pytorch of lower versions, there is no `shuffle` argument. 
This child\n class will port one to DistributedSampler.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank)\n self.shuffle = shuffle\n\n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n", "path": "mmpose/datasets/samplers/distributed_sampler.py"}, {"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .dataset_wrappers import RepeatDataset\nfrom .registry import DATASETS\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}], "after_files": [{"content": "import torch\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\n\nclass DistributedSampler(_DistributedSampler):\n \"\"\"DistributedSampler inheriting from\n `torch.utils.data.DistributedSampler`.\n\n In pytorch of lower versions, there is no `shuffle` argument. 
This child\n class will port one to DistributedSampler.\n \"\"\"\n\n def __init__(self,\n dataset,\n num_replicas=None,\n rank=None,\n shuffle=True,\n seed=0):\n super().__init__(\n dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n # for the compatibility from PyTorch 1.3+\n self.seed = seed if seed is not None else 0\n\n def __iter__(self):\n \"\"\"Deterministically shuffle based on epoch.\"\"\"\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch + self.seed)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n", "path": "mmpose/datasets/samplers/distributed_sampler.py"}, {"content": "import platform\nimport random\nfrom functools import partial\n\nimport numpy as np\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import build_from_cfg\nfrom mmcv.utils.parrots_wrapper import _get_dataloader\n\nfrom .dataset_wrappers import RepeatDataset\nfrom .registry import DATASETS\nfrom .samplers import DistributedSampler\n\nif platform.system() != 'Windows':\n # https://github.com/pytorch/pytorch/issues/973\n import resource\n rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n hard_limit = rlimit[1]\n soft_limit = min(4096, hard_limit)\n resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\n\ndef build_dataset(cfg, default_args=None):\n \"\"\"Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key \"type\".\n default_args (dict, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The constructed dataset.\n \"\"\"\n if cfg['type'] == 'RepeatDataset':\n dataset = RepeatDataset(\n build_dataset(cfg['dataset'], default_args), cfg['times'])\n else:\n dataset = build_from_cfg(cfg, DATASETS, default_args)\n return dataset\n\n\ndef build_dataloader(dataset,\n samples_per_gpu,\n workers_per_gpu,\n num_gpus=1,\n dist=True,\n shuffle=True,\n seed=None,\n drop_last=True,\n pin_memory=True,\n **kwargs):\n \"\"\"Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader for all GPUs.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n batch size of each GPU.\n workers_per_gpu (int): How many subprocesses to use for data loading\n for each GPU.\n num_gpus (int): Number of GPUs. Only used in non-distributed training.\n dist (bool): Distributed training/test or not. 
Default: True.\n shuffle (bool): Whether to shuffle the data at every epoch.\n Default: True.\n drop_last (bool): Whether to drop the last incomplete batch in epoch.\n Default: True\n pin_memory (bool): Whether to use pin_memory in DataLoader.\n Default: True\n kwargs: any keyword argument to be used to initialize DataLoader\n\n Returns:\n DataLoader: A PyTorch dataloader.\n \"\"\"\n rank, world_size = get_dist_info()\n if dist:\n sampler = DistributedSampler(\n dataset, world_size, rank, shuffle=shuffle, seed=seed)\n shuffle = False\n batch_size = samples_per_gpu\n num_workers = workers_per_gpu\n else:\n sampler = None\n batch_size = num_gpus * samples_per_gpu\n num_workers = num_gpus * workers_per_gpu\n\n init_fn = partial(\n worker_init_fn, num_workers=num_workers, rank=rank,\n seed=seed) if seed is not None else None\n\n _, DataLoader = _get_dataloader()\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n sampler=sampler,\n num_workers=num_workers,\n collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n pin_memory=pin_memory,\n shuffle=shuffle,\n worker_init_fn=init_fn,\n drop_last=drop_last,\n **kwargs)\n\n return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Init the random seed for various workers.\"\"\"\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n", "path": "mmpose/datasets/builder.py"}]} | 1,715 | 426 |
gh_patches_debug_33771 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4750 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_300 fails if there is more than one rule
**Describe the issue**
CKV_AWS_300 fails if there is more than one rule defined in `aws_s3_bucket_lifecycle_configuration`
**Examples**
```
resource "aws_s3_bucket_lifecycle_configuration" "bucket" {
bucket = aws_s3_bucket.bucket.bucket
rule {
id = "id-1"
status = "Enabled"
abort_incomplete_multipart_upload {
days_after_initiation = 1
}
}
rule {
id = "id-2"
status = "Enabled"
noncurrent_version_expiration {
noncurrent_days = 1
}
}
}
```
**Version (please complete the following information):**
- Checkov Version 2.3.111
Fails with:
```
Check: CKV_AWS_300: "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
FAILED for resource: aws_s3_bucket_lifecycle_configuration.bucket
File: s3.tf:1-1
122 | resource "aws_s3_bucket_lifecycle_configuration" "bucket" {
123 | bucket = aws_s3_bucket.bucket.bucket
124 |
125 | rule {
126 | id = "id-1"
127 |
128 | abort_incomplete_multipart_upload {
129 | days_after_initiation = 1
130 | }
131 |
132 | status = "Enabled"
133 | }
134 |
135 | rule {
136 | id = "id-2"
137 | status = "Enabled"
138 |
139 | noncurrent_version_expiration {
140 | noncurrent_days = 1
141 | }
142 | }
143 | }
```
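The intended behavior is that the check should pass as soon as any enabled, unfiltered rule sets `abort_incomplete_multipart_upload`, rather than failing because a sibling rule omits it. A minimal sketch of that logic with hypothetical, already-parsed rule values (not the plugin's real API):
```python
# Hypothetical parsed form of the two rules above (checkov wraps scalar values in lists)
rules = [
    {"id": ["id-1"], "status": ["Enabled"],
     "abort_incomplete_multipart_upload": [{"days_after_initiation": [1]}]},
    {"id": ["id-2"], "status": ["Enabled"],
     "noncurrent_version_expiration": [{"noncurrent_days": [1]}]},
]

# Pass if at least one enabled rule without a filter aborts incomplete uploads.
passed = any(
    rule.get("abort_incomplete_multipart_upload")
    and rule.get("status") == ["Enabled"]
    and not rule.get("filter")
    for rule in rules
)
print(passed)  # True, so the configuration above should not be flagged
```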
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py`
Content:
```
1 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
2 from checkov.common.models.enums import CheckCategories, CheckResult
3
4
5 class S3AbortIncompleteUploads(BaseResourceCheck):
6 def __init__(self):
7 """
8 If you don't set this value in a lifecycle configuration you'll end up paying for s3
9 resources you never could use
10 """
11 name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
12 id = "CKV_AWS_300"
13 supported_resources = ('aws_s3_bucket_lifecycle_configuration',)
14 categories = (CheckCategories.GENERAL_SECURITY,)
15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
16
17 def scan_resource_conf(self, conf):
18 self.evaluated_keys = ["rule"]
19 rules = conf.get("rule")
20 if rules and isinstance(rules, list):
21 for idx_rule, rule in enumerate(rules):
22 if not rule.get("abort_incomplete_multipart_upload"):
23 self.evaluated_keys = [f"rule/[{idx_rule}]/"]
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26 return CheckResult.FAILED
27
28
29 check = S3AbortIncompleteUploads()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
@@ -1,28 +1,36 @@
+from __future__ import annotations
+
+from typing import Any
+
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult
class S3AbortIncompleteUploads(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
"""
If you don't set this value in a lifecycle configuration you'll end up paying for s3
resources you never could use
"""
name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
id = "CKV_AWS_300"
- supported_resources = ('aws_s3_bucket_lifecycle_configuration',)
+ supported_resources = ("aws_s3_bucket_lifecycle_configuration",)
categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
self.evaluated_keys = ["rule"]
rules = conf.get("rule")
if rules and isinstance(rules, list):
for idx_rule, rule in enumerate(rules):
- if not rule.get("abort_incomplete_multipart_upload"):
- self.evaluated_keys = [f"rule/[{idx_rule}]/"]
- return CheckResult.FAILED
- return CheckResult.PASSED
+ if (
+ rule.get("abort_incomplete_multipart_upload")
+ and rule.get("status") == ["Enabled"]
+ and not rule.get("filter")
+ ):
+ self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"]
+ return CheckResult.PASSED
+
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n@@ -1,28 +1,36 @@\n+from __future__ import annotations\n+\n+from typing import Any\n+\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n from checkov.common.models.enums import CheckCategories, CheckResult\n \n \n class S3AbortIncompleteUploads(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n- supported_resources = ('aws_s3_bucket_lifecycle_configuration',)\n+ supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n- if not rule.get(\"abort_incomplete_multipart_upload\"):\n- self.evaluated_keys = [f\"rule/[{idx_rule}]/\"]\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n+ if (\n+ rule.get(\"abort_incomplete_multipart_upload\")\n+ and rule.get(\"status\") == [\"Enabled\"]\n+ and not rule.get(\"filter\")\n+ ):\n+ self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n+ return CheckResult.PASSED\n+\n return CheckResult.FAILED\n", "issue": "CKV_AWS_300 fails if there are more rules than one\n**Describe the issue**\r\nCKV_AWS_300 fails if there's more rules than one defined in `aws_s3_bucket_lifecycle_configuration`\r\n\r\n**Examples**\r\n```\r\nresource \"aws_s3_bucket_lifecycle_configuration\" \"bucket\" {\r\n bucket = aws_s3_bucket.bucket.bucket\r\n\r\n rule {\r\n id = \"id-1\"\r\n status = \"Enabled\"\r\n\r\n abort_incomplete_multipart_upload {\r\n days_after_initiation = 1\r\n }\r\n }\r\n\r\n rule {\r\n id = \"id-2\"\r\n status = \"Enabled\"\r\n\r\n noncurrent_version_expiration {\r\n noncurrent_days = 1\r\n }\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.111\r\n\r\nFails with:\r\n\r\n```\r\nCheck: CKV_AWS_300: \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\r\n\tFAILED for resource: aws_s3_bucket_lifecycle_configuration.bucket\r\n\tFile: s3.tf:1-1\r\n\r\n\t\t122 | resource \"aws_s3_bucket_lifecycle_configuration\" \"bucket\" {\r\n\t\t123 | bucket = aws_s3_bucket.bucket.bucket\r\n\t\t124 | \r\n\t\t125 | rule {\r\n\t\t126 | id = \"id-1\"\r\n\t\t127 | \r\n\t\t128 | abort_incomplete_multipart_upload {\r\n\t\t129 | days_after_initiation = 1\r\n\t\t130 | }\r\n\t\t131 | \r\n\t\t132 | status = \"Enabled\"\r\n\t\t133 | }\r\n\t\t134 | \r\n\t\t135 | rule {\r\n\t\t136 | id = \"id-2\"\r\n\t\t137 | status = \"Enabled\"\r\n\t\t138 | \r\n\t\t139 | noncurrent_version_expiration {\r\n\t\t140 | noncurrent_days = 1\r\n\t\t141 | }\r\n\t\t142 | }\r\n\t\t143 | }\r\n```\n", "before_files": [{"content": "from 
checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self):\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = ('aws_s3_bucket_lifecycle_configuration',)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if not rule.get(\"abort_incomplete_multipart_upload\"):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/\"]\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if (\n rule.get(\"abort_incomplete_multipart_upload\")\n and rule.get(\"status\") == [\"Enabled\"]\n and not rule.get(\"filter\")\n ):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}]} | 1,061 | 482 |
gh_patches_debug_22629 | rasdani/github-patches | git_diff | yt-project__yt-3613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: spurious log warning when saving a plot to png format
### Bug report
**Bug summary**
**Code for reproduction**
```python
import yt
yt.funcs.mylog.setLevel("warning")
ds = yt.load_sample("IsolatedGalaxy")
p = yt.SlicePlot(ds, "z", "density")
p.save("/tmp/test.png")
```
**Actual outcome**
```
yt : [WARNING ] 2021-10-20 11:50:44,393 Received two valid image formats '.png' (from `filename`) and 'png' (from `suffix`). The former is ignored.
```
**Expected outcome**
No log warning
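The warning appears to come from comparing a normalized suffix against the un-normalized extension returned by `os.path.splitext`. A minimal standalone sketch of the mismatch (plain Python, independent of yt):
```python
import os

name, psuffix = os.path.splitext("/tmp/test.png")  # psuffix == ".png" (keeps the dot)
suffix = "png"                                     # the already-normalized default

# Both mean the same format, but the raw string comparison still differs,
# which is enough to reach the "two valid image formats" warning path.
print(psuffix != suffix)  # True
```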
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/visualization/_commons.py`
Content:
```
1 import os
2 import sys
3 from typing import Optional, Type
4
5 import matplotlib
6 from packaging.version import Version
7
8 from yt.utilities.logger import ytLogger as mylog
9
10 from ._mpl_imports import (
11 FigureCanvasAgg,
12 FigureCanvasBase,
13 FigureCanvasPdf,
14 FigureCanvasPS,
15 FigureCanvasSVG,
16 )
17
18 MPL_VERSION = Version(matplotlib.__version__)
19
20 DEFAULT_FONT_PROPERTIES = {
21 "family": "stixgeneral",
22 "size": 18,
23 }
24
25 if MPL_VERSION >= Version("3.4"):
26 DEFAULT_FONT_PROPERTIES["math_fontfamily"] = "cm"
27
28 SUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())
29 SUPPORTED_CANVAS_CLASSES = frozenset(
30 (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)
31 )
32
33
34 def get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:
35 s = normalize_extension_string(suffix)
36 if s not in SUPPORTED_FORMATS:
37 raise ValueError(f"Unsupported file format '{suffix}'.")
38 for cls in SUPPORTED_CANVAS_CLASSES:
39 if s in cls.get_supported_filetypes():
40 return cls
41 raise RuntimeError(
42 "Something went terribly wrong. "
43 f"File extension '{suffix}' is supposed to be supported "
44 "but no compatible backend was found."
45 )
46
47
48 def normalize_extension_string(s: str) -> str:
49 if sys.version_info < (3, 9):
50 if s.startswith("."):
51 return s[1:]
52 return s
53 else:
54 return s.removeprefix(".")
55
56
57 def validate_image_name(filename, suffix: Optional[str] = None) -> str:
58 """
59 Build a valid image filename with a specified extension (default to png).
60 The suffix parameter is ignored if the input filename has a valid extension already.
61 Otherwise, suffix is appended to the filename, replacing any existing extension.
62 """
63 name, psuffix = os.path.splitext(filename)
64 if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:
65 if suffix is not None:
66 suffix = normalize_extension_string(suffix)
67 if suffix in SUPPORTED_FORMATS and suffix != psuffix:
68 mylog.warning(
69 "Received two valid image formats '%s' (from `filename`) "
70 "and '%s' (from `suffix`). The former is ignored.",
71 psuffix,
72 suffix,
73 )
74 return f"{name}.{suffix}"
75 return str(filename)
76
77 if suffix is None:
78 suffix = ".png"
79
80 suffix = normalize_extension_string(suffix)
81
82 if suffix not in SUPPORTED_FORMATS:
83 raise ValueError(f"Unsupported file format '{suffix}'.")
84
85 return f"{filename}.{suffix}"
86
87
88 def get_canvas(figure, filename):
89
90 name, suffix = os.path.splitext(filename)
91
92 if not suffix:
93 raise ValueError(
94 f"Can not determine canvas class from filename '{filename}' "
95 f"without an extension."
96 )
97 return get_canvas_class(suffix)(figure)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py
--- a/yt/visualization/_commons.py
+++ b/yt/visualization/_commons.py
@@ -61,9 +61,12 @@
Otherwise, suffix is appended to the filename, replacing any existing extension.
"""
name, psuffix = os.path.splitext(filename)
- if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:
- if suffix is not None:
- suffix = normalize_extension_string(suffix)
+ psuffix = normalize_extension_string(psuffix)
+
+ if suffix is not None:
+ suffix = normalize_extension_string(suffix)
+
+ if psuffix in SUPPORTED_FORMATS:
if suffix in SUPPORTED_FORMATS and suffix != psuffix:
mylog.warning(
"Received two valid image formats '%s' (from `filename`) "
@@ -75,9 +78,7 @@
return str(filename)
if suffix is None:
- suffix = ".png"
-
- suffix = normalize_extension_string(suffix)
+ suffix = "png"
if suffix not in SUPPORTED_FORMATS:
raise ValueError(f"Unsupported file format '{suffix}'.")
| {"golden_diff": "diff --git a/yt/visualization/_commons.py b/yt/visualization/_commons.py\n--- a/yt/visualization/_commons.py\n+++ b/yt/visualization/_commons.py\n@@ -61,9 +61,12 @@\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n- if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:\n- if suffix is not None:\n- suffix = normalize_extension_string(suffix)\n+ psuffix = normalize_extension_string(psuffix)\n+\n+ if suffix is not None:\n+ suffix = normalize_extension_string(suffix)\n+\n+ if psuffix in SUPPORTED_FORMATS:\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n@@ -75,9 +78,7 @@\n return str(filename)\n \n if suffix is None:\n- suffix = \".png\"\n-\n- suffix = normalize_extension_string(suffix)\n+ suffix = \"png\"\n \n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n", "issue": "BUG: spurious log warning when saving a plot to png format\n### Bug report\r\n\r\n**Bug summary**\r\n\r\n**Code for reproduction**\r\n\r\n```python\r\nimport yt\r\n\r\nyt.funcs.mylog.setLevel(\"warning\")\r\n\r\nds = yt.load_sample(\"IsolatedGalaxy\")\r\np = yt.SlicePlot(ds, \"z\", \"density\")\r\np.save(\"/tmp/test.png\")\r\n```\r\n\r\n\r\n**Actual outcome**\r\n\r\n```\r\nyt : [WARNING ] 2021-10-20 11:50:44,393 Received two valid image formats '.png' (from `filename`) and 'png' (from `suffix`). The former is ignored.\r\n```\r\n\r\n**Expected outcome**\r\n\r\nNo log warning\n", "before_files": [{"content": "import os\nimport sys\nfrom typing import Optional, Type\n\nimport matplotlib\nfrom packaging.version import Version\n\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom ._mpl_imports import (\n FigureCanvasAgg,\n FigureCanvasBase,\n FigureCanvasPdf,\n FigureCanvasPS,\n FigureCanvasSVG,\n)\n\nMPL_VERSION = Version(matplotlib.__version__)\n\nDEFAULT_FONT_PROPERTIES = {\n \"family\": \"stixgeneral\",\n \"size\": 18,\n}\n\nif MPL_VERSION >= Version(\"3.4\"):\n DEFAULT_FONT_PROPERTIES[\"math_fontfamily\"] = \"cm\"\n\nSUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())\nSUPPORTED_CANVAS_CLASSES = frozenset(\n (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)\n)\n\n\ndef get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:\n s = normalize_extension_string(suffix)\n if s not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n for cls in SUPPORTED_CANVAS_CLASSES:\n if s in cls.get_supported_filetypes():\n return cls\n raise RuntimeError(\n \"Something went terribly wrong. 
\"\n f\"File extension '{suffix}' is supposed to be supported \"\n \"but no compatible backend was found.\"\n )\n\n\ndef normalize_extension_string(s: str) -> str:\n if sys.version_info < (3, 9):\n if s.startswith(\".\"):\n return s[1:]\n return s\n else:\n return s.removeprefix(\".\")\n\n\ndef validate_image_name(filename, suffix: Optional[str] = None) -> str:\n \"\"\"\n Build a valid image filename with a specified extension (default to png).\n The suffix parameter is ignored if the input filename has a valid extension already.\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n if normalize_extension_string(psuffix) in SUPPORTED_FORMATS:\n if suffix is not None:\n suffix = normalize_extension_string(suffix)\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n \"and '%s' (from `suffix`). The former is ignored.\",\n psuffix,\n suffix,\n )\n return f\"{name}.{suffix}\"\n return str(filename)\n\n if suffix is None:\n suffix = \".png\"\n\n suffix = normalize_extension_string(suffix)\n\n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n\n return f\"{filename}.{suffix}\"\n\n\ndef get_canvas(figure, filename):\n\n name, suffix = os.path.splitext(filename)\n\n if not suffix:\n raise ValueError(\n f\"Can not determine canvas class from filename '{filename}' \"\n f\"without an extension.\"\n )\n return get_canvas_class(suffix)(figure)\n", "path": "yt/visualization/_commons.py"}], "after_files": [{"content": "import os\nimport sys\nfrom typing import Optional, Type\n\nimport matplotlib\nfrom packaging.version import Version\n\nfrom yt.utilities.logger import ytLogger as mylog\n\nfrom ._mpl_imports import (\n FigureCanvasAgg,\n FigureCanvasBase,\n FigureCanvasPdf,\n FigureCanvasPS,\n FigureCanvasSVG,\n)\n\nMPL_VERSION = Version(matplotlib.__version__)\n\nDEFAULT_FONT_PROPERTIES = {\n \"family\": \"stixgeneral\",\n \"size\": 18,\n}\n\nif MPL_VERSION >= Version(\"3.4\"):\n DEFAULT_FONT_PROPERTIES[\"math_fontfamily\"] = \"cm\"\n\nSUPPORTED_FORMATS = frozenset(FigureCanvasBase.get_supported_filetypes().keys())\nSUPPORTED_CANVAS_CLASSES = frozenset(\n (FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS, FigureCanvasSVG)\n)\n\n\ndef get_canvas_class(suffix: str) -> Type[FigureCanvasBase]:\n s = normalize_extension_string(suffix)\n if s not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n for cls in SUPPORTED_CANVAS_CLASSES:\n if s in cls.get_supported_filetypes():\n return cls\n raise RuntimeError(\n \"Something went terribly wrong. 
\"\n f\"File extension '{suffix}' is supposed to be supported \"\n \"but no compatible backend was found.\"\n )\n\n\ndef normalize_extension_string(s: str) -> str:\n if sys.version_info < (3, 9):\n if s.startswith(\".\"):\n return s[1:]\n return s\n else:\n return s.removeprefix(\".\")\n\n\ndef validate_image_name(filename, suffix: Optional[str] = None) -> str:\n \"\"\"\n Build a valid image filename with a specified extension (default to png).\n The suffix parameter is ignored if the input filename has a valid extension already.\n Otherwise, suffix is appended to the filename, replacing any existing extension.\n \"\"\"\n name, psuffix = os.path.splitext(filename)\n psuffix = normalize_extension_string(psuffix)\n\n if suffix is not None:\n suffix = normalize_extension_string(suffix)\n\n if psuffix in SUPPORTED_FORMATS:\n if suffix in SUPPORTED_FORMATS and suffix != psuffix:\n mylog.warning(\n \"Received two valid image formats '%s' (from `filename`) \"\n \"and '%s' (from `suffix`). The former is ignored.\",\n psuffix,\n suffix,\n )\n return f\"{name}.{suffix}\"\n return str(filename)\n\n if suffix is None:\n suffix = \"png\"\n\n if suffix not in SUPPORTED_FORMATS:\n raise ValueError(f\"Unsupported file format '{suffix}'.\")\n\n return f\"{filename}.{suffix}\"\n\n\ndef get_canvas(figure, filename):\n\n name, suffix = os.path.splitext(filename)\n\n if not suffix:\n raise ValueError(\n f\"Can not determine canvas class from filename '{filename}' \"\n f\"without an extension.\"\n )\n return get_canvas_class(suffix)(figure)\n", "path": "yt/visualization/_commons.py"}]} | 1,235 | 260 |
gh_patches_debug_5604 | rasdani/github-patches | git_diff | bokeh__bokeh-9682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOCUMENTATION] typo in texas.py
Superfluous parenthesis on [line 34 of texas.py](https://github.com/bokeh/bokeh/blob/aa60b9c9d554fbd349a21da37d616c5b8eda8c09/examples/plotting/file/texas.py#L34). It shows up in the hover tooltip.
Not sure whether you want a PR for something so small? So far I haven't found any other corrections that could be added in.
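For reference, the offending tooltip entry and its corrected form (a sketch of the one-character change on the linked line):
```python
# before: stray ")" inside the label shown in the hover tooltip
("Unemployment rate)", "@rate%")
# after
("Unemployment rate", "@rate%")
```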
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting/file/texas.py`
Content:
```
1 from bokeh.io import show
2 from bokeh.models import LogColorMapper
3 from bokeh.palettes import Viridis6 as palette
4 from bokeh.plotting import figure
5 from bokeh.sampledata.unemployment import data as unemployment
6 from bokeh.sampledata.us_counties import data as counties
7
8 palette = tuple(reversed(palette))
9
10 counties = {
11 code: county for code, county in counties.items() if county["state"] == "tx"
12 }
13
14 county_xs = [county["lons"] for county in counties.values()]
15 county_ys = [county["lats"] for county in counties.values()]
16
17 county_names = [county['name'] for county in counties.values()]
18 county_rates = [unemployment[county_id] for county_id in counties]
19 color_mapper = LogColorMapper(palette=palette)
20
21 data=dict(
22 x=county_xs,
23 y=county_ys,
24 name=county_names,
25 rate=county_rates,
26 )
27
28 TOOLS = "pan,wheel_zoom,reset,hover,save"
29
30 p = figure(
31 title="Texas Unemployment, 2009", tools=TOOLS,
32 x_axis_location=None, y_axis_location=None,
33 tooltips=[
34 ("Name", "@name"), ("Unemployment rate)", "@rate%"), ("(Long, Lat)", "($x, $y)")
35 ])
36 p.grid.grid_line_color = None
37 p.hover.point_policy = "follow_mouse"
38
39 p.patches('x', 'y', source=data,
40 fill_color={'field': 'rate', 'transform': color_mapper},
41 fill_alpha=0.7, line_color="white", line_width=0.5)
42
43 show(p)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/plotting/file/texas.py b/examples/plotting/file/texas.py
--- a/examples/plotting/file/texas.py
+++ b/examples/plotting/file/texas.py
@@ -31,7 +31,7 @@
title="Texas Unemployment, 2009", tools=TOOLS,
x_axis_location=None, y_axis_location=None,
tooltips=[
- ("Name", "@name"), ("Unemployment rate)", "@rate%"), ("(Long, Lat)", "($x, $y)")
+ ("Name", "@name"), ("Unemployment rate", "@rate%"), ("(Long, Lat)", "($x, $y)")
])
p.grid.grid_line_color = None
p.hover.point_policy = "follow_mouse"
| {"golden_diff": "diff --git a/examples/plotting/file/texas.py b/examples/plotting/file/texas.py\n--- a/examples/plotting/file/texas.py\n+++ b/examples/plotting/file/texas.py\n@@ -31,7 +31,7 @@\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n- (\"Name\", \"@name\"), (\"Unemployment rate)\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n+ (\"Name\", \"@name\"), (\"Unemployment rate\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\n p.grid.grid_line_color = None\n p.hover.point_policy = \"follow_mouse\"\n", "issue": "[DOCUMENTATION] typo in texas.py\nSuperfluous parenthesis on [line 34 of texas.py](https://github.com/bokeh/bokeh/blob/aa60b9c9d554fbd349a21da37d616c5b8eda8c09/examples/plotting/file/texas.py#L34). Shows up in the hover tool tip.\r\n\r\nNot sure whether you want a PR for something so small? So far I haven't found any other corrections that could be added in.\n", "before_files": [{"content": "from bokeh.io import show\nfrom bokeh.models import LogColorMapper\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.unemployment import data as unemployment\nfrom bokeh.sampledata.us_counties import data as counties\n\npalette = tuple(reversed(palette))\n\ncounties = {\n code: county for code, county in counties.items() if county[\"state\"] == \"tx\"\n}\n\ncounty_xs = [county[\"lons\"] for county in counties.values()]\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n\ncounty_names = [county['name'] for county in counties.values()]\ncounty_rates = [unemployment[county_id] for county_id in counties]\ncolor_mapper = LogColorMapper(palette=palette)\n\ndata=dict(\n x=county_xs,\n y=county_ys,\n name=county_names,\n rate=county_rates,\n)\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n (\"Name\", \"@name\"), (\"Unemployment rate)\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\np.grid.grid_line_color = None\np.hover.point_policy = \"follow_mouse\"\n\np.patches('x', 'y', source=data,\n fill_color={'field': 'rate', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\nshow(p)\n", "path": "examples/plotting/file/texas.py"}], "after_files": [{"content": "from bokeh.io import show\nfrom bokeh.models import LogColorMapper\nfrom bokeh.palettes import Viridis6 as palette\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.unemployment import data as unemployment\nfrom bokeh.sampledata.us_counties import data as counties\n\npalette = tuple(reversed(palette))\n\ncounties = {\n code: county for code, county in counties.items() if county[\"state\"] == \"tx\"\n}\n\ncounty_xs = [county[\"lons\"] for county in counties.values()]\ncounty_ys = [county[\"lats\"] for county in counties.values()]\n\ncounty_names = [county['name'] for county in counties.values()]\ncounty_rates = [unemployment[county_id] for county_id in counties]\ncolor_mapper = LogColorMapper(palette=palette)\n\ndata=dict(\n x=county_xs,\n y=county_ys,\n name=county_names,\n rate=county_rates,\n)\n\nTOOLS = \"pan,wheel_zoom,reset,hover,save\"\n\np = figure(\n title=\"Texas Unemployment, 2009\", tools=TOOLS,\n x_axis_location=None, y_axis_location=None,\n tooltips=[\n (\"Name\", \"@name\"), (\"Unemployment rate\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")\n ])\np.grid.grid_line_color = None\np.hover.point_policy = 
\"follow_mouse\"\n\np.patches('x', 'y', source=data,\n fill_color={'field': 'rate', 'transform': color_mapper},\n fill_alpha=0.7, line_color=\"white\", line_width=0.5)\n\nshow(p)\n", "path": "examples/plotting/file/texas.py"}]} | 811 | 166 |
gh_patches_debug_50247 | rasdani/github-patches | git_diff | sopel-irc__sopel-2154 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `.clearpronouns` command
### The problem
Users might set their pronouns by mistake, or just to test the functionality, and then they are stuck.
### The solution
Add an "unsetpronouns" command that deletes pronoun information for the nick.
Something like this might work.
```python
@plugin.command('unsetpronouns')
def unset_pronouns(bot, trigger):
bot.db.delete_nick_value(trigger.nick, 'pronouns')
```
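A slightly fuller sketch along the same lines that also confirms the change to the user (the command name follows this issue's title; the reply wording is only an example):
```python
from sopel import plugin


@plugin.command('clearpronouns')
def clear_pronouns(bot, trigger):
    """Forget the stored pronouns for the calling nick."""
    bot.db.delete_nick_value(trigger.nick, 'pronouns')
    bot.reply("Okay, I'll forget your pronouns.")
```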
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/pronouns.py`
Content:
```
1 """
2 pronouns.py - Sopel Pronouns Plugin
3 Copyright © 2016, Elsie Powell
4 Licensed under the Eiffel Forum License 2.
5
6 https://sopel.chat
7 """
8 from __future__ import generator_stop
9
10 from sopel import plugin
11
12
13 # Copied from pronoun.is, leaving a *lot* out. If
14 # https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that
15 # would be a lot easier.
16 # If ambiguous, the earlier one will be used.
17 KNOWN_SETS = {
18 "ze/hir": "ze/hir/hir/hirs/hirself",
19 "ze/zir": "ze/zir/zir/zirs/zirself",
20 "they/.../themselves": "they/them/their/theirs/themselves",
21 "they/.../themself": "they/them/their/theirs/themself",
22 "she/her": "she/her/her/hers/herself",
23 "he/him": "he/him/his/his/himself",
24 "xey/xem": "xey/xem/xyr/xyrs/xemself",
25 "sie/hir": "sie/hir/hir/hirs/hirself",
26 "it/it": "it/it/its/its/itself",
27 "ey/em": "ey/em/eir/eirs/eirself",
28 }
29
30
31 @plugin.command('pronouns')
32 @plugin.example('.pronouns Embolalia')
33 def pronouns(bot, trigger):
34 """Show the pronouns for a given user, defaulting to the current user if left blank."""
35 if not trigger.group(3):
36 pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')
37 if pronouns:
38 say_pronouns(bot, trigger.nick, pronouns)
39 else:
40 bot.reply("I don't know your pronouns! You can set them with "
41 "{}setpronouns".format(bot.config.core.help_prefix))
42 else:
43 pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')
44 if pronouns:
45 say_pronouns(bot, trigger.group(3), pronouns)
46 elif trigger.group(3) == bot.nick:
47 # You can stuff an entry into the database manually for your bot's
48 # gender, but like… it's a bot.
49 bot.say(
50 "I am a bot. Beep boop. My pronouns are it/it/its/its/itself. "
51 "See https://pronoun.is/it for examples."
52 )
53 else:
54 bot.reply("I don't know {}'s pronouns. They can set them with "
55 "{}setpronouns".format(trigger.group(3),
56 bot.config.core.help_prefix))
57
58
59 def say_pronouns(bot, nick, pronouns):
60 for short, set_ in KNOWN_SETS.items():
61 if pronouns == set_:
62 break
63 short = pronouns
64
65 bot.say("{}'s pronouns are {}. See https://pronoun.is/{} for "
66 "examples.".format(nick, pronouns, short))
67
68
69 @plugin.command('setpronouns')
70 @plugin.example('.setpronouns fae/faer/faer/faers/faerself')
71 @plugin.example('.setpronouns they/them/theirs')
72 @plugin.example('.setpronouns they/them')
73 def set_pronouns(bot, trigger):
74 """Set your pronouns."""
75 pronouns = trigger.group(2)
76 if not pronouns:
77 bot.reply('What pronouns do you use?')
78 return
79
80 disambig = ''
81 requested_pronoun_split = pronouns.split("/")
82 if len(requested_pronoun_split) < 5:
83 matching = []
84 for known_pronoun_set in KNOWN_SETS.values():
85 known_pronoun_split = known_pronoun_set.split("/")
86 if known_pronoun_set.startswith(pronouns + "/") or (
87 len(requested_pronoun_split) == 3
88 and (
89 (
90 # "they/.../themself"
91 requested_pronoun_split[1] == "..."
92 and requested_pronoun_split[0] == known_pronoun_split[0]
93 and requested_pronoun_split[2] == known_pronoun_split[4]
94 )
95 or (
96 # "they/them/theirs"
97 requested_pronoun_split[0:2] == known_pronoun_split[0:2]
98 and requested_pronoun_split[2] == known_pronoun_split[3]
99 )
100 )
101 ):
102 matching.append(known_pronoun_set)
103
104 if len(matching) == 0:
105 bot.reply(
106 "I'm sorry, I don't know those pronouns. "
107 "You can give me a set I don't know by formatting it "
108 "subject/object/possessive-determiner/possessive-pronoun/"
109 "reflexive, as in: they/them/their/theirs/themselves"
110 )
111 return
112
113 pronouns = matching[0]
114 if len(matching) > 1:
115 disambig = " Or, if you meant one of these, please tell me: {}".format(
116 ", ".join(matching[1:])
117 )
118
119 bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)
120 bot.reply(
121 "Thanks for telling me! I'll remember you use {}.{}".format(pronouns, disambig)
122 )
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py
--- a/sopel/modules/pronouns.py
+++ b/sopel/modules/pronouns.py
@@ -120,3 +120,10 @@
bot.reply(
"Thanks for telling me! I'll remember you use {}.{}".format(pronouns, disambig)
)
+
+
[email protected]('clearpronouns')
+def unset_pronouns(bot, trigger):
+ """Clear pronouns for the given user."""
+ bot.db.delete_nick_value(trigger.nick, 'pronouns')
+ bot.reply("Okay, I'll forget your pronouns.")
| {"golden_diff": "diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py\n--- a/sopel/modules/pronouns.py\n+++ b/sopel/modules/pronouns.py\n@@ -120,3 +120,10 @@\n bot.reply(\n \"Thanks for telling me! I'll remember you use {}.{}\".format(pronouns, disambig)\n )\n+\n+\[email protected]('clearpronouns')\n+def unset_pronouns(bot, trigger):\n+ \"\"\"Clear pronouns for the given user.\"\"\"\n+ bot.db.delete_nick_value(trigger.nick, 'pronouns')\n+ bot.reply(\"Okay, I'll forget your pronouns.\")\n", "issue": "Add `.clearpronouns` command\n### The problem\r\nUsers might set their pronouns by mistake or just to test the functionality and then they are stuck.\r\n\r\n### The solution\r\n\r\nAdd an \"unsetpronouns\" that deletes pronoun information for the nick. \r\nSomething like this might work. \r\n\r\n```python\r\[email protected]('unsetpronouns')\r\ndef unset_pronouns(bot, trigger):\r\n bot.db.delete_nick_value(trigger.nick, 'pronouns')\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\n# If ambiguous, the earlier one will be used.\nKNOWN_SETS = {\n \"ze/hir\": \"ze/hir/hir/hirs/hirself\",\n \"ze/zir\": \"ze/zir/zir/zirs/zirself\",\n \"they/.../themselves\": \"they/them/their/theirs/themselves\",\n \"they/.../themself\": \"they/them/their/theirs/themself\",\n \"she/her\": \"she/her/her/hers/herself\",\n \"he/him\": \"he/him/his/his/himself\",\n \"xey/xem\": \"xey/xem/xyr/xyrs/xemself\",\n \"sie/hir\": \"sie/hir/hir/hirs/hirself\",\n \"it/it\": \"it/it/its/its/itself\",\n \"ey/em\": \"ey/em/eir/eirs/eirself\",\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. 
See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns fae/faer/faer/faers/faerself')\[email protected]('.setpronouns they/them/theirs')\[email protected]('.setpronouns they/them')\ndef set_pronouns(bot, trigger):\n \"\"\"Set your pronouns.\"\"\"\n pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n requested_pronoun_split = pronouns.split(\"/\")\n if len(requested_pronoun_split) < 5:\n matching = []\n for known_pronoun_set in KNOWN_SETS.values():\n known_pronoun_split = known_pronoun_set.split(\"/\")\n if known_pronoun_set.startswith(pronouns + \"/\") or (\n len(requested_pronoun_split) == 3\n and (\n (\n # \"they/.../themself\"\n requested_pronoun_split[1] == \"...\"\n and requested_pronoun_split[0] == known_pronoun_split[0]\n and requested_pronoun_split[2] == known_pronoun_split[4]\n )\n or (\n # \"they/them/theirs\"\n requested_pronoun_split[0:2] == known_pronoun_split[0:2]\n and requested_pronoun_split[2] == known_pronoun_split[3]\n )\n )\n ):\n matching.append(known_pronoun_set)\n\n if len(matching) == 0:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n\n pronouns = matching[0]\n if len(matching) > 1:\n disambig = \" Or, if you meant one of these, please tell me: {}\".format(\n \", \".join(matching[1:])\n )\n\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\n \"Thanks for telling me! I'll remember you use {}.{}\".format(pronouns, disambig)\n )\n", "path": "sopel/modules/pronouns.py"}], "after_files": [{"content": "\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import generator_stop\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\n# If ambiguous, the earlier one will be used.\nKNOWN_SETS = {\n \"ze/hir\": \"ze/hir/hir/hirs/hirself\",\n \"ze/zir\": \"ze/zir/zir/zirs/zirself\",\n \"they/.../themselves\": \"they/them/their/theirs/themselves\",\n \"they/.../themself\": \"they/them/their/theirs/themself\",\n \"she/her\": \"she/her/her/hers/herself\",\n \"he/him\": \"he/him/his/his/himself\",\n \"xey/xem\": \"xey/xem/xyr/xyrs/xemself\",\n \"sie/hir\": \"sie/hir/hir/hirs/hirself\",\n \"it/it\": \"it/it/its/its/itself\",\n \"ey/em\": \"ey/em/eir/eirs/eirself\",\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. 
My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns fae/faer/faer/faers/faerself')\[email protected]('.setpronouns they/them/theirs')\[email protected]('.setpronouns they/them')\ndef set_pronouns(bot, trigger):\n \"\"\"Set your pronouns.\"\"\"\n pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n requested_pronoun_split = pronouns.split(\"/\")\n if len(requested_pronoun_split) < 5:\n matching = []\n for known_pronoun_set in KNOWN_SETS.values():\n known_pronoun_split = known_pronoun_set.split(\"/\")\n if known_pronoun_set.startswith(pronouns + \"/\") or (\n len(requested_pronoun_split) == 3\n and (\n (\n # \"they/.../themself\"\n requested_pronoun_split[1] == \"...\"\n and requested_pronoun_split[0] == known_pronoun_split[0]\n and requested_pronoun_split[2] == known_pronoun_split[4]\n )\n or (\n # \"they/them/theirs\"\n requested_pronoun_split[0:2] == known_pronoun_split[0:2]\n and requested_pronoun_split[2] == known_pronoun_split[3]\n )\n )\n ):\n matching.append(known_pronoun_set)\n\n if len(matching) == 0:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n\n pronouns = matching[0]\n if len(matching) > 1:\n disambig = \" Or, if you meant one of these, please tell me: {}\".format(\n \", \".join(matching[1:])\n )\n\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\n \"Thanks for telling me! I'll remember you use {}.{}\".format(pronouns, disambig)\n )\n\n\[email protected]('clearpronouns')\ndef unset_pronouns(bot, trigger):\n \"\"\"Clear pronouns for the given user.\"\"\"\n bot.db.delete_nick_value(trigger.nick, 'pronouns')\n bot.reply(\"Okay, I'll forget your pronouns.\")\n", "path": "sopel/modules/pronouns.py"}]} | 1,817 | 148 |
gh_patches_debug_5488 | rasdani/github-patches | git_diff | iterative__dvc-7908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
config: option for `--jobs` value
Several DVC commands have a `--jobs` option with a default value (e.g., https://dvc.org/doc/commands-reference/gc).
Afaik there is no way to change the default value. Having the option to change it through `dvc config` would be useful.
Can you consider adding it?
Thanks
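For context, the `get_cloud_fs` helper shown below already merges a core-level `jobs` default into the remote configuration; a self-contained sketch of that merge logic with illustrative values only:
```python
# Hypothetical values standing in for parsed DVC config sections
core_config = {"jobs": 8}                  # e.g. a jobs setting in the [core] section
remote_conf = {"url": "s3://bucket/path"}  # remote config without its own jobs value

# Same shape as the fallback in get_cloud_fs: remote-level wins, core provides the default.
if "jobs" not in remote_conf:
    jobs = core_config.get("jobs")
    if jobs:
        remote_conf["jobs"] = jobs

print(remote_conf)  # {'url': 's3://bucket/path', 'jobs': 8}
```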
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/fs/__init__.py`
Content:
```
1 from urllib.parse import urlparse
2
3 # pylint: disable=unused-import
4 from dvc_objects.fs import utils # noqa: F401
5 from dvc_objects.fs import ( # noqa: F401
6 FS_MAP,
7 AzureFileSystem,
8 GDriveFileSystem,
9 GSFileSystem,
10 HDFSFileSystem,
11 HTTPFileSystem,
12 HTTPSFileSystem,
13 LocalFileSystem,
14 MemoryFileSystem,
15 OSSFileSystem,
16 S3FileSystem,
17 Schemes,
18 SSHFileSystem,
19 WebDAVFileSystem,
20 WebDAVSFileSystem,
21 WebHDFSFileSystem,
22 generic,
23 get_fs_cls,
24 system,
25 )
26 from dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401
27 from dvc_objects.fs.errors import ( # noqa: F401
28 AuthError,
29 ConfigError,
30 RemoteMissingDepsError,
31 )
32 from dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401
33 from dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401
34 from dvc_objects.fs.implementations.local import localfs # noqa: F401
35 from dvc_objects.fs.implementations.ssh import ( # noqa: F401
36 DEFAULT_PORT as DEFAULT_SSH_PORT,
37 )
38 from dvc_objects.fs.path import Path # noqa: F401
39
40 from .data import DataFileSystem # noqa: F401
41 from .dvc import DvcFileSystem # noqa: F401
42 from .git import GitFileSystem # noqa: F401
43
44 # pylint: enable=unused-import
45
46
47 def get_fs_config(repo, config, **kwargs):
48 name = kwargs.get("name")
49 if name:
50 try:
51 remote_conf = config["remote"][name.lower()]
52 except KeyError:
53 from dvc.config import RemoteNotFoundError
54
55 raise RemoteNotFoundError(f"remote '{name}' doesn't exist")
56 else:
57 remote_conf = kwargs
58 return _resolve_remote_refs(repo, config, remote_conf)
59
60
61 def _resolve_remote_refs(repo, config, remote_conf):
62 # Support for cross referenced remotes.
63 # This will merge the settings, shadowing base ref with remote_conf.
64 # For example, having:
65 #
66 # dvc remote add server ssh://localhost
67 # dvc remote modify server user root
68 # dvc remote modify server ask_password true
69 #
70 # dvc remote add images remote://server/tmp/pictures
71 # dvc remote modify images user alice
72 # dvc remote modify images ask_password false
73 # dvc remote modify images password asdf1234
74 #
75 # Results on a config dictionary like:
76 #
77 # {
78 # "url": "ssh://localhost/tmp/pictures",
79 # "user": "alice",
80 # "password": "asdf1234",
81 # "ask_password": False,
82 # }
83 parsed = urlparse(remote_conf["url"])
84 if parsed.scheme != "remote":
85 return remote_conf
86
87 base = get_fs_config(repo, config, name=parsed.netloc)
88 cls, _, _ = get_cloud_fs(repo, **base)
89 relpath = parsed.path.lstrip("/").replace("/", cls.sep)
90 url = cls.sep.join((base["url"], relpath))
91 return {**base, **remote_conf, "url": url}
92
93
94 def get_cloud_fs(repo, **kwargs):
95 from dvc.config import ConfigError as RepoConfigError
96 from dvc.config_schema import SCHEMA, Invalid
97
98 repo_config = repo.config if repo else {}
99 core_config = repo_config.get("core", {})
100
101 remote_conf = get_fs_config(repo, repo_config, **kwargs)
102 try:
103 remote_conf = SCHEMA["remote"][str](remote_conf)
104 except Invalid as exc:
105 raise RepoConfigError(str(exc)) from None
106
107 if "jobs" not in remote_conf:
108 jobs = core_config.get("jobs")
109 if jobs:
110 remote_conf["jobs"] = jobs
111
112 if "checksum_jobs" not in remote_conf:
113 checksum_jobs = core_config.get("checksum_jobs")
114 if checksum_jobs:
115 remote_conf["checksum_jobs"] = checksum_jobs
116
117 cls = get_fs_cls(remote_conf)
118
119 if cls == GDriveFileSystem and repo:
120 remote_conf["gdrive_credentials_tmp_dir"] = repo.tmp_dir
121
122 url = remote_conf.pop("url")
123 if issubclass(cls, WebDAVFileSystem):
124 # For WebDAVFileSystem, provided url is the base path itself, so it
125 # should be treated as being a root path.
126 fs_path = cls.root_marker
127 else:
128 fs_path = cls._strip_protocol(url) # pylint:disable=protected-access
129
130 extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access
131 conf = {**extras, **remote_conf} # remote config takes priority
132 return cls, conf, fs_path
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/fs/__init__.py b/dvc/fs/__init__.py
--- a/dvc/fs/__init__.py
+++ b/dvc/fs/__init__.py
@@ -104,11 +104,6 @@
except Invalid as exc:
raise RepoConfigError(str(exc)) from None
- if "jobs" not in remote_conf:
- jobs = core_config.get("jobs")
- if jobs:
- remote_conf["jobs"] = jobs
-
if "checksum_jobs" not in remote_conf:
checksum_jobs = core_config.get("checksum_jobs")
if checksum_jobs:
| {"golden_diff": "diff --git a/dvc/fs/__init__.py b/dvc/fs/__init__.py\n--- a/dvc/fs/__init__.py\n+++ b/dvc/fs/__init__.py\n@@ -104,11 +104,6 @@\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n \n- if \"jobs\" not in remote_conf:\n- jobs = core_config.get(\"jobs\")\n- if jobs:\n- remote_conf[\"jobs\"] = jobs\n-\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n", "issue": "config: option for `--jobs` value\nSeveral DVC commands have a `--jobs` option that has default values (e.g, https://dvc.org/doc/commands-reference/gc).\r\n\r\nAfaik there is no way to change the default value. Having the option to change it through `dvc config` would be useful.\r\n\r\nCan you consider adding it?\r\n\r\nThanks\n", "before_files": [{"content": "from urllib.parse import urlparse\n\n# pylint: disable=unused-import\nfrom dvc_objects.fs import utils # noqa: F401\nfrom dvc_objects.fs import ( # noqa: F401\n FS_MAP,\n AzureFileSystem,\n GDriveFileSystem,\n GSFileSystem,\n HDFSFileSystem,\n HTTPFileSystem,\n HTTPSFileSystem,\n LocalFileSystem,\n MemoryFileSystem,\n OSSFileSystem,\n S3FileSystem,\n Schemes,\n SSHFileSystem,\n WebDAVFileSystem,\n WebDAVSFileSystem,\n WebHDFSFileSystem,\n generic,\n get_fs_cls,\n system,\n)\nfrom dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401\nfrom dvc_objects.fs.errors import ( # noqa: F401\n AuthError,\n ConfigError,\n RemoteMissingDepsError,\n)\nfrom dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.local import localfs # noqa: F401\nfrom dvc_objects.fs.implementations.ssh import ( # noqa: F401\n DEFAULT_PORT as DEFAULT_SSH_PORT,\n)\nfrom dvc_objects.fs.path import Path # noqa: F401\n\nfrom .data import DataFileSystem # noqa: F401\nfrom .dvc import DvcFileSystem # noqa: F401\nfrom .git import GitFileSystem # noqa: F401\n\n# pylint: enable=unused-import\n\n\ndef get_fs_config(repo, config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n try:\n remote_conf = config[\"remote\"][name.lower()]\n except KeyError:\n from dvc.config import RemoteNotFoundError\n\n raise RemoteNotFoundError(f\"remote '{name}' doesn't exist\")\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, config, remote_conf)\n\n\ndef _resolve_remote_refs(repo, config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # \"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = get_fs_config(repo, config, name=parsed.netloc)\n cls, _, _ = get_cloud_fs(repo, **base)\n relpath = parsed.path.lstrip(\"/\").replace(\"/\", cls.sep)\n url = cls.sep.join((base[\"url\"], relpath))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_fs(repo, **kwargs):\n from dvc.config import ConfigError as 
RepoConfigError\n from dvc.config_schema import SCHEMA, Invalid\n\n repo_config = repo.config if repo else {}\n core_config = repo_config.get(\"core\", {})\n\n remote_conf = get_fs_config(repo, repo_config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n\n if \"jobs\" not in remote_conf:\n jobs = core_config.get(\"jobs\")\n if jobs:\n remote_conf[\"jobs\"] = jobs\n\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n remote_conf[\"checksum_jobs\"] = checksum_jobs\n\n cls = get_fs_cls(remote_conf)\n\n if cls == GDriveFileSystem and repo:\n remote_conf[\"gdrive_credentials_tmp_dir\"] = repo.tmp_dir\n\n url = remote_conf.pop(\"url\")\n if issubclass(cls, WebDAVFileSystem):\n # For WebDAVFileSystem, provided url is the base path itself, so it\n # should be treated as being a root path.\n fs_path = cls.root_marker\n else:\n fs_path = cls._strip_protocol(url) # pylint:disable=protected-access\n\n extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access\n conf = {**extras, **remote_conf} # remote config takes priority\n return cls, conf, fs_path\n", "path": "dvc/fs/__init__.py"}], "after_files": [{"content": "from urllib.parse import urlparse\n\n# pylint: disable=unused-import\nfrom dvc_objects.fs import utils # noqa: F401\nfrom dvc_objects.fs import ( # noqa: F401\n FS_MAP,\n AzureFileSystem,\n GDriveFileSystem,\n GSFileSystem,\n HDFSFileSystem,\n HTTPFileSystem,\n HTTPSFileSystem,\n LocalFileSystem,\n MemoryFileSystem,\n OSSFileSystem,\n S3FileSystem,\n Schemes,\n SSHFileSystem,\n WebDAVFileSystem,\n WebDAVSFileSystem,\n WebHDFSFileSystem,\n generic,\n get_fs_cls,\n system,\n)\nfrom dvc_objects.fs.base import AnyFSPath, FileSystem # noqa: F401\nfrom dvc_objects.fs.errors import ( # noqa: F401\n AuthError,\n ConfigError,\n RemoteMissingDepsError,\n)\nfrom dvc_objects.fs.implementations.azure import AzureAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.gdrive import GDriveAuthError # noqa: F401\nfrom dvc_objects.fs.implementations.local import localfs # noqa: F401\nfrom dvc_objects.fs.implementations.ssh import ( # noqa: F401\n DEFAULT_PORT as DEFAULT_SSH_PORT,\n)\nfrom dvc_objects.fs.path import Path # noqa: F401\n\nfrom .data import DataFileSystem # noqa: F401\nfrom .dvc import DvcFileSystem # noqa: F401\nfrom .git import GitFileSystem # noqa: F401\n\n# pylint: enable=unused-import\n\n\ndef get_fs_config(repo, config, **kwargs):\n name = kwargs.get(\"name\")\n if name:\n try:\n remote_conf = config[\"remote\"][name.lower()]\n except KeyError:\n from dvc.config import RemoteNotFoundError\n\n raise RemoteNotFoundError(f\"remote '{name}' doesn't exist\")\n else:\n remote_conf = kwargs\n return _resolve_remote_refs(repo, config, remote_conf)\n\n\ndef _resolve_remote_refs(repo, config, remote_conf):\n # Support for cross referenced remotes.\n # This will merge the settings, shadowing base ref with remote_conf.\n # For example, having:\n #\n # dvc remote add server ssh://localhost\n # dvc remote modify server user root\n # dvc remote modify server ask_password true\n #\n # dvc remote add images remote://server/tmp/pictures\n # dvc remote modify images user alice\n # dvc remote modify images ask_password false\n # dvc remote modify images password asdf1234\n #\n # Results on a config dictionary like:\n #\n # {\n # \"url\": \"ssh://localhost/tmp/pictures\",\n # \"user\": \"alice\",\n # \"password\": \"asdf1234\",\n # 
\"ask_password\": False,\n # }\n parsed = urlparse(remote_conf[\"url\"])\n if parsed.scheme != \"remote\":\n return remote_conf\n\n base = get_fs_config(repo, config, name=parsed.netloc)\n cls, _, _ = get_cloud_fs(repo, **base)\n relpath = parsed.path.lstrip(\"/\").replace(\"/\", cls.sep)\n url = cls.sep.join((base[\"url\"], relpath))\n return {**base, **remote_conf, \"url\": url}\n\n\ndef get_cloud_fs(repo, **kwargs):\n from dvc.config import ConfigError as RepoConfigError\n from dvc.config_schema import SCHEMA, Invalid\n\n repo_config = repo.config if repo else {}\n core_config = repo_config.get(\"core\", {})\n\n remote_conf = get_fs_config(repo, repo_config, **kwargs)\n try:\n remote_conf = SCHEMA[\"remote\"][str](remote_conf)\n except Invalid as exc:\n raise RepoConfigError(str(exc)) from None\n\n if \"checksum_jobs\" not in remote_conf:\n checksum_jobs = core_config.get(\"checksum_jobs\")\n if checksum_jobs:\n remote_conf[\"checksum_jobs\"] = checksum_jobs\n\n cls = get_fs_cls(remote_conf)\n\n if cls == GDriveFileSystem and repo:\n remote_conf[\"gdrive_credentials_tmp_dir\"] = repo.tmp_dir\n\n url = remote_conf.pop(\"url\")\n if issubclass(cls, WebDAVFileSystem):\n # For WebDAVFileSystem, provided url is the base path itself, so it\n # should be treated as being a root path.\n fs_path = cls.root_marker\n else:\n fs_path = cls._strip_protocol(url) # pylint:disable=protected-access\n\n extras = cls._get_kwargs_from_urls(url) # pylint:disable=protected-access\n conf = {**extras, **remote_conf} # remote config takes priority\n return cls, conf, fs_path\n", "path": "dvc/fs/__init__.py"}]} | 1,724 | 136 |
gh_patches_debug_34627 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2364 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update I1022 to only suggest sub if all values can be in the sub
### CloudFormation Lint Version
0.64.1
### What operating system are you using?
All
### Describe the bug
Original feedback provided by @iann0036. Translated to an issue for tracking.
```yaml
Fn::Join:
- ""
- - Fn::Select:
- 0
- Fn::Split:
- "/"
- !Ref MySubnet1CIDR
- !Ref MySubnetsCIDRSize
```
```
I1022: Prefer using Fn::Sub over Fn::Join with an empty delimiter
```
### Expected behavior
Currently the way to make this comply would be
```yaml
Fn::Sub:
- ${CIDR}${MySubnetsCIDRSize}
- CIDR:
Fn::Select:
- 0
- Fn::Split:
- "/"
- !Ref MySubnet1CIDR
```
which may not be as optimal
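To make the intent concrete, here is a minimal sketch of the kind of per-element check that could gate the suggestion; the helper name `can_inline_in_sub` and the exact whitelist are illustrative assumptions, not taken from cfn-lint itself:

```python
def can_inline_in_sub(element):
    # Sketch only: treat a Fn::Join element as Sub-friendly when it is a plain
    # string, a Ref / Fn::GetAtt, or a Fn::Sub whose value is itself a string.
    if isinstance(element, str):
        return True
    if isinstance(element, dict) and len(element) == 1:
        key, value = next(iter(element.items()))
        if key in ("Ref", "Fn::GetAtt"):
            return True
        if key == "Fn::Sub":
            return isinstance(value, str)
    return False


join_elements = [
    {"Fn::Select": [0, {"Fn::Split": ["/", {"Ref": "MySubnet1CIDR"}]}]},
    {"Ref": "MySubnetsCIDRSize"},
]
print(all(can_inline_in_sub(e) for e in join_elements))  # False -> keep the Fn::Join
```

Under a check like this, the join above would no longer trigger I1022, while joins built purely from refs and literals still would.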
### Reproduction template
```yaml
Fn::Join:
- ""
- - Fn::Select:
- 0
- Fn::Split:
- "/"
- !Ref MySubnet1CIDR
- !Ref MySubnetsCIDRSize
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/functions/SubNotJoin.py`
Content:
```
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule, RuleMatch
6
7
8 class SubNotJoin(CloudFormationLintRule):
9 """Check if Join is being used with no join characters"""
10 id = 'I1022'
11 shortdesc = 'Use Sub instead of Join'
12 description = 'Prefer a sub instead of Join when using a join delimiter that is empty'
13 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
14 tags = ['functions', 'sub', 'join']
15
16 def match(self, cfn):
17 matches = []
18
19 join_objs = cfn.search_deep_keys('Fn::Join')
20
21 for join_obj in join_objs:
22 if isinstance(join_obj[-1], list):
23 join_operator = join_obj[-1][0]
24 if isinstance(join_operator, str):
25 if join_operator == '':
26 matches.append(RuleMatch(
27 join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))
28 return matches
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/functions/SubNotJoin.py b/src/cfnlint/rules/functions/SubNotJoin.py
--- a/src/cfnlint/rules/functions/SubNotJoin.py
+++ b/src/cfnlint/rules/functions/SubNotJoin.py
@@ -7,12 +7,34 @@
class SubNotJoin(CloudFormationLintRule):
"""Check if Join is being used with no join characters"""
+
id = 'I1022'
shortdesc = 'Use Sub instead of Join'
- description = 'Prefer a sub instead of Join when using a join delimiter that is empty'
+ description = (
+ 'Prefer a sub instead of Join when using a join delimiter that is empty'
+ )
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'
tags = ['functions', 'sub', 'join']
+ def _check_element(self, element):
+ if isinstance(element, dict):
+ if len(element) == 1:
+ for key, value in element.items():
+ if key in ['Fn::Sub']:
+ if not isinstance(value, str):
+ return False
+ elif key not in ['Ref', 'Fn::GetAtt']:
+ return False
+
+ return True
+
+ def _check_elements(self, elements):
+ for element in elements:
+ if not self._check_element(element):
+ return False
+
+ return True
+
def match(self, cfn):
matches = []
@@ -21,8 +43,15 @@
for join_obj in join_objs:
if isinstance(join_obj[-1], list):
join_operator = join_obj[-1][0]
+ join_elements = join_obj[-1][1]
if isinstance(join_operator, str):
if join_operator == '':
- matches.append(RuleMatch(
- join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))
+ if isinstance(join_elements, list):
+ if self._check_elements(join_elements):
+ matches.append(
+ RuleMatch(
+ join_obj[0:-1],
+ 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',
+ )
+ )
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/functions/SubNotJoin.py b/src/cfnlint/rules/functions/SubNotJoin.py\n--- a/src/cfnlint/rules/functions/SubNotJoin.py\n+++ b/src/cfnlint/rules/functions/SubNotJoin.py\n@@ -7,12 +7,34 @@\n \n class SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n+\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n- description = 'Prefer a sub instead of Join when using a join delimiter that is empty'\n+ description = (\n+ 'Prefer a sub instead of Join when using a join delimiter that is empty'\n+ )\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n \n+ def _check_element(self, element):\n+ if isinstance(element, dict):\n+ if len(element) == 1:\n+ for key, value in element.items():\n+ if key in ['Fn::Sub']:\n+ if not isinstance(value, str):\n+ return False\n+ elif key not in ['Ref', 'Fn::GetAtt']:\n+ return False\n+\n+ return True\n+\n+ def _check_elements(self, elements):\n+ for element in elements:\n+ if not self._check_element(element):\n+ return False\n+\n+ return True\n+\n def match(self, cfn):\n matches = []\n \n@@ -21,8 +43,15 @@\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n+ join_elements = join_obj[-1][1]\n if isinstance(join_operator, str):\n if join_operator == '':\n- matches.append(RuleMatch(\n- join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))\n+ if isinstance(join_elements, list):\n+ if self._check_elements(join_elements):\n+ matches.append(\n+ RuleMatch(\n+ join_obj[0:-1],\n+ 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',\n+ )\n+ )\n return matches\n", "issue": "Update I1022 to only suggest sub if all values can be in the sub\n### CloudFormation Lint Version\n\n0.64.1\n\n### What operating system are you using?\n\nAll\n\n### Describe the bug\n\nOriginal feedback provided by @iann0036. Translated to an issue for tracking.\r\n\r\n```yaml\r\nFn::Join:\r\n - \"\"\r\n - - Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n - !Ref MySubnetsCIDRSize\r\n```\r\n\r\n```\r\nI1022: Prefer using Fn::Sub over Fn::Join with an empty delimiter\r\n```\n\n### Expected behavior\n\nCurrently the way to make this comply would be\r\n\r\n```yaml\r\nFn::Sub:\r\n - ${CIDR}${MySubnetsCIDRSize}\r\n - CIDR:\r\n Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n```\r\n\r\nwhich may not be as optimal \n\n### Reproduction template\n\n```yaml\r\nFn::Join:\r\n - \"\"\r\n - - Fn::Select:\r\n - 0\r\n - Fn::Split:\r\n - \"/\"\r\n - !Ref MySubnet1CIDR\r\n - !Ref MySubnetsCIDRSize\r\n ````\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n description = 'Prefer a sub instead of Join when using a join delimiter that is empty'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n\n def match(self, cfn):\n matches = []\n\n join_objs = cfn.search_deep_keys('Fn::Join')\n\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n if isinstance(join_operator, str):\n if join_operator == '':\n matches.append(RuleMatch(\n join_obj[0:-1], 'Prefer using Fn::Sub over Fn::Join with an empty delimiter'))\n return matches\n", "path": "src/cfnlint/rules/functions/SubNotJoin.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass SubNotJoin(CloudFormationLintRule):\n \"\"\"Check if Join is being used with no join characters\"\"\"\n\n id = 'I1022'\n shortdesc = 'Use Sub instead of Join'\n description = (\n 'Prefer a sub instead of Join when using a join delimiter that is empty'\n )\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-sub.html'\n tags = ['functions', 'sub', 'join']\n\n def _check_element(self, element):\n if isinstance(element, dict):\n if len(element) == 1:\n for key, value in element.items():\n if key in ['Fn::Sub']:\n if not isinstance(value, str):\n return False\n elif key not in ['Ref', 'Fn::GetAtt']:\n return False\n\n return True\n\n def _check_elements(self, elements):\n for element in elements:\n if not self._check_element(element):\n return False\n\n return True\n\n def match(self, cfn):\n matches = []\n\n join_objs = cfn.search_deep_keys('Fn::Join')\n\n for join_obj in join_objs:\n if isinstance(join_obj[-1], list):\n join_operator = join_obj[-1][0]\n join_elements = join_obj[-1][1]\n if isinstance(join_operator, str):\n if join_operator == '':\n if isinstance(join_elements, list):\n if self._check_elements(join_elements):\n matches.append(\n RuleMatch(\n join_obj[0:-1],\n 'Prefer using Fn::Sub over Fn::Join with an empty delimiter',\n )\n )\n return matches\n", "path": "src/cfnlint/rules/functions/SubNotJoin.py"}]} | 851 | 504 |
gh_patches_debug_1475 | rasdani/github-patches | git_diff | graspologic-org__graspologic-654 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Possible issue with direct import
```
import graspologic
dir(graspologic)
```
returns
```
['__builtins__',
'__cached__',
'__doc__',
'__file__',
'__loader__',
'__name__',
'__package__',
'__path__',
'__spec__',
'__version',
'__version__',
'graspologic',
'layouts',
'models',
'partition',
'plot',
'preprocessing',
'subgraph',
'version']
```
and is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).
Is this intentional?
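A likely explanation (my assumption, not confirmed in the issue): `dir()` on a package only lists submodules that have actually been imported, typically by the package's `__init__.py`; submodules that exist on disk but are never imported stay invisible. A small self-contained sketch:

```python
import types

pkg = types.ModuleType("demo_pkg")            # stand-in for a package such as graspologic
models = types.ModuleType("demo_pkg.models")
pkg.models = models                           # roughly what `import demo_pkg.models` does

print("models" in dir(pkg))    # True  - attached because it was imported
print("nominate" in dir(pkg))  # False - never imported, so never listed
```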
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspologic/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 import graspologic.align
5 import graspologic.cluster
6 import graspologic.datasets
7 import graspologic.embed
8 import graspologic.inference
9 import graspologic.layouts
10 import graspologic.models
11 import graspologic.partition
12 import graspologic.preprocessing
13 import graspologic.plot
14 import graspologic.simulations
15 import graspologic.subgraph
16 import graspologic.utils
17
18 from graspologic.version import __version
19
20 __version__ = __version()
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspologic/__init__.py b/graspologic/__init__.py
--- a/graspologic/__init__.py
+++ b/graspologic/__init__.py
@@ -8,6 +8,7 @@
import graspologic.inference
import graspologic.layouts
import graspologic.models
+import graspologic.nominate
import graspologic.partition
import graspologic.preprocessing
import graspologic.plot
| {"golden_diff": "diff --git a/graspologic/__init__.py b/graspologic/__init__.py\n--- a/graspologic/__init__.py\n+++ b/graspologic/__init__.py\n@@ -8,6 +8,7 @@\n import graspologic.inference\n import graspologic.layouts\n import graspologic.models\n+import graspologic.nominate\n import graspologic.partition\n import graspologic.preprocessing\n import graspologic.plot\n", "issue": "[BUG] Possible issue with direct import\n```\r\nimport graspologic\r\ndir(graspologic)\r\n```\r\nreturns \r\n\r\n```\r\n['__builtins__',\r\n '__cached__',\r\n '__doc__',\r\n '__file__',\r\n '__loader__',\r\n '__name__',\r\n '__package__',\r\n '__path__',\r\n '__spec__',\r\n '__version',\r\n '__version__',\r\n 'graspologic',\r\n 'layouts',\r\n 'models',\r\n 'partition',\r\n 'plot',\r\n 'preprocessing',\r\n 'subgraph',\r\n 'version']\r\n```\r\n\r\nand is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).\r\nIs this intentional?\n[BUG] Possible issue with direct import\n```\r\nimport graspologic\r\ndir(graspologic)\r\n```\r\nreturns \r\n\r\n```\r\n['__builtins__',\r\n '__cached__',\r\n '__doc__',\r\n '__file__',\r\n '__loader__',\r\n '__name__',\r\n '__package__',\r\n '__path__',\r\n '__spec__',\r\n '__version',\r\n '__version__',\r\n 'graspologic',\r\n 'layouts',\r\n 'models',\r\n 'partition',\r\n 'plot',\r\n 'preprocessing',\r\n 'subgraph',\r\n 'version']\r\n```\r\n\r\nand is missing lots of modules (align, cluster, datasets, embed, inference, match, nominate, pipeline, utils).\r\nIs this intentional?\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n", "path": "graspologic/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nimport graspologic.align\nimport graspologic.cluster\nimport graspologic.datasets\nimport graspologic.embed\nimport graspologic.inference\nimport graspologic.layouts\nimport graspologic.models\nimport graspologic.nominate\nimport graspologic.partition\nimport graspologic.preprocessing\nimport graspologic.plot\nimport graspologic.simulations\nimport graspologic.subgraph\nimport graspologic.utils\n\nfrom graspologic.version import __version\n\n__version__ = __version()\n", "path": "graspologic/__init__.py"}]} | 655 | 88 |
gh_patches_debug_13790 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3620 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider wafflehouse is broken
During the global build at 2021-06-02-14-42-40, spider **wafflehouse** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/wafflehouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/wafflehouse.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3
4 import scrapy
5
6 from locations.hours import OpeningHours
7 from locations.items import GeojsonPointItem
8
9
10 class WaffleHouseSpider(scrapy.Spider):
11 name = "wafflehouse"
12 item_attributes = {"brand": "Waffle House", "brand_wikidata": "Q1701206"}
13 allowed_domains = ["wafflehouse.com"]
14 start_urls = [
15 "https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1"
16 ]
17
18 def parse(self, response):
19 for row in response.json()["markers"]:
20 url = "https://locations.wafflehouse.com/" + row["slug"]
21 yield scrapy.Request(url, callback=self.parse_store)
22
23 def parse_store(self, response):
24 data = json.loads(
25 response.xpath('//head/script[@type="application/ld+json"]/text()').get()
26 )
27
28 hours = OpeningHours()
29 specs = data.get("openingHoursSpecification", [])
30 if any({"validFrom", "validThrough"} <= spec.keys() for spec in specs):
31 # Giving opening hours for specific dates, abandon the whole proposal
32 pass
33 else:
34 for spec in specs:
35 for day in spec["dayOfWeek"]:
36 hours.add_range(
37 day[:2].capitalize(), spec["opens"], spec["closes"], "%I%p"
38 )
39
40 properties = {
41 "ref": data["@id"],
42 "lat": data["geo"]["latitude"],
43 "lon": data["geo"]["longitude"],
44 "website": response.url,
45 "name": data["name"],
46 "phone": data["telephone"],
47 "addr_full": data["address"]["streetAddress"],
48 "city": data["address"]["addressLocality"],
49 "state": data["address"]["addressRegion"],
50 "postcode": data["address"]["postalCode"],
51 "opening_hours": hours.as_opening_hours(),
52 }
53 yield GeojsonPointItem(**properties)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/wafflehouse.py b/locations/spiders/wafflehouse.py
--- a/locations/spiders/wafflehouse.py
+++ b/locations/spiders/wafflehouse.py
@@ -44,10 +44,11 @@
"website": response.url,
"name": data["name"],
"phone": data["telephone"],
- "addr_full": data["address"]["streetAddress"],
+ "street_address": data["address"]["streetAddress"],
"city": data["address"]["addressLocality"],
"state": data["address"]["addressRegion"],
"postcode": data["address"]["postalCode"],
"opening_hours": hours.as_opening_hours(),
}
+
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/wafflehouse.py b/locations/spiders/wafflehouse.py\n--- a/locations/spiders/wafflehouse.py\n+++ b/locations/spiders/wafflehouse.py\n@@ -44,10 +44,11 @@\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n- \"addr_full\": data[\"address\"][\"streetAddress\"],\n+ \"street_address\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n+\n yield GeojsonPointItem(**properties)\n", "issue": "Spider wafflehouse is broken\nDuring the global build at 2021-06-02-14-42-40, spider **wafflehouse** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/wafflehouse.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/wafflehouse.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass WaffleHouseSpider(scrapy.Spider):\n name = \"wafflehouse\"\n item_attributes = {\"brand\": \"Waffle House\", \"brand_wikidata\": \"Q1701206\"}\n allowed_domains = [\"wafflehouse.com\"]\n start_urls = [\n \"https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1\"\n ]\n\n def parse(self, response):\n for row in response.json()[\"markers\"]:\n url = \"https://locations.wafflehouse.com/\" + row[\"slug\"]\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(\n response.xpath('//head/script[@type=\"application/ld+json\"]/text()').get()\n )\n\n hours = OpeningHours()\n specs = data.get(\"openingHoursSpecification\", [])\n if any({\"validFrom\", \"validThrough\"} <= spec.keys() for spec in specs):\n # Giving opening hours for specific dates, abandon the whole proposal\n pass\n else:\n for spec in specs:\n for day in spec[\"dayOfWeek\"]:\n hours.add_range(\n day[:2].capitalize(), spec[\"opens\"], spec[\"closes\"], \"%I%p\"\n )\n\n properties = {\n \"ref\": data[\"@id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n \"addr_full\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wafflehouse.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass WaffleHouseSpider(scrapy.Spider):\n name = \"wafflehouse\"\n item_attributes = {\"brand\": \"Waffle House\", \"brand_wikidata\": 
\"Q1701206\"}\n allowed_domains = [\"wafflehouse.com\"]\n start_urls = [\n \"https://wafflehouse.locally.com/stores/conversion_data?has_data=true&company_id=117995&store_mode=&style=&color=&upc=&category=&inline=1&show_links_in_list=&parent_domain=&map_center_lat=39.8&map_center_lng=-98.6&map_distance_diag=3000&sort_by=proximity&no_variants=0&only_retailer_id=&dealers_company_id=&only_store_id=false&uses_alt_coords=false&zoom_level=4&lang=en-us&forced_coords=1\"\n ]\n\n def parse(self, response):\n for row in response.json()[\"markers\"]:\n url = \"https://locations.wafflehouse.com/\" + row[\"slug\"]\n yield scrapy.Request(url, callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(\n response.xpath('//head/script[@type=\"application/ld+json\"]/text()').get()\n )\n\n hours = OpeningHours()\n specs = data.get(\"openingHoursSpecification\", [])\n if any({\"validFrom\", \"validThrough\"} <= spec.keys() for spec in specs):\n # Giving opening hours for specific dates, abandon the whole proposal\n pass\n else:\n for spec in specs:\n for day in spec[\"dayOfWeek\"]:\n hours.add_range(\n day[:2].capitalize(), spec[\"opens\"], spec[\"closes\"], \"%I%p\"\n )\n\n properties = {\n \"ref\": data[\"@id\"],\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"website\": response.url,\n \"name\": data[\"name\"],\n \"phone\": data[\"telephone\"],\n \"street_address\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": hours.as_opening_hours(),\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/wafflehouse.py"}]} | 1,076 | 164 |
gh_patches_debug_31352 | rasdani/github-patches | git_diff | Theano__Theano-4512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
d3viz doesn't work for some graphs
Sometimes d3viz doesn't work for me: an empty graph is displayed, and there is an error message in the JS console. I tried to reduce it to a minimal example:
``` py
import os
os.environ['THEANO_FLAGS'] = "device=gpu0,floatX=float32" #,optimizer=fast_compile"
import numpy as np
from lasagne.updates import adam
from theano import tensor as T, shared, function
import theano.d3viz as d3v
def show_d3(g):
d3v.d3viz(g, 'example.html')
from IPython.display import IFrame
return IFrame('example.html', width=800, height=500)
x = T.fvector()
W = shared(np.zeros((10, 5), dtype=np.float32))
b = shared(np.zeros((10,), dtype=np.float32))
y_true = T.fvector()
y = T.nnet.sigmoid(T.dot(x, W) + b)
cost = T.sqrt(((y - y_true)**2).sum())
updates = adam(cost, [W]) # no b!
f_cost = function([x, y_true], cost, updates=updates)
show_d3(f_cost)
```
(IPython notebook: https://gist.github.com/kmike/13b0fb747dccd4f2f1e44789a9cb832c).
This is brittle: the chart works if `adam` is replaced with any other training method from Lasagne (or if a simple SGD is implemented manually); if `T.sqrt` is removed from the `cost` expression; if `T.nnet.sigmoid` is removed from the `y` expression; if `b` is added to the adam updates; or if `optimizer=fast_compile` is added to `THEANO_FLAGS`.
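Judging from the patch below, one plausible root cause is that the DOT source is embedded in the generated HTML as a raw JavaScript string, and some node labels in this particular graph contain characters that the manual quote-escaping does not handle. Serialising the whole string with a JSON encoder avoids hand-rolled escaping; a minimal sketch (the label text here is invented for illustration):

```python
import json

def safe_js_string(text):
    # json.dumps yields a valid JS string literal (quotes, backslashes and
    # newlines escaped); escaping "<" also keeps it safe inside <script> tags.
    return json.dumps(text).replace("<", "\\u003c")

dot_src = 'digraph G { n0 [label="GpuElemwise{sqrt,no_inplace}\\nid 0"]; }'
print(safe_js_string(dot_src))
```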
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `theano/d3viz/d3viz.py`
Content:
```
1 """Dynamic visualization of Theano graphs.
2
3 Author: Christof Angermueller <[email protected]>
4 """
5 from __future__ import absolute_import, print_function, division
6
7 import os
8 import shutil
9 import re
10 import six
11 from six import iteritems
12
13 from theano.d3viz.formatting import PyDotFormatter
14
15 __path__ = os.path.dirname(os.path.realpath(__file__))
16
17
18 def replace_patterns(x, replace):
19 """Replace `replace` in string `x`.
20
21 Parameters
22 ----------
23 s : str
24 String on which function is applied
25 replace : dict
26 `key`, `value` pairs where key is a regular expression and `value` a
27 string by which `key` is replaced
28 """
29 for from_, to in iteritems(replace):
30 x = x.replace(str(from_), str(to))
31 return x
32
33
34 def escape_quotes(s):
35 """Escape quotes in string.
36
37 Parameters
38 ----------
39 s : str
40 String on which function is applied
41 """
42 s = re.sub(r'''(['"])''', r'\\\1', s)
43 return s
44
45
46 def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):
47 """Create HTML file with dynamic visualizing of a Theano function graph.
48
49 In the HTML file, the whole graph or single nodes can be moved by drag and
50 drop. Zooming is possible via the mouse wheel. Detailed information about
51 nodes and edges are displayed via mouse-over events. Node labels can be
52 edited by selecting Edit from the context menu.
53
54 Input nodes are colored in green, output nodes in blue. Apply nodes are
55 ellipses, and colored depending on the type of operation they perform. Red
56 ellipses are transfers from/to the GPU (ops with names GpuFromHost,
57 HostFromGpu).
58
59 Edges are black by default. If a node returns a view of an
60 input, the input edge will be blue. If it returns a destroyed input, the
61 edge will be red.
62
63 Parameters
64 ----------
65 fct : theano.compile.function_module.Function
66 A compiled Theano function, variable, apply or a list of variables.
67 outfile : str
68 Path to output HTML file.
69 copy_deps : bool, optional
70 Copy javascript and CSS dependencies to output directory.
71
72 Notes
73 -----
74 This function accepts extra parameters which will be forwarded to
75 :class:`theano.d3viz.formatting.PyDotFormatter`.
76
77 """
78
79 # Create DOT graph
80 formatter = PyDotFormatter(*args, **kwargs)
81 graph = formatter(fct)
82 dot_graph_raw = graph.create_dot()
83 if not six.PY2:
84 dot_graph_raw = dot_graph_raw.decode('utf8')
85 dot_graph = escape_quotes(dot_graph_raw).replace('\n', '').replace('\r', '')
86
87 # Create output directory if not existing
88 outdir = os.path.dirname(outfile)
89 if not outdir == '' and not os.path.exists(outdir):
90 os.makedirs(outdir)
91
92 # Read template HTML file
93 template_file = os.path.join(__path__, 'html', 'template.html')
94 with open(template_file) as f:
95 template = f.read()
96
97 # Copy dependencies to output directory
98 src_deps = __path__
99 if copy_deps:
100 dst_deps = 'd3viz'
101 for d in ['js', 'css']:
102 dep = os.path.join(outdir, dst_deps, d)
103 if not os.path.exists(dep):
104 shutil.copytree(os.path.join(src_deps, d), dep)
105 else:
106 dst_deps = src_deps
107
108 # Replace patterns in template
109 replace = {
110 '%% JS_DIR %%': os.path.join(dst_deps, 'js'),
111 '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),
112 '%% DOT_GRAPH %%': dot_graph,
113 }
114 html = replace_patterns(template, replace)
115
116 # Write HTML file
117 with open(outfile, 'w') as f:
118 f.write(html)
119
120
121 def d3write(fct, path, *args, **kwargs):
122 """Convert Theano graph to pydot graph and write to dot file.
123
124 Parameters
125 ----------
126 fct : theano.compile.function_module.Function
127 A compiled Theano function, variable, apply or a list of variables.
128 path: str
129 Path to output file
130
131 Notes
132 -----
133 This function accepts extra parameters which will be forwarded to
134 :class:`theano.d3viz.formatting.PyDotFormatter`.
135
136 """
137
138 formatter = PyDotFormatter(*args, **kwargs)
139 graph = formatter(fct)
140 graph.write_dot(path)
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/theano/d3viz/d3viz.py b/theano/d3viz/d3viz.py
--- a/theano/d3viz/d3viz.py
+++ b/theano/d3viz/d3viz.py
@@ -5,8 +5,8 @@
from __future__ import absolute_import, print_function, division
import os
+import json
import shutil
-import re
import six
from six import iteritems
@@ -31,16 +31,15 @@
return x
-def escape_quotes(s):
- """Escape quotes in string.
+def safe_json(obj):
+ """Encode `obj` to JSON so that it can be embedded safely inside HTML.
Parameters
----------
- s : str
- String on which function is applied
+ obj : object
+ object to serialize
"""
- s = re.sub(r'''(['"])''', r'\\\1', s)
- return s
+ return json.dumps(obj).replace('<', '\\u003c')
def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):
@@ -79,10 +78,9 @@
# Create DOT graph
formatter = PyDotFormatter(*args, **kwargs)
graph = formatter(fct)
- dot_graph_raw = graph.create_dot()
+ dot_graph = graph.create_dot()
if not six.PY2:
- dot_graph_raw = dot_graph_raw.decode('utf8')
- dot_graph = escape_quotes(dot_graph_raw).replace('\n', '').replace('\r', '')
+ dot_graph = dot_graph.decode('utf8')
# Create output directory if not existing
outdir = os.path.dirname(outfile)
@@ -109,7 +107,7 @@
replace = {
'%% JS_DIR %%': os.path.join(dst_deps, 'js'),
'%% CSS_DIR %%': os.path.join(dst_deps, 'css'),
- '%% DOT_GRAPH %%': dot_graph,
+ '%% DOT_GRAPH %%': safe_json(dot_graph),
}
html = replace_patterns(template, replace)
| {"golden_diff": "diff --git a/theano/d3viz/d3viz.py b/theano/d3viz/d3viz.py\n--- a/theano/d3viz/d3viz.py\n+++ b/theano/d3viz/d3viz.py\n@@ -5,8 +5,8 @@\n from __future__ import absolute_import, print_function, division\n \n import os\n+import json\n import shutil\n-import re\n import six\n from six import iteritems\n \n@@ -31,16 +31,15 @@\n return x\n \n \n-def escape_quotes(s):\n- \"\"\"Escape quotes in string.\n+def safe_json(obj):\n+ \"\"\"Encode `obj` to JSON so that it can be embedded safely inside HTML.\n \n Parameters\n ----------\n- s : str\n- String on which function is applied\n+ obj : object\n+ object to serialize\n \"\"\"\n- s = re.sub(r'''(['\"])''', r'\\\\\\1', s)\n- return s\n+ return json.dumps(obj).replace('<', '\\\\u003c')\n \n \n def d3viz(fct, outfile, copy_deps=True, *args, **kwargs):\n@@ -79,10 +78,9 @@\n # Create DOT graph\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n- dot_graph_raw = graph.create_dot()\n+ dot_graph = graph.create_dot()\n if not six.PY2:\n- dot_graph_raw = dot_graph_raw.decode('utf8')\n- dot_graph = escape_quotes(dot_graph_raw).replace('\\n', '').replace('\\r', '')\n+ dot_graph = dot_graph.decode('utf8')\n \n # Create output directory if not existing\n outdir = os.path.dirname(outfile)\n@@ -109,7 +107,7 @@\n replace = {\n '%% JS_DIR %%': os.path.join(dst_deps, 'js'),\n '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),\n- '%% DOT_GRAPH %%': dot_graph,\n+ '%% DOT_GRAPH %%': safe_json(dot_graph),\n }\n html = replace_patterns(template, replace)\n", "issue": "d3viz doesn't work for some graphs\nSometimes d3viz doesn't work for me - empty graph is displayed, and there is an error message in JS console. I tried to reduce it to a minimal example\n\n``` py\nimport os \nos.environ['THEANO_FLAGS'] = \"device=gpu0,floatX=float32\" #,optimizer=fast_compile\"\n\nimport numpy as np\nfrom lasagne.updates import adam\nfrom theano import tensor as T, shared, function\nimport theano.d3viz as d3v\n\ndef show_d3(g):\n d3v.d3viz(g, 'example.html')\n from IPython.display import IFrame\n return IFrame('example.html', width=800, height=500)\n\nx = T.fvector()\nW = shared(np.zeros((10, 5), dtype=np.float32))\nb = shared(np.zeros((10,), dtype=np.float32))\ny_true = T.fvector()\n\ny = T.nnet.sigmoid(T.dot(x, W) + b)\ncost = T.sqrt(((y - y_true)**2).sum())\n\nupdates = adam(cost, [W]) # no b!\nf_cost = function([x, y_true], cost, updates=updates)\nshow_d3(f_cost)\n```\n\n(IPython notebook: https://gist.github.com/kmike/13b0fb747dccd4f2f1e44789a9cb832c).\nThis is brittle: if `adam` is replaced with any other training method from Lasagne (or if a simple SGD is implemented manually) chart works; if `T.sqrt` is removed from `cost` expression chart works; if `T.nnet sigmoid` is removed from `y` expression chart works; if `b` is added to adam updates chart works; if `optimizer=fast_compile` is added to THEANO_FLAGS chart works.\n\n", "before_files": [{"content": "\"\"\"Dynamic visualization of Theano graphs.\n\nAuthor: Christof Angermueller <[email protected]>\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport os\nimport shutil\nimport re\nimport six\nfrom six import iteritems\n\nfrom theano.d3viz.formatting import PyDotFormatter\n\n__path__ = os.path.dirname(os.path.realpath(__file__))\n\n\ndef replace_patterns(x, replace):\n \"\"\"Replace `replace` in string `x`.\n\n Parameters\n ----------\n s : str\n String on which function is applied\n replace : dict\n `key`, `value` pairs where key is a regular expression and `value` 
a\n string by which `key` is replaced\n \"\"\"\n for from_, to in iteritems(replace):\n x = x.replace(str(from_), str(to))\n return x\n\n\ndef escape_quotes(s):\n \"\"\"Escape quotes in string.\n\n Parameters\n ----------\n s : str\n String on which function is applied\n \"\"\"\n s = re.sub(r'''(['\"])''', r'\\\\\\1', s)\n return s\n\n\ndef d3viz(fct, outfile, copy_deps=True, *args, **kwargs):\n \"\"\"Create HTML file with dynamic visualizing of a Theano function graph.\n\n In the HTML file, the whole graph or single nodes can be moved by drag and\n drop. Zooming is possible via the mouse wheel. Detailed information about\n nodes and edges are displayed via mouse-over events. Node labels can be\n edited by selecting Edit from the context menu.\n\n Input nodes are colored in green, output nodes in blue. Apply nodes are\n ellipses, and colored depending on the type of operation they perform. Red\n ellipses are transfers from/to the GPU (ops with names GpuFromHost,\n HostFromGpu).\n\n Edges are black by default. If a node returns a view of an\n input, the input edge will be blue. If it returns a destroyed input, the\n edge will be red.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n outfile : str\n Path to output HTML file.\n copy_deps : bool, optional\n Copy javascript and CSS dependencies to output directory.\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n # Create DOT graph\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n dot_graph_raw = graph.create_dot()\n if not six.PY2:\n dot_graph_raw = dot_graph_raw.decode('utf8')\n dot_graph = escape_quotes(dot_graph_raw).replace('\\n', '').replace('\\r', '')\n\n # Create output directory if not existing\n outdir = os.path.dirname(outfile)\n if not outdir == '' and not os.path.exists(outdir):\n os.makedirs(outdir)\n\n # Read template HTML file\n template_file = os.path.join(__path__, 'html', 'template.html')\n with open(template_file) as f:\n template = f.read()\n\n # Copy dependencies to output directory\n src_deps = __path__\n if copy_deps:\n dst_deps = 'd3viz'\n for d in ['js', 'css']:\n dep = os.path.join(outdir, dst_deps, d)\n if not os.path.exists(dep):\n shutil.copytree(os.path.join(src_deps, d), dep)\n else:\n dst_deps = src_deps\n\n # Replace patterns in template\n replace = {\n '%% JS_DIR %%': os.path.join(dst_deps, 'js'),\n '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),\n '%% DOT_GRAPH %%': dot_graph,\n }\n html = replace_patterns(template, replace)\n\n # Write HTML file\n with open(outfile, 'w') as f:\n f.write(html)\n\n\ndef d3write(fct, path, *args, **kwargs):\n \"\"\"Convert Theano graph to pydot graph and write to dot file.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n path: str\n Path to output file\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n graph.write_dot(path)\n", "path": "theano/d3viz/d3viz.py"}], "after_files": [{"content": "\"\"\"Dynamic visualization of Theano graphs.\n\nAuthor: Christof Angermueller <[email protected]>\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport os\nimport 
json\nimport shutil\nimport six\nfrom six import iteritems\n\nfrom theano.d3viz.formatting import PyDotFormatter\n\n__path__ = os.path.dirname(os.path.realpath(__file__))\n\n\ndef replace_patterns(x, replace):\n \"\"\"Replace `replace` in string `x`.\n\n Parameters\n ----------\n s : str\n String on which function is applied\n replace : dict\n `key`, `value` pairs where key is a regular expression and `value` a\n string by which `key` is replaced\n \"\"\"\n for from_, to in iteritems(replace):\n x = x.replace(str(from_), str(to))\n return x\n\n\ndef safe_json(obj):\n \"\"\"Encode `obj` to JSON so that it can be embedded safely inside HTML.\n\n Parameters\n ----------\n obj : object\n object to serialize\n \"\"\"\n return json.dumps(obj).replace('<', '\\\\u003c')\n\n\ndef d3viz(fct, outfile, copy_deps=True, *args, **kwargs):\n \"\"\"Create HTML file with dynamic visualizing of a Theano function graph.\n\n In the HTML file, the whole graph or single nodes can be moved by drag and\n drop. Zooming is possible via the mouse wheel. Detailed information about\n nodes and edges are displayed via mouse-over events. Node labels can be\n edited by selecting Edit from the context menu.\n\n Input nodes are colored in green, output nodes in blue. Apply nodes are\n ellipses, and colored depending on the type of operation they perform. Red\n ellipses are transfers from/to the GPU (ops with names GpuFromHost,\n HostFromGpu).\n\n Edges are black by default. If a node returns a view of an\n input, the input edge will be blue. If it returns a destroyed input, the\n edge will be red.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n outfile : str\n Path to output HTML file.\n copy_deps : bool, optional\n Copy javascript and CSS dependencies to output directory.\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n # Create DOT graph\n formatter = PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n dot_graph = graph.create_dot()\n if not six.PY2:\n dot_graph = dot_graph.decode('utf8')\n\n # Create output directory if not existing\n outdir = os.path.dirname(outfile)\n if not outdir == '' and not os.path.exists(outdir):\n os.makedirs(outdir)\n\n # Read template HTML file\n template_file = os.path.join(__path__, 'html', 'template.html')\n with open(template_file) as f:\n template = f.read()\n\n # Copy dependencies to output directory\n src_deps = __path__\n if copy_deps:\n dst_deps = 'd3viz'\n for d in ['js', 'css']:\n dep = os.path.join(outdir, dst_deps, d)\n if not os.path.exists(dep):\n shutil.copytree(os.path.join(src_deps, d), dep)\n else:\n dst_deps = src_deps\n\n # Replace patterns in template\n replace = {\n '%% JS_DIR %%': os.path.join(dst_deps, 'js'),\n '%% CSS_DIR %%': os.path.join(dst_deps, 'css'),\n '%% DOT_GRAPH %%': safe_json(dot_graph),\n }\n html = replace_patterns(template, replace)\n\n # Write HTML file\n with open(outfile, 'w') as f:\n f.write(html)\n\n\ndef d3write(fct, path, *args, **kwargs):\n \"\"\"Convert Theano graph to pydot graph and write to dot file.\n\n Parameters\n ----------\n fct : theano.compile.function_module.Function\n A compiled Theano function, variable, apply or a list of variables.\n path: str\n Path to output file\n\n Notes\n -----\n This function accepts extra parameters which will be forwarded to\n :class:`theano.d3viz.formatting.PyDotFormatter`.\n\n \"\"\"\n\n formatter 
= PyDotFormatter(*args, **kwargs)\n graph = formatter(fct)\n graph.write_dot(path)\n", "path": "theano/d3viz/d3viz.py"}]} | 1,997 | 462 |
gh_patches_debug_3837 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[BUG]: wrong in KL_approx, the order of the two distributions is handled incorrectly
compute_approx_kl for `NaiveExperienceMaker` may be incorrect.
As mentioned in [Approximating KL Divergence](http://joschu.net/blog/kl-approx.html),
$$ KL[q,p] = \mathbb{E}_{x\sim q}[\log\frac{q(x)}{p(x)}] $$
let
$$ r = \frac{p(x)}{q(x)} $$
note that x is sampled from distribution q.
Then
$$ KL_{approx}[q,p] = \mathbb{E}_{x\sim q}[-\log(r) + (r-1) ] $$
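A quick Monte-Carlo check of that estimator (a standalone sketch with toy Gaussians, not code from coati) confirms it targets $KL[q,p]$ when the samples come from $q$ and $r = p(x)/q(x)$:

```python
import math, random

random.seed(0)
mu_q, mu_p = 0.0, 1.0                      # q = N(0, 1), p = N(1, 1)

def log_pdf(x, mu):                        # unnormalised; the constant cancels in the ratio
    return -0.5 * (x - mu) ** 2

xs = [random.gauss(mu_q, 1.0) for _ in range(200_000)]     # x ~ q
log_r = [log_pdf(x, mu_p) - log_pdf(x, mu_q) for x in xs]  # log p(x)/q(x)
k3 = sum((math.exp(lr) - 1) - lr for lr in log_r) / len(xs)

print(round(k3, 3))   # ~0.5, the analytic KL[q, p] = (mu_p - mu_q)**2 / 2
```

The key point is that `log_r` is log p minus log q evaluated on samples drawn from q; with the operands swapped the (r - 1) correction no longer averages to zero under q, so the computed quantity is not the intended KL.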
---
In the paper [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155), the objective for the actor (i.e. the reward of the experience, ignoring loss_ptx) is shown below:
<img width="756" alt="image" src="https://user-images.githubusercontent.com/22851737/236610916-2f068c34-1508-438f-bd16-6fe6ed491e8c.png">
<img width="795" alt="image" src="https://user-images.githubusercontent.com/22851737/236611424-17600f6e-7aca-4bdf-95bd-2ce4035bcd3a.png">
So for computing the KL, samples are drawn from the actor model, i.e. $\pi^{RL}_\phi$, instead of $\pi^{SFT}$.
The KL in the objective should be $KL[\pi^{RL}, \pi^{SFT}] = KL[q,p]$, and the $r$ of KL_approx should be $\frac{\pi^{SFT}(x)}{\pi^{RL}_\phi(x)}$.
---
However, in `coati.models.utils.compute_approx_kl` we currently have
``` python
log_ratio = log_probs - log_probs_base
```
where `log_probs` and `log_probs_base` correspond to the actor model and the SFT model respectively.
This should be modified to:
```python
log_ratio = log_probs_base - log_probs
```
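A self-contained sanity check of the proposed ordering (sketch only; the tensor shapes and values are made up):

```python
import torch

def compute_approx_kl(log_probs, log_probs_base):
    # log_probs: actor (the sampling distribution q); log_probs_base: frozen SFT model (p)
    log_ratio = log_probs_base - log_probs
    return ((log_ratio.exp() - 1) - log_ratio).mean(dim=1)

same = torch.full((1, 4), -1.0)
print(compute_approx_kl(same, same))        # tensor([0.]) - identical policies, no penalty
print(compute_approx_kl(same, same - 0.5))  # small positive penalty once they diverge
```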
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `applications/Chat/coati/models/utils.py`
Content:
```
1 from typing import Optional, Union
2
3 import loralib as lora
4 import torch
5 import torch.nn as nn
6 import torch.nn.functional as F
7
8
9 def compute_approx_kl(log_probs: torch.Tensor,
10 log_probs_base: torch.Tensor,
11 action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
12 """
13 Compute the approximate KL divergence between two distributions.
14 Schulman blog: http://joschu.net/blog/kl-approx.html
15
16 Args:
17 log_probs: Log probabilities of the new distribution.
18 log_probs_base: Log probabilities of the base distribution.
19 action_mask: Mask for actions.
20 """
21
22 log_ratio = log_probs - log_probs_base
23 approx_kl = (log_ratio.exp() - 1) - log_ratio
24 if action_mask is not None:
25 approx_kl = masked_mean(approx_kl, action_mask, dim=1)
26 return approx_kl
27 approx_kl = approx_kl.mean(dim=1)
28 return approx_kl
29
30
31 def compute_reward(r: Union[torch.Tensor, float],
32 kl_coef: float,
33 log_probs: torch.Tensor,
34 log_probs_base: torch.Tensor,
35 action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
36 if kl_coef <= 0.0:
37 return r
38 kl = compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)
39 reward = r - kl_coef * kl
40 return reward
41
42
43 def log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
44 log_probs = F.log_softmax(logits, dim=-1)
45 log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))
46 return log_probs_labels.squeeze(-1)
47
48
49 def calc_action_log_probs(output: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:
50 """Calculate action log probs.
51
52 Args:
53 output (torch.Tensor): Output tensor of Actor.forward.
54 sequences (torch.LongTensor): Input sequences.
55 num_actions (int): Number of actions.
56
57 Returns:
58 torch.Tensor: Action log probs.
59 """
60 logits = output['logits']
61 log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])
62 return log_probs[:, -num_actions:]
63
64
65 def masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:
66 tensor = tensor * mask
67 tensor = tensor.sum(dim=dim)
68 mask_sum = mask.sum(dim=dim)
69 mean = tensor / (mask_sum + 1e-8)
70 return mean
71
72
73 def masked_normalize(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:
74 tensor = tensor * mask
75 mean = masked_mean(tensor, mask, dim=dim)
76 mean_centered = tensor - mean
77 var = masked_mean(mean_centered**2, mask, dim=dim)
78 return mean_centered * var.clamp(min=eps).rsqrt()
79
80
81 def normalize(tensor: torch.Tensor, dim: int = 0, eps: float = 1e-8) -> torch.Tensor:
82 mean = tensor.mean(dim)
83 mean_centered = tensor - mean
84 var = (mean_centered**2).mean(dim)
85 norm = mean_centered * var.clamp(min=eps).rsqrt()
86 return norm
87
88
89 def convert_to_lora(model: nn.Module,
90 input_size: int,
91 output_size: int,
92 lora_rank: int = 16,
93 lora_alpha: int = 1,
94 lora_dropout: float = 0.,
95 fan_in_fan_out: bool = False,
96 merge_weights: bool = True):
97 if lora_rank > min(input_size, output_size):
98 raise ValueError(f"LoRA rank {lora_rank} must be less or equal than {min(input_size, output_size)}")
99
100 for name, module in model.named_modules():
101 if isinstance(module, nn.Linear):
102 module._modules[name] = lora.Linear(input_size,
103 output_size,
104 r=lora_rank,
105 lora_alpha=lora_alpha,
106 lora_dropout=lora_dropout,
107 fan_in_fan_out=fan_in_fan_out,
108 merge_weights=merge_weights)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/applications/Chat/coati/models/utils.py b/applications/Chat/coati/models/utils.py
--- a/applications/Chat/coati/models/utils.py
+++ b/applications/Chat/coati/models/utils.py
@@ -19,7 +19,7 @@
action_mask: Mask for actions.
"""
- log_ratio = log_probs - log_probs_base
+ log_ratio = log_probs_base - log_probs
approx_kl = (log_ratio.exp() - 1) - log_ratio
if action_mask is not None:
approx_kl = masked_mean(approx_kl, action_mask, dim=1)
| {"golden_diff": "diff --git a/applications/Chat/coati/models/utils.py b/applications/Chat/coati/models/utils.py\n--- a/applications/Chat/coati/models/utils.py\n+++ b/applications/Chat/coati/models/utils.py\n@@ -19,7 +19,7 @@\n action_mask: Mask for actions.\n \"\"\"\n \n- log_ratio = log_probs - log_probs_base\n+ log_ratio = log_probs_base - log_probs\n approx_kl = (log_ratio.exp() - 1) - log_ratio\n if action_mask is not None:\n approx_kl = masked_mean(approx_kl, action_mask, dim=1)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[BUG]: wrong in KL_approx , the order of two distribution is handled incorrectly\ncompute_approx_kl for `NaiveExperienceMaker` maybe incorrect. \r\n\r\nAs motion in [Approximating KL Divergence](http://joschu.net/blog/kl-approx.html)\r\n\r\n \r\n$$ KL[q,p] = \\mathbb{E}_{x\\sim q}[\\log\\frac{q(x)}{p(x)}] $$\r\n\r\nlet \r\n\r\n$$ r = \\frac{p(x)}{q(x)} $$\r\n\r\nnote that, x is sample from distribution q. \r\n\r\nThen \r\n\r\n$$ KL_{approx}[q,p] = \\mathbb{E}_{x\\sim q}[-\\log(r) + (r-1) ] $$\r\n\r\n---\r\n \r\nIn paper [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155), object for actor , (e.i. reward of experience , ignore loss_ptx) \r\n\r\n<img width=\"756\" alt=\"image\" src=\"https://user-images.githubusercontent.com/22851737/236610916-2f068c34-1508-438f-bd16-6fe6ed491e8c.png\">\r\n\r\n\r\n<img width=\"795\" alt=\"image\" src=\"https://user-images.githubusercontent.com/22851737/236611424-17600f6e-7aca-4bdf-95bd-2ce4035bcd3a.png\">\r\n\r\nSo for computing KL, samples are sampled from actor model e.i $\\pi^{RL}_\\phi$, instead of $\\pi^{SFT}$\r\n\r\n KL in the object should be $KL[\\pi^{RL}, \\pi^{SFT}] =KL[q,p]$ , and $r$ of KL_approx should be $\\frac{\\pi^{SFT}(x)}{\\pi^{RL}_\\phi(x)}$\r\n\r\n--- \r\n\r\nWhile on the `coati.models.utils.compute_approx_kl`\r\n\r\n``` python \r\n log_ratio = log_probs - log_probs_base\r\n```\r\n\r\nand log_probs and log_probs_base correspond to actor_model and sft_model respectively.\r\nThis should be modify to \r\n\r\n```python \r\n log_ratio = log_probs_base - log_probs \r\n```\r\n\r\n\n", "before_files": [{"content": "from typing import Optional, Union\n\nimport loralib as lora\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef compute_approx_kl(log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Compute the approximate KL divergence between two distributions.\n Schulman blog: http://joschu.net/blog/kl-approx.html\n\n Args:\n log_probs: Log probabilities of the new distribution.\n log_probs_base: Log probabilities of the base distribution.\n action_mask: Mask for actions.\n \"\"\"\n\n log_ratio = log_probs - log_probs_base\n approx_kl = (log_ratio.exp() - 1) - log_ratio\n if action_mask is not None:\n approx_kl = masked_mean(approx_kl, action_mask, dim=1)\n return approx_kl\n approx_kl = approx_kl.mean(dim=1)\n return approx_kl\n\n\ndef compute_reward(r: Union[torch.Tensor, float],\n kl_coef: float,\n log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n if kl_coef <= 0.0:\n return r\n kl = compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)\n reward = r - kl_coef * kl\n return reward\n\n\ndef log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n log_probs = F.log_softmax(logits, dim=-1)\n 
log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))\n return log_probs_labels.squeeze(-1)\n\n\ndef calc_action_log_probs(output: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:\n \"\"\"Calculate action log probs.\n\n Args:\n output (torch.Tensor): Output tensor of Actor.forward.\n sequences (torch.LongTensor): Input sequences.\n num_actions (int): Number of actions.\n\n Returns:\n torch.Tensor: Action log probs.\n \"\"\"\n logits = output['logits']\n log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])\n return log_probs[:, -num_actions:]\n\n\ndef masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:\n tensor = tensor * mask\n tensor = tensor.sum(dim=dim)\n mask_sum = mask.sum(dim=dim)\n mean = tensor / (mask_sum + 1e-8)\n return mean\n\n\ndef masked_normalize(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n return mean_centered * var.clamp(min=eps).rsqrt()\n\n\ndef normalize(tensor: torch.Tensor, dim: int = 0, eps: float = 1e-8) -> torch.Tensor:\n mean = tensor.mean(dim)\n mean_centered = tensor - mean\n var = (mean_centered**2).mean(dim)\n norm = mean_centered * var.clamp(min=eps).rsqrt()\n return norm\n\n\ndef convert_to_lora(model: nn.Module,\n input_size: int,\n output_size: int,\n lora_rank: int = 16,\n lora_alpha: int = 1,\n lora_dropout: float = 0.,\n fan_in_fan_out: bool = False,\n merge_weights: bool = True):\n if lora_rank > min(input_size, output_size):\n raise ValueError(f\"LoRA rank {lora_rank} must be less or equal than {min(input_size, output_size)}\")\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module._modules[name] = lora.Linear(input_size,\n output_size,\n r=lora_rank,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n fan_in_fan_out=fan_in_fan_out,\n merge_weights=merge_weights)\n", "path": "applications/Chat/coati/models/utils.py"}], "after_files": [{"content": "from typing import Optional, Union\n\nimport loralib as lora\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef compute_approx_kl(log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Compute the approximate KL divergence between two distributions.\n Schulman blog: http://joschu.net/blog/kl-approx.html\n\n Args:\n log_probs: Log probabilities of the new distribution.\n log_probs_base: Log probabilities of the base distribution.\n action_mask: Mask for actions.\n \"\"\"\n\n log_ratio = log_probs_base - log_probs\n approx_kl = (log_ratio.exp() - 1) - log_ratio\n if action_mask is not None:\n approx_kl = masked_mean(approx_kl, action_mask, dim=1)\n return approx_kl\n approx_kl = approx_kl.mean(dim=1)\n return approx_kl\n\n\ndef compute_reward(r: Union[torch.Tensor, float],\n kl_coef: float,\n log_probs: torch.Tensor,\n log_probs_base: torch.Tensor,\n action_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n if kl_coef <= 0.0:\n return r\n kl = compute_approx_kl(log_probs, log_probs_base, action_mask=action_mask)\n reward = r - kl_coef * kl\n return reward\n\n\ndef log_probs_from_logits(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n log_probs = F.log_softmax(logits, dim=-1)\n log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1))\n 
return log_probs_labels.squeeze(-1)\n\n\ndef calc_action_log_probs(output: torch.Tensor, sequences: torch.LongTensor, num_actions: int) -> torch.Tensor:\n \"\"\"Calculate action log probs.\n\n Args:\n output (torch.Tensor): Output tensor of Actor.forward.\n sequences (torch.LongTensor): Input sequences.\n num_actions (int): Number of actions.\n\n Returns:\n torch.Tensor: Action log probs.\n \"\"\"\n logits = output['logits']\n log_probs = log_probs_from_logits(logits[:, :-1, :], sequences[:, 1:])\n return log_probs[:, -num_actions:]\n\n\ndef masked_mean(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1) -> torch.Tensor:\n tensor = tensor * mask\n tensor = tensor.sum(dim=dim)\n mask_sum = mask.sum(dim=dim)\n mean = tensor / (mask_sum + 1e-8)\n return mean\n\n\ndef masked_normalize(tensor: torch.Tensor, mask: torch.Tensor, dim: int = 1, eps: float = 1e-8) -> torch.Tensor:\n tensor = tensor * mask\n mean = masked_mean(tensor, mask, dim=dim)\n mean_centered = tensor - mean\n var = masked_mean(mean_centered**2, mask, dim=dim)\n return mean_centered * var.clamp(min=eps).rsqrt()\n\n\ndef normalize(tensor: torch.Tensor, dim: int = 0, eps: float = 1e-8) -> torch.Tensor:\n mean = tensor.mean(dim)\n mean_centered = tensor - mean\n var = (mean_centered**2).mean(dim)\n norm = mean_centered * var.clamp(min=eps).rsqrt()\n return norm\n\n\ndef convert_to_lora(model: nn.Module,\n input_size: int,\n output_size: int,\n lora_rank: int = 16,\n lora_alpha: int = 1,\n lora_dropout: float = 0.,\n fan_in_fan_out: bool = False,\n merge_weights: bool = True):\n if lora_rank > min(input_size, output_size):\n raise ValueError(f\"LoRA rank {lora_rank} must be less or equal than {min(input_size, output_size)}\")\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module._modules[name] = lora.Linear(input_size,\n output_size,\n r=lora_rank,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n fan_in_fan_out=fan_in_fan_out,\n merge_weights=merge_weights)\n", "path": "applications/Chat/coati/models/utils.py"}]} | 1,954 | 140 |
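The flipped sign in the diff above makes `compute_approx_kl` measure KL(actor || SFT) from samples drawn by the actor, using the k3 estimator from Schulman's blog: with r = p(x)/q(x) for x ~ q, the quantity (r - 1) - log r is a non-negative, unbiased estimate of KL(q || p). A small numeric sanity check of that estimator (an illustrative sketch only, not part of the ColossalAI code base):

```python
import torch

def approx_kl(log_q: torch.Tensor, log_p: torch.Tensor) -> torch.Tensor:
    # k3 estimator: samples come from q, r = p(x)/q(x),
    # and (r - 1) - log r estimates KL(q || p).
    log_ratio = log_p - log_q
    return ((log_ratio.exp() - 1) - log_ratio).mean()

# Two simple categorical distributions.
q = torch.tensor([0.7, 0.2, 0.1])
p = torch.tensor([0.5, 0.3, 0.2])

exact = (q * (q / p).log()).sum()                      # closed-form KL(q || p)
idx = torch.multinomial(q, 50_000, replacement=True)   # samples drawn from q
estimate = approx_kl(q.log()[idx], p.log()[idx])

print(f"exact KL(q||p) = {exact:.4f}, k3 estimate = {estimate:.4f}")
```

Because each term (r - 1) - log r is >= 0, the estimate stays non-negative even with few samples, which is the main advantage over the plain -log r estimator.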
gh_patches_debug_27561 | rasdani/github-patches | git_diff | huggingface__transformers-11746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)
the command to reproduce:
cd huggingface-transformers/examples/pytorch/question-answering
python -m torch.distributed.launch --nproc_per_node=8 ./run_qa.py \
--model_name_or_path roberta-large \
--dataset_name squad \
--do_train --do_eval \
--per_device_train_batch_size 16 \
--per_device_eval_batch_size 256 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir test_result2/$trials --overwrite_output_dir \
--logging_dir test_result2/$trials/tensorboard --logging_first_step --logging_steps 50 \
--fp16
I tried adding "--max_eval_samples 10240"; this avoids the error, but the evaluation result is then quite low (exact_match = 4.9414, f1 = 8.9784). When I run with 1 GPU, the same command succeeds (exact_match = 88.5336, f1 = 94.3266).
the full error is "File "./transformers/src/transformers/trainer_pt_utils.py", line 410, in _nested_set_tensors
i * slice_len : (i + 1) * slice_len
i * slice_len : (i + 1) * slice_len
ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/pytorch/question-answering/trainer_qa.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2020 The HuggingFace Team All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """
16 A subclass of `Trainer` specific to Question-Answering tasks
17 """
18
19 from transformers import Trainer, is_torch_tpu_available
20 from transformers.trainer_utils import PredictionOutput
21
22
23 if is_torch_tpu_available():
24 import torch_xla.core.xla_model as xm
25 import torch_xla.debug.metrics as met
26
27
28 class QuestionAnsweringTrainer(Trainer):
29 def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
30 super().__init__(*args, **kwargs)
31 self.eval_examples = eval_examples
32 self.post_process_function = post_process_function
33
34 def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):
35 eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
36 eval_dataloader = self.get_eval_dataloader(eval_dataset)
37 eval_examples = self.eval_examples if eval_examples is None else eval_examples
38
39 # Temporarily disable metric computation, we will do it in the loop here.
40 compute_metrics = self.compute_metrics
41 self.compute_metrics = None
42 try:
43 output = self.prediction_loop(
44 eval_dataloader,
45 description="Evaluation",
46 # No point gathering the predictions if there are no metrics, otherwise we defer to
47 # self.args.prediction_loss_only
48 prediction_loss_only=True if compute_metrics is None else None,
49 ignore_keys=ignore_keys,
50 )
51 finally:
52 self.compute_metrics = compute_metrics
53
54 if self.post_process_function is not None and self.compute_metrics is not None:
55 eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
56 metrics = self.compute_metrics(eval_preds)
57
58 self.log(metrics)
59 else:
60 metrics = {}
61
62 if self.args.tpu_metrics_debug or self.args.debug:
63 # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
64 xm.master_print(met.metrics_report())
65
66 self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
67 return metrics
68
69 def predict(self, predict_dataset, predict_examples, ignore_keys=None):
70 predict_dataloader = self.get_test_dataloader(predict_dataset)
71
72 # Temporarily disable metric computation, we will do it in the loop here.
73 compute_metrics = self.compute_metrics
74 self.compute_metrics = None
75 try:
76 output = self.prediction_loop(
77 predict_dataloader,
78 description="Prediction",
79 # No point gathering the predictions if there are no metrics, otherwise we defer to
80 # self.args.prediction_loss_only
81 prediction_loss_only=True if compute_metrics is None else None,
82 ignore_keys=ignore_keys,
83 )
84 finally:
85 self.compute_metrics = compute_metrics
86
87 if self.post_process_function is None or self.compute_metrics is None:
88 return output
89
90 predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
91 metrics = self.compute_metrics(predictions)
92
93 return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/pytorch/question-answering/trainer_qa.py b/examples/pytorch/question-answering/trainer_qa.py
--- a/examples/pytorch/question-answering/trainer_qa.py
+++ b/examples/pytorch/question-answering/trainer_qa.py
@@ -39,8 +39,9 @@
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
- output = self.prediction_loop(
+ output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
@@ -72,8 +73,9 @@
# Temporarily disable metric computation, we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
- output = self.prediction_loop(
+ output = eval_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
| {"golden_diff": "diff --git a/examples/pytorch/question-answering/trainer_qa.py b/examples/pytorch/question-answering/trainer_qa.py\n--- a/examples/pytorch/question-answering/trainer_qa.py\n+++ b/examples/pytorch/question-answering/trainer_qa.py\n@@ -39,8 +39,9 @@\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n- output = self.prediction_loop(\n+ output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n@@ -72,8 +73,9 @@\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n+ eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n- output = self.prediction_loop(\n+ output = eval_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n", "issue": "ValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)\nthe command to reproduce:\r\ncd huggingface-transformers/examples/pytorch/question-answering\r\npython -m torch.distributed.launch --nproc_per_node=8 ./run_qa.py \\\r\n\t\t --model_name_or_path roberta-large \\\r\n\t\t --dataset_name squad \\\r\n\t\t --do_train --do_eval \\\r\n\t\t --per_device_train_batch_size 16 \\\r\n\t\t --per_device_eval_batch_size 256 \\\r\n\t\t --learning_rate 3e-5 \\\r\n\t\t --num_train_epochs 2 \\\r\n\t\t --max_seq_length 384 \\\r\n\t\t --doc_stride 128 \\\r\n\t\t --output_dir test_result2/$trials --overwrite_output_dir \\\r\n\t\t --logging_dir test_result2/$trials/tensorboard --logging_first_step --logging_steps 50 \\\r\n --fp16\r\n\r\n\r\n\r\ni tried add \"--max_eval_samples 10240\", this will fix the error, while the AUC result is quite low(exact_match = 4.9414, f1 = 8.9784). 
and when i ran with 1gpu, the above command can succeed(exact_match = 88.5336, f1 = 94.3266)\r\n\r\n\r\nthe full error is \"File \"./transformers/src/transformers/trainer_pt_utils.py\", line 410, in _nested_set_tensors\r\n i * slice_len : (i + 1) * slice_len\r\n i * slice_len : (i + 1) * slice_len\r\nValueError: could not broadcast input array from shape (2816,384) into shape (2698,384)\"\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA subclass of `Trainer` specific to Question-Answering tasks\n\"\"\"\n\nfrom transformers import Trainer, is_torch_tpu_available\nfrom transformers.trainer_utils import PredictionOutput\n\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n\n\nclass QuestionAnsweringTrainer(Trainer):\n def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.eval_examples = eval_examples\n self.post_process_function = post_process_function\n\n def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):\n eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n eval_examples = self.eval_examples if eval_examples is None else eval_examples\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n try:\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is not None and self.compute_metrics is not None:\n eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)\n metrics = self.compute_metrics(eval_preds)\n\n self.log(metrics)\n else:\n metrics = {}\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n return metrics\n\n def predict(self, predict_dataset, predict_examples, ignore_keys=None):\n predict_dataloader = self.get_test_dataloader(predict_dataset)\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n try:\n output = self.prediction_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is 
None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is None or self.compute_metrics is None:\n return output\n\n predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, \"predict\")\n metrics = self.compute_metrics(predictions)\n\n return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)\n", "path": "examples/pytorch/question-answering/trainer_qa.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2020 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nA subclass of `Trainer` specific to Question-Answering tasks\n\"\"\"\n\nfrom transformers import Trainer, is_torch_tpu_available\nfrom transformers.trainer_utils import PredictionOutput\n\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n\n\nclass QuestionAnsweringTrainer(Trainer):\n def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.eval_examples = eval_examples\n self.post_process_function = post_process_function\n\n def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None):\n eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n eval_examples = self.eval_examples if eval_examples is None else eval_examples\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n output = eval_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is not None and self.compute_metrics is not None:\n eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)\n metrics = self.compute_metrics(eval_preds)\n\n self.log(metrics)\n else:\n metrics = {}\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)\n return metrics\n\n def predict(self, predict_dataset, predict_examples, ignore_keys=None):\n predict_dataloader = self.get_test_dataloader(predict_dataset)\n\n # Temporarily disable metric computation, we will do it in the loop here.\n compute_metrics = self.compute_metrics\n self.compute_metrics = None\n eval_loop = self.prediction_loop if 
self.args.use_legacy_prediction_loop else self.evaluation_loop\n try:\n output = eval_loop(\n predict_dataloader,\n description=\"Prediction\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if compute_metrics is None else None,\n ignore_keys=ignore_keys,\n )\n finally:\n self.compute_metrics = compute_metrics\n\n if self.post_process_function is None or self.compute_metrics is None:\n return output\n\n predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, \"predict\")\n metrics = self.compute_metrics(predictions)\n\n return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)\n", "path": "examples/pytorch/question-answering/trainer_qa.py"}]} | 1,635 | 290 |
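The ValueError quoted in the issue above is NumPy's generic broadcast failure: a (2816, 384) array of gathered predictions is assigned into a slice that only covers 2698 rows. A standalone reproduction of just that error (illustrative; no transformers code involved):

```python
import numpy as np

buffer = np.zeros((2698, 384))   # destination sized for the eval set
chunk = np.ones((2816, 384))     # gathered predictions from the workers

try:
    # The slice is silently clipped to the buffer's 2698 rows, so the
    # assignment fails with the same message as in the traceback.
    buffer[0:2816] = chunk
except ValueError as err:
    print(err)
```

The golden diff sidesteps the legacy code path entirely: unless `args.use_legacy_prediction_loop` is set, the newer `evaluation_loop` is used, which is designed to truncate gathered arrays back to the real dataset length.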
gh_patches_debug_16666 | rasdani/github-patches | git_diff | SeldonIO__MLServer-613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compress requests / responses
Leverage `gzip` to compress requests / responses.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/rest/app.py`
Content:
```
1 from typing import Callable
2 from fastapi import FastAPI
3 from fastapi.responses import Response as FastAPIResponse
4 from fastapi.routing import APIRoute as FastAPIRoute
5 from fastapi.middleware.cors import CORSMiddleware
6 from starlette_exporter import PrometheusMiddleware
7
8 from .endpoints import Endpoints, ModelRepositoryEndpoints
9 from .requests import Request
10 from .responses import Response
11 from .errors import _EXCEPTION_HANDLERS
12
13 from ..settings import Settings
14 from ..handlers import DataPlane, ModelRepositoryHandlers
15
16
17 class APIRoute(FastAPIRoute):
18 """
19 Custom route to use our own Request handler.
20 """
21
22 def get_route_handler(self) -> Callable:
23 original_route_handler = super().get_route_handler()
24
25 async def custom_route_handler(request: Request) -> FastAPIResponse:
26 request = Request(request.scope, request.receive)
27 return await original_route_handler(request)
28
29 return custom_route_handler
30
31
32 def create_app(
33 settings: Settings,
34 data_plane: DataPlane,
35 model_repository_handlers: ModelRepositoryHandlers,
36 ) -> FastAPI:
37 endpoints = Endpoints(data_plane)
38 model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)
39
40 routes = [
41 # Model ready
42 APIRoute(
43 "/v2/models/{model_name}/ready",
44 endpoints.model_ready,
45 ),
46 APIRoute(
47 "/v2/models/{model_name}/versions/{model_version}/ready",
48 endpoints.model_ready,
49 ),
50 # Model infer
51 APIRoute(
52 "/v2/models/{model_name}/infer",
53 endpoints.infer,
54 methods=["POST"],
55 ),
56 APIRoute(
57 "/v2/models/{model_name}/versions/{model_version}/infer",
58 endpoints.infer,
59 methods=["POST"],
60 ),
61 # Model metadata
62 APIRoute(
63 "/v2/models/{model_name}",
64 endpoints.model_metadata,
65 ),
66 APIRoute(
67 "/v2/models/{model_name}/versions/{model_version}",
68 endpoints.model_metadata,
69 ),
70 # Liveness and readiness
71 APIRoute("/v2/health/live", endpoints.live),
72 APIRoute("/v2/health/ready", endpoints.ready),
73 # Server metadata
74 APIRoute(
75 "/v2",
76 endpoints.metadata,
77 ),
78 ]
79
80 routes += [
81 # Model Repository API
82 APIRoute(
83 "/v2/repository/index",
84 model_repository_endpoints.index,
85 methods=["POST"],
86 ),
87 APIRoute(
88 "/v2/repository/models/{model_name}/load",
89 model_repository_endpoints.load,
90 methods=["POST"],
91 ),
92 APIRoute(
93 "/v2/repository/models/{model_name}/unload",
94 model_repository_endpoints.unload,
95 methods=["POST"],
96 ),
97 ]
98
99 app = FastAPI(
100 debug=settings.debug,
101 routes=routes, # type: ignore
102 default_response_class=Response,
103 exception_handlers=_EXCEPTION_HANDLERS, # type: ignore
104 )
105
106 if settings.cors_settings is not None:
107 app.add_middleware(
108 CORSMiddleware,
109 allow_origins=settings.cors_settings.allow_origins,
110 allow_origin_regex=settings.cors_settings.allow_origin_regex,
111 allow_credentials=settings.cors_settings.allow_credentials,
112 allow_methods=settings.cors_settings.allow_methods,
113 allow_headers=settings.cors_settings.allow_headers,
114 max_age=settings.cors_settings.max_age,
115 )
116
117 if settings.metrics_endpoint:
118 app.add_middleware(
119 PrometheusMiddleware,
120 app_name="mlserver",
121 prefix="rest_server",
122 # TODO: Should we also exclude model's health endpoints?
123 skip_paths=[
124 settings.metrics_endpoint,
125 "/v2/health/live",
126 "/v2/health/ready",
127 ],
128 )
129
130 return app
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/rest/app.py b/mlserver/rest/app.py
--- a/mlserver/rest/app.py
+++ b/mlserver/rest/app.py
@@ -3,6 +3,7 @@
from fastapi.responses import Response as FastAPIResponse
from fastapi.routing import APIRoute as FastAPIRoute
from fastapi.middleware.cors import CORSMiddleware
+from fastapi.middleware.gzip import GZipMiddleware
from starlette_exporter import PrometheusMiddleware
from .endpoints import Endpoints, ModelRepositoryEndpoints
@@ -103,6 +104,7 @@
exception_handlers=_EXCEPTION_HANDLERS, # type: ignore
)
+ app.add_middleware(GZipMiddleware)
if settings.cors_settings is not None:
app.add_middleware(
CORSMiddleware,
| {"golden_diff": "diff --git a/mlserver/rest/app.py b/mlserver/rest/app.py\n--- a/mlserver/rest/app.py\n+++ b/mlserver/rest/app.py\n@@ -3,6 +3,7 @@\n from fastapi.responses import Response as FastAPIResponse\n from fastapi.routing import APIRoute as FastAPIRoute\n from fastapi.middleware.cors import CORSMiddleware\n+from fastapi.middleware.gzip import GZipMiddleware\n from starlette_exporter import PrometheusMiddleware\n \n from .endpoints import Endpoints, ModelRepositoryEndpoints\n@@ -103,6 +104,7 @@\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n \n+ app.add_middleware(GZipMiddleware)\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n", "issue": "Compress requests / responses\nLeverage `gzip` to compress requests / responses.\n", "before_files": [{"content": "from typing import Callable\nfrom fastapi import FastAPI\nfrom fastapi.responses import Response as FastAPIResponse\nfrom fastapi.routing import APIRoute as FastAPIRoute\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom starlette_exporter import PrometheusMiddleware\n\nfrom .endpoints import Endpoints, ModelRepositoryEndpoints\nfrom .requests import Request\nfrom .responses import Response\nfrom .errors import _EXCEPTION_HANDLERS\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\n\n\nclass APIRoute(FastAPIRoute):\n \"\"\"\n Custom route to use our own Request handler.\n \"\"\"\n\n def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n\n async def custom_route_handler(request: Request) -> FastAPIResponse:\n request = Request(request.scope, request.receive)\n return await original_route_handler(request)\n\n return custom_route_handler\n\n\ndef create_app(\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n) -> FastAPI:\n endpoints = Endpoints(data_plane)\n model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)\n\n routes = [\n # Model ready\n APIRoute(\n \"/v2/models/{model_name}/ready\",\n endpoints.model_ready,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/ready\",\n endpoints.model_ready,\n ),\n # Model infer\n APIRoute(\n \"/v2/models/{model_name}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n # Model metadata\n APIRoute(\n \"/v2/models/{model_name}\",\n endpoints.model_metadata,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}\",\n endpoints.model_metadata,\n ),\n # Liveness and readiness\n APIRoute(\"/v2/health/live\", endpoints.live),\n APIRoute(\"/v2/health/ready\", endpoints.ready),\n # Server metadata\n APIRoute(\n \"/v2\",\n endpoints.metadata,\n ),\n ]\n\n routes += [\n # Model Repository API\n APIRoute(\n \"/v2/repository/index\",\n model_repository_endpoints.index,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/load\",\n model_repository_endpoints.load,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/unload\",\n model_repository_endpoints.unload,\n methods=[\"POST\"],\n ),\n ]\n\n app = FastAPI(\n debug=settings.debug,\n routes=routes, # type: ignore\n default_response_class=Response,\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n 
allow_origins=settings.cors_settings.allow_origins,\n allow_origin_regex=settings.cors_settings.allow_origin_regex,\n allow_credentials=settings.cors_settings.allow_credentials,\n allow_methods=settings.cors_settings.allow_methods,\n allow_headers=settings.cors_settings.allow_headers,\n max_age=settings.cors_settings.max_age,\n )\n\n if settings.metrics_endpoint:\n app.add_middleware(\n PrometheusMiddleware,\n app_name=\"mlserver\",\n prefix=\"rest_server\",\n # TODO: Should we also exclude model's health endpoints?\n skip_paths=[\n settings.metrics_endpoint,\n \"/v2/health/live\",\n \"/v2/health/ready\",\n ],\n )\n\n return app\n", "path": "mlserver/rest/app.py"}], "after_files": [{"content": "from typing import Callable\nfrom fastapi import FastAPI\nfrom fastapi.responses import Response as FastAPIResponse\nfrom fastapi.routing import APIRoute as FastAPIRoute\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.middleware.gzip import GZipMiddleware\nfrom starlette_exporter import PrometheusMiddleware\n\nfrom .endpoints import Endpoints, ModelRepositoryEndpoints\nfrom .requests import Request\nfrom .responses import Response\nfrom .errors import _EXCEPTION_HANDLERS\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers\n\n\nclass APIRoute(FastAPIRoute):\n \"\"\"\n Custom route to use our own Request handler.\n \"\"\"\n\n def get_route_handler(self) -> Callable:\n original_route_handler = super().get_route_handler()\n\n async def custom_route_handler(request: Request) -> FastAPIResponse:\n request = Request(request.scope, request.receive)\n return await original_route_handler(request)\n\n return custom_route_handler\n\n\ndef create_app(\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n) -> FastAPI:\n endpoints = Endpoints(data_plane)\n model_repository_endpoints = ModelRepositoryEndpoints(model_repository_handlers)\n\n routes = [\n # Model ready\n APIRoute(\n \"/v2/models/{model_name}/ready\",\n endpoints.model_ready,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/ready\",\n endpoints.model_ready,\n ),\n # Model infer\n APIRoute(\n \"/v2/models/{model_name}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}/infer\",\n endpoints.infer,\n methods=[\"POST\"],\n ),\n # Model metadata\n APIRoute(\n \"/v2/models/{model_name}\",\n endpoints.model_metadata,\n ),\n APIRoute(\n \"/v2/models/{model_name}/versions/{model_version}\",\n endpoints.model_metadata,\n ),\n # Liveness and readiness\n APIRoute(\"/v2/health/live\", endpoints.live),\n APIRoute(\"/v2/health/ready\", endpoints.ready),\n # Server metadata\n APIRoute(\n \"/v2\",\n endpoints.metadata,\n ),\n ]\n\n routes += [\n # Model Repository API\n APIRoute(\n \"/v2/repository/index\",\n model_repository_endpoints.index,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/load\",\n model_repository_endpoints.load,\n methods=[\"POST\"],\n ),\n APIRoute(\n \"/v2/repository/models/{model_name}/unload\",\n model_repository_endpoints.unload,\n methods=[\"POST\"],\n ),\n ]\n\n app = FastAPI(\n debug=settings.debug,\n routes=routes, # type: ignore\n default_response_class=Response,\n exception_handlers=_EXCEPTION_HANDLERS, # type: ignore\n )\n\n app.add_middleware(GZipMiddleware)\n if settings.cors_settings is not None:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=settings.cors_settings.allow_origins,\n 
allow_origin_regex=settings.cors_settings.allow_origin_regex,\n allow_credentials=settings.cors_settings.allow_credentials,\n allow_methods=settings.cors_settings.allow_methods,\n allow_headers=settings.cors_settings.allow_headers,\n max_age=settings.cors_settings.max_age,\n )\n\n if settings.metrics_endpoint:\n app.add_middleware(\n PrometheusMiddleware,\n app_name=\"mlserver\",\n prefix=\"rest_server\",\n # TODO: Should we also exclude model's health endpoints?\n skip_paths=[\n settings.metrics_endpoint,\n \"/v2/health/live\",\n \"/v2/health/ready\",\n ],\n )\n\n return app\n", "path": "mlserver/rest/app.py"}]} | 1,353 | 173 |
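The one-line fix above uses Starlette's gzip middleware, which FastAPI re-exports. A minimal standalone sketch of the same idea (illustrative, not MLServer code; the `minimum_size` value is an arbitrary example):

```python
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware

app = FastAPI()

# Compress responses whenever the client sends "Accept-Encoding: gzip";
# payloads below minimum_size bytes are passed through unchanged.
app.add_middleware(GZipMiddleware, minimum_size=1000)

@app.get("/large")
def large() -> dict:
    return {"data": "x" * 10_000}   # big enough to benefit from compression
```

Note that the middleware only compresses responses for clients that advertise gzip support; it does not by itself decompress gzip-encoded request bodies.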
gh_patches_debug_26969 | rasdani/github-patches | git_diff | conda__conda-707 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ability to keep retrying with a lock error
The yum installer (IIRC) has a nice feature: if there is a lock error, it keeps retrying every 10 seconds or so. This could be useful for conda.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/lock.py`
Content:
```
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6
7 """
8 Tools for working with locks
9
10 A lock is just an empty directory. We use directories because this lets us use
11 the race condition-proof os.makedirs.
12
13 For now, there is one global lock for all of conda, because some things happen
14 globally (such as downloading packages).
15
16 We don't raise an error if the lock is named with the current PID
17 """
18
19 import os
20 from os.path import join
21 import glob
22
23
24 LOCKFN = '.conda_lock'
25
26
27 class Locked(object):
28 """
29 Context manager to handle locks.
30 """
31 def __init__(self, path):
32 self.path = path
33 self.end = "-" + str(os.getpid())
34 self.lock_path = join(self.path, LOCKFN + self.end)
35 self.pattern = join(self.path, LOCKFN + '-*')
36 self.remove = True
37
38 def __enter__(self):
39 files = glob.glob(self.pattern)
40 if files and not files[0].endswith(self.end):
41 # Keep the string "LOCKERROR" in this string so that external
42 # programs can look for it.
43 raise RuntimeError("""\
44 LOCKERROR: It looks like conda is already doing something.
45 The lock %s was found. Wait for it to finish before continuing.
46 If you are sure that conda is not running, remove it and try again.
47 You can also use: $ conda clean --lock""" % self.lock_path)
48
49 if not files:
50 try:
51 os.makedirs(self.lock_path)
52 except OSError:
53 pass
54 else: # PID lock already here --- someone else will remove it.
55 self.remove = False
56
57 def __exit__(self, exc_type, exc_value, traceback):
58 if self.remove:
59 for path in self.lock_path, self.path:
60 try:
61 os.rmdir(path)
62 except OSError:
63 pass
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/lock.py b/conda/lock.py
--- a/conda/lock.py
+++ b/conda/lock.py
@@ -19,7 +19,7 @@
import os
from os.path import join
import glob
-
+from time import sleep
LOCKFN = '.conda_lock'
@@ -36,15 +36,28 @@
self.remove = True
def __enter__(self):
- files = glob.glob(self.pattern)
- if files and not files[0].endswith(self.end):
- # Keep the string "LOCKERROR" in this string so that external
- # programs can look for it.
- raise RuntimeError("""\
-LOCKERROR: It looks like conda is already doing something.
-The lock %s was found. Wait for it to finish before continuing.
-If you are sure that conda is not running, remove it and try again.
-You can also use: $ conda clean --lock""" % self.lock_path)
+ retries = 10
+ # Keep the string "LOCKERROR" in this string so that external
+ # programs can look for it.
+ lockstr = ("""\
+ LOCKERROR: It looks like conda is already doing something.
+ The lock %s was found. Wait for it to finish before continuing.
+ If you are sure that conda is not running, remove it and try again.
+ You can also use: $ conda clean --lock""" % self.lock_path)
+ sleeptime = 1
+ while retries:
+ files = glob.glob(self.pattern)
+ if files and not files[0].endswith(self.end):
+ print(lockstr)
+ print("Sleeping for %s seconds" % sleeptime)
+ sleep(sleeptime)
+ sleeptime *= 2
+ retries -= 1
+ else:
+ break
+ else:
+ print("Exceeded max retries, giving up")
+ raise RuntimeError(lockstr)
if not files:
try:
| {"golden_diff": "diff --git a/conda/lock.py b/conda/lock.py\n--- a/conda/lock.py\n+++ b/conda/lock.py\n@@ -19,7 +19,7 @@\n import os\n from os.path import join\n import glob\n-\n+from time import sleep\n \n LOCKFN = '.conda_lock'\n \n@@ -36,15 +36,28 @@\n self.remove = True\n \n def __enter__(self):\n- files = glob.glob(self.pattern)\n- if files and not files[0].endswith(self.end):\n- # Keep the string \"LOCKERROR\" in this string so that external\n- # programs can look for it.\n- raise RuntimeError(\"\"\"\\\n-LOCKERROR: It looks like conda is already doing something.\n-The lock %s was found. Wait for it to finish before continuing.\n-If you are sure that conda is not running, remove it and try again.\n-You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n+ retries = 10\n+ # Keep the string \"LOCKERROR\" in this string so that external\n+ # programs can look for it.\n+ lockstr = (\"\"\"\\\n+ LOCKERROR: It looks like conda is already doing something.\n+ The lock %s was found. Wait for it to finish before continuing.\n+ If you are sure that conda is not running, remove it and try again.\n+ You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n+ sleeptime = 1\n+ while retries:\n+ files = glob.glob(self.pattern)\n+ if files and not files[0].endswith(self.end):\n+ print(lockstr)\n+ print(\"Sleeping for %s seconds\" % sleeptime)\n+ sleep(sleeptime)\n+ sleeptime *= 2\n+ retries -= 1\n+ else:\n+ break\n+ else:\n+ print(\"Exceeded max retries, giving up\")\n+ raise RuntimeError(lockstr)\n \n if not files:\n try:\n", "issue": "Add ability to keep retrying with a lock error\nThe yum installer (IIRC) has a nice feature that it will keep trying every 10 seconds or so if there is a lock error. This could be useful for conda. \n\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\n\nimport os\nfrom os.path import join\nimport glob\n\n\nLOCKFN = '.conda_lock'\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = join(self.path, LOCKFN + self.end)\n self.pattern = join(self.path, LOCKFN + '-*')\n self.remove = True\n\n def __enter__(self):\n files = glob.glob(self.pattern)\n if files and not files[0].endswith(self.end):\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n raise RuntimeError(\"\"\"\\\nLOCKERROR: It looks like conda is already doing something.\nThe lock %s was found. 
Wait for it to finish before continuing.\nIf you are sure that conda is not running, remove it and try again.\nYou can also use: $ conda clean --lock\"\"\" % self.lock_path)\n\n if not files:\n try:\n os.makedirs(self.lock_path)\n except OSError:\n pass\n else: # PID lock already here --- someone else will remove it.\n self.remove = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.remove:\n for path in self.lock_path, self.path:\n try:\n os.rmdir(path)\n except OSError:\n pass\n", "path": "conda/lock.py"}], "after_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\n\"\"\"\nTools for working with locks\n\nA lock is just an empty directory. We use directories because this lets us use\nthe race condition-proof os.makedirs.\n\nFor now, there is one global lock for all of conda, because some things happen\nglobally (such as downloading packages).\n\nWe don't raise an error if the lock is named with the current PID\n\"\"\"\n\nimport os\nfrom os.path import join\nimport glob\nfrom time import sleep\n\nLOCKFN = '.conda_lock'\n\n\nclass Locked(object):\n \"\"\"\n Context manager to handle locks.\n \"\"\"\n def __init__(self, path):\n self.path = path\n self.end = \"-\" + str(os.getpid())\n self.lock_path = join(self.path, LOCKFN + self.end)\n self.pattern = join(self.path, LOCKFN + '-*')\n self.remove = True\n\n def __enter__(self):\n retries = 10\n # Keep the string \"LOCKERROR\" in this string so that external\n # programs can look for it.\n lockstr = (\"\"\"\\\n LOCKERROR: It looks like conda is already doing something.\n The lock %s was found. Wait for it to finish before continuing.\n If you are sure that conda is not running, remove it and try again.\n You can also use: $ conda clean --lock\"\"\" % self.lock_path)\n sleeptime = 1\n while retries:\n files = glob.glob(self.pattern)\n if files and not files[0].endswith(self.end):\n print(lockstr)\n print(\"Sleeping for %s seconds\" % sleeptime)\n sleep(sleeptime)\n sleeptime *= 2\n retries -= 1\n else:\n break\n else:\n print(\"Exceeded max retries, giving up\")\n raise RuntimeError(lockstr)\n\n if not files:\n try:\n os.makedirs(self.lock_path)\n except OSError:\n pass\n else: # PID lock already here --- someone else will remove it.\n self.remove = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n if self.remove:\n for path in self.lock_path, self.path:\n try:\n os.rmdir(path)\n except OSError:\n pass\n", "path": "conda/lock.py"}]} | 884 | 451 |
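The patch above adds the retry loop with a doubling sleep directly to `Locked.__enter__`. The same back-off pattern in isolation (a generic sketch with made-up names, not conda's actual API):

```python
import time
import threading

def acquire_with_retries(try_acquire, retries=10, first_delay=1.0):
    """Call ``try_acquire`` until it returns True, doubling the sleep each time."""
    delay = first_delay
    for _ in range(retries):
        if try_acquire():
            return
        print(f"Lock is held by another process, sleeping for {delay:g} seconds")
        time.sleep(delay)
        delay *= 2
    raise RuntimeError("Exceeded max retries, giving up")

# Trivial usage demo with an in-process lock.
lock = threading.Lock()
acquire_with_retries(lambda: lock.acquire(blocking=False))
```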
gh_patches_debug_58136 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-4730 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
No "moderation tasks" filter in participatory budget (one phase)
**URL:** https://meinberlin-dev.liqd.net/projekte/module/burgerhaushalt/?mode=list (list view)
or https://meinberlin-dev.liqd.net/dashboard/projects/burgerhaushalt-spandau/basic/ (dashboard)
**user:** Moderator, Admin
**expected behaviour:** When using participatory budgeting with one phase, I want to be able to set up moderation tasks for the discussion of ideas and to filter ideas with an "open moderation tasks" filter
**behaviour:** There is no "moderation tasks" filter in the list view of ideas in participatory budgeting (one phase), nor is there any way to create moderation tasks in the project dashboard
**important screensize:** no
**device & browser:** Mac/Windows Chrome, Edge Firefox, Iphone, Samsung Galaxy 20
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/moderationtasks/dashboard.py`
Content:
```
1 from django.utils.translation import gettext_lazy as _
2
3 from adhocracy4.dashboard import ModuleFormSetComponent
4 from adhocracy4.dashboard import components
5
6 from . import forms
7
8
9 class ModerationTasksComponent(ModuleFormSetComponent):
10 identifier = 'moderation_tasks'
11 weight = 15
12 label = _('Moderation Tasks')
13
14 form_title = _('Edit moderation tasks')
15 form_class = forms.ModerationTasksFormSet
16 form_template_name = \
17 'meinberlin_moderationtasks/moderation_tasks_form.html'
18
19 def is_effective(self, module):
20 return module.blueprint_type in ['PB1', 'PB2', 'PB3']
21
22
23 components.register_module(ModerationTasksComponent())
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/moderationtasks/dashboard.py b/meinberlin/apps/moderationtasks/dashboard.py
--- a/meinberlin/apps/moderationtasks/dashboard.py
+++ b/meinberlin/apps/moderationtasks/dashboard.py
@@ -17,7 +17,7 @@
'meinberlin_moderationtasks/moderation_tasks_form.html'
def is_effective(self, module):
- return module.blueprint_type in ['PB1', 'PB2', 'PB3']
+ return module.blueprint_type in ['PB', 'PB2', 'PB3']
components.register_module(ModerationTasksComponent())
| {"golden_diff": "diff --git a/meinberlin/apps/moderationtasks/dashboard.py b/meinberlin/apps/moderationtasks/dashboard.py\n--- a/meinberlin/apps/moderationtasks/dashboard.py\n+++ b/meinberlin/apps/moderationtasks/dashboard.py\n@@ -17,7 +17,7 @@\n 'meinberlin_moderationtasks/moderation_tasks_form.html'\n \n def is_effective(self, module):\n- return module.blueprint_type in ['PB1', 'PB2', 'PB3']\n+ return module.blueprint_type in ['PB', 'PB2', 'PB3']\n \n \n components.register_module(ModerationTasksComponent())\n", "issue": "No \"moderation tasks\" filter in participatory budget (one phase)\n**URL:** https://meinberlin-dev.liqd.net/projekte/module/burgerhaushalt/?mode=list (list view)\r\nor https://meinberlin-dev.liqd.net/dashboard/projects/burgerhaushalt-spandau/basic/ (dashboard)\r\n**user:** Moderator, Admin\r\n**expected behaviour:** When using participatory budget with one phase i want to be able to set up moderation tasks for the discussion of ideas and want to filter ideas with an filter \"open moderationtasks\"\r\n**behaviour:** There is no filter \"moderation tasks\" in the list view of ideas in participatory budget (one phase) nor is there the possibility to create moderation tasks in the dashboard of the project\r\n**important screensize:** no\r\n**device & browser:** Mac/Windows Chrome, Edge Firefox, Iphone, Samsung Galaxy 20\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import ModuleFormSetComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import forms\n\n\nclass ModerationTasksComponent(ModuleFormSetComponent):\n identifier = 'moderation_tasks'\n weight = 15\n label = _('Moderation Tasks')\n\n form_title = _('Edit moderation tasks')\n form_class = forms.ModerationTasksFormSet\n form_template_name = \\\n 'meinberlin_moderationtasks/moderation_tasks_form.html'\n\n def is_effective(self, module):\n return module.blueprint_type in ['PB1', 'PB2', 'PB3']\n\n\ncomponents.register_module(ModerationTasksComponent())\n", "path": "meinberlin/apps/moderationtasks/dashboard.py"}], "after_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import ModuleFormSetComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import forms\n\n\nclass ModerationTasksComponent(ModuleFormSetComponent):\n identifier = 'moderation_tasks'\n weight = 15\n label = _('Moderation Tasks')\n\n form_title = _('Edit moderation tasks')\n form_class = forms.ModerationTasksFormSet\n form_template_name = \\\n 'meinberlin_moderationtasks/moderation_tasks_form.html'\n\n def is_effective(self, module):\n return module.blueprint_type in ['PB', 'PB2', 'PB3']\n\n\ncomponents.register_module(ModerationTasksComponent())\n", "path": "meinberlin/apps/moderationtasks/dashboard.py"}]} | 650 | 139 |
gh_patches_debug_30055 | rasdani/github-patches | git_diff | pytorch__torchdynamo-193 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make torchdynamo not import third party package in `skipfiles.py`
@xuzhao9 in https://github.com/facebookresearch/torchdynamo/issues/107#issuecomment-1095681515 found that the following line makes alexnet 18% slower:
https://github.com/jansel/torchdynamo/blob/bf90b8cdbacf35944fa8c12185b1823dc5cb90bb/torchdynamo/skipfiles.py#L123
It seems that importing "networkx", "omegaconf", "onnx", "pandas", and "sklearn" causes these performance issues.
TorchDynamo is only importing these modules to find the filename, which is also a bit wasteful. We should rewrite `skipfiles.py` to use [find_spec](https://docs.python.org/3/library/importlib.html#importlib.abc.PathEntryFinder.find_spec) instead, so we don't need to import unused packages.
Also, I think we can cut down the list of modules in skipfiles dramatically. Most of those were added when TorchDynamo didn't automatically skip backends and supported much less of Python, so likely many (most?) can be removed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchdynamo/skipfiles.py`
Content:
```
1 import abc
2 import collections
3 import contextlib
4 import copy
5 import copyreg
6 import dataclasses
7 import enum
8 import functools
9 import importlib
10 import inspect
11 import linecache
12 import logging
13 import multiprocessing
14 import operator
15 import os
16 import posixpath
17 import random
18 import re
19 import selectors
20 import signal
21 import tempfile
22 import threading
23 import tokenize
24 import traceback
25 import types
26 import typing
27 import unittest
28 import weakref
29
30 import _collections_abc
31 import _weakrefset
32 import torch
33
34
35 def _module_dir(m: types.ModuleType):
36 return re.sub(r"__init__.py$", "", m.__file__)
37
38
39 SKIP_DIRS = [
40 # torch.*
41 _module_dir(torch),
42 # torchdynamo.*
43 os.path.dirname(__file__) + "/",
44 "<frozen importlib",
45 "<__array_function__ internals>",
46 ] + [
47 # skip some standard libs
48 _module_dir(m)
49 for m in (
50 abc,
51 collections,
52 contextlib,
53 copy,
54 copyreg,
55 dataclasses,
56 enum,
57 functools,
58 importlib,
59 inspect,
60 linecache,
61 logging,
62 multiprocessing,
63 operator,
64 os,
65 posixpath,
66 random,
67 re,
68 selectors,
69 signal,
70 tempfile,
71 threading,
72 tokenize,
73 traceback,
74 types,
75 typing,
76 unittest,
77 weakref,
78 _collections_abc,
79 _weakrefset,
80 )
81 ]
82 SKIP_DIRS_RE = None # set in add() below
83 FILENAME_ALLOWLIST = {
84 torch.nn.Sequential.__init__.__code__.co_filename,
85 }
86
87
88 def add(module: types.ModuleType):
89 assert isinstance(module, types.ModuleType)
90 global SKIP_DIRS_RE
91 name = module.__file__
92 if name is None:
93 return
94 SKIP_DIRS.append(_module_dir(module))
95 SKIP_DIRS_RE = re.compile(f"^({'|'.join(map(re.escape, SKIP_DIRS))})")
96
97
98 def check(filename, allow_torch=False):
99 """Should skip this file?"""
100 if filename is None:
101 return True
102 if filename in FILENAME_ALLOWLIST:
103 return False
104 if allow_torch and is_torch(filename):
105 return False
106 return bool(SKIP_DIRS_RE.match(filename))
107
108
109 # skip common third party libs
110 for _name in (
111 "functorch",
112 "intel_extension_for_pytorch",
113 "networkx",
114 "numpy",
115 "omegaconf",
116 "onnx",
117 "onnxruntime",
118 "onnx_tf",
119 "pandas",
120 "sklearn",
121 "tabulate",
122 "tensorflow",
123 "tensorrt",
124 "torch2trt",
125 "tqdm",
126 "tree",
127 "tvm",
128 "fx2trt_oss",
129 ):
130 try:
131 add(importlib.import_module(_name))
132 except (ImportError, TypeError):
133 pass
134
135
136 def is_torch_inline_allowed(filename):
137 return filename.startswith(_module_dir(torch.nn)) or filename.startswith(
138 _module_dir(torch.distributions)
139 )
140
141
142 def is_torch(filename):
143 return filename.startswith(_module_dir(torch))
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchdynamo/skipfiles.py b/torchdynamo/skipfiles.py
--- a/torchdynamo/skipfiles.py
+++ b/torchdynamo/skipfiles.py
@@ -32,8 +32,12 @@
import torch
+def _strip_init_py(s):
+ return re.sub(r"__init__.py$", "", s)
+
+
def _module_dir(m: types.ModuleType):
- return re.sub(r"__init__.py$", "", m.__file__)
+ return _strip_init_py(m.__file__)
SKIP_DIRS = [
@@ -79,22 +83,32 @@
_weakrefset,
)
]
-SKIP_DIRS_RE = None # set in add() below
FILENAME_ALLOWLIST = {
torch.nn.Sequential.__init__.__code__.co_filename,
}
+SKIP_DIRS_RE = None
-def add(module: types.ModuleType):
- assert isinstance(module, types.ModuleType)
+def _recompile_re():
global SKIP_DIRS_RE
- name = module.__file__
- if name is None:
- return
- SKIP_DIRS.append(_module_dir(module))
SKIP_DIRS_RE = re.compile(f"^({'|'.join(map(re.escape, SKIP_DIRS))})")
+def add(import_name: str):
+ if isinstance(import_name, types.ModuleType):
+ return add(import_name.__name__)
+ assert isinstance(import_name, str)
+ module_spec = importlib.util.find_spec(import_name)
+ if not module_spec:
+ return
+ origin = module_spec.origin
+ if origin is None:
+ return
+ global SKIP_DIRS_RE
+ SKIP_DIRS.append(_strip_init_py(origin))
+ _recompile_re()
+
+
def check(filename, allow_torch=False):
"""Should skip this file?"""
if filename is None:
@@ -127,10 +141,9 @@
"tvm",
"fx2trt_oss",
):
- try:
- add(importlib.import_module(_name))
- except (ImportError, TypeError):
- pass
+ add(_name)
+
+_recompile_re()
def is_torch_inline_allowed(filename):
| {"golden_diff": "diff --git a/torchdynamo/skipfiles.py b/torchdynamo/skipfiles.py\n--- a/torchdynamo/skipfiles.py\n+++ b/torchdynamo/skipfiles.py\n@@ -32,8 +32,12 @@\n import torch\n \n \n+def _strip_init_py(s):\n+ return re.sub(r\"__init__.py$\", \"\", s)\n+\n+\n def _module_dir(m: types.ModuleType):\n- return re.sub(r\"__init__.py$\", \"\", m.__file__)\n+ return _strip_init_py(m.__file__)\n \n \n SKIP_DIRS = [\n@@ -79,22 +83,32 @@\n _weakrefset,\n )\n ]\n-SKIP_DIRS_RE = None # set in add() below\n FILENAME_ALLOWLIST = {\n torch.nn.Sequential.__init__.__code__.co_filename,\n }\n+SKIP_DIRS_RE = None\n \n \n-def add(module: types.ModuleType):\n- assert isinstance(module, types.ModuleType)\n+def _recompile_re():\n global SKIP_DIRS_RE\n- name = module.__file__\n- if name is None:\n- return\n- SKIP_DIRS.append(_module_dir(module))\n SKIP_DIRS_RE = re.compile(f\"^({'|'.join(map(re.escape, SKIP_DIRS))})\")\n \n \n+def add(import_name: str):\n+ if isinstance(import_name, types.ModuleType):\n+ return add(import_name.__name__)\n+ assert isinstance(import_name, str)\n+ module_spec = importlib.util.find_spec(import_name)\n+ if not module_spec:\n+ return\n+ origin = module_spec.origin\n+ if origin is None:\n+ return\n+ global SKIP_DIRS_RE\n+ SKIP_DIRS.append(_strip_init_py(origin))\n+ _recompile_re()\n+\n+\n def check(filename, allow_torch=False):\n \"\"\"Should skip this file?\"\"\"\n if filename is None:\n@@ -127,10 +141,9 @@\n \"tvm\",\n \"fx2trt_oss\",\n ):\n- try:\n- add(importlib.import_module(_name))\n- except (ImportError, TypeError):\n- pass\n+ add(_name)\n+\n+_recompile_re()\n \n \n def is_torch_inline_allowed(filename):\n", "issue": "Make torchdynamo not import third party package in `skipfiles.py`\n@xuzhao9 in https://github.com/facebookresearch/torchdynamo/issues/107#issuecomment-1095681515 found that the following line makes alexnet 18% slower: \r\n\r\nhttps://github.com/jansel/torchdynamo/blob/bf90b8cdbacf35944fa8c12185b1823dc5cb90bb/torchdynamo/skipfiles.py#L123\r\n\r\nIt seems importing: \"networkx\", \"omegaconf\", \"onnx\", \"pandas\", and \"sklearn\" cause performance issues.\r\n\r\nTorchDynamo is only importing these modules to find the filename, which is also a bit wasteful. We should rewrite `skipfiles.py` to use [find_spec](https://docs.python.org/3/library/importlib.html#importlib.abc.PathEntryFinder.find_spec) instead, so we don't need to import unused packages.\r\n\r\nAlso, I think we can cut down the list of modules in skipfiles dramatically. Most of those were added when TorchDynamo didn't automatically skip backends and supported much less of python, so likely many (most?) 
can be removed.\r\n\n", "before_files": [{"content": "import abc\nimport collections\nimport contextlib\nimport copy\nimport copyreg\nimport dataclasses\nimport enum\nimport functools\nimport importlib\nimport inspect\nimport linecache\nimport logging\nimport multiprocessing\nimport operator\nimport os\nimport posixpath\nimport random\nimport re\nimport selectors\nimport signal\nimport tempfile\nimport threading\nimport tokenize\nimport traceback\nimport types\nimport typing\nimport unittest\nimport weakref\n\nimport _collections_abc\nimport _weakrefset\nimport torch\n\n\ndef _module_dir(m: types.ModuleType):\n return re.sub(r\"__init__.py$\", \"\", m.__file__)\n\n\nSKIP_DIRS = [\n # torch.*\n _module_dir(torch),\n # torchdynamo.*\n os.path.dirname(__file__) + \"/\",\n \"<frozen importlib\",\n \"<__array_function__ internals>\",\n] + [\n # skip some standard libs\n _module_dir(m)\n for m in (\n abc,\n collections,\n contextlib,\n copy,\n copyreg,\n dataclasses,\n enum,\n functools,\n importlib,\n inspect,\n linecache,\n logging,\n multiprocessing,\n operator,\n os,\n posixpath,\n random,\n re,\n selectors,\n signal,\n tempfile,\n threading,\n tokenize,\n traceback,\n types,\n typing,\n unittest,\n weakref,\n _collections_abc,\n _weakrefset,\n )\n]\nSKIP_DIRS_RE = None # set in add() below\nFILENAME_ALLOWLIST = {\n torch.nn.Sequential.__init__.__code__.co_filename,\n}\n\n\ndef add(module: types.ModuleType):\n assert isinstance(module, types.ModuleType)\n global SKIP_DIRS_RE\n name = module.__file__\n if name is None:\n return\n SKIP_DIRS.append(_module_dir(module))\n SKIP_DIRS_RE = re.compile(f\"^({'|'.join(map(re.escape, SKIP_DIRS))})\")\n\n\ndef check(filename, allow_torch=False):\n \"\"\"Should skip this file?\"\"\"\n if filename is None:\n return True\n if filename in FILENAME_ALLOWLIST:\n return False\n if allow_torch and is_torch(filename):\n return False\n return bool(SKIP_DIRS_RE.match(filename))\n\n\n# skip common third party libs\nfor _name in (\n \"functorch\",\n \"intel_extension_for_pytorch\",\n \"networkx\",\n \"numpy\",\n \"omegaconf\",\n \"onnx\",\n \"onnxruntime\",\n \"onnx_tf\",\n \"pandas\",\n \"sklearn\",\n \"tabulate\",\n \"tensorflow\",\n \"tensorrt\",\n \"torch2trt\",\n \"tqdm\",\n \"tree\",\n \"tvm\",\n \"fx2trt_oss\",\n):\n try:\n add(importlib.import_module(_name))\n except (ImportError, TypeError):\n pass\n\n\ndef is_torch_inline_allowed(filename):\n return filename.startswith(_module_dir(torch.nn)) or filename.startswith(\n _module_dir(torch.distributions)\n )\n\n\ndef is_torch(filename):\n return filename.startswith(_module_dir(torch))\n", "path": "torchdynamo/skipfiles.py"}], "after_files": [{"content": "import abc\nimport collections\nimport contextlib\nimport copy\nimport copyreg\nimport dataclasses\nimport enum\nimport functools\nimport importlib\nimport inspect\nimport linecache\nimport logging\nimport multiprocessing\nimport operator\nimport os\nimport posixpath\nimport random\nimport re\nimport selectors\nimport signal\nimport tempfile\nimport threading\nimport tokenize\nimport traceback\nimport types\nimport typing\nimport unittest\nimport weakref\n\nimport _collections_abc\nimport _weakrefset\nimport torch\n\n\ndef _strip_init_py(s):\n return re.sub(r\"__init__.py$\", \"\", s)\n\n\ndef _module_dir(m: types.ModuleType):\n return _strip_init_py(m.__file__)\n\n\nSKIP_DIRS = [\n # torch.*\n _module_dir(torch),\n # torchdynamo.*\n os.path.dirname(__file__) + \"/\",\n \"<frozen importlib\",\n \"<__array_function__ internals>\",\n] + [\n # skip some standard 
libs\n _module_dir(m)\n for m in (\n abc,\n collections,\n contextlib,\n copy,\n copyreg,\n dataclasses,\n enum,\n functools,\n importlib,\n inspect,\n linecache,\n logging,\n multiprocessing,\n operator,\n os,\n posixpath,\n random,\n re,\n selectors,\n signal,\n tempfile,\n threading,\n tokenize,\n traceback,\n types,\n typing,\n unittest,\n weakref,\n _collections_abc,\n _weakrefset,\n )\n]\nFILENAME_ALLOWLIST = {\n torch.nn.Sequential.__init__.__code__.co_filename,\n}\nSKIP_DIRS_RE = None\n\n\ndef _recompile_re():\n global SKIP_DIRS_RE\n SKIP_DIRS_RE = re.compile(f\"^({'|'.join(map(re.escape, SKIP_DIRS))})\")\n\n\ndef add(import_name: str):\n if isinstance(import_name, types.ModuleType):\n return add(import_name.__name__)\n assert isinstance(import_name, str)\n module_spec = importlib.util.find_spec(import_name)\n if not module_spec:\n return\n origin = module_spec.origin\n if origin is None:\n return\n global SKIP_DIRS_RE\n SKIP_DIRS.append(_strip_init_py(origin))\n _recompile_re()\n\n\ndef check(filename, allow_torch=False):\n \"\"\"Should skip this file?\"\"\"\n if filename is None:\n return True\n if filename in FILENAME_ALLOWLIST:\n return False\n if allow_torch and is_torch(filename):\n return False\n return bool(SKIP_DIRS_RE.match(filename))\n\n\n# skip common third party libs\nfor _name in (\n \"functorch\",\n \"intel_extension_for_pytorch\",\n \"networkx\",\n \"numpy\",\n \"omegaconf\",\n \"onnx\",\n \"onnxruntime\",\n \"onnx_tf\",\n \"pandas\",\n \"sklearn\",\n \"tabulate\",\n \"tensorflow\",\n \"tensorrt\",\n \"torch2trt\",\n \"tqdm\",\n \"tree\",\n \"tvm\",\n \"fx2trt_oss\",\n):\n add(_name)\n\n_recompile_re()\n\n\ndef is_torch_inline_allowed(filename):\n return filename.startswith(_module_dir(torch.nn)) or filename.startswith(\n _module_dir(torch.distributions)\n )\n\n\ndef is_torch(filename):\n return filename.startswith(_module_dir(torch))\n", "path": "torchdynamo/skipfiles.py"}]} | 1,531 | 485 |
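A side note on the fix recorded in the row above: the patched `add()` resolves a module's location with `importlib.util.find_spec` instead of importing the module, so listing a heavy third-party package in the skip list no longer pays its import cost. The standalone sketch below shows the same lookup pattern in isolation; it uses only stdlib names, and `no_such_pkg` is a deliberately nonexistent example.

```python
import importlib.util
import re

def module_dir(import_name: str):
    """Return the directory behind a module name without importing the module."""
    spec = importlib.util.find_spec(import_name)
    if spec is None or spec.origin is None:
        return None
    # Packages resolve to their __init__.py; strip it so we get the directory.
    return re.sub(r"__init__\.py$", "", spec.origin)

print(module_dir("json"))         # stdlib package -> path of its directory
print(module_dir("no_such_pkg"))  # None, and nothing was imported as a side effect
```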
gh_patches_debug_31 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1456 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AWS::AutoScaling::AutoScalingGroup MaxInstanceLifetime Validation
*cfn-lint version: 0.29.2*
*Description of issue.*
When using the parameter `MaxInstanceLifetime` for `AWS::AutoScaling::AutoScalingGroup` we are hit with the following lint error:
```
$ cfn-lint templates/proj/rgs/rgs_autoscale_stretch_elb.yml
E3002 Invalid Property Resources/autoscalegroup/Properties/MaxInstanceLifetime
templates/proj/rgs/rgs_autoscale_stretch_elb.yml:194:7
```
The template which leads to the error:
```
[...]
autoscalegroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
AvailabilityZones: !Ref AvailabilityZones
Cooldown: '300'
HealthCheckGracePeriod: !Ref GracePeriod
HealthCheckType: ELB
MaxSize: !Ref MaxSize
MinSize: !Ref MinSize
MaxInstanceLifetime: !Ref MaxInstanceLifetime
VPCZoneIdentifier: !Ref EC2SubnetIDs
TargetGroupARNs:
- !Ref elbtargetgroup
LaunchConfigurationName: !Ref launchconfiguration
Tags: [...]
PropagateAtLaunch: true
TerminationPolicies:
- Default
[..]
```
It seems the parameter is currently not supported by cfn-lint; it would be cool to see support for it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/version.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 __version__ = '0.29.3'
7
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py
--- a/src/cfnlint/version.py
+++ b/src/cfnlint/version.py
@@ -3,4 +3,4 @@
SPDX-License-Identifier: MIT-0
"""
-__version__ = '0.29.3'
+__version__ = '0.29.4'
| {"golden_diff": "diff --git a/src/cfnlint/version.py b/src/cfnlint/version.py\n--- a/src/cfnlint/version.py\n+++ b/src/cfnlint/version.py\n@@ -3,4 +3,4 @@\n SPDX-License-Identifier: MIT-0\n \"\"\"\n \n-__version__ = '0.29.3'\n+__version__ = '0.29.4'\n", "issue": "AWS::AutoScaling::AutoScalingGroup MaxInstanceLifetime Validation\n*cfn-lint version: 0.29.2*\r\n\r\n*Description of issue.*\r\n\r\nWhen using the parameter `MaxInstanceLifetime` for `AWS::AutoScaling::AutoScalingGroup` we are hit with the following lint error:\r\n\r\n```\r\n$ cfn-lint templates/proj/rgs/rgs_autoscale_stretch_elb.yml\r\nE3002 Invalid Property Resources/autoscalegroup/Properties/MaxInstanceLifetime\r\ntemplates/proj/rgs/rgs_autoscale_stretch_elb.yml:194:7\r\n```\r\n\r\nThe template which leads to the error:\r\n\r\n```\r\n[...]\r\n\r\n autoscalegroup:\r\n Type: AWS::AutoScaling::AutoScalingGroup\r\n Properties:\r\n AvailabilityZones: !Ref AvailabilityZones\r\n Cooldown: '300'\r\n HealthCheckGracePeriod: !Ref GracePeriod\r\n HealthCheckType: ELB\r\n MaxSize: !Ref MaxSize\r\n MinSize: !Ref MinSize\r\n MaxInstanceLifetime: !Ref MaxInstanceLifetime\r\n VPCZoneIdentifier: !Ref EC2SubnetIDs\r\n TargetGroupARNs:\r\n - !Ref elbtargetgroup\r\n LaunchConfigurationName: !Ref launchconfiguration\r\n Tags: [...]\r\n PropagateAtLaunch: true\r\n TerminationPolicies:\r\n - Default\r\n\r\n[..]\r\n```\r\n\r\nIt seems the parameter is currently not supported by cfn-lint, would be cool to see support for it.\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.29.3'\n", "path": "src/cfnlint/version.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\n__version__ = '0.29.4'\n", "path": "src/cfnlint/version.py"}]} | 634 | 82 |
gh_patches_debug_55 | rasdani/github-patches | git_diff | emissary-ingress__emissary-23 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users need statsd support
Ambassador needs to be able to send stats off to statsd, whatever statsd the user wants to use.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ambassador/VERSION.py`
Content:
```
1 # Don't change this line without also changing .bumpversion.cfg
2 Version = "0.5.0"
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ambassador/VERSION.py b/ambassador/VERSION.py
--- a/ambassador/VERSION.py
+++ b/ambassador/VERSION.py
@@ -1,2 +1,2 @@
# Don't change this line without also changing .bumpversion.cfg
-Version = "0.5.0"
+Version = "0.5.1"
| {"golden_diff": "diff --git a/ambassador/VERSION.py b/ambassador/VERSION.py\n--- a/ambassador/VERSION.py\n+++ b/ambassador/VERSION.py\n@@ -1,2 +1,2 @@\n # Don't change this line without also changing .bumpversion.cfg\n-Version = \"0.5.0\"\n+Version = \"0.5.1\"\n", "issue": "Users need statsd support\nAmbassador needs to be able to send stats off to statsd, whatever statsd the user wants to use.\n", "before_files": [{"content": "# Don't change this line without also changing .bumpversion.cfg\nVersion = \"0.5.0\"\n", "path": "ambassador/VERSION.py"}], "after_files": [{"content": "# Don't change this line without also changing .bumpversion.cfg\nVersion = \"0.5.1\"\n", "path": "ambassador/VERSION.py"}]} | 315 | 80 |
gh_patches_debug_38263 | rasdani/github-patches | git_diff | microsoft__MLOS-573 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't expose all params as shell environment variables by default
_Originally posted by @bpkroth in https://github.com/microsoft/MLOS/pull/557#discussion_r1374921396_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlos_bench/mlos_bench/environments/script_env.py`
Content:
```
1 #
2 # Copyright (c) Microsoft Corporation.
3 # Licensed under the MIT License.
4 #
5 """
6 Base scriptable benchmark environment.
7 """
8
9 import abc
10 import logging
11 import re
12 from typing import Dict, Iterable, Optional
13
14 from mlos_bench.environments.base_environment import Environment
15 from mlos_bench.services.base_service import Service
16 from mlos_bench.tunables.tunable import TunableValue
17 from mlos_bench.tunables.tunable_groups import TunableGroups
18
19 from mlos_bench.util import try_parse_val
20
21 _LOG = logging.getLogger(__name__)
22
23
24 class ScriptEnv(Environment, metaclass=abc.ABCMeta):
25 """
26 Base Environment that runs scripts for setup/run/teardown.
27 """
28
29 _RE_INVALID = re.compile(r"[^a-zA-Z0-9_]")
30
31 def __init__(self,
32 *,
33 name: str,
34 config: dict,
35 global_config: Optional[dict] = None,
36 tunables: Optional[TunableGroups] = None,
37 service: Optional[Service] = None):
38 """
39 Create a new environment for script execution.
40
41 Parameters
42 ----------
43 name: str
44 Human-readable name of the environment.
45 config : dict
46 Free-format dictionary that contains the benchmark environment
47 configuration. Each config must have at least the `tunable_params`
48 and the `const_args` sections. It must also have at least one of
49 the following parameters: {`setup`, `run`, `teardown`}.
50 Additional parameters:
51 * `shell_env_params` - an array of parameters to pass to the script
52 as shell environment variables, and
53 * `shell_env_params_rename` - a dictionary of {to: from} mappings
54 of the script parameters. If not specified, replace all
55 non-alphanumeric characters with underscores.
56 If neither `shell_env_params` nor `shell_env_params_rename` are specified,
57 pass *all* parameters to the script.
58 global_config : dict
59 Free-format dictionary of global parameters (e.g., security credentials)
60 to be mixed in into the "const_args" section of the local config.
61 tunables : TunableGroups
62 A collection of tunable parameters for *all* environments.
63 service: Service
64 An optional service object (e.g., providing methods to
65 deploy or reboot a VM, etc.).
66 """
67 super().__init__(name=name, config=config, global_config=global_config,
68 tunables=tunables, service=service)
69
70 self._script_setup = self.config.get("setup")
71 self._script_run = self.config.get("run")
72 self._script_teardown = self.config.get("teardown")
73
74 self._shell_env_params: Optional[Iterable[str]] = self.config.get("shell_env_params")
75 self._shell_env_params_rename: Dict[str, str] = self.config.get("shell_env_params_rename", {})
76
77 results_stdout_pattern = self.config.get("results_stdout_pattern")
78 self._results_stdout_pattern: Optional[re.Pattern[str]] = \
79 re.compile(results_stdout_pattern) if results_stdout_pattern else None
80
81 def _get_env_params(self) -> Dict[str, str]:
82 """
83 Get the *shell* environment parameters to be passed to the script.
84
85 Returns
86 -------
87 env_params : Dict[str, str]
88 Parameters to pass as *shell* environment variables into the script.
89 This is usually a subset of `_params` with some possible conversions.
90 """
91 rename: Dict[str, str] # {to: from} mapping of the script parameters.
92 if self._shell_env_params is None:
93 if self._shell_env_params_rename:
94 # Only rename specified - use it.
95 rename = self._shell_env_params_rename.copy()
96 else:
97 # FIXME: We should not be exposing all params by default.
98 # Neither `shell_env_params` nor rename are specified - use all params.
99 rename = {self._RE_INVALID.sub("_", key): key for key in self._params}
100 else:
101 # Use `shell_env_params` and rename if specified.
102 rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
103 rename.update(self._shell_env_params_rename)
104
105 return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}
106
107 def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:
108 """
109 Extract the results from the stdout of the script.
110
111 Parameters
112 ----------
113 stdout : str
114 The stdout of the script.
115
116 Returns
117 -------
118 results : Dict[str, TunableValue]
119 A dictionary of results extracted from the stdout.
120 """
121 if not self._results_stdout_pattern:
122 return {}
123 _LOG.debug("Extract regex: '%s' from: '%s'", self._results_stdout_pattern, stdout)
124 return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlos_bench/mlos_bench/environments/script_env.py b/mlos_bench/mlos_bench/environments/script_env.py
--- a/mlos_bench/mlos_bench/environments/script_env.py
+++ b/mlos_bench/mlos_bench/environments/script_env.py
@@ -54,7 +54,7 @@
of the script parameters. If not specified, replace all
non-alphanumeric characters with underscores.
If neither `shell_env_params` nor `shell_env_params_rename` are specified,
- pass *all* parameters to the script.
+ *no* additional shell parameters will be passed to the script.
global_config : dict
Free-format dictionary of global parameters (e.g., security credentials)
to be mixed in into the "const_args" section of the local config.
@@ -71,7 +71,7 @@
self._script_run = self.config.get("run")
self._script_teardown = self.config.get("teardown")
- self._shell_env_params: Optional[Iterable[str]] = self.config.get("shell_env_params")
+ self._shell_env_params: Iterable[str] = self.config.get("shell_env_params", [])
self._shell_env_params_rename: Dict[str, str] = self.config.get("shell_env_params_rename", {})
results_stdout_pattern = self.config.get("results_stdout_pattern")
@@ -88,20 +88,8 @@
Parameters to pass as *shell* environment variables into the script.
This is usually a subset of `_params` with some possible conversions.
"""
- rename: Dict[str, str] # {to: from} mapping of the script parameters.
- if self._shell_env_params is None:
- if self._shell_env_params_rename:
- # Only rename specified - use it.
- rename = self._shell_env_params_rename.copy()
- else:
- # FIXME: We should not be exposing all params by default.
- # Neither `shell_env_params` nor rename are specified - use all params.
- rename = {self._RE_INVALID.sub("_", key): key for key in self._params}
- else:
- # Use `shell_env_params` and rename if specified.
- rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
- rename.update(self._shell_env_params_rename)
-
+ rename = {self._RE_INVALID.sub("_", key): key for key in self._shell_env_params}
+ rename.update(self._shell_env_params_rename)
return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}
def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:
| {"golden_diff": "diff --git a/mlos_bench/mlos_bench/environments/script_env.py b/mlos_bench/mlos_bench/environments/script_env.py\n--- a/mlos_bench/mlos_bench/environments/script_env.py\n+++ b/mlos_bench/mlos_bench/environments/script_env.py\n@@ -54,7 +54,7 @@\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n- pass *all* parameters to the script.\n+ *no* additional shell parameters will be passed to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n@@ -71,7 +71,7 @@\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n \n- self._shell_env_params: Optional[Iterable[str]] = self.config.get(\"shell_env_params\")\n+ self._shell_env_params: Iterable[str] = self.config.get(\"shell_env_params\", [])\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n \n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n@@ -88,20 +88,8 @@\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n- rename: Dict[str, str] # {to: from} mapping of the script parameters.\n- if self._shell_env_params is None:\n- if self._shell_env_params_rename:\n- # Only rename specified - use it.\n- rename = self._shell_env_params_rename.copy()\n- else:\n- # FIXME: We should not be exposing all params by default.\n- # Neither `shell_env_params` nor rename are specified - use all params.\n- rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._params}\n- else:\n- # Use `shell_env_params` and rename if specified.\n- rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n- rename.update(self._shell_env_params_rename)\n-\n+ rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n+ rename.update(self._shell_env_params_rename)\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n \n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n", "issue": "Don't expose all params as shell environment variables by default\n_Originally posted by @bpkroth in https://github.com/microsoft/MLOS/pull/557#discussion_r1374921396_\r\n \n", "before_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n#\n\"\"\"\nBase scriptable benchmark environment.\n\"\"\"\n\nimport abc\nimport logging\nimport re\nfrom typing import Dict, Iterable, Optional\n\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\nfrom mlos_bench.util import try_parse_val\n\n_LOG = logging.getLogger(__name__)\n\n\nclass ScriptEnv(Environment, metaclass=abc.ABCMeta):\n \"\"\"\n Base Environment that runs scripts for setup/run/teardown.\n \"\"\"\n\n _RE_INVALID = re.compile(r\"[^a-zA-Z0-9_]\")\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for script execution.\n\n Parameters\n ----------\n name: str\n 
Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. Each config must have at least the `tunable_params`\n and the `const_args` sections. It must also have at least one of\n the following parameters: {`setup`, `run`, `teardown`}.\n Additional parameters:\n * `shell_env_params` - an array of parameters to pass to the script\n as shell environment variables, and\n * `shell_env_params_rename` - a dictionary of {to: from} mappings\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n pass *all* parameters to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n self._script_setup = self.config.get(\"setup\")\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n\n self._shell_env_params: Optional[Iterable[str]] = self.config.get(\"shell_env_params\")\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n\n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n self._results_stdout_pattern: Optional[re.Pattern[str]] = \\\n re.compile(results_stdout_pattern) if results_stdout_pattern else None\n\n def _get_env_params(self) -> Dict[str, str]:\n \"\"\"\n Get the *shell* environment parameters to be passed to the script.\n\n Returns\n -------\n env_params : Dict[str, str]\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n rename: Dict[str, str] # {to: from} mapping of the script parameters.\n if self._shell_env_params is None:\n if self._shell_env_params_rename:\n # Only rename specified - use it.\n rename = self._shell_env_params_rename.copy()\n else:\n # FIXME: We should not be exposing all params by default.\n # Neither `shell_env_params` nor rename are specified - use all params.\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._params}\n else:\n # Use `shell_env_params` and rename if specified.\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n rename.update(self._shell_env_params_rename)\n\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n\n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n \"\"\"\n Extract the results from the stdout of the script.\n\n Parameters\n ----------\n stdout : str\n The stdout of the script.\n\n Returns\n -------\n results : Dict[str, TunableValue]\n A dictionary of results extracted from the stdout.\n \"\"\"\n if not self._results_stdout_pattern:\n return {}\n _LOG.debug(\"Extract regex: '%s' from: '%s'\", self._results_stdout_pattern, stdout)\n return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}\n", "path": "mlos_bench/mlos_bench/environments/script_env.py"}], "after_files": [{"content": "#\n# Copyright (c) Microsoft Corporation.\n# Licensed under the 
MIT License.\n#\n\"\"\"\nBase scriptable benchmark environment.\n\"\"\"\n\nimport abc\nimport logging\nimport re\nfrom typing import Dict, Iterable, Optional\n\nfrom mlos_bench.environments.base_environment import Environment\nfrom mlos_bench.services.base_service import Service\nfrom mlos_bench.tunables.tunable import TunableValue\nfrom mlos_bench.tunables.tunable_groups import TunableGroups\n\nfrom mlos_bench.util import try_parse_val\n\n_LOG = logging.getLogger(__name__)\n\n\nclass ScriptEnv(Environment, metaclass=abc.ABCMeta):\n \"\"\"\n Base Environment that runs scripts for setup/run/teardown.\n \"\"\"\n\n _RE_INVALID = re.compile(r\"[^a-zA-Z0-9_]\")\n\n def __init__(self,\n *,\n name: str,\n config: dict,\n global_config: Optional[dict] = None,\n tunables: Optional[TunableGroups] = None,\n service: Optional[Service] = None):\n \"\"\"\n Create a new environment for script execution.\n\n Parameters\n ----------\n name: str\n Human-readable name of the environment.\n config : dict\n Free-format dictionary that contains the benchmark environment\n configuration. Each config must have at least the `tunable_params`\n and the `const_args` sections. It must also have at least one of\n the following parameters: {`setup`, `run`, `teardown`}.\n Additional parameters:\n * `shell_env_params` - an array of parameters to pass to the script\n as shell environment variables, and\n * `shell_env_params_rename` - a dictionary of {to: from} mappings\n of the script parameters. If not specified, replace all\n non-alphanumeric characters with underscores.\n If neither `shell_env_params` nor `shell_env_params_rename` are specified,\n *no* additional shell parameters will be passed to the script.\n global_config : dict\n Free-format dictionary of global parameters (e.g., security credentials)\n to be mixed in into the \"const_args\" section of the local config.\n tunables : TunableGroups\n A collection of tunable parameters for *all* environments.\n service: Service\n An optional service object (e.g., providing methods to\n deploy or reboot a VM, etc.).\n \"\"\"\n super().__init__(name=name, config=config, global_config=global_config,\n tunables=tunables, service=service)\n\n self._script_setup = self.config.get(\"setup\")\n self._script_run = self.config.get(\"run\")\n self._script_teardown = self.config.get(\"teardown\")\n\n self._shell_env_params: Iterable[str] = self.config.get(\"shell_env_params\", [])\n self._shell_env_params_rename: Dict[str, str] = self.config.get(\"shell_env_params_rename\", {})\n\n results_stdout_pattern = self.config.get(\"results_stdout_pattern\")\n self._results_stdout_pattern: Optional[re.Pattern[str]] = \\\n re.compile(results_stdout_pattern) if results_stdout_pattern else None\n\n def _get_env_params(self) -> Dict[str, str]:\n \"\"\"\n Get the *shell* environment parameters to be passed to the script.\n\n Returns\n -------\n env_params : Dict[str, str]\n Parameters to pass as *shell* environment variables into the script.\n This is usually a subset of `_params` with some possible conversions.\n \"\"\"\n rename = {self._RE_INVALID.sub(\"_\", key): key for key in self._shell_env_params}\n rename.update(self._shell_env_params_rename)\n return {key_sub: str(self._params[key]) for (key_sub, key) in rename.items()}\n\n def _extract_stdout_results(self, stdout: str) -> Dict[str, TunableValue]:\n \"\"\"\n Extract the results from the stdout of the script.\n\n Parameters\n ----------\n stdout : str\n The stdout of the script.\n\n Returns\n -------\n results : Dict[str, 
TunableValue]\n A dictionary of results extracted from the stdout.\n \"\"\"\n if not self._results_stdout_pattern:\n return {}\n _LOG.debug(\"Extract regex: '%s' from: '%s'\", self._results_stdout_pattern, stdout)\n return {key: try_parse_val(val) for (key, val) in self._results_stdout_pattern.findall(stdout)}\n", "path": "mlos_bench/mlos_bench/environments/script_env.py"}]} | 1,653 | 599 |
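To make the behavioural change in this row concrete, here is a standalone restatement of the renaming logic after the patch: nothing is exported unless it is explicitly listed, and listed names have non-alphanumeric characters replaced with underscores. The parameter names and values below are invented for the illustration.

```python
import re

_RE_INVALID = re.compile(r"[^a-zA-Z0-9_]")

def shell_env(params: dict, shell_env_params=(), shell_env_params_rename=None) -> dict:
    """Build the {ENV_VAR: value} mapping only from explicitly requested parameters."""
    rename = {_RE_INVALID.sub("_", key): key for key in shell_env_params}
    rename.update(shell_env_params_rename or {})
    return {env_name: str(params[key]) for (env_name, key) in rename.items()}

params = {"vm.size": "Standard_B2s", "trial_id": 7}
print(shell_env(params))                                # {} -- nothing leaks by default
print(shell_env(params, shell_env_params=["vm.size"]))  # {'vm_size': 'Standard_B2s'}
print(shell_env(params, shell_env_params_rename={"TRIAL": "trial_id"}))  # {'TRIAL': '7'}
```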
gh_patches_debug_7557 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModuleNotFoundError: No module named 'plasmapy.classes' on plasmapy import
On importing freshly installed plasmapy into a new environment:
(plasmapy) [~]$ python
Python 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import plasmapy
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/dominik/.anaconda3/envs/plasmapy/lib/python3.6/site-packages/plasmapy/__init__.py", line 8, in <module>
from .classes import Plasma
ModuleNotFoundError: No module named 'plasmapy.classes'
The goal of this one is being able to import plasmapy. At all.
The issue likely lies in `plasmapy/__init__.py`.
To quote @cadair 's words of encouragement on this bugfixing journey, *packaging is a special kind of hell*.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3
4 # Package metadata
5 metadata = {}
6 with open('plasmapy/_metadata.py', 'r') as metadata_file:
7 exec(metadata_file.read(), metadata)
8
9 # Requirements
10 with open('requirements/base.txt', 'r') as req_file:
11 requirements = req_file.read().splitlines()
12
13 setup(name=metadata['name'],
14 version=metadata['version'],
15 description="Python package for plasma physics",
16 requires=requirements,
17 install_requires=requirements,
18 provides=[metadata['name']],
19 author=metadata['author'],
20 author_email="[email protected]", # until we get an email address
21 license="BSD",
22 url="https://github.com/PlasmaPy/PlasmaPy", # until we make a webpage
23 long_description=metadata['description'],
24 keywords=['plasma', 'plasma physics', 'science'],
25 classifiers=[
26 'Intended Audience :: Science/Research',
27 'License :: OSI Approved :: BSD License',
28 'Operating System :: OS Independent',
29 'Programming Language :: Python :: 3 :: Only',
30 'Programming Language :: Python :: 3.6',
31 'Topic :: Scientific/Engineering :: Physics',
32 'Topic :: Scientific/Engineering :: Astronomy',
33 'Development Status :: 2 - Pre-Alpha',
34 ],
35 packages=["plasmapy"],
36 zip_safe=False,
37 use_2to3=False,
38 python_requires='>=3.6',
39 )
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,4 @@
-from setuptools import setup
+from setuptools import setup, find_packages
# Package metadata
@@ -32,7 +32,7 @@
'Topic :: Scientific/Engineering :: Astronomy',
'Development Status :: 2 - Pre-Alpha',
],
- packages=["plasmapy"],
+ packages=find_packages(),
zip_safe=False,
use_2to3=False,
python_requires='>=3.6',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,4 @@\n-from setuptools import setup\n+from setuptools import setup, find_packages\n \n \n # Package metadata\n@@ -32,7 +32,7 @@\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Development Status :: 2 - Pre-Alpha',\n ],\n- packages=[\"plasmapy\"],\n+ packages=find_packages(),\n zip_safe=False,\n use_2to3=False,\n python_requires='>=3.6',\n", "issue": "ModuleNotFoundError: No module named 'plasmapy.classes' on plasmapy import\nOn importing freshly installed plasmapy into a new environment:\r\n\r\n (plasmapy) [~]$ python\r\n Python 3.6.2 |Continuum Analytics, Inc.| (default, Jul 20 2017, 13:51:32) \r\n [GCC 4.4.7 20120313 (Red Hat 4.4.7-1)] on linux\r\n Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n >>> import plasmapy\r\n Traceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/dominik/.anaconda3/envs/plasmapy/lib/python3.6/site-packages/plasmapy/__init__.py\", line 8, in <module>\r\n from .classes import Plasma\r\n ModuleNotFoundError: No module named 'plasmapy.classes'\r\n\r\nThe goal of this one is being able to import plasmapy. At all.\r\n\r\nThe issue likely lies in `plasmapy/__init__.py`. \r\n\r\nTo quote @cadair 's words of encouragement on this bugfixing journey, *packaging is a special kind of hell*. \n", "before_files": [{"content": "from setuptools import setup\n\n\n# Package metadata\nmetadata = {}\nwith open('plasmapy/_metadata.py', 'r') as metadata_file:\n exec(metadata_file.read(), metadata)\n\n# Requirements\nwith open('requirements/base.txt', 'r') as req_file:\n requirements = req_file.read().splitlines()\n\nsetup(name=metadata['name'],\n version=metadata['version'],\n description=\"Python package for plasma physics\",\n requires=requirements,\n install_requires=requirements,\n provides=[metadata['name']],\n author=metadata['author'],\n author_email=\"[email protected]\", # until we get an email address\n license=\"BSD\",\n url=\"https://github.com/PlasmaPy/PlasmaPy\", # until we make a webpage\n long_description=metadata['description'],\n keywords=['plasma', 'plasma physics', 'science'],\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Development Status :: 2 - Pre-Alpha',\n ],\n packages=[\"plasmapy\"],\n zip_safe=False,\n use_2to3=False,\n python_requires='>=3.6',\n )\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\n\n# Package metadata\nmetadata = {}\nwith open('plasmapy/_metadata.py', 'r') as metadata_file:\n exec(metadata_file.read(), metadata)\n\n# Requirements\nwith open('requirements/base.txt', 'r') as req_file:\n requirements = req_file.read().splitlines()\n\nsetup(name=metadata['name'],\n version=metadata['version'],\n description=\"Python package for plasma physics\",\n requires=requirements,\n install_requires=requirements,\n provides=[metadata['name']],\n author=metadata['author'],\n author_email=\"[email protected]\", # until we get an email address\n license=\"BSD\",\n url=\"https://github.com/PlasmaPy/PlasmaPy\", # until we make a webpage\n long_description=metadata['description'],\n keywords=['plasma', 'plasma physics', 'science'],\n classifiers=[\n 
'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Development Status :: 2 - Pre-Alpha',\n ],\n packages=find_packages(),\n zip_safe=False,\n use_2to3=False,\n python_requires='>=3.6',\n )\n", "path": "setup.py"}]} | 928 | 122 |
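A quick demonstration of why the one-line change above matters: `packages=["plasmapy"]` installs only the top-level package, while `find_packages()` also discovers subpackages such as `plasmapy.classes`. The snippet builds a throwaway package layout in a temporary directory purely to show what gets discovered; the directory names are illustrative.

```python
import os
import tempfile
from setuptools import find_packages

root = tempfile.mkdtemp()
for pkg in ("plasmapy", os.path.join("plasmapy", "classes")):
    os.makedirs(os.path.join(root, pkg))
    open(os.path.join(root, pkg, "__init__.py"), "w").close()

# The old explicit list shipped only ['plasmapy']; discovery also finds the subpackage.
print(sorted(find_packages(where=root)))  # ['plasmapy', 'plasmapy.classes']
```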
gh_patches_debug_14227 | rasdani/github-patches | git_diff | castorini__pyserini-1626 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error for SPLADE on-the-fly encoding with pytorch
command used:
```bash
python -m pyserini.search.lucene --threads 12 --batch-size 128 \
--index msmarco-v1-passage-splade-pp-ed \
--topics msmarco-passage-dev-subset \
--encoder naver/splade-cocondenser-ensembledistil \
--output run.msmarco-v1-passage.splade-pp-ed-pytorch.dev.txt \
--hits 1000 --impact
```
error message:
> ...
> File "/home/arthur/workplace/pyserini/pyserini/encode/_splade.py", line 28, in encode
> raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
> NameError: name 'batch_token_ids' is not defined
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyserini/encode/_splade.py`
Content:
```
1 import torch
2 from transformers import AutoModelForMaskedLM, AutoTokenizer
3 import numpy as np
4
5 from pyserini.encode import QueryEncoder
6
7
8 class SpladeQueryEncoder(QueryEncoder):
9 def __init__(self, model_name_or_path, tokenizer_name=None, device='cpu'):
10 self.device = device
11 self.model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)
12 self.model.to(self.device)
13 self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name or model_name_or_path)
14 self.reverse_voc = {v: k for k, v in self.tokenizer.vocab.items()}
15 self.weight_range = 5
16 self.quant_range = 256
17
18 def encode(self, text, max_length=256, **kwargs):
19 inputs = self.tokenizer([text], max_length=max_length, padding='longest',
20 truncation=True, add_special_tokens=True,
21 return_tensors='pt').to(self.device)
22 input_ids = inputs['input_ids']
23 input_attention = inputs['attention_mask']
24 batch_logits = self.model(input_ids)['logits']
25 batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))
26 * input_attention.unsqueeze(-1), dim=1)
27 batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()
28 raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
29 return self._get_encoded_query_token_wight_dicts(raw_weights)[0]
30
31 def _output_to_weight_dicts(self, batch_aggregated_logits):
32 to_return = []
33 for aggregated_logits in batch_aggregated_logits:
34 col = np.nonzero(aggregated_logits)[0]
35 weights = aggregated_logits[col]
36 d = {self.reverse_voc[k]: float(v) for k, v in zip(list(col), list(weights))}
37 to_return.append(d)
38 return to_return
39
40 def _get_encoded_query_token_wight_dicts(self, tok_weights):
41 to_return = []
42 for _tok_weight in tok_weights:
43 _weights = {}
44 for token, weight in _tok_weight.items():
45 weight_quanted = round(weight / self.weight_range * self.quant_range)
46 _weights[token] = weight_quanted
47 to_return.append(_weights)
48 return to_return
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyserini/encode/_splade.py b/pyserini/encode/_splade.py
--- a/pyserini/encode/_splade.py
+++ b/pyserini/encode/_splade.py
@@ -25,7 +25,7 @@
batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))
* input_attention.unsqueeze(-1), dim=1)
batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()
- raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
+ raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)
return self._get_encoded_query_token_wight_dicts(raw_weights)[0]
def _output_to_weight_dicts(self, batch_aggregated_logits):
| {"golden_diff": "diff --git a/pyserini/encode/_splade.py b/pyserini/encode/_splade.py\n--- a/pyserini/encode/_splade.py\n+++ b/pyserini/encode/_splade.py\n@@ -25,7 +25,7 @@\n batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))\n * input_attention.unsqueeze(-1), dim=1)\n batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()\n- raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)\n+ raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)\n return self._get_encoded_query_token_wight_dicts(raw_weights)[0]\n \n def _output_to_weight_dicts(self, batch_aggregated_logits):\n", "issue": "Error for SPLADE on-the-fly encoding with pytorch \ncommand used:\r\n```bash\r\npython -m pyserini.search.lucene --threads 12 --batch-size 128 \\\r\n --index msmarco-v1-passage-splade-pp-ed \\\r\n --topics msmarco-passage-dev-subset \\\r\n --encoder naver/splade-cocondenser-ensembledistil \\\r\n --output run.msmarco-v1-passage.splade-pp-ed-pytorch.dev.txt \\\r\n --hits 1000 --impact\r\n```\r\n\r\nerror message:\r\n> ...\r\n> File \"/home/arthur/workplace/pyserini/pyserini/encode/_splade.py\", line 28, in encode\r\n> raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)\r\n> NameError: name 'batch_token_ids' is not defined\r\n\n", "before_files": [{"content": "import torch\nfrom transformers import AutoModelForMaskedLM, AutoTokenizer\nimport numpy as np\n\nfrom pyserini.encode import QueryEncoder\n\n\nclass SpladeQueryEncoder(QueryEncoder):\n def __init__(self, model_name_or_path, tokenizer_name=None, device='cpu'):\n self.device = device\n self.model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n self.model.to(self.device)\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name or model_name_or_path)\n self.reverse_voc = {v: k for k, v in self.tokenizer.vocab.items()}\n self.weight_range = 5\n self.quant_range = 256\n\n def encode(self, text, max_length=256, **kwargs):\n inputs = self.tokenizer([text], max_length=max_length, padding='longest',\n truncation=True, add_special_tokens=True,\n return_tensors='pt').to(self.device)\n input_ids = inputs['input_ids']\n input_attention = inputs['attention_mask']\n batch_logits = self.model(input_ids)['logits']\n batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))\n * input_attention.unsqueeze(-1), dim=1)\n batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()\n raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)\n return self._get_encoded_query_token_wight_dicts(raw_weights)[0]\n\n def _output_to_weight_dicts(self, batch_aggregated_logits):\n to_return = []\n for aggregated_logits in batch_aggregated_logits:\n col = np.nonzero(aggregated_logits)[0]\n weights = aggregated_logits[col]\n d = {self.reverse_voc[k]: float(v) for k, v in zip(list(col), list(weights))}\n to_return.append(d)\n return to_return\n\n def _get_encoded_query_token_wight_dicts(self, tok_weights):\n to_return = []\n for _tok_weight in tok_weights:\n _weights = {}\n for token, weight in _tok_weight.items():\n weight_quanted = round(weight / self.weight_range * self.quant_range)\n _weights[token] = weight_quanted\n to_return.append(_weights)\n return to_return\n", "path": "pyserini/encode/_splade.py"}], "after_files": [{"content": "import torch\nfrom transformers import AutoModelForMaskedLM, AutoTokenizer\nimport numpy as np\n\nfrom pyserini.encode import QueryEncoder\n\n\nclass SpladeQueryEncoder(QueryEncoder):\n def 
__init__(self, model_name_or_path, tokenizer_name=None, device='cpu'):\n self.device = device\n self.model = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n self.model.to(self.device)\n self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name or model_name_or_path)\n self.reverse_voc = {v: k for k, v in self.tokenizer.vocab.items()}\n self.weight_range = 5\n self.quant_range = 256\n\n def encode(self, text, max_length=256, **kwargs):\n inputs = self.tokenizer([text], max_length=max_length, padding='longest',\n truncation=True, add_special_tokens=True,\n return_tensors='pt').to(self.device)\n input_ids = inputs['input_ids']\n input_attention = inputs['attention_mask']\n batch_logits = self.model(input_ids)['logits']\n batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))\n * input_attention.unsqueeze(-1), dim=1)\n batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()\n raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)\n return self._get_encoded_query_token_wight_dicts(raw_weights)[0]\n\n def _output_to_weight_dicts(self, batch_aggregated_logits):\n to_return = []\n for aggregated_logits in batch_aggregated_logits:\n col = np.nonzero(aggregated_logits)[0]\n weights = aggregated_logits[col]\n d = {self.reverse_voc[k]: float(v) for k, v in zip(list(col), list(weights))}\n to_return.append(d)\n return to_return\n\n def _get_encoded_query_token_wight_dicts(self, tok_weights):\n to_return = []\n for _tok_weight in tok_weights:\n _weights = {}\n for token, weight in _tok_weight.items():\n weight_quanted = round(weight / self.weight_range * self.quant_range)\n _weights[token] = weight_quanted\n to_return.append(_weights)\n return to_return\n", "path": "pyserini/encode/_splade.py"}]} | 1,030 | 173 |
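As a standalone illustration of what the corrected `encode()` now feeds into `_output_to_weight_dicts`, the snippet below reproduces the sparse-term extraction and quantisation on fabricated numbers: the three-entry vocabulary and the logits are made up, while the 5/256 weight and quantisation ranges mirror the encoder in this row.

```python
import numpy as np

reverse_voc = {0: "hello", 1: "world", 2: "##s"}   # fake vocabulary
aggregated_logits = np.array([0.0, 1.7, 0.4])      # stand-in for the model output

cols = np.nonzero(aggregated_logits)[0]
weights = aggregated_logits[cols]
term_weights = {reverse_voc[int(c)]: float(w) for c, w in zip(cols, weights)}

# Quantise to integer impact scores the same way the encoder does (weight_range=5, quant_range=256).
quantized = {term: round(w / 5 * 256) for term, w in term_weights.items()}
print(quantized)  # {'world': 87, '##s': 20}
```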
gh_patches_debug_9108 | rasdani/github-patches | git_diff | Kinto__kinto-726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The /permissions endpoint is broken
To reproduce just access https://kinto-ota.dev.mozaws.net/v1/permissions
```
File "/home/ubuntu/venvs/kinto/local/lib/python2.7/site-packages/kinto/core/permission/re
dis.py", line 103, in get_accessible_objects
_, object_id, permission = key.decode('utf-8').split(':')
ValueError: too many values to unpack
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/permission/redis.py`
Content:
```
1 from __future__ import absolute_import
2
3 from collections import defaultdict
4
5 from kinto.core.permission import PermissionBase
6 from kinto.core.storage.redis import create_from_config, wrap_redis_error
7
8
9 class Permission(PermissionBase):
10 """Permission backend implementation using Redis.
11
12 Enable in configuration::
13
14 kinto.permission_backend = kinto.core.permission.redis
15
16 *(Optional)* Instance location URI can be customized::
17
18 kinto.permission_url = redis://localhost:6379/2
19
20 A threaded connection pool is enabled by default::
21
22 kinto.permission_pool_size = 50
23
24 :noindex:
25 """
26
27 def __init__(self, client, *args, **kwargs):
28 super(Permission, self).__init__(*args, **kwargs)
29 self._client = client
30
31 @property
32 def settings(self):
33 return dict(self._client.connection_pool.connection_kwargs)
34
35 def initialize_schema(self):
36 # Nothing to do.
37 pass
38
39 def _decode_set(self, results):
40 return set([r.decode('utf-8') for r in results])
41
42 @wrap_redis_error
43 def flush(self):
44 self._client.flushdb()
45
46 @wrap_redis_error
47 def add_user_principal(self, user_id, principal):
48 user_key = 'user:%s' % user_id
49 self._client.sadd(user_key, principal)
50
51 @wrap_redis_error
52 def remove_user_principal(self, user_id, principal):
53 user_key = 'user:%s' % user_id
54 self._client.srem(user_key, principal)
55 if self._client.scard(user_key) == 0:
56 self._client.delete(user_key)
57
58 def remove_principal(self, principal):
59 with self._client.pipeline() as pipe:
60 user_keys = self._client.scan_iter(match='user:*')
61 for user_key in user_keys:
62 pipe.srem(user_key, principal)
63 pipe.execute()
64
65 @wrap_redis_error
66 def get_user_principals(self, user_id):
67 user_key = 'user:%s' % user_id
68 return self._decode_set(self._client.smembers(user_key))
69
70 @wrap_redis_error
71 def add_principal_to_ace(self, object_id, permission, principal):
72 permission_key = 'permission:%s:%s' % (object_id, permission)
73 self._client.sadd(permission_key, principal)
74
75 @wrap_redis_error
76 def remove_principal_from_ace(self, object_id, permission, principal):
77 permission_key = 'permission:%s:%s' % (object_id, permission)
78 self._client.srem(permission_key, principal)
79 if self._client.scard(permission_key) == 0:
80 self._client.delete(permission_key)
81
82 @wrap_redis_error
83 def get_object_permission_principals(self, object_id, permission):
84 permission_key = 'permission:%s:%s' % (object_id, permission)
85 members = self._client.smembers(permission_key)
86 return self._decode_set(members)
87
88 @wrap_redis_error
89 def get_accessible_objects(self, principals, bound_permissions=None):
90 principals = set(principals)
91
92 if bound_permissions:
93 keys = ['permission:%s:%s' % op for op in bound_permissions]
94 else:
95 keys = ['permission:*']
96
97 perms_by_id = dict()
98 for key_pattern in keys:
99 matched = self._client.scan_iter(match=key_pattern)
100 for key in matched:
101 authorized = self._decode_set(self._client.smembers(key))
102 if len(authorized & principals) > 0:
103 _, object_id, permission = key.decode('utf-8').split(':')
104 perms_by_id.setdefault(object_id, set()).add(permission)
105
106 return perms_by_id
107
108 @wrap_redis_error
109 def get_authorized_principals(self, bound_permissions):
110 keys = ['permission:%s:%s' % (o, p) for (o, p) in bound_permissions]
111 if keys:
112 return self._decode_set(self._client.sunion(*list(keys)))
113 return set()
114
115 @wrap_redis_error
116 def get_objects_permissions(self, objects_ids, permissions=None):
117 objects_perms = []
118 for object_id in objects_ids:
119 if permissions is not None:
120 keys = ['permission:%s:%s' % (object_id, permission)
121 for permission in permissions]
122 else:
123 keys = [key.decode('utf-8') for key in self._client.scan_iter(
124 match='permission:%s:*' % object_id)]
125
126 with self._client.pipeline() as pipe:
127 for permission_key in keys:
128 pipe.smembers(permission_key)
129
130 results = pipe.execute()
131
132 permissions = defaultdict(set)
133 for i, result in enumerate(results):
134 permission = keys[i].split(':', 2)[-1]
135 permissions[permission] = self._decode_set(result)
136 objects_perms.append(permissions)
137 return objects_perms
138
139 @wrap_redis_error
140 def replace_object_permissions(self, object_id, permissions):
141 keys = ['permission:%s:%s' % (object_id, permission)
142 for permission in permissions]
143 with self._client.pipeline() as pipe:
144 for key in keys:
145 pipe.delete(key)
146 permission = key.split(':', 2)[-1]
147 principals = permissions[permission]
148 if len(principals) > 0:
149 pipe.sadd(key, *principals)
150 pipe.execute()
151
152 @wrap_redis_error
153 def delete_object_permissions(self, *object_id_list):
154 with self._client.pipeline() as pipe:
155 for object_id in object_id_list:
156 keys = list(self._client.scan_iter(
157 match='permission:%s:*' % object_id))
158 if len(keys) > 0:
159 pipe.delete(*keys)
160 pipe.execute()
161
162
163 def load_from_config(config):
164 client = create_from_config(config, prefix='permission_')
165 return Permission(client)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/permission/redis.py b/kinto/core/permission/redis.py
--- a/kinto/core/permission/redis.py
+++ b/kinto/core/permission/redis.py
@@ -100,8 +100,8 @@
for key in matched:
authorized = self._decode_set(self._client.smembers(key))
if len(authorized & principals) > 0:
- _, object_id, permission = key.decode('utf-8').split(':')
- perms_by_id.setdefault(object_id, set()).add(permission)
+ _, obj_id, permission = key.decode('utf-8').split(':', 2)
+ perms_by_id.setdefault(obj_id, set()).add(permission)
return perms_by_id
| {"golden_diff": "diff --git a/kinto/core/permission/redis.py b/kinto/core/permission/redis.py\n--- a/kinto/core/permission/redis.py\n+++ b/kinto/core/permission/redis.py\n@@ -100,8 +100,8 @@\n for key in matched:\n authorized = self._decode_set(self._client.smembers(key))\n if len(authorized & principals) > 0:\n- _, object_id, permission = key.decode('utf-8').split(':')\n- perms_by_id.setdefault(object_id, set()).add(permission)\n+ _, obj_id, permission = key.decode('utf-8').split(':', 2)\n+ perms_by_id.setdefault(obj_id, set()).add(permission)\n \n return perms_by_id\n", "issue": "The /permissions endpoint is broken\nTo reproduce just access https://kinto-ota.dev.mozaws.net/v1/permissions\n\n```\n File \"/home/ubuntu/venvs/kinto/local/lib/python2.7/site-packages/kinto/core/permission/re\ndis.py\", line 103, in get_accessible_objects\n _, object_id, permission = key.decode('utf-8').split(':')\nValueError: too many values to unpack\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom collections import defaultdict\n\nfrom kinto.core.permission import PermissionBase\nfrom kinto.core.storage.redis import create_from_config, wrap_redis_error\n\n\nclass Permission(PermissionBase):\n \"\"\"Permission backend implementation using Redis.\n\n Enable in configuration::\n\n kinto.permission_backend = kinto.core.permission.redis\n\n *(Optional)* Instance location URI can be customized::\n\n kinto.permission_url = redis://localhost:6379/2\n\n A threaded connection pool is enabled by default::\n\n kinto.permission_pool_size = 50\n\n :noindex:\n \"\"\"\n\n def __init__(self, client, *args, **kwargs):\n super(Permission, self).__init__(*args, **kwargs)\n self._client = client\n\n @property\n def settings(self):\n return dict(self._client.connection_pool.connection_kwargs)\n\n def initialize_schema(self):\n # Nothing to do.\n pass\n\n def _decode_set(self, results):\n return set([r.decode('utf-8') for r in results])\n\n @wrap_redis_error\n def flush(self):\n self._client.flushdb()\n\n @wrap_redis_error\n def add_user_principal(self, user_id, principal):\n user_key = 'user:%s' % user_id\n self._client.sadd(user_key, principal)\n\n @wrap_redis_error\n def remove_user_principal(self, user_id, principal):\n user_key = 'user:%s' % user_id\n self._client.srem(user_key, principal)\n if self._client.scard(user_key) == 0:\n self._client.delete(user_key)\n\n def remove_principal(self, principal):\n with self._client.pipeline() as pipe:\n user_keys = self._client.scan_iter(match='user:*')\n for user_key in user_keys:\n pipe.srem(user_key, principal)\n pipe.execute()\n\n @wrap_redis_error\n def get_user_principals(self, user_id):\n user_key = 'user:%s' % user_id\n return self._decode_set(self._client.smembers(user_key))\n\n @wrap_redis_error\n def add_principal_to_ace(self, object_id, permission, principal):\n permission_key = 'permission:%s:%s' % (object_id, permission)\n self._client.sadd(permission_key, principal)\n\n @wrap_redis_error\n def remove_principal_from_ace(self, object_id, permission, principal):\n permission_key = 'permission:%s:%s' % (object_id, permission)\n self._client.srem(permission_key, principal)\n if self._client.scard(permission_key) == 0:\n self._client.delete(permission_key)\n\n @wrap_redis_error\n def get_object_permission_principals(self, object_id, permission):\n permission_key = 'permission:%s:%s' % (object_id, permission)\n members = self._client.smembers(permission_key)\n return self._decode_set(members)\n\n @wrap_redis_error\n def 
get_accessible_objects(self, principals, bound_permissions=None):\n principals = set(principals)\n\n if bound_permissions:\n keys = ['permission:%s:%s' % op for op in bound_permissions]\n else:\n keys = ['permission:*']\n\n perms_by_id = dict()\n for key_pattern in keys:\n matched = self._client.scan_iter(match=key_pattern)\n for key in matched:\n authorized = self._decode_set(self._client.smembers(key))\n if len(authorized & principals) > 0:\n _, object_id, permission = key.decode('utf-8').split(':')\n perms_by_id.setdefault(object_id, set()).add(permission)\n\n return perms_by_id\n\n @wrap_redis_error\n def get_authorized_principals(self, bound_permissions):\n keys = ['permission:%s:%s' % (o, p) for (o, p) in bound_permissions]\n if keys:\n return self._decode_set(self._client.sunion(*list(keys)))\n return set()\n\n @wrap_redis_error\n def get_objects_permissions(self, objects_ids, permissions=None):\n objects_perms = []\n for object_id in objects_ids:\n if permissions is not None:\n keys = ['permission:%s:%s' % (object_id, permission)\n for permission in permissions]\n else:\n keys = [key.decode('utf-8') for key in self._client.scan_iter(\n match='permission:%s:*' % object_id)]\n\n with self._client.pipeline() as pipe:\n for permission_key in keys:\n pipe.smembers(permission_key)\n\n results = pipe.execute()\n\n permissions = defaultdict(set)\n for i, result in enumerate(results):\n permission = keys[i].split(':', 2)[-1]\n permissions[permission] = self._decode_set(result)\n objects_perms.append(permissions)\n return objects_perms\n\n @wrap_redis_error\n def replace_object_permissions(self, object_id, permissions):\n keys = ['permission:%s:%s' % (object_id, permission)\n for permission in permissions]\n with self._client.pipeline() as pipe:\n for key in keys:\n pipe.delete(key)\n permission = key.split(':', 2)[-1]\n principals = permissions[permission]\n if len(principals) > 0:\n pipe.sadd(key, *principals)\n pipe.execute()\n\n @wrap_redis_error\n def delete_object_permissions(self, *object_id_list):\n with self._client.pipeline() as pipe:\n for object_id in object_id_list:\n keys = list(self._client.scan_iter(\n match='permission:%s:*' % object_id))\n if len(keys) > 0:\n pipe.delete(*keys)\n pipe.execute()\n\n\ndef load_from_config(config):\n client = create_from_config(config, prefix='permission_')\n return Permission(client)\n", "path": "kinto/core/permission/redis.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom collections import defaultdict\n\nfrom kinto.core.permission import PermissionBase\nfrom kinto.core.storage.redis import create_from_config, wrap_redis_error\n\n\nclass Permission(PermissionBase):\n \"\"\"Permission backend implementation using Redis.\n\n Enable in configuration::\n\n kinto.permission_backend = kinto.core.permission.redis\n\n *(Optional)* Instance location URI can be customized::\n\n kinto.permission_url = redis://localhost:6379/2\n\n A threaded connection pool is enabled by default::\n\n kinto.permission_pool_size = 50\n\n :noindex:\n \"\"\"\n\n def __init__(self, client, *args, **kwargs):\n super(Permission, self).__init__(*args, **kwargs)\n self._client = client\n\n @property\n def settings(self):\n return dict(self._client.connection_pool.connection_kwargs)\n\n def initialize_schema(self):\n # Nothing to do.\n pass\n\n def _decode_set(self, results):\n return set([r.decode('utf-8') for r in results])\n\n @wrap_redis_error\n def flush(self):\n self._client.flushdb()\n\n @wrap_redis_error\n def add_user_principal(self, 
user_id, principal):\n user_key = 'user:%s' % user_id\n self._client.sadd(user_key, principal)\n\n @wrap_redis_error\n def remove_user_principal(self, user_id, principal):\n user_key = 'user:%s' % user_id\n self._client.srem(user_key, principal)\n if self._client.scard(user_key) == 0:\n self._client.delete(user_key)\n\n def remove_principal(self, principal):\n with self._client.pipeline() as pipe:\n user_keys = self._client.scan_iter(match='user:*')\n for user_key in user_keys:\n pipe.srem(user_key, principal)\n pipe.execute()\n\n @wrap_redis_error\n def get_user_principals(self, user_id):\n user_key = 'user:%s' % user_id\n return self._decode_set(self._client.smembers(user_key))\n\n @wrap_redis_error\n def add_principal_to_ace(self, object_id, permission, principal):\n permission_key = 'permission:%s:%s' % (object_id, permission)\n self._client.sadd(permission_key, principal)\n\n @wrap_redis_error\n def remove_principal_from_ace(self, object_id, permission, principal):\n permission_key = 'permission:%s:%s' % (object_id, permission)\n self._client.srem(permission_key, principal)\n if self._client.scard(permission_key) == 0:\n self._client.delete(permission_key)\n\n @wrap_redis_error\n def get_object_permission_principals(self, object_id, permission):\n permission_key = 'permission:%s:%s' % (object_id, permission)\n members = self._client.smembers(permission_key)\n return self._decode_set(members)\n\n @wrap_redis_error\n def get_accessible_objects(self, principals, bound_permissions=None):\n principals = set(principals)\n\n if bound_permissions:\n keys = ['permission:%s:%s' % op for op in bound_permissions]\n else:\n keys = ['permission:*']\n\n perms_by_id = dict()\n for key_pattern in keys:\n matched = self._client.scan_iter(match=key_pattern)\n for key in matched:\n authorized = self._decode_set(self._client.smembers(key))\n if len(authorized & principals) > 0:\n _, obj_id, permission = key.decode('utf-8').split(':', 2)\n perms_by_id.setdefault(obj_id, set()).add(permission)\n\n return perms_by_id\n\n @wrap_redis_error\n def get_authorized_principals(self, bound_permissions):\n keys = ['permission:%s:%s' % (o, p) for (o, p) in bound_permissions]\n if keys:\n return self._decode_set(self._client.sunion(*list(keys)))\n return set()\n\n @wrap_redis_error\n def get_objects_permissions(self, objects_ids, permissions=None):\n objects_perms = []\n for object_id in objects_ids:\n if permissions is not None:\n keys = ['permission:%s:%s' % (object_id, permission)\n for permission in permissions]\n else:\n keys = [key.decode('utf-8') for key in self._client.scan_iter(\n match='permission:%s:*' % object_id)]\n\n with self._client.pipeline() as pipe:\n for permission_key in keys:\n pipe.smembers(permission_key)\n\n results = pipe.execute()\n\n permissions = defaultdict(set)\n for i, result in enumerate(results):\n permission = keys[i].split(':', 2)[-1]\n permissions[permission] = self._decode_set(result)\n objects_perms.append(permissions)\n return objects_perms\n\n @wrap_redis_error\n def replace_object_permissions(self, object_id, permissions):\n keys = ['permission:%s:%s' % (object_id, permission)\n for permission in permissions]\n with self._client.pipeline() as pipe:\n for key in keys:\n pipe.delete(key)\n permission = key.split(':', 2)[-1]\n principals = permissions[permission]\n if len(principals) > 0:\n pipe.sadd(key, *principals)\n pipe.execute()\n\n @wrap_redis_error\n def delete_object_permissions(self, *object_id_list):\n with self._client.pipeline() as pipe:\n for object_id in 
object_id_list:\n keys = list(self._client.scan_iter(\n match='permission:%s:*' % object_id))\n if len(keys) > 0:\n pipe.delete(*keys)\n pipe.execute()\n\n\ndef load_from_config(config):\n client = create_from_config(config, prefix='permission_')\n return Permission(client)\n", "path": "kinto/core/permission/redis.py"}]} | 2,018 | 163 |
gh_patches_debug_24023 | rasdani/github-patches | git_diff | apache__airflow-13371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AirflowMacroPluginRemovedRule fails on non-python files
**Apache Airflow version**: 1.10.14
**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):
**Environment**:
- **Cloud provider or hardware configuration**: X
- **OS** (e.g. from /etc/os-release): X
- **Kernel** (e.g. `uname -a`): X
- **Install tools**: X
- **Others**: X
**What happened**:
The `AirflowMacroPluginRemovedRule` seems unable to process non-standard Python files (e.g. `.xlsx`) and chokes out with an unhelpful error message:
```python
========================================================================================================================================================== STATUS ==========================================================================================================================================================
Check for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS
Traceback (most recent call last):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow", line 37, in <module>
args.func(args)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 88, in run
all_problems = check_upgrade(formatter, rules)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py", line 37, in check_upgrade
rule_status = RuleStatus.from_rule(rule)
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py", line 44, in from_rule
result = rule.check()
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 52, in check
problems.extend(self._check_file(file_path))
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py", line 42, in _check_file
for line_number, line in enumerate(file_pointer, 1):
File "/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte
```
**What you expected to happen**:
I expected the macro to skip over files it could not process/understand
**How to reproduce it**:
Add an `.xlsx` or other binary document to the DAGs folder and run the upgrade check.
**Suggested resolution**:
I think it's fine to fail out on these files (it led us to add certain items to the `.airflowignore` which should have been there anyway), but I had to modify the upgrade rule directly to tell me _which_ files were the problem. A more helpful error message here, and possibly a message prompting users to add said files to their `.airflowignore`, would be ideal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `airflow/upgrade/rules/airflow_macro_plugin_removed.py`
Content:
```
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17
18 from __future__ import absolute_import
19
20 from airflow import conf
21 from airflow.upgrade.rules.base_rule import BaseRule
22 from airflow.utils.dag_processing import list_py_file_paths
23
24
25 class AirflowMacroPluginRemovedRule(BaseRule):
26
27 title = "Remove airflow.AirflowMacroPlugin class"
28
29 description = "The airflow.AirflowMacroPlugin class has been removed."
30
31 MACRO_PLUGIN_CLASS = "airflow.AirflowMacroPlugin"
32
33 def _change_info(self, file_path, line_number):
34 return "{} will be removed. Affected file: {} (line {})".format(
35 self.MACRO_PLUGIN_CLASS, file_path, line_number
36 )
37
38 def _check_file(self, file_path):
39 problems = []
40 class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
41 with open(file_path, "r") as file_pointer:
42 for line_number, line in enumerate(file_pointer, 1):
43 if class_name_to_check in line:
44 problems.append(self._change_info(file_path, line_number))
45 return problems
46
47 def check(self):
48 dag_folder = conf.get("core", "dags_folder")
49 file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
50 problems = []
51 for file_path in file_paths:
52 problems.extend(self._check_file(file_path))
53 return problems
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/airflow/upgrade/rules/airflow_macro_plugin_removed.py b/airflow/upgrade/rules/airflow_macro_plugin_removed.py
--- a/airflow/upgrade/rules/airflow_macro_plugin_removed.py
+++ b/airflow/upgrade/rules/airflow_macro_plugin_removed.py
@@ -39,9 +39,12 @@
problems = []
class_name_to_check = self.MACRO_PLUGIN_CLASS.split(".")[-1]
with open(file_path, "r") as file_pointer:
- for line_number, line in enumerate(file_pointer, 1):
- if class_name_to_check in line:
- problems.append(self._change_info(file_path, line_number))
+ try:
+ for line_number, line in enumerate(file_pointer, 1):
+ if class_name_to_check in line:
+ problems.append(self._change_info(file_path, line_number))
+ except UnicodeDecodeError:
+ problems.append("Unable to read python file {}".format(file_path))
return problems
def check(self):
@@ -49,5 +52,7 @@
file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)
problems = []
for file_path in file_paths:
+ if not file_path.endswith(".py"):
+ continue
problems.extend(self._check_file(file_path))
return problems
| {"golden_diff": "diff --git a/airflow/upgrade/rules/airflow_macro_plugin_removed.py b/airflow/upgrade/rules/airflow_macro_plugin_removed.py\n--- a/airflow/upgrade/rules/airflow_macro_plugin_removed.py\n+++ b/airflow/upgrade/rules/airflow_macro_plugin_removed.py\n@@ -39,9 +39,12 @@\n problems = []\n class_name_to_check = self.MACRO_PLUGIN_CLASS.split(\".\")[-1]\n with open(file_path, \"r\") as file_pointer:\n- for line_number, line in enumerate(file_pointer, 1):\n- if class_name_to_check in line:\n- problems.append(self._change_info(file_path, line_number))\n+ try:\n+ for line_number, line in enumerate(file_pointer, 1):\n+ if class_name_to_check in line:\n+ problems.append(self._change_info(file_path, line_number))\n+ except UnicodeDecodeError:\n+ problems.append(\"Unable to read python file {}\".format(file_path))\n return problems\n \n def check(self):\n@@ -49,5 +52,7 @@\n file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)\n problems = []\n for file_path in file_paths:\n+ if not file_path.endswith(\".py\"):\n+ continue\n problems.extend(self._check_file(file_path))\n return problems\n", "issue": "AirflowMacroPluginRemovedRule fails on non-python files\n**Apache Airflow version**: 1.10.14\r\n\r\n\r\n**Kubernetes version (if you are using kubernetes)** (use `kubectl version`):\r\n\r\n**Environment**:\r\n\r\n- **Cloud provider or hardware configuration**: X\r\n- **OS** (e.g. from /etc/os-release): X\r\n- **Kernel** (e.g. `uname -a`): X\r\n- **Install tools**: X\r\n- **Others**: X\r\n\r\n**What happened**:\r\n\r\nThe `AirflowMacroPluginRemovedRule` seems unable to process non-standard python files (e.g. `.xlsx`) and chokes out with an unhelpful error message.:\r\n\r\n```python\r\n========================================================================================================================================================== STATUS ==========================================================================================================================================================\r\n\r\nCheck for latest versions of apache-airflow and checker...........................................................................................................................................................................................................................................................SUCCESS\r\nTraceback (most recent call last):\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/bin/airflow\", line 37, in <module>\r\n args.func(args)\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py\", line 88, in run\r\n all_problems = check_upgrade(formatter, rules)\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/checker.py\", line 37, in check_upgrade\r\n rule_status = RuleStatus.from_rule(rule)\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/problem.py\", line 44, in from_rule\r\n result = rule.check()\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py\", line 52, in check\r\n problems.extend(self._check_file(file_path))\r\n File \"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/site-packages/airflow/upgrade/rules/airflow_macro_plugin_removed.py\", line 42, in _check_file\r\n for line_number, line in enumerate(file_pointer, 1):\r\n File 
\"/Users/madison/programs/anaconda3/envs/memphis-airflow/lib/python3.8/codecs.py\", line 322, in decode\r\n (result, consumed) = self._buffer_decode(data, self.errors, final)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0x82 in position 16: invalid start byte\r\n```\r\n\r\n**What you expected to happen**:\r\n\r\nI expected the macro to skip over files it could not process/understand\r\n\r\n**How to reproduce it**:\r\n\r\nAdd an `.xlsx` or other binary document to the DAGs folder and run the upgrade check.\r\n\r\n\r\n**Suggested resolution**:\r\n\r\nI think it's fine to fail out on these files (it led us to add certain items to the `.airflowignore` which should have been there anyway) but I had to modify the upgrade rule directly to tell me _which_ files were the problem. A more helpful error message here, and possibly a message prompting users to add said files to their `.airflowignore` would be ideal.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\n\nfrom airflow import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.dag_processing import list_py_file_paths\n\n\nclass AirflowMacroPluginRemovedRule(BaseRule):\n\n title = \"Remove airflow.AirflowMacroPlugin class\"\n\n description = \"The airflow.AirflowMacroPlugin class has been removed.\"\n\n MACRO_PLUGIN_CLASS = \"airflow.AirflowMacroPlugin\"\n\n def _change_info(self, file_path, line_number):\n return \"{} will be removed. Affected file: {} (line {})\".format(\n self.MACRO_PLUGIN_CLASS, file_path, line_number\n )\n\n def _check_file(self, file_path):\n problems = []\n class_name_to_check = self.MACRO_PLUGIN_CLASS.split(\".\")[-1]\n with open(file_path, \"r\") as file_pointer:\n for line_number, line in enumerate(file_pointer, 1):\n if class_name_to_check in line:\n problems.append(self._change_info(file_path, line_number))\n return problems\n\n def check(self):\n dag_folder = conf.get(\"core\", \"dags_folder\")\n file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)\n problems = []\n for file_path in file_paths:\n problems.extend(self._check_file(file_path))\n return problems\n", "path": "airflow/upgrade/rules/airflow_macro_plugin_removed.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\n\nfrom airflow import conf\nfrom airflow.upgrade.rules.base_rule import BaseRule\nfrom airflow.utils.dag_processing import list_py_file_paths\n\n\nclass AirflowMacroPluginRemovedRule(BaseRule):\n\n title = \"Remove airflow.AirflowMacroPlugin class\"\n\n description = \"The airflow.AirflowMacroPlugin class has been removed.\"\n\n MACRO_PLUGIN_CLASS = \"airflow.AirflowMacroPlugin\"\n\n def _change_info(self, file_path, line_number):\n return \"{} will be removed. Affected file: {} (line {})\".format(\n self.MACRO_PLUGIN_CLASS, file_path, line_number\n )\n\n def _check_file(self, file_path):\n problems = []\n class_name_to_check = self.MACRO_PLUGIN_CLASS.split(\".\")[-1]\n with open(file_path, \"r\") as file_pointer:\n try:\n for line_number, line in enumerate(file_pointer, 1):\n if class_name_to_check in line:\n problems.append(self._change_info(file_path, line_number))\n except UnicodeDecodeError:\n problems.append(\"Unable to read python file {}\".format(file_path))\n return problems\n\n def check(self):\n dag_folder = conf.get(\"core\", \"dags_folder\")\n file_paths = list_py_file_paths(directory=dag_folder, include_examples=False)\n problems = []\n for file_path in file_paths:\n if not file_path.endswith(\".py\"):\n continue\n problems.extend(self._check_file(file_path))\n return problems\n", "path": "airflow/upgrade/rules/airflow_macro_plugin_removed.py"}]} | 1,567 | 293 |
gh_patches_debug_9399 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2541 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bplan template dates saved but not shown in Dashboard
URL: https://mein.berlin.de/dashboard/projects/erweiterung-mauerpark-bebauungsplan-3-64-im-bezirk/bplan/
user: initiator
expected behaviour: date and time that I have entered are still shown after saving form
behaviour: dates are no longer shown after saving, no error message, I can still publish the project and date is shown correctly on project tile
device & browser: Desktop, Mac, Chrome Version 76.0.3809.132 (Official Build) (64-bit)
Importance: relevant bug, fix before next release
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/bplan/forms.py`
Content:
```
1 from django import forms
2
3 from meinberlin.apps.extprojects.forms import ExternalProjectCreateForm
4 from meinberlin.apps.extprojects.forms import ExternalProjectForm
5
6 from . import models
7
8
9 class StatementForm(forms.ModelForm):
10 class Meta:
11 model = models.Statement
12 fields = ['name', 'email', 'statement',
13 'street_number', 'postal_code_city']
14
15
16 class BplanProjectCreateForm(ExternalProjectCreateForm):
17
18 class Meta:
19 model = models.Bplan
20 fields = ['name', 'description', 'tile_image', 'tile_image_copyright']
21
22
23 class BplanProjectForm(ExternalProjectForm):
24
25 class Meta:
26 model = models.Bplan
27 fields = ['name', 'identifier', 'url', 'description', 'tile_image',
28 'tile_image_copyright', 'is_archived', 'office_worker_email']
29 required_for_project_publish = ['name', 'url', 'description',
30 'office_worker_email']
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py
--- a/meinberlin/apps/bplan/forms.py
+++ b/meinberlin/apps/bplan/forms.py
@@ -25,6 +25,7 @@
class Meta:
model = models.Bplan
fields = ['name', 'identifier', 'url', 'description', 'tile_image',
- 'tile_image_copyright', 'is_archived', 'office_worker_email']
+ 'tile_image_copyright', 'is_archived', 'office_worker_email',
+ 'start_date', 'end_date']
required_for_project_publish = ['name', 'url', 'description',
'office_worker_email']
| {"golden_diff": "diff --git a/meinberlin/apps/bplan/forms.py b/meinberlin/apps/bplan/forms.py\n--- a/meinberlin/apps/bplan/forms.py\n+++ b/meinberlin/apps/bplan/forms.py\n@@ -25,6 +25,7 @@\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n- 'tile_image_copyright', 'is_archived', 'office_worker_email']\n+ 'tile_image_copyright', 'is_archived', 'office_worker_email',\n+ 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n", "issue": "bplan template dates saved but not shown in Dashboard\nURL: https://mein.berlin.de/dashboard/projects/erweiterung-mauerpark-bebauungsplan-3-64-im-bezirk/bplan/\r\nuser: initiator\r\nexpected behaviour: date and time that I have entered are still shown after saving form\r\nbehaviour: dates are no longer shown after saving, no error message, I can still publish the project and date is shown correctly on project tile\r\ndevice & browser: Desktop, mac, chrome Version 76.0.3809.132 (Offizieller Build) (64-Bit)\r\nImportance: relevant bug, fix before next release\n", "before_files": [{"content": "from django import forms\n\nfrom meinberlin.apps.extprojects.forms import ExternalProjectCreateForm\nfrom meinberlin.apps.extprojects.forms import ExternalProjectForm\n\nfrom . import models\n\n\nclass StatementForm(forms.ModelForm):\n class Meta:\n model = models.Statement\n fields = ['name', 'email', 'statement',\n 'street_number', 'postal_code_city']\n\n\nclass BplanProjectCreateForm(ExternalProjectCreateForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'description', 'tile_image', 'tile_image_copyright']\n\n\nclass BplanProjectForm(ExternalProjectForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived', 'office_worker_email']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n", "path": "meinberlin/apps/bplan/forms.py"}], "after_files": [{"content": "from django import forms\n\nfrom meinberlin.apps.extprojects.forms import ExternalProjectCreateForm\nfrom meinberlin.apps.extprojects.forms import ExternalProjectForm\n\nfrom . import models\n\n\nclass StatementForm(forms.ModelForm):\n class Meta:\n model = models.Statement\n fields = ['name', 'email', 'statement',\n 'street_number', 'postal_code_city']\n\n\nclass BplanProjectCreateForm(ExternalProjectCreateForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'description', 'tile_image', 'tile_image_copyright']\n\n\nclass BplanProjectForm(ExternalProjectForm):\n\n class Meta:\n model = models.Bplan\n fields = ['name', 'identifier', 'url', 'description', 'tile_image',\n 'tile_image_copyright', 'is_archived', 'office_worker_email',\n 'start_date', 'end_date']\n required_for_project_publish = ['name', 'url', 'description',\n 'office_worker_email']\n", "path": "meinberlin/apps/bplan/forms.py"}]} | 667 | 157 |
gh_patches_debug_62942 | rasdani/github-patches | git_diff | great-expectations__great_expectations-3803 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/core/usage_statistics/anonymizers/anonymizer.py`
Content:
```
1 import logging
2 from hashlib import md5
3 from typing import Optional
4
5 from great_expectations.util import load_class
6
7 logger = logging.getLogger(__name__)
8
9
10 class Anonymizer:
11 """Anonymize string names in an optionally-consistent way."""
12
13 def __init__(self, salt=None):
14 if salt is not None and not isinstance(salt, str):
15 logger.error("invalid salt: must provide a string. Setting a random salt.")
16 salt = None
17 if salt is None:
18 import secrets
19
20 self._salt = secrets.token_hex(8)
21 else:
22 self._salt = salt
23
24 @property
25 def salt(self):
26 return self._salt
27
28 def anonymize(self, string_):
29 if string_ is None:
30 return None
31
32 if not isinstance(string_, str):
33 raise TypeError(
34 f"""The type of the "string_" argument must be a string (Python "str"). The type given is
35 "{str(type(string_))}", which is illegal.
36 """
37 )
38 salted = self._salt + string_
39 return md5(salted.encode("utf-8")).hexdigest()
40
41 def anonymize_object_info(
42 self,
43 anonymized_info_dict,
44 ge_classes,
45 object_=None,
46 object_class=None,
47 object_config=None,
48 runtime_environment=None,
49 ) -> dict:
50 assert (
51 object_ or object_class or object_config
52 ), "Must pass either object_ or object_class or object_config."
53
54 if runtime_environment is None:
55 runtime_environment = {}
56
57 object_class_name: Optional[str] = None
58 try:
59 if object_class is None and object_ is not None:
60 object_class = object_.__class__
61 elif object_class is None and object_config is not None:
62 object_class_name = object_config.get("class_name")
63 object_module_name = object_config.get(
64 "module_name"
65 ) or runtime_environment.get("module_name")
66 object_class = load_class(object_class_name, object_module_name)
67 object_class_name = object_class.__name__
68
69 for ge_class in ge_classes:
70 if issubclass(object_class, ge_class):
71 anonymized_info_dict["parent_class"] = ge_class.__name__
72 if not object_class == ge_class:
73 anonymized_info_dict["anonymized_class"] = self.anonymize(
74 object_class_name
75 )
76 break
77
78 if not anonymized_info_dict.get("parent_class"):
79 anonymized_info_dict["parent_class"] = "__not_recognized__"
80 anonymized_info_dict["anonymized_class"] = self.anonymize(
81 object_class_name
82 )
83 except AttributeError:
84 anonymized_info_dict["parent_class"] = "__not_recognized__"
85 anonymized_info_dict["anonymized_class"] = self.anonymize(object_class_name)
86
87 return anonymized_info_dict
88
89 @staticmethod
90 def _is_parent_class_recognized(
91 classes_to_check,
92 object_=None,
93 object_class=None,
94 object_config=None,
95 ) -> Optional[str]:
96 """
97 Check if the parent class is a subclass of any core GE class.
98 This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.
99 Returns:
100 The name of the parent class found, or None if no parent class was found
101 """
102 assert (
103 object_ or object_class or object_config
104 ), "Must pass either object_ or object_class or object_config."
105 try:
106 if object_class is None and object_ is not None:
107 object_class = object_.__class__
108 elif object_class is None and object_config is not None:
109 object_class_name = object_config.get("class_name")
110 object_module_name = object_config.get("module_name")
111 object_class = load_class(object_class_name, object_module_name)
112
113 for class_to_check in classes_to_check:
114 if issubclass(object_class, class_to_check):
115 return class_to_check.__name__
116
117 return None
118
119 except AttributeError:
120 return None
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py
--- a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py
+++ b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py
@@ -35,6 +35,7 @@
"{str(type(string_))}", which is illegal.
"""
)
+
salted = self._salt + string_
return md5(salted.encode("utf-8")).hexdigest()
| {"golden_diff": "diff --git a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py\n--- a/great_expectations/core/usage_statistics/anonymizers/anonymizer.py\n+++ b/great_expectations/core/usage_statistics/anonymizers/anonymizer.py\n@@ -35,6 +35,7 @@\n \"{str(type(string_))}\", which is illegal.\n \"\"\"\n )\n+\n salted = self._salt + string_\n return md5(salted.encode(\"utf-8\")).hexdigest()\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import logging\nfrom hashlib import md5\nfrom typing import Optional\n\nfrom great_expectations.util import load_class\n\nlogger = logging.getLogger(__name__)\n\n\nclass Anonymizer:\n \"\"\"Anonymize string names in an optionally-consistent way.\"\"\"\n\n def __init__(self, salt=None):\n if salt is not None and not isinstance(salt, str):\n logger.error(\"invalid salt: must provide a string. Setting a random salt.\")\n salt = None\n if salt is None:\n import secrets\n\n self._salt = secrets.token_hex(8)\n else:\n self._salt = salt\n\n @property\n def salt(self):\n return self._salt\n\n def anonymize(self, string_):\n if string_ is None:\n return None\n\n if not isinstance(string_, str):\n raise TypeError(\n f\"\"\"The type of the \"string_\" argument must be a string (Python \"str\"). The type given is\n\"{str(type(string_))}\", which is illegal.\n \"\"\"\n )\n salted = self._salt + string_\n return md5(salted.encode(\"utf-8\")).hexdigest()\n\n def anonymize_object_info(\n self,\n anonymized_info_dict,\n ge_classes,\n object_=None,\n object_class=None,\n object_config=None,\n runtime_environment=None,\n ) -> dict:\n assert (\n object_ or object_class or object_config\n ), \"Must pass either object_ or object_class or object_config.\"\n\n if runtime_environment is None:\n runtime_environment = {}\n\n object_class_name: Optional[str] = None\n try:\n if object_class is None and object_ is not None:\n object_class = object_.__class__\n elif object_class is None and object_config is not None:\n object_class_name = object_config.get(\"class_name\")\n object_module_name = object_config.get(\n \"module_name\"\n ) or runtime_environment.get(\"module_name\")\n object_class = load_class(object_class_name, object_module_name)\n object_class_name = object_class.__name__\n\n for ge_class in ge_classes:\n if issubclass(object_class, ge_class):\n anonymized_info_dict[\"parent_class\"] = ge_class.__name__\n if not object_class == ge_class:\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(\n object_class_name\n )\n break\n\n if not anonymized_info_dict.get(\"parent_class\"):\n anonymized_info_dict[\"parent_class\"] = \"__not_recognized__\"\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(\n object_class_name\n )\n except AttributeError:\n anonymized_info_dict[\"parent_class\"] = \"__not_recognized__\"\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(object_class_name)\n\n return anonymized_info_dict\n\n @staticmethod\n def _is_parent_class_recognized(\n classes_to_check,\n object_=None,\n object_class=None,\n object_config=None,\n ) -> Optional[str]:\n \"\"\"\n Check if the parent class is a subclass of any core GE class.\n This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. 
These anonymizers define and provide the core GE classes_to_check.\n Returns:\n The name of the parent class found, or None if no parent class was found\n \"\"\"\n assert (\n object_ or object_class or object_config\n ), \"Must pass either object_ or object_class or object_config.\"\n try:\n if object_class is None and object_ is not None:\n object_class = object_.__class__\n elif object_class is None and object_config is not None:\n object_class_name = object_config.get(\"class_name\")\n object_module_name = object_config.get(\"module_name\")\n object_class = load_class(object_class_name, object_module_name)\n\n for class_to_check in classes_to_check:\n if issubclass(object_class, class_to_check):\n return class_to_check.__name__\n\n return None\n\n except AttributeError:\n return None\n", "path": "great_expectations/core/usage_statistics/anonymizers/anonymizer.py"}], "after_files": [{"content": "import logging\nfrom hashlib import md5\nfrom typing import Optional\n\nfrom great_expectations.util import load_class\n\nlogger = logging.getLogger(__name__)\n\n\nclass Anonymizer:\n \"\"\"Anonymize string names in an optionally-consistent way.\"\"\"\n\n def __init__(self, salt=None):\n if salt is not None and not isinstance(salt, str):\n logger.error(\"invalid salt: must provide a string. Setting a random salt.\")\n salt = None\n if salt is None:\n import secrets\n\n self._salt = secrets.token_hex(8)\n else:\n self._salt = salt\n\n @property\n def salt(self):\n return self._salt\n\n def anonymize(self, string_):\n if string_ is None:\n return None\n\n if not isinstance(string_, str):\n raise TypeError(\n f\"\"\"The type of the \"string_\" argument must be a string (Python \"str\"). The type given is\n\"{str(type(string_))}\", which is illegal.\n \"\"\"\n )\n\n salted = self._salt + string_\n return md5(salted.encode(\"utf-8\")).hexdigest()\n\n def anonymize_object_info(\n self,\n anonymized_info_dict,\n ge_classes,\n object_=None,\n object_class=None,\n object_config=None,\n runtime_environment=None,\n ) -> dict:\n assert (\n object_ or object_class or object_config\n ), \"Must pass either object_ or object_class or object_config.\"\n\n if runtime_environment is None:\n runtime_environment = {}\n\n object_class_name: Optional[str] = None\n try:\n if object_class is None and object_ is not None:\n object_class = object_.__class__\n elif object_class is None and object_config is not None:\n object_class_name = object_config.get(\"class_name\")\n object_module_name = object_config.get(\n \"module_name\"\n ) or runtime_environment.get(\"module_name\")\n object_class = load_class(object_class_name, object_module_name)\n object_class_name = object_class.__name__\n\n for ge_class in ge_classes:\n if issubclass(object_class, ge_class):\n anonymized_info_dict[\"parent_class\"] = ge_class.__name__\n if not object_class == ge_class:\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(\n object_class_name\n )\n break\n\n if not anonymized_info_dict.get(\"parent_class\"):\n anonymized_info_dict[\"parent_class\"] = \"__not_recognized__\"\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(\n object_class_name\n )\n except AttributeError:\n anonymized_info_dict[\"parent_class\"] = \"__not_recognized__\"\n anonymized_info_dict[\"anonymized_class\"] = self.anonymize(object_class_name)\n\n return anonymized_info_dict\n\n @staticmethod\n def _is_parent_class_recognized(\n classes_to_check,\n object_=None,\n object_class=None,\n object_config=None,\n ) -> Optional[str]:\n \"\"\"\n Check if the 
parent class is a subclass of any core GE class.\n This private method is intended to be used by anonymizers in a public `is_parent_class_recognized()` method. These anonymizers define and provide the core GE classes_to_check.\n Returns:\n The name of the parent class found, or None if no parent class was found\n \"\"\"\n assert (\n object_ or object_class or object_config\n ), \"Must pass either object_ or object_class or object_config.\"\n try:\n if object_class is None and object_ is not None:\n object_class = object_.__class__\n elif object_class is None and object_config is not None:\n object_class_name = object_config.get(\"class_name\")\n object_module_name = object_config.get(\"module_name\")\n object_class = load_class(object_class_name, object_module_name)\n\n for class_to_check in classes_to_check:\n if issubclass(object_class, class_to_check):\n return class_to_check.__name__\n\n return None\n\n except AttributeError:\n return None\n", "path": "great_expectations/core/usage_statistics/anonymizers/anonymizer.py"}]} | 1,438 | 124 |
gh_patches_debug_23074 | rasdani/github-patches | git_diff | speechbrain__speechbrain-187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Train Logger use average as the default summary function
Right now users have to specify a summary function for each statistic; however, average is the function to use in the vast majority of cases (the exception is error rates). Why not make it the default?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `speechbrain/utils/train_logger.py`
Content:
```
1 """
2 Loggers for experiment monitoring
3
4 Authors
5 * Peter Plantinga 2020
6 """
7 import logging
8 from speechbrain.utils.edit_distance import wer_summary
9
10 logger = logging.getLogger(__name__)
11
12
13 class TrainLogger:
14 """Abstract class defining an interface for training loggers."""
15
16 def log_stats(
17 self,
18 stats_meta,
19 train_stats=None,
20 valid_stats=None,
21 test_stats=None,
22 verbose=False,
23 ):
24 """Log the stats for one epoch.
25
26 Arguments
27 ---------
28 stats_meta : dict of str:scalar pairs
29 Meta information about the stats (e.g. epoch, learning-rate, etc.)
30 train_stats : dict of str:list pairs
31 Each loss type is represented with a str : list pair including
32 all the values for the training pass.
33 valid_stats : dict of str:list pairs
34 Each loss type is represented with a str : list pair including
35 all the values for the validation pass.
36 test_stats : dict of str:list pairs
37 Each loss type is represented with a str : list pair including
38 all the values for the test pass.
39 verbose : bool
40 Whether to also put logging information to the standard logger.
41 """
42 raise NotImplementedError
43
44
45 class FileTrainLogger(TrainLogger):
46 """Text logger of training information
47
48 Arguments
49 ---------
50 save_file : str
51 The file to use for logging train information.
52 summary_fns : dict of str:function pairs
53 Each summary function should take a list produced as output
54 from a training/validation pass and summarize it to a single scalar.
55 """
56
57 def __init__(self, save_file, summary_fns):
58 self.save_file = save_file
59 self.summary_fns = summary_fns
60
61 def _item_to_string(self, key, value, dataset=None):
62 """Convert one item to string, handling floats"""
63 if isinstance(value, float) and 0.01 < value < 100.0:
64 value = f"{value:.2f}"
65 elif isinstance(value, float):
66 value = f"{value:.2e}"
67 if dataset is not None:
68 key = f"{dataset} {key}"
69 return f"{key}: {value}"
70
71 def _stats_to_string(self, stats, dataset=None):
72 """Convert all stats to a single string summary"""
73 return ", ".join(
74 [self._item_to_string(k, v, dataset) for k, v in stats.items()]
75 )
76
77 def log_stats(
78 self,
79 stats_meta,
80 train_stats=None,
81 valid_stats=None,
82 test_stats=None,
83 verbose=True,
84 ):
85 """See TrainLogger.log_stats()"""
86 string_summary = self._stats_to_string(stats_meta)
87 for dataset, stats in [
88 ("train", train_stats),
89 ("valid", valid_stats),
90 ("test", test_stats),
91 ]:
92 if stats is None:
93 continue
94 summary = {}
95 for stat, value_list in stats.items():
96 summary[stat] = self.summary_fns[stat](value_list)
97 string_summary += " - " + self._stats_to_string(summary, dataset)
98
99 with open(self.save_file, "a") as fout:
100 print(string_summary, file=fout)
101 if verbose:
102 logger.info(string_summary)
103
104
105 class TensorboardLogger(TrainLogger):
106 """Logs training information in the format required by Tensorboard.
107
108 Arguments
109 ---------
110 save_dir : str
111 A directory for storing all the relevant logs
112
113 Raises
114 ------
115 ImportError if Tensorboard is not installed.
116 """
117
118 def __init__(self, save_dir):
119 self.save_dir = save_dir
120
121 # Raises ImportError if TensorBoard is not installed
122 from torch.utils.tensorboard import SummaryWriter
123
124 self.writer = SummaryWriter(self.save_dir)
125 self.global_step = {"train": {}, "valid": {}, "meta": 0}
126
127 def log_stats(
128 self,
129 stats_meta,
130 train_stats=None,
131 valid_stats=None,
132 test_stats=None,
133 verbose=False,
134 ):
135 """See TrainLogger.log_stats()"""
136 self.global_step["meta"] += 1
137 for name, value in stats_meta.items():
138 self.writer.add_scalar(name, value, self.global_step["meta"])
139
140 for dataset, stats in [
141 ("train", train_stats),
142 ("valid", valid_stats),
143 ("test", test_stats),
144 ]:
145 if stats is None:
146 continue
147 for stat, value_list in stats.items():
148 if stat not in self.global_step[dataset]:
149 self.global_step[dataset][stat] = 0
150 tag = f"{stat}/{dataset}"
151 for value in value_list:
152 new_global_step = self.global_step[dataset][stat] + 1
153 self.writer.add_scalar(tag, value, new_global_step)
154 self.global_step[dataset][stat] = new_global_step
155
156
157 def summarize_average(stat_list):
158 return float(sum(stat_list) / len(stat_list))
159
160
161 def summarize_error_rate(stat_list):
162 summary = wer_summary(stat_list)
163 return summary["WER"]
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/speechbrain/utils/train_logger.py b/speechbrain/utils/train_logger.py
--- a/speechbrain/utils/train_logger.py
+++ b/speechbrain/utils/train_logger.py
@@ -54,9 +54,9 @@
from a training/validation pass and summarize it to a single scalar.
"""
- def __init__(self, save_file, summary_fns):
+ def __init__(self, save_file, summary_fns=None):
self.save_file = save_file
- self.summary_fns = summary_fns
+ self.summary_fns = summary_fns or {}
def _item_to_string(self, key, value, dataset=None):
"""Convert one item to string, handling floats"""
@@ -93,7 +93,10 @@
continue
summary = {}
for stat, value_list in stats.items():
- summary[stat] = self.summary_fns[stat](value_list)
+ if stat in self.summary_fns:
+ summary[stat] = self.summary_fns[stat](value_list)
+ else:
+ summary[stat] = summarize_average(value_list)
string_summary += " - " + self._stats_to_string(summary, dataset)
with open(self.save_file, "a") as fout:
| {"golden_diff": "diff --git a/speechbrain/utils/train_logger.py b/speechbrain/utils/train_logger.py\n--- a/speechbrain/utils/train_logger.py\n+++ b/speechbrain/utils/train_logger.py\n@@ -54,9 +54,9 @@\n from a training/validation pass and summarize it to a single scalar.\n \"\"\"\n \n- def __init__(self, save_file, summary_fns):\n+ def __init__(self, save_file, summary_fns=None):\n self.save_file = save_file\n- self.summary_fns = summary_fns\n+ self.summary_fns = summary_fns or {}\n \n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n@@ -93,7 +93,10 @@\n continue\n summary = {}\n for stat, value_list in stats.items():\n- summary[stat] = self.summary_fns[stat](value_list)\n+ if stat in self.summary_fns:\n+ summary[stat] = self.summary_fns[stat](value_list)\n+ else:\n+ summary[stat] = summarize_average(value_list)\n string_summary += \" - \" + self._stats_to_string(summary, dataset)\n \n with open(self.save_file, \"a\") as fout:\n", "issue": "Train Logger use average as the default summary function\nRight now users have to specify a summary function for each statistic, however average is the function to use in the vast majority of cases (the exception is error rates). Why not make it default?\n", "before_files": [{"content": "\"\"\"\nLoggers for experiment monitoring\n\nAuthors\n * Peter Plantinga 2020\n\"\"\"\nimport logging\nfrom speechbrain.utils.edit_distance import wer_summary\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrainLogger:\n \"\"\"Abstract class defining an interface for training loggers.\"\"\"\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"Log the stats for one epoch.\n\n Arguments\n ---------\n stats_meta : dict of str:scalar pairs\n Meta information about the stats (e.g. 
epoch, learning-rate, etc.)\n train_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the training pass.\n valid_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the validation pass.\n test_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the test pass.\n verbose : bool\n Whether to also put logging information to the standard logger.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileTrainLogger(TrainLogger):\n \"\"\"Text logger of training information\n\n Arguments\n ---------\n save_file : str\n The file to use for logging train information.\n summary_fns : dict of str:function pairs\n Each summary function should take a list produced as output\n from a training/validation pass and summarize it to a single scalar.\n \"\"\"\n\n def __init__(self, save_file, summary_fns):\n self.save_file = save_file\n self.summary_fns = summary_fns\n\n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n if isinstance(value, float) and 0.01 < value < 100.0:\n value = f\"{value:.2f}\"\n elif isinstance(value, float):\n value = f\"{value:.2e}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n\n def _stats_to_string(self, stats, dataset=None):\n \"\"\"Convert all stats to a single string summary\"\"\"\n return \", \".join(\n [self._item_to_string(k, v, dataset) for k, v in stats.items()]\n )\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=True,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n string_summary = self._stats_to_string(stats_meta)\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n summary = {}\n for stat, value_list in stats.items():\n summary[stat] = self.summary_fns[stat](value_list)\n string_summary += \" - \" + self._stats_to_string(summary, dataset)\n\n with open(self.save_file, \"a\") as fout:\n print(string_summary, file=fout)\n if verbose:\n logger.info(string_summary)\n\n\nclass TensorboardLogger(TrainLogger):\n \"\"\"Logs training information in the format required by Tensorboard.\n\n Arguments\n ---------\n save_dir : str\n A directory for storing all the relevant logs\n\n Raises\n ------\n ImportError if Tensorboard is not installed.\n \"\"\"\n\n def __init__(self, save_dir):\n self.save_dir = save_dir\n\n # Raises ImportError if TensorBoard is not installed\n from torch.utils.tensorboard import SummaryWriter\n\n self.writer = SummaryWriter(self.save_dir)\n self.global_step = {\"train\": {}, \"valid\": {}, \"meta\": 0}\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n self.global_step[\"meta\"] += 1\n for name, value in stats_meta.items():\n self.writer.add_scalar(name, value, self.global_step[\"meta\"])\n\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n for stat, value_list in stats.items():\n if stat not in self.global_step[dataset]:\n self.global_step[dataset][stat] = 0\n tag = f\"{stat}/{dataset}\"\n for value in value_list:\n new_global_step = self.global_step[dataset][stat] + 1\n self.writer.add_scalar(tag, value, 
new_global_step)\n self.global_step[dataset][stat] = new_global_step\n\n\ndef summarize_average(stat_list):\n return float(sum(stat_list) / len(stat_list))\n\n\ndef summarize_error_rate(stat_list):\n summary = wer_summary(stat_list)\n return summary[\"WER\"]\n", "path": "speechbrain/utils/train_logger.py"}], "after_files": [{"content": "\"\"\"\nLoggers for experiment monitoring\n\nAuthors\n * Peter Plantinga 2020\n\"\"\"\nimport logging\nfrom speechbrain.utils.edit_distance import wer_summary\n\nlogger = logging.getLogger(__name__)\n\n\nclass TrainLogger:\n \"\"\"Abstract class defining an interface for training loggers.\"\"\"\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"Log the stats for one epoch.\n\n Arguments\n ---------\n stats_meta : dict of str:scalar pairs\n Meta information about the stats (e.g. epoch, learning-rate, etc.)\n train_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the training pass.\n valid_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the validation pass.\n test_stats : dict of str:list pairs\n Each loss type is represented with a str : list pair including\n all the values for the test pass.\n verbose : bool\n Whether to also put logging information to the standard logger.\n \"\"\"\n raise NotImplementedError\n\n\nclass FileTrainLogger(TrainLogger):\n \"\"\"Text logger of training information\n\n Arguments\n ---------\n save_file : str\n The file to use for logging train information.\n summary_fns : dict of str:function pairs\n Each summary function should take a list produced as output\n from a training/validation pass and summarize it to a single scalar.\n \"\"\"\n\n def __init__(self, save_file, summary_fns=None):\n self.save_file = save_file\n self.summary_fns = summary_fns or {}\n\n def _item_to_string(self, key, value, dataset=None):\n \"\"\"Convert one item to string, handling floats\"\"\"\n if isinstance(value, float) and 0.01 < value < 100.0:\n value = f\"{value:.2f}\"\n elif isinstance(value, float):\n value = f\"{value:.2e}\"\n if dataset is not None:\n key = f\"{dataset} {key}\"\n return f\"{key}: {value}\"\n\n def _stats_to_string(self, stats, dataset=None):\n \"\"\"Convert all stats to a single string summary\"\"\"\n return \", \".join(\n [self._item_to_string(k, v, dataset) for k, v in stats.items()]\n )\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=True,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n string_summary = self._stats_to_string(stats_meta)\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n summary = {}\n for stat, value_list in stats.items():\n if stat in self.summary_fns:\n summary[stat] = self.summary_fns[stat](value_list)\n else:\n summary[stat] = summarize_average(value_list)\n string_summary += \" - \" + self._stats_to_string(summary, dataset)\n\n with open(self.save_file, \"a\") as fout:\n print(string_summary, file=fout)\n if verbose:\n logger.info(string_summary)\n\n\nclass TensorboardLogger(TrainLogger):\n \"\"\"Logs training information in the format required by Tensorboard.\n\n Arguments\n ---------\n save_dir : str\n A directory for storing all the relevant logs\n\n Raises\n ------\n ImportError if Tensorboard is not installed.\n \"\"\"\n\n def 
__init__(self, save_dir):\n self.save_dir = save_dir\n\n # Raises ImportError if TensorBoard is not installed\n from torch.utils.tensorboard import SummaryWriter\n\n self.writer = SummaryWriter(self.save_dir)\n self.global_step = {\"train\": {}, \"valid\": {}, \"meta\": 0}\n\n def log_stats(\n self,\n stats_meta,\n train_stats=None,\n valid_stats=None,\n test_stats=None,\n verbose=False,\n ):\n \"\"\"See TrainLogger.log_stats()\"\"\"\n self.global_step[\"meta\"] += 1\n for name, value in stats_meta.items():\n self.writer.add_scalar(name, value, self.global_step[\"meta\"])\n\n for dataset, stats in [\n (\"train\", train_stats),\n (\"valid\", valid_stats),\n (\"test\", test_stats),\n ]:\n if stats is None:\n continue\n for stat, value_list in stats.items():\n if stat not in self.global_step[dataset]:\n self.global_step[dataset][stat] = 0\n tag = f\"{stat}/{dataset}\"\n for value in value_list:\n new_global_step = self.global_step[dataset][stat] + 1\n self.writer.add_scalar(tag, value, new_global_step)\n self.global_step[dataset][stat] = new_global_step\n\n\ndef summarize_average(stat_list):\n return float(sum(stat_list) / len(stat_list))\n\n\ndef summarize_error_rate(stat_list):\n summary = wer_summary(stat_list)\n return summary[\"WER\"]\n", "path": "speechbrain/utils/train_logger.py"}]} | 1,792 | 279 |
gh_patches_debug_35438 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1881 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider public_storage is broken
During the global build at 2021-05-26-14-42-23, spider **public_storage** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/public_storage.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/public_storage.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7
8 class PublicStorageSpider(scrapy.Spider):
9 name = "public_storage"
10 item_attributes = { 'brand': "Public Storage" }
11 allowed_domains = ["www.publicstorage.com"]
12 start_urls = (
13 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',
14 )
15
16 def parse(self, response):
17 data = json.loads(response.body_as_unicode())
18
19 for store in data['response']['properties']['property']:
20 lat, lon = map(float, store['lat_long'].split(', '))
21 properties = {
22 "ref": store.get('property_id'),
23 "opening_hours": '; '.join(response.xpath('//time[@itemprop="openingHours"]/@datetime').extract()),
24 "addr_full": store.get('address'),
25 "city": store.get('city'),
26 "state": store.get('state'),
27 "postcode": store.get('zip'),
28 "lat": lat,
29 "lon": lon,
30 }
31
32 yield GeojsonPointItem(**properties)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py
--- a/locations/spiders/public_storage.py
+++ b/locations/spiders/public_storage.py
@@ -3,6 +3,7 @@
import json
from locations.items import GeojsonPointItem
+from locations.hours import OpeningHours
class PublicStorageSpider(scrapy.Spider):
@@ -10,23 +11,45 @@
item_attributes = { 'brand': "Public Storage" }
allowed_domains = ["www.publicstorage.com"]
start_urls = (
- 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',
+ 'https://www.publicstorage.com/sitemap_plp.xml',
)
def parse(self, response):
- data = json.loads(response.body_as_unicode())
-
- for store in data['response']['properties']['property']:
- lat, lon = map(float, store['lat_long'].split(', '))
- properties = {
- "ref": store.get('property_id'),
- "opening_hours": '; '.join(response.xpath('//time[@itemprop="openingHours"]/@datetime').extract()),
- "addr_full": store.get('address'),
- "city": store.get('city'),
- "state": store.get('state'),
- "postcode": store.get('zip'),
- "lat": lat,
- "lon": lon,
- }
-
- yield GeojsonPointItem(**properties)
+ response.selector.remove_namespaces()
+ city_urls = response.xpath('//url/loc/text()').extract()
+ for path in city_urls:
+ yield scrapy.Request(
+ path.strip(),
+ callback=self.parse_store,
+ )
+
+ def parse_hours(self, hours):
+ opening_hours = OpeningHours()
+
+ for hour in hours:
+ for day in hour['dayOfWeek']:
+ opening_hours.add_range(
+ day=day[:2],
+ open_time=hour["opens"],
+ close_time=hour["closes"],
+ )
+
+ return opening_hours.as_opening_hours()
+
+ def parse_store(self, response):
+ data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
+ data = data['@graph'][0]
+
+ properties = {
+ "ref": data['@id'],
+ "opening_hours": self.parse_hours(data['openingHoursSpecification']),
+ "addr_full": data['address']['streetAddress'],
+ "city": data['address']['addressLocality'],
+ "state": data['address']['addressRegion'],
+ "postcode": data['address']['postalCode'],
+ "phone": data['telephone'],
+ "lat": data['geo']['latitude'],
+ "lon": data['geo']['longitude'],
+ }
+
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py\n--- a/locations/spiders/public_storage.py\n+++ b/locations/spiders/public_storage.py\n@@ -3,6 +3,7 @@\n import json\n \n from locations.items import GeojsonPointItem\n+from locations.hours import OpeningHours\n \n \n class PublicStorageSpider(scrapy.Spider):\n@@ -10,23 +11,45 @@\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n- 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',\n+ 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n \n def parse(self, response):\n- data = json.loads(response.body_as_unicode())\n-\n- for store in data['response']['properties']['property']:\n- lat, lon = map(float, store['lat_long'].split(', '))\n- properties = {\n- \"ref\": store.get('property_id'),\n- \"opening_hours\": '; '.join(response.xpath('//time[@itemprop=\"openingHours\"]/@datetime').extract()),\n- \"addr_full\": store.get('address'),\n- \"city\": store.get('city'),\n- \"state\": store.get('state'),\n- \"postcode\": store.get('zip'),\n- \"lat\": lat,\n- \"lon\": lon,\n- }\n-\n- yield GeojsonPointItem(**properties)\n+ response.selector.remove_namespaces()\n+ city_urls = response.xpath('//url/loc/text()').extract()\n+ for path in city_urls:\n+ yield scrapy.Request(\n+ path.strip(),\n+ callback=self.parse_store,\n+ )\n+\n+ def parse_hours(self, hours):\n+ opening_hours = OpeningHours()\n+\n+ for hour in hours:\n+ for day in hour['dayOfWeek']:\n+ opening_hours.add_range(\n+ day=day[:2],\n+ open_time=hour[\"opens\"],\n+ close_time=hour[\"closes\"],\n+ )\n+\n+ return opening_hours.as_opening_hours()\n+\n+ def parse_store(self, response):\n+ data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n+ data = data['@graph'][0]\n+\n+ properties = {\n+ \"ref\": data['@id'],\n+ \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n+ \"addr_full\": data['address']['streetAddress'],\n+ \"city\": data['address']['addressLocality'],\n+ \"state\": data['address']['addressRegion'],\n+ \"postcode\": data['address']['postalCode'],\n+ \"phone\": data['telephone'],\n+ \"lat\": data['geo']['latitude'],\n+ \"lon\": data['geo']['longitude'],\n+ }\n+\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider public_storage is broken\nDuring the global build at 2021-05-26-14-42-23, spider **public_storage** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/public_storage.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/public_storage.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/handlers/searchcoordinates.ashx?north=90.0&east=180.0&south=-90.0&west=-180.0',\n )\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data['response']['properties']['property']:\n lat, lon = map(float, store['lat_long'].split(', '))\n 
properties = {\n \"ref\": store.get('property_id'),\n \"opening_hours\": '; '.join(response.xpath('//time[@itemprop=\"openingHours\"]/@datetime').extract()),\n \"addr_full\": store.get('address'),\n \"city\": store.get('city'),\n \"state\": store.get('state'),\n \"postcode\": store.get('zip'),\n \"lat\": lat,\n \"lon\": lon,\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n for day in hour['dayOfWeek']:\n opening_hours.add_range(\n day=day[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n data = data['@graph'][0]\n\n properties = {\n \"ref\": data['@id'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n \"state\": data['address']['addressRegion'],\n \"postcode\": data['address']['postalCode'],\n \"phone\": data['telephone'],\n \"lat\": data['geo']['latitude'],\n \"lon\": data['geo']['longitude'],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}]} | 762 | 656 |
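
For readability, here is a minimal consolidated sketch of the spider that the golden_diff above produces, since the fix is otherwise scattered across diff hunks. It is illustrative only: the sitemap URL, the JSON-LD `@graph` layout, and the `locations.hours.OpeningHours` helper are taken directly from that diff and are assumed to still match the repository and the live site, not independently verified.

```python
# Illustrative sketch condensed from the golden_diff above; assumes the sitemap
# URL and the page's JSON-LD structure are still as shown in that diff.
import json

import scrapy

from locations.items import GeojsonPointItem
from locations.hours import OpeningHours


class PublicStorageSpider(scrapy.Spider):
    name = "public_storage"
    item_attributes = {"brand": "Public Storage"}
    allowed_domains = ["www.publicstorage.com"]
    # The old searchcoordinates.ashx endpoint stopped returning results, so the
    # fixed spider starts from the property-page sitemap instead.
    start_urls = ("https://www.publicstorage.com/sitemap_plp.xml",)

    def parse(self, response):
        # Each <loc> entry in the sitemap points at one store page.
        response.selector.remove_namespaces()
        for path in response.xpath("//url/loc/text()").extract():
            yield scrapy.Request(path.strip(), callback=self.parse_store)

    def parse_hours(self, hours):
        # Convert schema.org openingHoursSpecification entries into the
        # opening_hours string expected by GeojsonPointItem.
        opening_hours = OpeningHours()
        for hour in hours:
            for day in hour["dayOfWeek"]:
                opening_hours.add_range(
                    day=day[:2],
                    open_time=hour["opens"],
                    close_time=hour["closes"],
                )
        return opening_hours.as_opening_hours()

    def parse_store(self, response):
        # The store page embeds its details as JSON-LD; the first @graph node
        # carries the address, phone number and coordinates.
        data = json.loads(
            response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
        )["@graph"][0]
        yield GeojsonPointItem(
            ref=data["@id"],
            opening_hours=self.parse_hours(data["openingHoursSpecification"]),
            addr_full=data["address"]["streetAddress"],
            city=data["address"]["addressLocality"],
            state=data["address"]["addressRegion"],
            postcode=data["address"]["postalCode"],
            phone=data["telephone"],
            lat=data["geo"]["latitude"],
            lon=data["geo"]["longitude"],
        )
```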